Code
stringlengths
103
85.9k
Summary
listlengths
0
94
Please provide a description of the function:def make_model(self, add_indra_json=True): self.add_indra_json = add_indra_json for stmt in self.statements: if isinstance(stmt, Modification): self._add_modification(stmt) if isinstance(stmt, SelfModification): self._add_self_modification(stmt) elif isinstance(stmt, RegulateActivity) or \ isinstance(stmt, RegulateAmount): self._add_regulation(stmt) elif isinstance(stmt, Complex): self._add_complex(stmt) elif isinstance(stmt, Gef): self._add_gef(stmt) elif isinstance(stmt, Gap): self._add_gap(stmt) elif isinstance(stmt, Influence): self._add_influence(stmt) network_description = '' self.cx['networkAttributes'].append({'n': 'name', 'v': self.network_name}) self.cx['networkAttributes'].append({'n': 'description', 'v': network_description}) cx_str = self.print_cx() return cx_str
[ "Assemble the CX network from the collected INDRA Statements.\n\n This method assembles a CX network from the set of INDRA Statements.\n The assembled network is set as the assembler's cx argument.\n\n Parameters\n ----------\n add_indra_json : Optional[bool]\n If True, the INDRA Statement JSON annotation is added to each\n edge in the network. Default: True\n\n Returns\n -------\n cx_str : str\n The json serialized CX model.\n " ]
Please provide a description of the function:def print_cx(self, pretty=True): def _get_aspect_metadata(aspect): count = len(self.cx.get(aspect)) if self.cx.get(aspect) else 0 if not count: return None data = {'name': aspect, 'idCounter': self._id_counter, 'consistencyGroup': 1, 'elementCount': count} return data full_cx = OrderedDict() full_cx['numberVerification'] = [{'longNumber': 281474976710655}] aspects = ['nodes', 'edges', 'supports', 'citations', 'edgeAttributes', 'edgeCitations', 'edgeSupports', 'networkAttributes', 'nodeAttributes', 'cartesianLayout'] full_cx['metaData'] = [] for aspect in aspects: metadata = _get_aspect_metadata(aspect) if metadata: full_cx['metaData'].append(metadata) for k, v in self.cx.items(): full_cx[k] = v full_cx['status'] = [{'error': '', 'success': True}] full_cx = [{k: v} for k, v in full_cx.items()] if pretty: json_str = json.dumps(full_cx, indent=2) else: json_str = json.dumps(full_cx) return json_str
[ "Return the assembled CX network as a json string.\n\n Parameters\n ----------\n pretty : bool\n If True, the CX string is formatted with indentation (for human\n viewing) otherwise no indentation is used.\n\n Returns\n -------\n json_str : str\n A json formatted string representation of the CX network.\n " ]
Please provide a description of the function:def save_model(self, file_name='model.cx'): with open(file_name, 'wt') as fh: cx_str = self.print_cx() fh.write(cx_str)
[ "Save the assembled CX network in a file.\n\n Parameters\n ----------\n file_name : Optional[str]\n The name of the file to save the CX network to. Default: model.cx\n " ]
Please provide a description of the function:def upload_model(self, ndex_cred=None, private=True, style='default'): cx_str = self.print_cx() if not ndex_cred: username, password = ndex_client.get_default_ndex_cred({}) ndex_cred = {'user': username, 'password': password} network_id = ndex_client.create_network(cx_str, ndex_cred, private) if network_id and style: template_id = None if style == 'default' else style ndex_client.set_style(network_id, ndex_cred, template_id) return network_id
[ "Creates a new NDEx network of the assembled CX model.\n\n To upload the assembled CX model to NDEx, you need to have\n a registered account on NDEx (http://ndexbio.org/) and have\n the `ndex` python package installed. The uploaded network\n is private by default.\n\n Parameters\n ----------\n ndex_cred : Optional[dict]\n A dictionary with the following entries:\n 'user': NDEx user name\n 'password': NDEx password\n private : Optional[bool]\n Whether or not the created network will be private on NDEX.\n style : Optional[str]\n This optional parameter can either be (1)\n The UUID of an existing NDEx network whose style should be applied\n to the new network. (2) Unspecified or 'default' to use\n the default INDRA-assembled network style. (3) None to\n not set a network style.\n\n Returns\n -------\n network_id : str\n The UUID of the NDEx network that was created by uploading\n the assembled CX model.\n " ]
Please provide a description of the function:def set_context(self, cell_type): node_names = [node['n'] for node in self.cx['nodes']] res_expr = context_client.get_protein_expression(node_names, [cell_type]) res_mut = context_client.get_mutations(node_names, [cell_type]) res_expr = res_expr.get(cell_type) res_mut = res_mut.get(cell_type) if not res_expr: msg = 'Could not get protein expression for %s cell type.' % \ cell_type logger.warning(msg) if not res_mut: msg = 'Could not get mutational status for %s cell type.' % \ cell_type logger.warning(msg) if not res_expr and not res_mut: return self.cx['networkAttributes'].append({'n': 'cellular_context', 'v': cell_type}) counter = 0 for node in self.cx['nodes']: amount = res_expr.get(node['n']) mut = res_mut.get(node['n']) if amount is not None: node_attribute = {'po': node['@id'], 'n': 'expression_amount', 'v': int(amount)} self.cx['nodeAttributes'].append(node_attribute) if mut is not None: is_mutated = 1 if mut else 0 node_attribute = {'po': node['@id'], 'n': 'is_mutated', 'v': is_mutated} self.cx['nodeAttributes'].append(node_attribute) if mut is not None or amount is not None: counter += 1 logger.info('Set context for %d nodes.' % counter)
[ "Set protein expression data and mutational status as node attribute\n\n This method uses :py:mod:`indra.databases.context_client` to get\n protein expression levels and mutational status for a given cell type\n and set a node attribute for proteins accordingly.\n\n Parameters\n ----------\n cell_type : str\n Cell type name for which expression levels are queried.\n The cell type name follows the CCLE database conventions.\n Example: LOXIMVI_SKIN, BT20_BREAST\n " ]
Please provide a description of the function:def get_publications(gene_names, save_json_name=None): if len(gene_names) != 2: logger.warning('Other than 2 gene names given.') return [] res_dict = _send_request(gene_names) if not res_dict: return [] if save_json_name is not None: # The json module produces strings, not bytes, so the file should be # opened in text mode with open(save_json_name, 'wt') as fh: json.dump(res_dict, fh, indent=1) publications = _extract_publications(res_dict, gene_names) return publications
[ "Return evidence publications for interaction between the given genes.\n\n Parameters\n ----------\n gene_names : list[str]\n A list of gene names (HGNC symbols) to query interactions between.\n Currently supports exactly two genes only.\n save_json_name : Optional[str]\n A file name to save the raw BioGRID web service output in. By default,\n the raw output is not saved.\n\n Return\n ------\n publications : list[Publication]\n A list of Publication objects that provide evidence for interactions\n between the given list of genes.\n " ]
Please provide a description of the function:def _n(name): n = name.encode('ascii', errors='ignore').decode('ascii') n = re.sub('[^A-Za-z0-9_]', '_', n) n = re.sub(r'(^[0-9].*)', r'p\1', n) return n
[ "Return valid PySB name." ]
Please provide a description of the function:def get_hash_statements_dict(self): res = {stmt_hash: stmts_from_json([stmt])[0] for stmt_hash, stmt in self.__statement_jsons.items()} return res
[ "Return a dict of Statements keyed by hashes." ]
Please provide a description of the function:def merge_results(self, other_processor): if not isinstance(other_processor, self.__class__): raise ValueError("Can only extend with another %s instance." % self.__class__.__name__) self.statements.extend(other_processor.statements) if other_processor.statements_sample is not None: if self.statements_sample is None: self.statements_sample = other_processor.statements_sample else: self.statements_sample.extend(other_processor.statements_sample) self._merge_json(other_processor.__statement_jsons, other_processor.__evidence_counts) return
[ "Merge the results of this processor with those of another." ]
Please provide a description of the function:def wait_until_done(self, timeout=None): start = datetime.now() if not self.__th: raise IndraDBRestResponseError("There is no thread waiting to " "complete.") self.__th.join(timeout) now = datetime.now() dt = now - start if self.__th.is_alive(): logger.warning("Timed out after %0.3f seconds waiting for " "statement load to complete." % dt.total_seconds()) ret = False else: logger.info("Waited %0.3f seconds for statements to finish loading." % dt.total_seconds()) ret = True return ret
[ "Wait for the background load to complete." ]
Please provide a description of the function:def _merge_json(self, stmt_json, ev_counts): # Where there is overlap, there _should_ be agreement. self.__evidence_counts.update(ev_counts) for k, sj in stmt_json.items(): if k not in self.__statement_jsons: self.__statement_jsons[k] = sj # This should be most of them else: # This should only happen rarely. for evj in sj['evidence']: self.__statement_jsons[k]['evidence'].append(evj) if not self.__started: self.statements_sample = stmts_from_json( self.__statement_jsons.values()) self.__started = True return
[ "Merge these statement jsons with new jsons." ]
Please provide a description of the function:def _run_queries(self, agent_strs, stmt_types, params, persist): self._query_over_statement_types(agent_strs, stmt_types, params) assert len(self.__done_dict) == len(stmt_types) \ or None in self.__done_dict.keys(), \ "Done dict was not initiated for all stmt_type's." # Check if we want to keep going. if not persist: self._compile_statements() return # Get the rest of the content. while not self._all_done(): self._query_over_statement_types(agent_strs, stmt_types, params) # Create the actual statements. self._compile_statements() return
[ "Use paging to get all statements requested." ]
Please provide a description of the function:def get_ids(search_term, **kwargs): use_text_word = kwargs.pop('use_text_word', True) if use_text_word: search_term += '[tw]' params = {'term': search_term, 'retmax': 100000, 'retstart': 0, 'db': 'pubmed', 'sort': 'pub+date'} params.update(kwargs) tree = send_request(pubmed_search, params) if tree is None: return [] if tree.find('ERROR') is not None: logger.error(tree.find('ERROR').text) return [] if tree.find('ErrorList') is not None: for err in tree.find('ErrorList').getchildren(): logger.error('Error - %s: %s' % (err.tag, err.text)) return [] count = int(tree.find('Count').text) id_terms = tree.findall('IdList/Id') if id_terms is None: return [] ids = [idt.text for idt in id_terms] if count != len(ids): logger.warning('Not all ids were retrieved for search %s;\n' 'limited at %d.' % (search_term, params['retmax'])) return ids
[ "Search Pubmed for paper IDs given a search term.\n\n Search options can be passed as keyword arguments, some of which are\n custom keywords identified by this function, while others are passed on\n as parameters for the request to the PubMed web service\n For details on parameters that can be used in PubMed searches, see\n https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch Some useful\n parameters to pass are db='pmc' to search PMC instead of pubmed reldate=2\n to search for papers within the last 2 days mindate='2016/03/01',\n maxdate='2016/03/31' to search for papers in March 2016.\n\n PubMed, by default, limits returned PMIDs to a small number, and this\n number can be controlled by the \"retmax\" parameter. This function\n uses a retmax value of 100,000 by default that can be changed via the\n corresponding keyword argument.\n\n Parameters\n ----------\n search_term : str\n A term for which the PubMed search should be performed.\n use_text_word : Optional[bool]\n If True, the \"[tw]\" string is appended to the search term to constrain\n the search to \"text words\", that is words that appear as whole\n in relevant parts of the PubMed entry (excl. for instance the journal\n name or publication date) like the title and abstract. Using this\n option can eliminate spurious search results such as all articles\n published in June for a search for the \"JUN\" gene, or journal names\n that contain Acad for a search for the \"ACAD\" gene.\n See also: https://www.nlm.nih.gov/bsd/disted/pubmedtutorial/020_760.html\n Default : True\n kwargs : kwargs\n Additional keyword arguments to pass to the PubMed search as\n parameters.\n " ]
Please provide a description of the function:def get_id_count(search_term): params = {'term': search_term, 'rettype': 'count', 'db': 'pubmed'} tree = send_request(pubmed_search, params) if tree is None: return None else: count = tree.getchildren()[0].text return int(count)
[ "Get the number of citations in Pubmed for a search query.\n\n Parameters\n ----------\n search_term : str\n A term for which the PubMed search should be performed.\n\n Returns\n -------\n int or None\n The number of citations for the query, or None if the query fails.\n " ]
Please provide a description of the function:def get_ids_for_gene(hgnc_name, **kwargs): # Get the HGNC ID for the HGNC name hgnc_id = hgnc_client.get_hgnc_id(hgnc_name) if hgnc_id is None: raise ValueError('Invalid HGNC name.') # Get the Entrez ID entrez_id = hgnc_client.get_entrez_id(hgnc_id) if entrez_id is None: raise ValueError('Entrez ID not found in HGNC table.') # Query the Entrez Gene database params = {'db': 'gene', 'retmode': 'xml', 'id': entrez_id} params.update(kwargs) tree = send_request(pubmed_fetch, params) if tree is None: return [] if tree.find('ERROR') is not None: logger.error(tree.find('ERROR').text) return [] # Get all PMIDs from the XML tree id_terms = tree.findall('.//PubMedId') if id_terms is None: return [] # Use a set to remove duplicate IDs ids = list(set([idt.text for idt in id_terms])) return ids
[ "Get the curated set of articles for a gene in the Entrez database.\n\n Search parameters for the Gene database query can be passed in as\n keyword arguments. \n\n Parameters\n ----------\n hgnc_name : string\n The HGNC name of the gene. This is used to obtain the HGNC ID\n (using the hgnc_client module) and in turn used to obtain the Entrez\n ID associated with the gene. Entrez is then queried for that ID.\n " ]
Please provide a description of the function:def get_article_xml(pubmed_id): if pubmed_id.upper().startswith('PMID'): pubmed_id = pubmed_id[4:] params = {'db': 'pubmed', 'retmode': 'xml', 'id': pubmed_id} tree = send_request(pubmed_fetch, params) if tree is None: return None article = tree.find('PubmedArticle/MedlineCitation/Article') return article
[ "Get the XML metadata for a single article from the Pubmed database.\n " ]
Please provide a description of the function:def get_abstract(pubmed_id, prepend_title=True): article = get_article_xml(pubmed_id) if article is None: return None return _abstract_from_article_element(article, prepend_title)
[ "Get the abstract of an article in the Pubmed database." ]
Please provide a description of the function:def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False, mesh_annotations=False): # Iterate over the articles and build the results dict results = {} pm_articles = tree.findall('./PubmedArticle') for art_ix, pm_article in enumerate(pm_articles): medline_citation = pm_article.find('./MedlineCitation') article_info = _get_article_info(medline_citation, pm_article.find('PubmedData')) journal_info = _get_journal_info(medline_citation, get_issns_from_nlm) context_info = _get_annotations(medline_citation) # Build the result result = {} result.update(article_info) result.update(journal_info) result.update(context_info) # Get the abstracts if requested if get_abstracts: abstract = _abstract_from_article_element( medline_citation.find('Article'), prepend_title=prepend_title ) result['abstract'] = abstract # Add to dict results[article_info['pmid']] = result return results
[ "Get metadata for an XML tree containing PubmedArticle elements.\n\n Documentation on the XML structure can be found at:\n - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html\n - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html\n\n Parameters\n ----------\n tree : xml.etree.ElementTree\n ElementTree containing one or more PubmedArticle elements.\n get_issns_from_nlm : boolean\n Look up the full list of ISSN number for the journal associated with\n the article, which helps to match articles to CrossRef search results.\n Defaults to False, since it slows down performance.\n get_abstracts : boolean\n Indicates whether to include the Pubmed abstract in the results.\n prepend_title : boolean\n If get_abstracts is True, specifies whether the article title should\n be prepended to the abstract text.\n mesh_annotations : boolean\n If True, extract mesh annotations from the pubmed entries and include\n in the returned data. If false, don't.\n\n Returns\n -------\n dict of dicts\n Dictionary indexed by PMID. Each value is a dict containing the\n following fields: 'doi', 'title', 'authors', 'journal_title',\n 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.\n " ]
Please provide a description of the function:def get_metadata_for_ids(pmid_list, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False): if len(pmid_list) > 200: raise ValueError("Metadata query is limited to 200 PMIDs at a time.") params = {'db': 'pubmed', 'retmode': 'xml', 'id': pmid_list} tree = send_request(pubmed_fetch, params) if tree is None: return None return get_metadata_from_xml_tree(tree, get_issns_from_nlm, get_abstracts, prepend_title)
[ "Get article metadata for up to 200 PMIDs from the Pubmed database.\n\n Parameters\n ----------\n pmid_list : list of PMIDs as strings\n Can contain 1-200 PMIDs.\n get_issns_from_nlm : boolean\n Look up the full list of ISSN number for the journal associated with\n the article, which helps to match articles to CrossRef search results.\n Defaults to False, since it slows down performance.\n get_abstracts : boolean\n Indicates whether to include the Pubmed abstract in the results.\n prepend_title : boolean\n If get_abstracts is True, specifies whether the article title should\n be prepended to the abstract text.\n\n Returns\n -------\n dict of dicts\n Dictionary indexed by PMID. Each value is a dict containing the\n following fields: 'doi', 'title', 'authors', 'journal_title',\n 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.\n " ]
Please provide a description of the function:def get_issns_for_journal(nlm_id): params = {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id} tree = send_request(pubmed_fetch, params) if tree is None: return None issn_list = tree.findall('.//ISSN') issn_linking = tree.findall('.//ISSNLinking') issns = issn_list + issn_linking # No ISSNs found! if not issns: return None else: return [issn.text for issn in issns]
[ "Get a list of the ISSN numbers for a journal given its NLM ID.\n\n Information on NLM XML DTDs is available at\n https://www.nlm.nih.gov/databases/dtd/\n " ]
Please provide a description of the function:def expand_pagination(pages): # If there is no hyphen, it's a single page, and we're good to go parts = pages.split('-') if len(parts) == 1: # No hyphen, so no split return pages elif len(parts) == 2: start = parts[0] end = parts[1] # If the end is the same number of digits as the start, then we # don't change anything! if len(start) == len(end): return pages # Otherwise, replace the last digits of start with the digits of end num_end_digits = len(end) new_end = start[:-num_end_digits] + end return '%s-%s' % (start, new_end) else: # More than one hyphen, something weird happened logger.warning("Multiple hyphens in page number: %s" % pages) return pages
[ "Convert a page number to long form, e.g., from 456-7 to 456-457." ]
Please provide a description of the function:def _find_sources_with_paths(im, target, sources, polarity): # First, create a list of visited nodes # Adapted from # http://stackoverflow.com/questions/8922060/ # how-to-trace-the-path-in-a-breadth-first-search # FIXME: the sign information for the target should be associated with # the observable itself queue = deque([[(target, 1)]]) while queue: # Get the first path in the queue path = queue.popleft() node, node_sign = path[-1] # If there's only one node in the path, it's the observable we're # starting from, so the path is positive # if len(path) == 1: # sign = 1 # Because the path runs from target back to source, we have to reverse # the path to calculate the overall polarity #else: # sign = _path_polarity(im, reversed(path)) # Don't allow trivial paths consisting only of the target observable if (sources is None or node in sources) and node_sign == polarity \ and len(path) > 1: logger.debug('Found path: %s' % str(_flip(im, path))) yield tuple(path) for predecessor, sign in _get_signed_predecessors(im, node, node_sign): # Only add predecessors to the path if it's not already in the # path--prevents loops if (predecessor, sign) in path: continue # Otherwise, the new path is a copy of the old one plus the new # predecessor new_path = list(path) new_path.append((predecessor, sign)) queue.append(new_path) return
[ "Get the subset of source nodes with paths to the target.\n\n Given a target, a list of sources, and a path polarity, perform a\n breadth-first search upstream from the target to find paths to any of the\n upstream sources.\n\n Parameters\n ----------\n im : networkx.MultiDiGraph\n Graph containing the influence map.\n target : str\n The node (rule name) in the influence map to start looking upstream for\n matching sources.\n sources : list of str\n The nodes (rules) corresponding to the subject or upstream influence\n being checked.\n polarity : int\n Required polarity of the path between source and target.\n\n Returns\n -------\n generator of path\n Yields paths as lists of nodes (rule names). If there are no paths\n to any of the given source nodes, the generator is empty.\n " ]
Please provide a description of the function:def remove_im_params(model, im): for param in model.parameters: # If the node doesn't exist e.g., it may have already been removed), # skip over the parameter without error try: im.remove_node(param.name) except: pass
[ "Remove parameter nodes from the influence map.\n\n Parameters\n ----------\n model : pysb.core.Model\n PySB model.\n im : networkx.MultiDiGraph\n Influence map.\n\n Returns\n -------\n networkx.MultiDiGraph\n Influence map with the parameter nodes removed.\n " ]
Please provide a description of the function:def _find_sources(im, target, sources, polarity): # First, create a list of visited nodes # Adapted from # networkx.algorithms.traversal.breadth_first_search.bfs_edges visited = set([(target, 1)]) # Generate list of predecessor nodes with a sign updated according to the # sign of the target node target_tuple = (target, 1) # The queue holds tuples of "parents" (in this case downstream nodes) and # their "children" (in this case their upstream influencers) queue = deque([(target_tuple, _get_signed_predecessors(im, target, 1), 0)]) while queue: parent, children, path_length = queue[0] try: # Get the next child in the list (child, sign) = next(children) # Is this child one of the source nodes we're looking for? If so, # yield it along with path length. if (sources is None or child in sources) and sign == polarity: logger.debug("Found path to %s from %s with desired sign %s " "with length %d" % (target, child, polarity, path_length+1)) yield (child, sign, path_length+1) # Check this child against the visited list. If we haven't visited # it already (accounting for the path to the node), then add it # to the queue. if (child, sign) not in visited: visited.add((child, sign)) queue.append(((child, sign), _get_signed_predecessors(im, child, sign), path_length + 1)) # Once we've finished iterating over the children of the current node, # pop the node off and go to the next one in the queue except StopIteration: queue.popleft() # There was no path; this will produce an empty generator return
[ "Get the subset of source nodes with paths to the target.\n\n Given a target, a list of sources, and a path polarity, perform a\n breadth-first search upstream from the target to determine whether any of\n the queried sources have paths to the target with the appropriate polarity.\n For efficiency, does not return the full path, but identifies the upstream\n sources and the length of the path.\n\n Parameters\n ----------\n im : networkx.MultiDiGraph\n Graph containing the influence map.\n target : str\n The node (rule name) in the influence map to start looking upstream for\n matching sources.\n sources : list of str\n The nodes (rules) corresponding to the subject or upstream influence\n being checked.\n polarity : int\n Required polarity of the path between source and target.\n\n Returns\n -------\n generator of (source, polarity, path_length)\n Yields tuples of source node (string), polarity (int) and path length\n (int). If there are no paths to any of the given source nodes, the\n generator is empty.\n " ]
Please provide a description of the function:def _get_signed_predecessors(im, node, polarity): signed_pred_list = [] for pred in im.predecessors(node): pred_edge = (pred, node) yield (pred, _get_edge_sign(im, pred_edge) * polarity)
[ "Get upstream nodes in the influence map.\n\n Return the upstream nodes along with the overall polarity of the path\n to that node by account for the polarity of the path to the given node\n and the polarity of the edge between the given node and its immediate\n predecessors.\n\n Parameters\n ----------\n im : networkx.MultiDiGraph\n Graph containing the influence map.\n node : str\n The node (rule name) in the influence map to get predecessors (upstream\n nodes) for.\n polarity : int\n Polarity of the overall path to the given node.\n\n\n Returns\n -------\n generator of tuples, (node, polarity)\n Each tuple returned contains two elements, a node (string) and the\n polarity of the overall path (int) to that node.\n " ]
Please provide a description of the function:def _get_edge_sign(im, edge): edge_data = im[edge[0]][edge[1]] # Handle possible multiple edges between nodes signs = list(set([v['sign'] for v in edge_data.values() if v.get('sign')])) if len(signs) > 1: logger.warning("Edge %s has conflicting polarities; choosing " "positive polarity by default" % str(edge)) sign = 1 else: sign = signs[0] if sign is None: raise Exception('No sign attribute for edge.') elif abs(sign) == 1: return sign else: raise Exception('Unexpected edge sign: %s' % edge.attr['sign'])
[ "Get the polarity of the influence by examining the edge sign." ]
Please provide a description of the function:def _add_modification_to_agent(agent, mod_type, residue, position): new_mod = ModCondition(mod_type, residue, position) # Check if this modification already exists for old_mod in agent.mods: if old_mod.equals(new_mod): return agent new_agent = deepcopy(agent) new_agent.mods.append(new_mod) return new_agent
[ "Add a modification condition to an Agent." ]
Please provide a description of the function:def _match_lhs(cp, rules): rule_matches = [] for rule in rules: reactant_pattern = rule.rule_expression.reactant_pattern for rule_cp in reactant_pattern.complex_patterns: if _cp_embeds_into(rule_cp, cp): rule_matches.append(rule) break return rule_matches
[ "Get rules with a left-hand side matching the given ComplexPattern." ]
Please provide a description of the function:def _cp_embeds_into(cp1, cp2): # Check that any state in cp2 is matched in cp1 # If the thing we're matching to is just a monomer pattern, that makes # things easier--we just need to find the corresponding monomer pattern # in cp1 if cp1 is None or cp2 is None: return False cp1 = as_complex_pattern(cp1) cp2 = as_complex_pattern(cp2) if len(cp2.monomer_patterns) == 1: mp2 = cp2.monomer_patterns[0] # Iterate over the monomer patterns in cp1 and see if there is one # that has the same name for mp1 in cp1.monomer_patterns: if _mp_embeds_into(mp1, mp2): return True return False
[ "Check that any state in ComplexPattern2 is matched in ComplexPattern1.\n " ]
Please provide a description of the function:def _mp_embeds_into(mp1, mp2): sc_matches = [] if mp1.monomer.name != mp2.monomer.name: return False # Check that all conditions in mp2 are met in mp1 for site_name, site_state in mp2.site_conditions.items(): if site_name not in mp1.site_conditions or \ site_state != mp1.site_conditions[site_name]: return False return True
[ "Check that conditions in MonomerPattern2 are met in MonomerPattern1." ]
Please provide a description of the function:def _monomer_pattern_label(mp): site_strs = [] for site, cond in mp.site_conditions.items(): if isinstance(cond, tuple) or isinstance(cond, list): assert len(cond) == 2 if cond[1] == WILD: site_str = '%s_%s' % (site, cond[0]) else: site_str = '%s_%s%s' % (site, cond[0], cond[1]) elif isinstance(cond, numbers.Real): continue else: site_str = '%s_%s' % (site, cond) site_strs.append(site_str) return '%s_%s' % (mp.monomer.name, '_'.join(site_strs))
[ "Return a string label for a MonomerPattern." ]
Please provide a description of the function:def _stmt_from_rule(model, rule_name, stmts): stmt_uuid = None for ann in model.annotations: if ann.predicate == 'from_indra_statement': if ann.subject == rule_name: stmt_uuid = ann.object break if stmt_uuid: for stmt in stmts: if stmt.uuid == stmt_uuid: return stmt
[ "Return the INDRA Statement corresponding to a given rule by name." ]
Please provide a description of the function:def generate_im(self, model): kappa = kappy.KappaStd() model_str = export.export(model, 'kappa') kappa.add_model_string(model_str) kappa.project_parse() imap = kappa.analyses_influence_map(accuracy='medium') graph = im_json_to_graph(imap) return graph
[ "Return a graph representing the influence map generated by Kappa\n\n Parameters\n ----------\n model : pysb.Model\n The PySB model whose influence map is to be generated\n\n Returns\n -------\n graph : networkx.MultiDiGraph\n A MultiDiGraph representing the influence map\n " ]
Please provide a description of the function:def draw_im(self, fname): im = self.get_im() im_agraph = nx.nx_agraph.to_agraph(im) im_agraph.draw(fname, prog='dot')
[ "Draw and save the influence map in a file.\n\n Parameters\n ----------\n fname : str\n The name of the file to save the influence map in.\n The extension of the file will determine the file format,\n typically png or pdf.\n " ]
Please provide a description of the function:def get_im(self, force_update=False): if self._im and not force_update: return self._im if not self.model: raise Exception("Cannot get influence map if there is no model.") def add_obs_for_agent(agent): obj_mps = list(pa.grounded_monomer_patterns(self.model, agent)) if not obj_mps: logger.debug('No monomer patterns found in model for agent %s, ' 'skipping' % agent) return obs_list = [] for obj_mp in obj_mps: obs_name = _monomer_pattern_label(obj_mp) + '_obs' # Add the observable obj_obs = Observable(obs_name, obj_mp, _export=False) obs_list.append(obs_name) try: self.model.add_component(obj_obs) except ComponentDuplicateNameError as e: pass return obs_list # Create observables for all statements to check, and add to model # Remove any existing observables in the model self.model.observables = ComponentSet([]) for stmt in self.statements: # Generate observables for Modification statements if isinstance(stmt, Modification): mod_condition_name = modclass_to_modtype[stmt.__class__] if isinstance(stmt, RemoveModification): mod_condition_name = modtype_to_inverse[mod_condition_name] # Add modification to substrate agent modified_sub = _add_modification_to_agent(stmt.sub, mod_condition_name, stmt.residue, stmt.position) obs_list = add_obs_for_agent(modified_sub) # Associate this statement with this observable self.stmt_to_obs[stmt] = obs_list # Generate observables for Activation/Inhibition statements elif isinstance(stmt, RegulateActivity): regulated_obj, polarity = \ _add_activity_to_agent(stmt.obj, stmt.obj_activity, stmt.is_activation) obs_list = add_obs_for_agent(regulated_obj) # Associate this statement with this observable self.stmt_to_obs[stmt] = obs_list elif isinstance(stmt, RegulateAmount): obs_list = add_obs_for_agent(stmt.obj) self.stmt_to_obs[stmt] = obs_list elif isinstance(stmt, Influence): obs_list = add_obs_for_agent(stmt.obj.concept) self.stmt_to_obs[stmt] = obs_list # Add observables for each agent for ag in 
self.agent_obs: obs_list = add_obs_for_agent(ag) self.agent_to_obs[ag] = obs_list logger.info("Generating influence map") self._im = self.generate_im(self.model) #self._im.is_multigraph = lambda: False # Now, for every rule in the model, check if there are any observables # downstream; alternatively, for every observable in the model, get a # list of rules. # We'll need the dictionary to check if nodes are observables node_attributes = nx.get_node_attributes(self._im, 'node_type') for rule in self.model.rules: obs_list = [] # Get successors of the rule node for neighb in self._im.neighbors(rule.name): # Check if the node is an observable if node_attributes[neighb] != 'variable': continue # Get the edge and check the polarity edge_sign = _get_edge_sign(self._im, (rule.name, neighb)) obs_list.append((neighb, edge_sign)) self.rule_obs_dict[rule.name] = obs_list return self._im
[ "Get the influence map for the model, generating it if necessary.\n\n Parameters\n ----------\n force_update : bool\n Whether to generate the influence map when the function is called.\n If False, returns the previously generated influence map if\n available. Defaults to True.\n\n Returns\n -------\n networkx MultiDiGraph object containing the influence map.\n The influence map can be rendered as a pdf using the dot layout\n program as follows::\n\n im_agraph = nx.nx_agraph.to_agraph(influence_map)\n im_agraph.draw('influence_map.pdf', prog='dot')\n " ]
def check_model(self, max_paths=1, max_path_length=5):
    """Check all the statements added to the ModelChecker.

    Parameters
    ----------
    max_paths : Optional[int]
        The maximum number of specific paths to return for each Statement
        to be explained. Default: 1
    max_path_length : Optional[int]
        The maximum length of specific paths to return. Default: 5

    Returns
    -------
    list of (Statement, PathResult)
        Each tuple contains the Statement checked against the model and
        a PathResult object describing the results of model checking.
    """
    # Check each statement independently and pair it with its result.
    return [(stmt, self.check_statement(stmt, max_paths, max_path_length))
            for stmt in self.statements]
def check_statement(self, stmt, max_paths=1, max_path_length=5):
    """Check a single Statement against the model.

    Parameters
    ----------
    stmt : indra.statements.Statement
        The Statement to check.
    max_paths : Optional[int]
        The maximum number of specific paths to return for each Statement
        to be explained. Default: 1
    max_path_length : Optional[int]
        The maximum length of specific paths to return. Default: 5

    Returns
    -------
    PathResult
        Result object whose truthiness/codes describe whether the model
        satisfies the Statement (e.g. 'PATHS_FOUND', 'NO_PATHS_FOUND',
        'STATEMENT_TYPE_NOT_HANDLED').
    """
    # Make sure the influence map is initialized
    self.get_im()
    # Check if this is one of the statement types that we can check
    if not isinstance(stmt, (Modification, RegulateAmount,
                             RegulateActivity, Influence)):
        return PathResult(False, 'STATEMENT_TYPE_NOT_HANDLED',
                          max_paths, max_path_length)
    # Get the polarity for the statement: +1 for positive influence,
    # -1 for negative influence, depending on the statement subtype.
    if isinstance(stmt, Modification):
        target_polarity = -1 if isinstance(stmt, RemoveModification) else 1
    elif isinstance(stmt, RegulateActivity):
        target_polarity = 1 if stmt.is_activation else -1
    elif isinstance(stmt, RegulateAmount):
        target_polarity = -1 if isinstance(stmt, DecreaseAmount) else 1
    elif isinstance(stmt, Influence):
        target_polarity = -1 if stmt.overall_polarity() == -1 else 1
    # Get the subject and object (works also for Modifications)
    subj, obj = stmt.agent_list()
    # Get a list of monomer patterns matching the subject FIXME Currently
    # this will match rules with the corresponding monomer pattern on it.
    # In future, this statement should (possibly) also match rules in which
    # 1) the agent is in its active form, or 2) the agent is tagged as the
    # enzyme in a rule of the appropriate activity (e.g., a phosphorylation
    # rule) FIXME
    if subj is not None:
        subj_mps = list(pa.grounded_monomer_patterns(self.model, subj,
                                                     ignore_activities=True))
        if not subj_mps:
            logger.debug('No monomers found corresponding to agent %s'
                         % subj)
            return PathResult(False, 'SUBJECT_MONOMERS_NOT_FOUND',
                              max_paths, max_path_length)
    else:
        # No subject: search from any input (subject monomer pattern None).
        subj_mps = [None]
    # Observables may not be found for an activation since there may be no
    # rule in the model activating the object, and the object may not have
    # an "active" site of the appropriate type
    obs_names = self.stmt_to_obs[stmt]
    if not obs_names:
        logger.debug("No observables for stmt %s, returning False" % stmt)
        return PathResult(False, 'OBSERVABLES_NOT_FOUND',
                          max_paths, max_path_length)
    for subj_mp, obs_name in itertools.product(subj_mps, obs_names):
        # NOTE: Returns on the path found for the first enz_mp/obs combo
        result = self._find_im_paths(subj_mp, obs_name, target_polarity,
                                     max_paths, max_path_length)
        # If a path was found, then we return it; otherwise, that means
        # there was no path for this observable, so we have to try the next
        # one
        if result.path_found:
            return result
    # If we got here, then there was no path for any observable
    return PathResult(False, 'NO_PATHS_FOUND', max_paths, max_path_length)
Please provide a description of the function:def _find_im_paths(self, subj_mp, obs_name, target_polarity, max_paths=1, max_path_length=5): logger.info(('Running path finding with max_paths=%d,' ' max_path_length=%d') % (max_paths, max_path_length)) # Find rules in the model corresponding to the input if subj_mp is None: input_rule_set = None else: input_rule_set = self._get_input_rules(subj_mp) if not input_rule_set: return PathResult(False, 'INPUT_RULES_NOT_FOUND', max_paths, max_path_length) logger.info('Checking path metrics between %s and %s with polarity %s' % (subj_mp, obs_name, target_polarity)) # -- Route to the path sampling function -- if self.do_sampling: if not has_pg: raise Exception('The paths_graph package could not be ' 'imported.') return self._sample_paths(input_rule_set, obs_name, target_polarity, max_paths, max_path_length) # -- Do Breadth-First Enumeration -- # Generate the predecessors to our observable and count the paths path_lengths = [] path_metrics = [] for source, polarity, path_length in \ _find_sources(self.get_im(), obs_name, input_rule_set, target_polarity): pm = PathMetric(source, obs_name, polarity, path_length) path_metrics.append(pm) path_lengths.append(path_length) logger.info('Finding paths between %s and %s with polarity %s' % (subj_mp, obs_name, target_polarity)) # Now, look for paths paths = [] if path_metrics and max_paths == 0: pr = PathResult(True, 'MAX_PATHS_ZERO', max_paths, max_path_length) pr.path_metrics = path_metrics return pr elif path_metrics: if min(path_lengths) <= max_path_length: pr = PathResult(True, 'PATHS_FOUND', max_paths, max_path_length) pr.path_metrics = path_metrics # Get the first path path_iter = enumerate(_find_sources_with_paths( self.get_im(), obs_name, input_rule_set, target_polarity)) for path_ix, path in path_iter: flipped = _flip(self.get_im(), path) pr.add_path(flipped) if len(pr.paths) >= max_paths: break return pr # There are no paths shorter than the max path length, so we # don't bother 
trying to get them else: pr = PathResult(True, 'MAX_PATH_LENGTH_EXCEEDED', max_paths, max_path_length) pr.path_metrics = path_metrics return pr else: return PathResult(False, 'NO_PATHS_FOUND', max_paths, max_path_length)
[ "Check for a source/target path in the influence map.\n\n Parameters\n ----------\n subj_mp : pysb.MonomerPattern\n MonomerPattern corresponding to the subject of the Statement\n being checked.\n obs_name : str\n Name of the PySB model Observable corresponding to the\n object/target of the Statement being checked.\n target_polarity : int\n Whether the influence in the Statement is positive (1) or negative\n (-1).\n\n Returns\n -------\n PathResult\n PathResult object indicating the results of the attempt to find\n a path.\n " ]
Please provide a description of the function:def score_paths(self, paths, agents_values, loss_of_function=False, sigma=0.15, include_final_node=False): obs_model = lambda x: scipy.stats.norm(x, sigma) # Build up dict mapping observables to values obs_dict = {} for ag, val in agents_values.items(): obs_list = self.agent_to_obs[ag] if obs_list is not None: for obs in obs_list: obs_dict[obs] = val # For every path... path_scores = [] for path in paths: logger.info('------') logger.info("Scoring path:") logger.info(path) # Look at every node in the path, excluding the final # observable... path_score = 0 last_path_node_index = -1 if include_final_node else -2 for node, sign in path[:last_path_node_index]: # ...and for each node check the sign to see if it matches the # data. So the first thing is to look at what's downstream # of the rule # affected_obs is a list of observable names alogn for affected_obs, rule_obs_sign in self.rule_obs_dict[node]: flip_polarity = -1 if loss_of_function else 1 pred_sign = sign * rule_obs_sign * flip_polarity # Check to see if this observable is in the data logger.info('%s %s: effect %s %s' % (node, sign, affected_obs, pred_sign)) measured_val = obs_dict.get(affected_obs) if measured_val: # For negative predictions use CDF (prob that given # measured value, true value lies below 0) if pred_sign <= 0: prob_correct = obs_model(measured_val).logcdf(0) # For positive predictions, use log survival function # (SF = 1 - CDF, i.e., prob that true value is # above 0) else: prob_correct = obs_model(measured_val).logsf(0) logger.info('Actual: %s, Log Probability: %s' % (measured_val, prob_correct)) path_score += prob_correct if not self.rule_obs_dict[node]: logger.info('%s %s' % (node, sign)) prob_correct = obs_model(0).logcdf(0) logger.info('Unmeasured node, Log Probability: %s' % (prob_correct)) path_score += prob_correct # Normalized path #path_score = path_score / len(path) logger.info("Path score: %s" % path_score) 
path_scores.append(path_score) path_tuples = list(zip(paths, path_scores)) # Sort first by path length sorted_by_length = sorted(path_tuples, key=lambda x: len(x[0])) # Sort by probability; sort in reverse order to large values # (higher probabilities) are ranked higher scored_paths = sorted(sorted_by_length, key=lambda x: x[1], reverse=True) return scored_paths
[ "Return scores associated with a given set of paths.\n\n Parameters\n ----------\n paths : list[list[tuple[str, int]]]\n A list of paths obtained from path finding. Each path is a list\n of tuples (which are edges in the path), with the first element\n of the tuple the name of a rule, and the second element its\n polarity in the path.\n agents_values : dict[indra.statements.Agent, float]\n A dictionary of INDRA Agents and their corresponding measured\n value in a given experimental condition.\n loss_of_function : Optional[boolean]\n If True, flip the polarity of the path. For instance, if the effect\n of an inhibitory drug is explained, set this to True.\n Default: False\n sigma : Optional[float]\n The estimated standard deviation for the normally distributed\n measurement error in the observation model used to score paths\n with respect to data. Default: 0.15\n include_final_node : Optional[boolean]\n Determines whether the final node of the path is included in the\n score. Default: False\n " ]
def prune_influence_map(self):
    """Remove edges between rules causing problematic non-transitivity.

    First, all self-loops are removed. After this initial step, edges are
    removed between rules when they share *all* child nodes except for
    each other; that is, they have a mutual relationship with each other
    and share all of the same children.

    Note that edges must be removed in batch at the end to prevent edge
    removal from affecting the lists of rule children during the
    comparison process.
    """
    im = self.get_im()
    # First, remove all self-loops
    logger.info('Removing self loops')
    edges_to_remove = []
    for e in im.edges():
        if e[0] == e[1]:
            logger.info('Removing self loop: %s', e)
            edges_to_remove.append((e[0], e[1]))
    # Now remove all the edges to be removed with a single call
    im.remove_edges_from(edges_to_remove)
    # Remove parameter nodes from influence map
    remove_im_params(self.model, im)
    # Now compare nodes pairwise and look for overlap between child nodes
    logger.info('Get successorts of each node')
    succ_dict = {}
    for node in im.nodes():
        succ_dict[node] = set(im.successors(node))
    # Sort and then group nodes by number of successors; only nodes with
    # equal successor counts can possibly share all children.
    logger.info('Compare combinations of successors')
    group_key_fun = lambda x: len(succ_dict[x])
    nodes_sorted = sorted(im.nodes(), key=group_key_fun)
    groups = itertools.groupby(nodes_sorted, key=group_key_fun)
    # Now iterate over each group and then construct combinations
    # within the group to check for shared successors
    edges_to_remove = []
    for gix, group in groups:
        combos = itertools.combinations(group, 2)
        for ix, (p1, p2) in enumerate(combos):
            # Children are identical except for mutual relationship
            if succ_dict[p1].difference(succ_dict[p2]) == set([p2]) and \
               succ_dict[p2].difference(succ_dict[p1]) == set([p1]):
                # Remove the mutual edges in both directions.
                for u, v in ((p1, p2), (p2, p1)):
                    edges_to_remove.append((u, v))
                    logger.debug('Will remove edge (%s, %s)', u, v)
    logger.info('Removing %d edges from influence map'
                % len(edges_to_remove))
    # Now remove all the edges to be removed with a single call
    im.remove_edges_from(edges_to_remove)
def prune_influence_map_subj_obj(self):
    """Prune influence map to include only edges where the object of the
    upstream rule matches the subject of the downstream rule."""
    def get_rule_info(r):
        # Collect the annotated subject/object of rule r, if present.
        result = {}
        for ann in self.model.annotations:
            if ann.subject == r:
                if ann.predicate == 'rule_has_subject':
                    result['subject'] = ann.object
                elif ann.predicate == 'rule_has_object':
                    result['object'] = ann.object
        return result
    im = self.get_im()
    rules = im.nodes()
    edges_to_prune = []
    for r1, r2 in itertools.permutations(rules, 2):
        # Only consider pairs that are actually connected r1 --> r2.
        if (r1, r2) not in im.edges():
            continue
        r1_info = get_rule_info(r1)
        r2_info = get_rule_info(r2)
        # Without both annotations we cannot judge the edge; keep it.
        if 'object' not in r1_info or 'subject' not in r2_info:
            continue
        if r1_info['object'] != r2_info['subject']:
            logger.info("Removing edge %s --> %s" % (r1, r2))
            edges_to_prune.append((r1, r2))
    # Batch removal so iteration above is not affected by deletions.
    im.remove_edges_from(edges_to_prune)
def add_section(self, section_name):
    """Create a section of the report, to be headed by section_name.

    Text and images can be added by using the `section` argument of the
    `add_text` and `add_image` methods. Sections can also be ordered by
    using the `set_section_order` method.

    By default, text and images that have no section will be placed after
    all the sections, in the order they were added. This behavior may be
    altered using the `sections_first` attribute of the `make_report`
    method.

    Parameters
    ----------
    section_name : str
        The heading of the new section.

    Raises
    ------
    ValueError
        If a section with this name already exists. The check is performed
        before any state is modified, so a failed call leaves the report
        unchanged.
    """
    # Validate before mutating: the previous implementation appended to
    # section_headings first, leaving a duplicate heading behind whenever
    # the ValueError was raised.
    if section_name in self.sections:
        raise ValueError("Section %s already exists." % section_name)
    self.section_headings.append(section_name)
    self.sections[section_name] = []
    return
def set_section_order(self, section_name_list):
    """Set the order of the sections, which are by default unordered.

    Any unlisted sections that exist will be placed at the end of the
    document in no particular order.
    """
    ordered = list(section_name_list)
    # Append any existing section the caller did not mention.
    ordered.extend(name for name in self.sections.keys()
                   if name not in section_name_list)
    self.section_headings = ordered
    return
def add_text(self, text, *args, **kwargs):
    """Add text to the document.

    Text is shown on the final document in the order it is added, either
    within the given section or as part of the un-sectioned content.

    Parameters
    ----------
    text : str
        The text to be added.
    section : str
        (Keyword only) Select a section in which to place this text.
        Default is None, in which case the text is added to the default
        list of text and images.

    All other positional and keyword arguments (style, space, fontsize,
    alignment) are forwarded to the text formatter.
    """
    # 'section' is our own option; everything else goes to the formatter.
    section_name = kwargs.pop('section', None)
    paragraph, spacer = self._preformat_text(text, *args, **kwargs)
    target = self.story if section_name is None \
        else self.sections[section_name]
    target.extend([paragraph, spacer])
    return
def add_image(self, image_path, width=None, height=None, section=None):
    """Add an image to the document.

    Images are shown on the final document in the order they are added,
    either within the given section or as part of the un-sectioned
    content.

    Parameters
    ----------
    image_path : str
        A path to the image on the local file system.
    width : int or float
        The width of the image in the document in inches.
    height : int or float
        The height of the image in the document in inches.
    section : str
        (Keyword only) Select a section in which to place this image.
        Default is None, in which case the image is added to the default
        list of text and images.
    """
    # Convert the caller's inch measurements into reportlab units.
    scaled_width = None if width is None else width * inch
    scaled_height = None if height is None else height * inch
    image = Image(image_path, scaled_width, scaled_height)
    target = self.story if section is None else self.sections[section]
    target.append(image)
    return
def make_report(self, sections_first=True, section_header_params=None):
    """Create the pdf document with name ``self.name + '.pdf'``.

    Parameters
    ----------
    sections_first : bool
        If True (default), sectioned text and images are presented first
        and un-sectioned content is appended afterward; if False, the
        order is reversed.
    section_header_params : dict or None
        Optionally overwrite/extend the default formatting for the
        section headers. Default is None.
    """
    title_flowables = list(self._preformat_text(self.title, style='Title',
                                                fontsize=18,
                                                alignment='center'))
    # Set the default section header parameters
    if section_header_params is None:
        section_header_params = {'style': 'h1', 'fontsize': 14,
                                 'alignment': 'center'}
    # Merge the sections and the rest of the story.
    section_flowables = self._make_sections(**section_header_params)
    if sections_first:
        full_story = title_flowables + section_flowables + self.story
    else:
        full_story = title_flowables + self.story + section_flowables
    fname = self.name + '.pdf'
    doc = SimpleDocTemplate(fname, pagesize=letter,
                            rightMargin=72, leftMargin=72,
                            topMargin=72, bottomMargin=18)
    doc.build(full_story)
    return fname
Please provide a description of the function:def _make_sections(self, **section_hdr_params): sect_story = [] if not self.section_headings and len(self.sections): self.section_headings = self.sections.keys() for section_name in self.section_headings: section_story = self.sections[section_name] line = '-'*20 section_head_text = '%s %s %s' % (line, section_name, line) title, title_sp = self._preformat_text(section_head_text, **section_hdr_params) sect_story += [title, title_sp] + section_story return sect_story
[ "Flatten the sections into a single story list." ]
def _preformat_text(self, text, style='Normal', space=None, fontsize=12,
                    alignment='left'):
    """Format the text for addition to a story list.

    Returns a (Paragraph, Spacer) pair ready to be appended to a story.
    """
    if space is None:
        space = (1, 12)
    # Wrap the text in reportlab's intra-paragraph markup.
    markup = ('<para alignment="%s"><font size=%d>%s</font></para>'
              % (alignment, fontsize, text))
    paragraph = Paragraph(markup, self.styles[style])
    spacer = Spacer(*space)
    return paragraph, spacer
def get_mesh_name_from_web(mesh_id):
    """Get the MESH label for the given MESH ID using the NLM REST API.

    Parameters
    ----------
    mesh_id : str
        MESH Identifier, e.g. 'D003094'.

    Returns
    -------
    str
        Label for the MESH ID, or None if the query failed or no label
        was found.
    """
    resp = requests.get('%s%s.json' % (MESH_URL, mesh_id))
    if resp.status_code != 200:
        return None
    try:
        return resp.json()['@graph'][0]['label']['@value']
    except (KeyError, IndexError):
        # Response did not contain a label in the expected place.
        return None
def get_mesh_name(mesh_id, offline=False):
    """Get the MESH label for the given MESH ID.

    Uses the mappings table in `indra/resources`; if the MESH ID is not
    listed there, falls back on the NLM REST API.

    Parameters
    ----------
    mesh_id : str
        MESH Identifier, e.g. 'D003094'.
    offline : bool
        Whether to allow queries to the NLM REST API if the given MESH ID
        is not contained in INDRA's internal MESH mappings file. Default
        is False (allows REST API queries).

    Returns
    -------
    str
        Label for the MESH ID, or None if the query failed or no label
        was found.
    """
    local_name = mesh_id_to_name.get(mesh_id)
    if local_name is None and not offline:
        # Not in the local mappings: look up the name from NLM.
        return get_mesh_name_from_web(mesh_id)
    return local_name
def get_mesh_id_name(mesh_term, offline=False):
    """Get the MESH ID and name for the given MESH term.

    Uses the mappings table in `indra/resources`; if the MESH term is not
    listed there, falls back on the NLM REST API.

    Parameters
    ----------
    mesh_term : str
        MESH Descriptor or Concept name, e.g. 'Breast Cancer'.
    offline : bool
        Whether to allow queries to the NLM REST API if the given MESH
        term is not contained in INDRA's internal MESH mappings file.
        Default is False (allows REST API queries).

    Returns
    -------
    tuple of strs
        A 2-tuple `(id, name)` with the descriptor ID and descriptor
        name, or (None, None) if no match was found.
    """
    # Exact descriptor-name match in the local table.
    mesh_id = mesh_name_to_id.get(mesh_term)
    if mesh_id is not None:
        return mesh_id, mesh_term
    # Concept-name match mapping to a (descriptor id, name) pair.
    mesh_id, descriptor_name = \
        mesh_name_to_id_name.get(mesh_term, (None, None))
    if mesh_id is not None:
        return mesh_id, descriptor_name
    if offline:
        return None, None
    # Look up the MESH mapping from NLM if we don't have it locally
    return get_mesh_id_name_from_web(mesh_term)
Please provide a description of the function:def get_mesh_id_name_from_web(mesh_term): url = MESH_URL + 'sparql' query = % (mesh_term, mesh_term) args = {'query': query, 'format': 'JSON', 'inference': 'true'} # Interestingly, the following call using requests.get to package the # query does not work: # resp = requests.get(url, data=args) # But if the query string is explicitly urlencoded using urllib, it works: query_string = '%s?%s' % (url, urlencode(args)) resp = requests.get(query_string) # Check status if resp.status_code != 200: return None, None try: # Try to parse the json response (this can raise exceptions if we # got no response). mesh_json = resp.json() # Choose the first entry (should usually be only one) id_uri = mesh_json['results']['bindings'][0]['d']['value'] name = mesh_json['results']['bindings'][0]['dName']['value'] except (KeyError, IndexError, json.decoder.JSONDecodeError) as e: return None, None # Strip the MESH prefix off the ID URI m = re.match('http://id.nlm.nih.gov/mesh/([A-Za-z0-9]*)', id_uri) assert m is not None id = m.groups()[0] return id, name
[ "Get the MESH ID and name for the given MESH term using the NLM REST API.\n\n Parameters\n ----------\n mesh_term : str\n MESH Descriptor or Concept name, e.g. 'Breast Cancer'.\n\n Returns\n -------\n tuple of strs\n Returns a 2-tuple of the form `(id, name)` with the ID of the\n descriptor corresponding to the MESH label, and the descriptor name\n (which may not exactly match the name provided as an argument if it is\n a Concept name). If the query failed, or no descriptor corresponding to\n the name was found, returns a tuple of (None, None).\n ", "\n PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n PREFIX owl: <http://www.w3.org/2002/07/owl#>\n PREFIX meshv: <http://id.nlm.nih.gov/mesh/vocab#>\n PREFIX mesh: <http://id.nlm.nih.gov/mesh/>\n PREFIX mesh2019: <http://id.nlm.nih.gov/mesh/2019/>\n PREFIX mesh2018: <http://id.nlm.nih.gov/mesh/2018/>\n PREFIX mesh2017: <http://id.nlm.nih.gov/mesh/2017/>\n\n SELECT ?d ?dName ?c ?cName \n FROM <http://id.nlm.nih.gov/mesh>\n WHERE {\n ?d a meshv:Descriptor .\n ?d meshv:concept ?c .\n ?d rdfs:label ?dName .\n ?c rdfs:label ?cName\n FILTER (REGEX(?dName,'^%s$','i') || REGEX(?cName,'^%s$','i'))\n }\n ORDER BY ?d\n " ]
def make(directory):
    """Makes a RAS Machine directory"""
    if os.path.exists(directory):
        # Report why we cannot create the directory, then bail out.
        message = ('Directory already exists' if os.path.isdir(directory)
                   else 'Path exists and is not a directory')
        click.echo(message)
        sys.exit()
    os.makedirs(directory)
    os.mkdir(os.path.join(directory, 'jsons'))
    copy_default_config(os.path.join(directory, 'config.yaml'))
def run_with_search(model_path, config, num_days):
    """Run with PubMed search for new papers."""
    # Imported locally, only when this command actually runs.
    from indra.tools.machine.machine import run_with_search_helper
    run_with_search_helper(model_path, config, num_days=num_days)
def run_with_pmids(model_path, pmids):
    """Run with given list of PMIDs."""
    # Imported locally, only when this command actually runs.
    from indra.tools.machine.machine import run_with_pmids_helper
    run_with_pmids_helper(model_path, pmids)
def id_lookup(paper_id, idtype=None):
    """Look up all of PMID/PMCID/DOI from any one of them.

    This function takes a Pubmed ID, Pubmed Central ID, or DOI and uses
    the Pubmed ID mapping service to look up all other IDs from the one
    given. The IDs are returned in a dictionary; an empty dictionary is
    returned if the lookup fails.
    """
    if idtype is not None and idtype not in ('pmid', 'pmcid', 'doi'):
        raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
                         "or 'doi'." % idtype)
    upper_id = paper_id.upper()
    if upper_id.startswith('PMC'):
        idtype = 'pmcid'
    # Strip off any prefix
    if upper_id.startswith('PMID'):
        paper_id = paper_id[4:]
    elif upper_id.startswith('DOI'):
        paper_id = paper_id[3:]
    data = {'ids': paper_id}
    if idtype is not None:
        data['idtype'] = idtype
    try:
        tree = pubmed_client.send_request(pmid_convert_url, data)
    except Exception as e:
        logger.error('Error looking up PMID in PMC: %s' % e)
        return {}
    if tree is None:
        return {}
    record = tree.find('record')
    if record is None:
        return {}
    return {'doi': record.attrib.get('doi'),
            'pmid': record.attrib.get('pmid'),
            'pmcid': record.attrib.get('pmcid')}
def get_xml(pmc_id):
    """Returns XML for the article corresponding to a PMC ID."""
    # Accept IDs with or without the 'PMC' prefix.
    if pmc_id.upper().startswith('PMC'):
        pmc_id = pmc_id[3:]
    # Request params
    params = {'verb': 'GetRecord',
              'identifier': 'oai:pubmedcentral.nih.gov:%s' % pmc_id,
              'metadataPrefix': 'pmc'}
    # Submit the request
    res = requests.get(pmc_url, params)
    if res.status_code != 200:
        logger.warning("Couldn't download %s" % pmc_id)
        return None
    # Read the bytestream
    xml_bytes = res.content
    # Check for any XML errors; xml_str should still be bytes
    tree = ET.XML(xml_bytes, parser=UTB())
    xmlns = "http://www.openarchives.org/OAI/2.0/"
    err_tag = tree.find('{%s}error' % xmlns)
    if err_tag is not None:
        logger.warning('PMC client returned with error %s: %s'
                       % (err_tag.attrib['code'], err_tag.text))
        return None
    # If no error, return the XML as a unicode string
    return xml_bytes.decode('utf-8')
def extract_paragraphs(xml_string):
    """Returns list of paragraphs in an NLM XML.

    Parameters
    ----------
    xml_string : str
        String containing valid NLM XML.

    Returns
    -------
    list of str
        List of extracted paragraphs (and titles) in an NLM XML.
    """
    tree = etree.fromstring(xml_string.encode('utf-8'))
    paragraphs = []
    # In NLM xml, all plaintext is within <p> tags, and is the only thing
    # that can be contained in <p> tags. To handle the possibility of
    # namespaces, match tags of the form 'p'/'title' or
    # '{<namespace>}p'/'{<namespace>}title'.
    # NOTE: the previous pattern used the character class [p|title], which
    # matched any SINGLE-character tag among p, |, t, i, l, e (so e.g. <i>
    # matched) and never matched <title> at all; (p|title) is the intended
    # grouping.
    for element in tree.iter():
        # element.tag can be a function for comments/PIs, hence the
        # basestring check before applying the regex.
        if isinstance(element.tag, basestring) and \
                re.search('(^|})(p|title)$', element.tag) and element.text:
            paragraph = ' '.join(element.itertext())
            paragraphs.append(paragraph)
    return paragraphs
def filter_pmids(pmid_list, source_type):
    """Filter a list of PMIDs for ones with full text from PMC.

    Parameters
    ----------
    pmid_list : list of str
        List of PMIDs to filter.
    source_type : string
        One of 'fulltext', 'oa_xml', 'oa_txt', or 'auth_xml'.

    Returns
    -------
    list of str
        PMIDs available in the specified source/format type.
    """
    global pmids_fulltext_dict
    # Check args
    if source_type not in ('fulltext', 'oa_xml', 'oa_txt', 'auth_xml'):
        raise ValueError("source_type must be one of: 'fulltext', 'oa_xml', "
                         "'oa_txt', or 'auth_xml'.")
    # Lazily load the PMID list for this source type on first use.
    if pmids_fulltext_dict.get(source_type) is None:
        list_path = os.path.join(os.path.dirname(__file__),
                                 'pmids_%s.txt' % source_type)
        with open(list_path, 'rb') as fh:
            pmids_fulltext_dict[source_type] = \
                {line.strip().decode('utf-8') for line in fh}
    return list(set(pmid_list) & pmids_fulltext_dict.get(source_type))
def get_example_extractions(fname):
    """Get extractions from one of the examples in `cag_examples`.

    Reads one sentence per line from ``fname``, sends each to the 'cwms'
    reader, and returns a dict mapping each sentence to the 'rdf:RDF'
    XML extracted from the reader output. Sentences whose extraction
    fails are logged and omitted from the result.
    """
    with open(fname, 'r') as f:
        sentences = f.read().splitlines()
    rdf_xml_dict = {}
    for sentence in sentences:
        logger.info("Reading \"%s\"..." % sentence)
        html = tc.send_query(sentence, 'cwms')
        try:
            rdf_xml_dict[sentence] = tc.get_xml(html, 'rdf:RDF',
                                                fail_if_empty=True)
        except AssertionError as e:
            # fail_if_empty raises AssertionError on empty extractions;
            # record the failure and move on to the next sentence.
            logger.error("Got error for %s." % sentence)
            logger.exception(e)
    return rdf_xml_dict
def make_example_graphs():
    """Make graphs from all the examples in cag_examples."""
    # Number the example files starting from 1.
    example_rdfs = {index + 1: get_example_extractions(fname)
                    for index, fname in
                    enumerate(os.listdir('cag_examples'))}
    return make_cag_graphs(example_rdfs)
Please provide a description of the function:def _assemble_agent_str(agent): agent_str = agent.name # Only do the more detailed assembly for molecular agents if not isinstance(agent, ist.Agent): return agent_str # Handle mutation conditions if agent.mutations: is_generic = False mut_strs = [] for mut in agent.mutations: res_to = mut.residue_to if mut.residue_to else '' res_from = mut.residue_from if mut.residue_from else '' pos = mut.position if mut.position else '' mut_str = '%s%s%s' % (res_from, pos, res_to) # If this is the only mutation and there are no details # then this is a generic mutant if not mut_str and len(agent.mutations) == 1: is_generic = True break mut_strs.append(mut_str) if is_generic: agent_str = 'mutated ' + agent_str else: mut_strs = '/'.join(mut_strs) agent_str = '%s-%s' % (agent_str, mut_strs) # Handle location if agent.location is not None: agent_str += ' in the ' + agent.location if not agent.mods and not agent.bound_conditions and not agent.activity: return agent_str # Handle bound conditions bound_to = [bc.agent.name for bc in agent.bound_conditions if bc.is_bound] not_bound_to = [bc.agent.name for bc in agent.bound_conditions if not bc.is_bound] if bound_to: agent_str += ' bound to ' + _join_list(bound_to) if not_bound_to: agent_str += ' and not bound to ' +\ _join_list(not_bound_to) else: if not_bound_to: agent_str += ' not bound to ' +\ _join_list(not_bound_to) # Handle modification conditions if agent.mods: # Special case if len(agent.mods) == 1 and agent.mods[0].position is None: prefix = _mod_state_str(agent.mods[0].mod_type) if agent.mods[0].residue is not None: residue_str =\ ist.amino_acids[agent.mods[0].residue]['full_name'] prefix = residue_str + '-' + prefix agent_str = prefix + ' ' + agent_str else: if agent.bound_conditions: agent_str += ' and' agent_str += ' %s on ' % _mod_state_str(agent.mods[0].mod_type) mod_lst = [] for m in agent.mods: if m.position is None: if m.residue is not None: residue_str =\ 
ist.amino_acids[m.residue]['full_name'] mod_lst.append(residue_str) else: mod_lst.append('an unknown residue') elif m.position is not None and m.residue is None: mod_lst.append('amino acid %s' % m.position) else: mod_lst.append(m.residue + m.position) agent_str += _join_list(mod_lst) # Handle activity conditions if agent.activity is not None: # Get the modifier specific to the activity type, if any pre_prefix = \ activity_type_prefix.get(agent.activity.activity_type, '') if agent.activity.is_active: prefix = pre_prefix + 'active' else: # See if there is a special override for the inactive form if agent.activity.activity_type in inactivity_type_prefix_override: pre_prefix = inactivity_type_prefix_override[ agent.activity.activity_type] prefix = pre_prefix + 'inactive' agent_str = prefix + ' ' + agent_str return agent_str
[ "Assemble an Agent object to text." ]
Please provide a description of the function:def _join_list(lst, oxford=False): if len(lst) > 2: s = ', '.join(lst[:-1]) if oxford: s += ',' s += ' and ' + lst[-1] elif len(lst) == 2: s = lst[0] + ' and ' + lst[1] elif len(lst) == 1: s = lst[0] else: s = '' return s
[ "Join a list of words in a gramatically correct way." ]
Please provide a description of the function:def _assemble_activeform(stmt): subj_str = _assemble_agent_str(stmt.agent) if stmt.is_active: is_active_str = 'active' else: is_active_str = 'inactive' if stmt.activity == 'activity': stmt_str = subj_str + ' is ' + is_active_str elif stmt.activity == 'kinase': stmt_str = subj_str + ' is kinase-' + is_active_str elif stmt.activity == 'phosphatase': stmt_str = subj_str + ' is phosphatase-' + is_active_str elif stmt.activity == 'catalytic': stmt_str = subj_str + ' is catalytically ' + is_active_str elif stmt.activity == 'transcription': stmt_str = subj_str + ' is transcriptionally ' + is_active_str elif stmt.activity == 'gtpbound': stmt_str = subj_str + ' is GTP-bound ' + is_active_str return _make_sentence(stmt_str)
[ "Assemble ActiveForm statements into text." ]
Please provide a description of the function:def _assemble_modification(stmt): sub_str = _assemble_agent_str(stmt.sub) if stmt.enz is not None: enz_str = _assemble_agent_str(stmt.enz) if _get_is_direct(stmt): mod_str = ' ' + _mod_process_verb(stmt) + ' ' else: mod_str = ' leads to the ' + _mod_process_noun(stmt) + ' of ' stmt_str = enz_str + mod_str + sub_str else: stmt_str = sub_str + ' is ' + _mod_state_stmt(stmt) if stmt.residue is not None: if stmt.position is None: mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name'] else: mod_str = 'on ' + stmt.residue + stmt.position else: mod_str = '' stmt_str += ' ' + mod_str return _make_sentence(stmt_str)
[ "Assemble Modification statements into text." ]
Please provide a description of the function:def _assemble_association(stmt): member_strs = [_assemble_agent_str(m.concept) for m in stmt.members] stmt_str = member_strs[0] + ' is associated with ' + \ _join_list(member_strs[1:]) return _make_sentence(stmt_str)
[ "Assemble Association statements into text." ]
Please provide a description of the function:def _assemble_complex(stmt): member_strs = [_assemble_agent_str(m) for m in stmt.members] stmt_str = member_strs[0] + ' binds ' + _join_list(member_strs[1:]) return _make_sentence(stmt_str)
[ "Assemble Complex statements into text." ]
Please provide a description of the function:def _assemble_autophosphorylation(stmt): enz_str = _assemble_agent_str(stmt.enz) stmt_str = enz_str + ' phosphorylates itself' if stmt.residue is not None: if stmt.position is None: mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name'] else: mod_str = 'on ' + stmt.residue + stmt.position else: mod_str = '' stmt_str += ' ' + mod_str return _make_sentence(stmt_str)
[ "Assemble Autophosphorylation statements into text." ]
Please provide a description of the function:def _assemble_regulate_activity(stmt): subj_str = _assemble_agent_str(stmt.subj) obj_str = _assemble_agent_str(stmt.obj) if stmt.is_activation: rel_str = ' activates ' else: rel_str = ' inhibits ' stmt_str = subj_str + rel_str + obj_str return _make_sentence(stmt_str)
[ "Assemble RegulateActivity statements into text." ]
Please provide a description of the function:def _assemble_regulate_amount(stmt): obj_str = _assemble_agent_str(stmt.obj) if stmt.subj is not None: subj_str = _assemble_agent_str(stmt.subj) if isinstance(stmt, ist.IncreaseAmount): rel_str = ' increases the amount of ' elif isinstance(stmt, ist.DecreaseAmount): rel_str = ' decreases the amount of ' stmt_str = subj_str + rel_str + obj_str else: if isinstance(stmt, ist.IncreaseAmount): stmt_str = obj_str + ' is produced' elif isinstance(stmt, ist.DecreaseAmount): stmt_str = obj_str + ' is degraded' return _make_sentence(stmt_str)
[ "Assemble RegulateAmount statements into text." ]
Please provide a description of the function:def _assemble_translocation(stmt): agent_str = _assemble_agent_str(stmt.agent) stmt_str = agent_str + ' translocates' if stmt.from_location is not None: stmt_str += ' from the ' + stmt.from_location if stmt.to_location is not None: stmt_str += ' to the ' + stmt.to_location return _make_sentence(stmt_str)
[ "Assemble Translocation statements into text." ]
Please provide a description of the function:def _assemble_gap(stmt): subj_str = _assemble_agent_str(stmt.gap) obj_str = _assemble_agent_str(stmt.ras) stmt_str = subj_str + ' is a GAP for ' + obj_str return _make_sentence(stmt_str)
[ "Assemble Gap statements into text." ]
Please provide a description of the function:def _assemble_gef(stmt): subj_str = _assemble_agent_str(stmt.gef) obj_str = _assemble_agent_str(stmt.ras) stmt_str = subj_str + ' is a GEF for ' + obj_str return _make_sentence(stmt_str)
[ "Assemble Gef statements into text." ]
Please provide a description of the function:def _assemble_conversion(stmt): reactants = _join_list([_assemble_agent_str(r) for r in stmt.obj_from]) products = _join_list([_assemble_agent_str(r) for r in stmt.obj_to]) if stmt.subj is not None: subj_str = _assemble_agent_str(stmt.subj) stmt_str = '%s catalyzes the conversion of %s into %s' % \ (subj_str, reactants, products) else: stmt_str = '%s is converted into %s' % (reactants, products) return _make_sentence(stmt_str)
[ "Assemble a Conversion statement into text." ]
Please provide a description of the function:def _assemble_influence(stmt): subj_str = _assemble_agent_str(stmt.subj.concept) obj_str = _assemble_agent_str(stmt.obj.concept) # Note that n is prepended to increase to make it "an increase" if stmt.subj.delta['polarity'] is not None: subj_delta_str = ' decrease' if stmt.subj.delta['polarity'] == -1 \ else 'n increase' subj_str = 'a%s in %s' % (subj_delta_str, subj_str) if stmt.obj.delta['polarity'] is not None: obj_delta_str = ' decrease' if stmt.obj.delta['polarity'] == -1 \ else 'n increase' obj_str = 'a%s in %s' % (obj_delta_str, obj_str) stmt_str = '%s causes %s' % (subj_str, obj_str) return _make_sentence(stmt_str)
[ "Assemble an Influence statement into text." ]
Please provide a description of the function:def _make_sentence(txt): #Make sure first letter is capitalized txt = txt.strip(' ') txt = txt[0].upper() + txt[1:] + '.' return txt
[ "Make a sentence from a piece of text." ]
Please provide a description of the function:def _get_is_hypothesis(stmt): '''Returns true if there is evidence that the statement is only hypothetical. If all of the evidences associated with the statement indicate a hypothetical interaction then we assume the interaction is hypothetical.''' for ev in stmt.evidence: if not ev.epistemics.get('hypothesis') is True: return True return False
[]
Please provide a description of the function:def make_model(self): stmt_strs = [] for stmt in self.statements: if isinstance(stmt, ist.Modification): stmt_strs.append(_assemble_modification(stmt)) elif isinstance(stmt, ist.Autophosphorylation): stmt_strs.append(_assemble_autophosphorylation(stmt)) elif isinstance(stmt, ist.Association): stmt_strs.append(_assemble_association(stmt)) elif isinstance(stmt, ist.Complex): stmt_strs.append(_assemble_complex(stmt)) elif isinstance(stmt, ist.Influence): stmt_strs.append(_assemble_influence(stmt)) elif isinstance(stmt, ist.RegulateActivity): stmt_strs.append(_assemble_regulate_activity(stmt)) elif isinstance(stmt, ist.RegulateAmount): stmt_strs.append(_assemble_regulate_amount(stmt)) elif isinstance(stmt, ist.ActiveForm): stmt_strs.append(_assemble_activeform(stmt)) elif isinstance(stmt, ist.Translocation): stmt_strs.append(_assemble_translocation(stmt)) elif isinstance(stmt, ist.Gef): stmt_strs.append(_assemble_gef(stmt)) elif isinstance(stmt, ist.Gap): stmt_strs.append(_assemble_gap(stmt)) elif isinstance(stmt, ist.Conversion): stmt_strs.append(_assemble_conversion(stmt)) else: logger.warning('Unhandled statement type: %s.' % type(stmt)) if stmt_strs: return ' '.join(stmt_strs) else: return ''
[ "Assemble text from the set of collected INDRA Statements.\n\n Returns\n -------\n stmt_strs : str\n Return the assembled text as unicode string. By default, the text\n is a single string consisting of one or more sentences with\n periods at the end.\n " ]
Please provide a description of the function:def add_statements(self, stmts): for stmt in stmts: if not self.statement_exists(stmt): self.statements.append(stmt)
[ "Add INDRA Statements to the assembler's list of statements.\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of :py:class:`indra.statements.Statement`\n to be added to the statement list of the assembler.\n " ]
Please provide a description of the function:def make_model(self): ppa = PysbPreassembler(self.statements) ppa.replace_activities() self.statements = ppa.statements self.sbgn = emaker.sbgn() self._map = emaker.map() self.sbgn.append(self._map) for stmt in self.statements: if isinstance(stmt, Modification): self._assemble_modification(stmt) elif isinstance(stmt, RegulateActivity): self._assemble_regulateactivity(stmt) elif isinstance(stmt, RegulateAmount): self._assemble_regulateamount(stmt) elif isinstance(stmt, Complex): self._assemble_complex(stmt) elif isinstance(stmt, ActiveForm): #self._assemble_activeform(stmt) pass else: logger.warning("Unhandled Statement type %s" % type(stmt)) continue sbgn_str = self.print_model() return sbgn_str
[ "Assemble the SBGN model from the collected INDRA Statements.\n\n This method assembles an SBGN model from the set of INDRA Statements.\n The assembled model is set as the assembler's sbgn attribute (it is\n represented as an XML ElementTree internally). The model is returned\n as a serialized XML string.\n\n Returns\n -------\n sbgn_str : str\n The XML serialized SBGN model.\n " ]
Please provide a description of the function:def print_model(self, pretty=True, encoding='utf8'): return lxml.etree.tostring(self.sbgn, pretty_print=pretty, encoding=encoding, xml_declaration=True)
[ "Return the assembled SBGN model as an XML string.\n\n Parameters\n ----------\n pretty : Optional[bool]\n If True, the SBGN string is formatted with indentation (for human\n viewing) otherwise no indentation is used. Default: True\n\n Returns\n -------\n sbgn_str : bytes (str in Python 2)\n An XML string representation of the SBGN model.\n " ]
Please provide a description of the function:def save_model(self, file_name='model.sbgn'): model = self.print_model() with open(file_name, 'wb') as fh: fh.write(model)
[ "Save the assembled SBGN model in a file.\n\n Parameters\n ----------\n file_name : Optional[str]\n The name of the file to save the SBGN network to.\n Default: model.sbgn\n " ]
Please provide a description of the function:def _glyph_for_complex_pattern(self, pattern): # Make the main glyph for the agent monomer_glyphs = [] for monomer_pattern in pattern.monomer_patterns: glyph = self._glyph_for_monomer_pattern(monomer_pattern) monomer_glyphs.append(glyph) if len(monomer_glyphs) > 1: pattern.matches_key = lambda: str(pattern) agent_id = self._make_agent_id(pattern) complex_glyph = \ emaker.glyph(emaker.bbox(**self.complex_style), class_('complex'), id=agent_id) for glyph in monomer_glyphs: glyph.attrib['id'] = agent_id + glyph.attrib['id'] complex_glyph.append(glyph) return complex_glyph return monomer_glyphs[0]
[ "Add glyph and member glyphs for a PySB ComplexPattern." ]
Please provide a description of the function:def _glyph_for_monomer_pattern(self, pattern): pattern.matches_key = lambda: str(pattern) agent_id = self._make_agent_id(pattern) # Handle sources and sinks if pattern.monomer.name in ('__source', '__sink'): return None # Handle molecules glyph = emaker.glyph(emaker.label(text=pattern.monomer.name), emaker.bbox(**self.monomer_style), class_('macromolecule'), id=agent_id) # Temporarily remove this # Add a glyph for type #type_glyph = emaker.glyph(emaker.label(text='mt:prot'), # class_('unit of information'), # emaker.bbox(**self.entity_type_style), # id=self._make_id()) #glyph.append(type_glyph) for site, value in pattern.site_conditions.items(): if value is None or isinstance(value, int): continue # Make some common abbreviations if site == 'phospho': site = 'p' elif site == 'activity': site = 'act' if value == 'active': value = 'a' elif value == 'inactive': value = 'i' state = emaker.state(variable=site, value=value) state_glyph = \ emaker.glyph(state, emaker.bbox(**self.entity_state_style), class_('state variable'), id=self._make_id()) glyph.append(state_glyph) return glyph
[ "Add glyph for a PySB MonomerPattern." ]
Please provide a description of the function:def load_go_graph(go_fname): global _go_graph if _go_graph is None: _go_graph = rdflib.Graph() logger.info("Parsing GO OWL file") _go_graph.parse(os.path.abspath(go_fname)) return _go_graph
[ "Load the GO data from an OWL file and parse into an RDF graph.\n\n Parameters\n ----------\n go_fname : str\n Path to the GO OWL file. Can be downloaded from\n http://geneontology.org/ontology/go.owl.\n\n Returns\n -------\n rdflib.Graph\n RDF graph containing GO data.\n " ]
Please provide a description of the function:def update_id_mappings(g): g = load_go_graph(go_owl_path) query = _prefixes + logger.info("Querying for GO ID mappings") res = g.query(query) mappings = [] for id_lit, label_lit in sorted(res, key=lambda x: x[0]): mappings.append((id_lit.value, label_lit.value)) # Write to file write_unicode_csv(go_mappings_file, mappings, delimiter='\t')
[ "Compile all ID->label mappings and save to a TSV file.\n\n Parameters\n ----------\n g : rdflib.Graph\n RDF graph containing GO data.\n ", "\n SELECT ?id ?label\n WHERE {\n ?class oboInOwl:id ?id .\n ?class rdfs:label ?label\n }\n " ]
Please provide a description of the function:def get_default_ndex_cred(ndex_cred): if ndex_cred: username = ndex_cred.get('user') password = ndex_cred.get('password') if username is not None and password is not None: return username, password username = get_config('NDEX_USERNAME') password = get_config('NDEX_PASSWORD') return username, password
[ "Gets the NDEx credentials from the dict, or tries the environment if None" ]
Please provide a description of the function:def send_request(ndex_service_url, params, is_json=True, use_get=False): if use_get: res = requests.get(ndex_service_url, json=params) else: res = requests.post(ndex_service_url, json=params) status = res.status_code # If response is immediate, we get 200 if status == 200: if is_json: return res.json() else: return res.text # If there is a continuation of the message we get status 300, handled below. # Otherwise we return None. elif status != 300: logger.error('Request returned with code %d' % status) return None # In case the response is not immediate, a task ID can be used to get # the result. task_id = res.json().get('task_id') logger.info('NDEx task submitted...') time_used = 0 try: while status != 200: res = requests.get(ndex_base_url + '/task/' + task_id) status = res.status_code if status != 200: time.sleep(5) time_used += 5 except KeyError: next return None logger.info('NDEx task complete.') if is_json: return res.json() else: return res.text
[ "Send a request to the NDEx server.\n\n Parameters\n ----------\n ndex_service_url : str\n The URL of the service to use for the request.\n params : dict\n A dictionary of parameters to send with the request. Parameter keys\n differ based on the type of request.\n is_json : bool\n True if the response is in json format, otherwise it is assumed to be\n text. Default: False\n use_get : bool\n True if the request needs to use GET instead of POST.\n\n Returns\n -------\n res : str\n Depending on the type of service and the is_json parameter, this\n function either returns a text string or a json dict.\n " ]
Please provide a description of the function:def create_network(cx_str, ndex_cred=None, private=True): username, password = get_default_ndex_cred(ndex_cred) nd = ndex2.client.Ndex2('http://public.ndexbio.org', username=username, password=password) cx_stream = io.BytesIO(cx_str.encode('utf-8')) try: logger.info('Uploading network to NDEx.') network_uri = nd.save_cx_stream_as_new_network(cx_stream) except Exception as e: logger.error('Could not upload network to NDEx.') logger.error(e) return network_id = network_uri.rsplit('/')[-1] if not private: nd.make_network_public(network_id) logger.info('The UUID for the uploaded network is: %s' % network_id) logger.info('View at: http://ndexbio.org/#/network/%s' % network_id) return network_id
[ "Creates a new NDEx network of the assembled CX model.\n\n To upload the assembled CX model to NDEx, you need to have\n a registered account on NDEx (http://ndexbio.org/) and have\n the `ndex` python package installed. The uploaded network\n is private by default.\n\n Parameters\n ----------\n ndex_cred : dict\n A dictionary with the following entries:\n 'user': NDEx user name\n 'password': NDEx password\n\n Returns\n -------\n network_id : str\n The UUID of the NDEx network that was created by uploading\n the assembled CX model.\n " ]
Please provide a description of the function:def update_network(cx_str, network_id, ndex_cred=None): server = 'http://public.ndexbio.org' username, password = get_default_ndex_cred(ndex_cred) nd = ndex2.client.Ndex2(server, username, password) try: logger.info('Getting network summary...') summary = nd.get_network_summary(network_id) except Exception as e: logger.error('Could not get NDEx network summary.') logger.error(e) return # Update network content try: logger.info('Updating network...') cx_stream = io.BytesIO(cx_str.encode('utf-8')) nd.update_cx_network(cx_stream, network_id) except Exception as e: logger.error('Could not update NDEx network.') logger.error(e) return # Update network profile ver_str = summary.get('version') new_ver = _increment_ndex_ver(ver_str) profile = {'name': summary.get('name'), 'description': summary.get('description'), 'version': new_ver, } logger.info('Updating NDEx network (%s) profile to %s', network_id, profile) profile_retries = 5 for _ in range(profile_retries): try: time.sleep(5) nd.update_network_profile(network_id, profile) break except Exception as e: logger.error('Could not update NDEx network profile.') logger.error(e) set_style(network_id, ndex_cred)
[ "Update an existing CX network on NDEx with new CX content.\n\n Parameters\n ----------\n cx_str : str\n String containing the CX content.\n network_id : str\n UUID of the network on NDEx.\n ndex_cred : dict\n A dictionary with the following entries:\n 'user': NDEx user name\n 'password': NDEx password\n " ]
Please provide a description of the function:def set_style(network_id, ndex_cred=None, template_id=None): if not template_id: template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf" server = 'http://public.ndexbio.org' username, password = get_default_ndex_cred(ndex_cred) source_network = ndex2.create_nice_cx_from_server(username=username, password=password, uuid=network_id, server=server) source_network.apply_template(server, template_id) source_network.update_to(network_id, server=server, username=username, password=password)
[ "Set the style of the network to a given template network's style\n\n Parameters\n ----------\n network_id : str\n The UUID of the NDEx network whose style is to be changed.\n ndex_cred : dict\n A dictionary of NDEx credentials.\n template_id : Optional[str]\n The UUID of the NDEx network whose style is used on the\n network specified in the first argument.\n " ]
Please provide a description of the function:def initialize(self, cfg_file=None, mode=None): self.sim = ScipyOdeSimulator(self.model) self.state = numpy.array(copy.copy(self.sim.initials)[0]) self.time = numpy.array(0.0) self.status = 'initialized'
[ "Initialize the model for simulation, possibly given a config file.\n\n Parameters\n ----------\n cfg_file : Optional[str]\n The name of the configuration file to load, optional.\n " ]
Please provide a description of the function:def update(self, dt=None): # EMELI passes dt = -1 so we need to handle that here dt = dt if (dt is not None and dt > 0) else self.dt tspan = [0, dt] # Run simulaton with initials set to current state res = self.sim.run(tspan=tspan, initials=self.state) # Set the state based on the result here self.state = res.species[-1] self.time += dt if self.time > self.stop_time: self.DONE = True print((self.time, self.state)) self.time_course.append((self.time.copy(), self.state.copy()))
[ "Simulate the model for a given time interval.\n\n Parameters\n ----------\n dt : Optional[float]\n The time step to simulate, if None, the default built-in time step\n is used.\n " ]
Please provide a description of the function:def set_value(self, var_name, value): if var_name in self.outside_name_map: var_name = self.outside_name_map[var_name] print('%s=%.5f' % (var_name, 1e9*value)) if var_name == 'Precipitation': value = 1e9*value species_idx = self.species_name_map[var_name] self.state[species_idx] = value
[ "Set the value of a given variable to a given value.\n\n Parameters\n ----------\n var_name : str\n The name of the variable in the model whose value should be set.\n\n value : float\n The value the variable should be set to\n " ]
Please provide a description of the function:def get_value(self, var_name): if var_name in self.outside_name_map: var_name = self.outside_name_map[var_name] species_idx = self.species_name_map[var_name] return self.state[species_idx]
[ "Return the value of a given variable.\n\n Parameters\n ----------\n var_name : str\n The name of the variable whose value should be returned\n\n Returns\n -------\n value : float\n The value of the given variable in the current state\n " ]
Please provide a description of the function:def get_input_var_names(self): in_vars = copy.copy(self.input_vars) for idx, var in enumerate(in_vars): if self._map_in_out(var) is not None: in_vars[idx] = self._map_in_out(var) return in_vars
[ "Return a list of variables names that can be set as input.\n\n Returns\n -------\n var_names : list[str]\n A list of variable names that can be set from the outside\n " ]
Please provide a description of the function:def get_output_var_names(self): # Return all the variables that aren't input variables all_vars = list(self.species_name_map.keys()) output_vars = list(set(all_vars) - set(self.input_vars)) # Re-map to outside var names if needed for idx, var in enumerate(output_vars): if self._map_in_out(var) is not None: output_vars[idx] = self._map_in_out(var) return output_vars
[ "Return a list of variables names that can be read as output.\n\n Returns\n -------\n var_names : list[str]\n A list of variable names that can be read from the outside\n " ]
Please provide a description of the function:def make_repository_component(self): component = etree.Element('component') comp_name = etree.Element('comp_name') comp_name.text = self.model.name component.append(comp_name) mod_path = etree.Element('module_path') mod_path.text = os.getcwd() component.append(mod_path) mod_name = etree.Element('module_name') mod_name.text = self.model.name component.append(mod_name) class_name = etree.Element('class_name') class_name.text = 'model_class' component.append(class_name) model_name = etree.Element('model_name') model_name.text = self.model.name component.append(model_name) lang = etree.Element('language') lang.text = 'python' component.append(lang) ver = etree.Element('version') ver.text = self.get_attribute('version') component.append(ver) au = etree.Element('author') au.text = self.get_attribute('author_name') component.append(au) hu = etree.Element('help_url') hu.text = 'http://github.com/sorgerlab/indra' component.append(hu) for tag in ('cfg_template', 'time_step_type', 'time_units', 'grid_type', 'description', 'comp_type', 'uses_types'): elem = etree.Element(tag) elem.text = tag component.append(elem) return etree.tounicode(component, pretty_print=True)
[ "Return an XML string representing this BMI in a workflow.\n\n This description is required by EMELI to discover and load models.\n\n Returns\n -------\n xml : str\n String serialized XML representation of the component in the\n model repository.\n " ]
Please provide a description of the function:def export_into_python(self): pkl_path = self.model.name + '.pkl' with open(pkl_path, 'wb') as fh: pickle.dump(self, fh, protocol=2) py_str = % os.path.abspath(pkl_path) py_str = textwrap.dedent(py_str) py_path = self.model.name + '.py' with open(py_path, 'w') as fh: fh.write(py_str)
[ "Write the model into a pickle and create a module that loads it.\n\n The model basically exports itself as a pickle file and a Python\n file is then written which loads the pickle file. This allows importing\n the model in the simulation workflow.\n ", "\n import pickle\n with open('%s', 'rb') as fh:\n model_class = pickle.load(fh)\n " ]
Please provide a description of the function:def _map_in_out(self, inside_var_name): for out_name, in_name in self.outside_name_map.items(): if inside_var_name == in_name: return out_name return None
[ "Return the external name of a variable mapped from inside." ]