Code (string lengths 103 to 85.9k) | Summary (list lengths 0 to 94)
Please provide a description of the function:def add_reverse_effects(self): # TODO: generalize to other modification sites pos_mod_sites = {} neg_mod_sites = {} syntheses = [] degradations = [] for stmt in self.statements: if isinstance(stmt, Phosphorylation): agent = stmt.sub.name try: pos_mod_sites[agent].append((stmt.residue, stmt.position)) except KeyError: pos_mod_sites[agent] = [(stmt.residue, stmt.position)] elif isinstance(stmt, Dephosphorylation): agent = stmt.sub.name try: neg_mod_sites[agent].append((stmt.residue, stmt.position)) except KeyError: neg_mod_sites[agent] = [(stmt.residue, stmt.position)] elif isinstance(stmt, Influence): if stmt.overall_polarity() == 1: syntheses.append(stmt.obj.name) elif stmt.overall_polarity() == -1: degradations.append(stmt.obj.name) elif isinstance(stmt, IncreaseAmount): syntheses.append(stmt.obj.name) elif isinstance(stmt, DecreaseAmount): degradations.append(stmt.obj.name) new_stmts = [] for agent_name, pos_sites in pos_mod_sites.items(): neg_sites = neg_mod_sites.get(agent_name, []) no_neg_site = set(pos_sites).difference(set(neg_sites)) for residue, position in no_neg_site: st = Dephosphorylation(Agent('phosphatase'), Agent(agent_name), residue, position) new_stmts.append(st) for agent_name in syntheses: if agent_name not in degradations: st = DecreaseAmount(None, Agent(agent_name)) new_stmts.append(st) self.statements += new_stmts
[ "Add Statements for the reverse effects of some Statements.\n\n For instance, if a protein is phosphorylated but never dephosphorylated\n in the model, we add a generic dephosphorylation here. This step is\n usually optional in the assembly process.\n " ]
Please provide a description of the function:
def _get_uniprot_id(agent):
    up_id = agent.db_refs.get('UP')
    hgnc_id = agent.db_refs.get('HGNC')
    if up_id is None:
        if hgnc_id is None:
            # If both UniProt and HGNC refs are missing we can't
            # sequence check and so don't report a failure.
            return None
        # Try to get UniProt ID from HGNC
        up_id = hgnc_client.get_uniprot_id(hgnc_id)
        # If this fails, again, we can't sequence check
        if up_id is None:
            return None
    # If the UniProt ID is a list then choose the first one.
    if not isinstance(up_id, basestring) and \
            isinstance(up_id[0], basestring):
        up_id = up_id[0]
    return up_id
[ "Return the UniProt ID for an agent, looking up in HGNC if necessary.\n\n If the UniProt ID is a list then return the first ID by default.\n " ]
Please provide a description of the function:
def map_sites(self, stmts):
    valid_statements = []
    mapped_statements = []
    for stmt in stmts:
        mapped_stmt = self.map_stmt_sites(stmt)
        # If we got a MappedStatement as a return value, we add that to the
        # list of mapped statements, otherwise, the original Statement is
        # not invalid so we add it to the other list directly.
        if mapped_stmt is not None:
            mapped_statements.append(mapped_stmt)
        else:
            valid_statements.append(stmt)
    return valid_statements, mapped_statements
[ "Check a set of statements for invalid modification sites.\n\n Statements are checked against Uniprot reference sequences to determine\n if residues referred to by post-translational modifications exist at\n the given positions.\n\n If there is nothing amiss with a statement (modifications on any of the\n agents, modifications made in the statement, etc.), then the statement\n goes into the list of valid statements. If there is a problem with the\n statement, the offending modifications are looked up in the site map\n (:py:attr:`site_map`), and an instance of :py:class:`MappedStatement`\n is added to the list of mapped statements.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statement.Statement`\n The statements to check for site errors.\n\n Returns\n -------\n tuple\n 2-tuple containing (valid_statements, mapped_statements). The first\n element of the tuple is a list of valid statements\n (:py:class:`indra.statement.Statement`) that were not found to\n contain any site errors. The second element of the tuple is a list\n of mapped statements (:py:class:`MappedStatement`) with information\n on the incorrect sites and corresponding statements with correctly\n mapped sites.\n " ]
Please provide a description of the function:def _map_agent_sites(self, agent): # If there are no modifications on this agent, then we can return the # copy of the agent if agent is None or not agent.mods: return [], agent new_agent = deepcopy(agent) mapped_sites = [] # Now iterate over all the modifications and map each one for idx, mod_condition in enumerate(agent.mods): mapped_site = \ self._map_agent_mod(agent, mod_condition) # If we couldn't do the mapping or the mapped site isn't invalid # then we don't need to change the existing ModCondition if not mapped_site or mapped_site.not_invalid(): continue # Otherwise, if there is a mapping, we replace the old ModCondition # with the new one where only the residue and position are updated, # the mod type and the is modified flag are kept. if mapped_site.has_mapping(): mc = ModCondition(mod_condition.mod_type, mapped_site.mapped_res, mapped_site.mapped_pos, mod_condition.is_modified) new_agent.mods[idx] = mc # Finally, whether or not we have a mapping, we keep track of mapped # sites and make them available to the caller mapped_sites.append(mapped_site) return mapped_sites, new_agent
[ "Check an agent for invalid sites and update if necessary.\n\n Parameters\n ----------\n agent : :py:class:`indra.statements.Agent`\n Agent to check for invalid modification sites.\n\n Returns\n -------\n tuple\n The first element is a list of MappedSite objects, the second\n element is either the original Agent, if unchanged, or a copy\n of it.\n " ]
Please provide a description of the function:def _map_agent_mod(self, agent, mod_condition): # Get the UniProt ID of the agent, if not found, return up_id = _get_uniprot_id(agent) if not up_id: logger.debug("No uniprot ID for %s" % agent.name) return None # If no site information for this residue, skip if mod_condition.position is None or mod_condition.residue is None: return None # Otherwise, try to map it and return the mapped site mapped_site = \ self.map_to_human_ref(up_id, 'uniprot', mod_condition.residue, mod_condition.position, do_methionine_offset=self.do_methionine_offset, do_orthology_mapping=self.do_orthology_mapping, do_isoform_mapping=self.do_isoform_mapping) return mapped_site
[ "Map a single modification condition on an agent.\n\n Parameters\n ----------\n agent : :py:class:`indra.statements.Agent`\n Agent to check for invalid modification sites.\n mod_condition : :py:class:`indra.statements.ModCondition`\n Modification to check for validity and map.\n\n Returns\n -------\n protmapper.MappedSite or None\n A MappedSite object is returned if a UniProt ID was found for the\n agent, and if both the position and residue for the modification\n condition were available. Otherwise None is returned.\n " ]
Please provide a description of the function:def _get_graph_reductions(graph): def frontier(g, nd): if g.out_degree(nd) == 0: return set([nd]) else: frontiers = set() for n in g.successors(nd): frontiers = frontiers.union(frontier(graph, n)) return frontiers reductions = {} nodes_sort = list(networkx.algorithms.dag.topological_sort(graph)) frontiers = [frontier(graph, n) for n in nodes_sort] # This loop ensures that if a node n2 comes after node n1 in the topological # sort, and their frontiers are identical then n1 can be reduced to n2. # If their frontiers aren't identical, the reduction cannot be done. for i, n1 in enumerate(nodes_sort): for j, n2 in enumerate(nodes_sort): if i > j: continue if frontiers[i] == frontiers[j]: reductions[n1] = n2 return reductions
[ "Return transitive reductions on a DAG.\n\n This is used to reduce the set of activities of a BaseAgent to the most\n specific one(s) possible. For instance, if a BaseAgent is know to have\n 'activity', 'catalytic' and 'kinase' activity, then this function will\n return {'activity': 'kinase', 'catalytic': 'kinase', 'kinase': 'kinase'}\n as the set of reductions.\n ", "Return the nodes after nd in the topological sort that are at the\n lowest possible level of the topological sort." ]
Please provide a description of the function:def gather_explicit_activities(self): for stmt in self.statements: agents = stmt.agent_list() # Activity types given as ActivityConditions for agent in agents: if agent is not None and agent.activity is not None: agent_base = self._get_base(agent) agent_base.add_activity(agent.activity.activity_type) # Object activities given in RegulateActivity statements if isinstance(stmt, RegulateActivity): if stmt.obj is not None: obj_base = self._get_base(stmt.obj) obj_base.add_activity(stmt.obj_activity) # Activity types given in ActiveForms elif isinstance(stmt, ActiveForm): agent_base = self._get_base(stmt.agent) agent_base.add_activity(stmt.activity) if stmt.is_active: agent_base.add_active_state(stmt.activity, stmt.agent, stmt.evidence) else: agent_base.add_inactive_state(stmt.activity, stmt.agent, stmt.evidence)
[ "Aggregate all explicit activities and active forms of Agents.\n\n This function iterates over self.statements and extracts explicitly\n stated activity types and active forms for Agents.\n " ]
Please provide a description of the function:def gather_implicit_activities(self): for stmt in self.statements: if isinstance(stmt, Phosphorylation) or \ isinstance(stmt, Transphosphorylation) or \ isinstance(stmt, Autophosphorylation): if stmt.enz is not None: enz_base = self._get_base(stmt.enz) enz_base.add_activity('kinase') enz_base.add_active_state('kinase', stmt.enz.mods) elif isinstance(stmt, Dephosphorylation): if stmt.enz is not None: enz_base = self._get_base(stmt.enz) enz_base.add_activity('phosphatase') enz_base.add_active_state('phosphatase', stmt.enz.mods) elif isinstance(stmt, Modification): if stmt.enz is not None: enz_base = self._get_base(stmt.enz) enz_base.add_activity('catalytic') enz_base.add_active_state('catalytic', stmt.enz.mods) elif isinstance(stmt, SelfModification): if stmt.enz is not None: enz_base = self._get_base(stmt.enz) enz_base.add_activity('catalytic') enz_base.add_active_state('catalytic', stmt.enz.mods) elif isinstance(stmt, Gef): if stmt.gef is not None: gef_base = self._get_base(stmt.gef) gef_base.add_activity('gef') if stmt.gef.activity is not None: act = stmt.gef.activity.activity_type else: act = 'activity' gef_base.add_active_state(act, stmt.gef.mods) elif isinstance(stmt, Gap): if stmt.gap is not None: gap_base = self._get_base(stmt.gap) gap_base.add_activity('gap') if stmt.gap.activity is not None: act = stmt.gap.activity.activity_type else: act = 'activity' gap_base.add_active_state('act', stmt.gap.mods) elif isinstance(stmt, RegulateActivity): if stmt.subj is not None: subj_base = self._get_base(stmt.subj) subj_base.add_activity(stmt.j)
[ "Aggregate all implicit activities and active forms of Agents.\n\n Iterate over self.statements and collect the implied activities\n and active forms of Agents that appear in the Statements.\n\n Note that using this function to collect implied Agent activities can\n be risky. Assume, for instance, that a Statement from a reading\n system states that EGF bound to EGFR phosphorylates ERK. This would\n be interpreted as implicit evidence for the EGFR-bound form of EGF\n to have 'kinase' activity, which is clearly incorrect.\n\n In contrast the alternative pair of this function:\n gather_explicit_activities collects only explicitly stated activities.\n " ]
Please provide a description of the function:def require_active_forms(self): logger.info('Setting required active forms on %d statements...' % len(self.statements)) new_stmts = [] for stmt in self.statements: if isinstance(stmt, Modification): if stmt.enz is None: new_stmts.append(stmt) continue enz_base = self._get_base(stmt.enz) active_forms = enz_base.get_active_forms() if not active_forms: new_stmts.append(stmt) else: for af in active_forms: new_stmt = fast_deepcopy(stmt) new_stmt.uuid = str(uuid.uuid4()) evs = af.apply_to(new_stmt.enz) new_stmt.partial_evidence = evs new_stmts.append(new_stmt) elif isinstance(stmt, RegulateAmount) or \ isinstance(stmt, RegulateActivity): if stmt.subj is None: new_stmts.append(stmt) continue subj_base = self._get_base(stmt.subj) active_forms = subj_base.get_active_forms() if not active_forms: new_stmts.append(stmt) else: for af in active_forms: new_stmt = fast_deepcopy(stmt) new_stmt.uuid = str(uuid.uuid4()) evs = af.apply_to(new_stmt.subj) new_stmt.partial_evidence = evs new_stmts.append(new_stmt) else: new_stmts.append(stmt) self.statements = new_stmts return new_stmts
[ "Rewrites Statements with Agents' active forms in active positions.\n\n As an example, the enzyme in a Modification Statement can be expected\n to be in an active state. Similarly, subjects of RegulateAmount and\n RegulateActivity Statements can be expected to be in an active form.\n This function takes the collected active states of Agents in their\n corresponding BaseAgents and then rewrites other Statements to apply\n the active Agent states to them.\n\n Returns\n -------\n new_stmts : list[indra.statements.Statement]\n A list of Statements which includes the newly rewritten Statements.\n This list is also set as the internal Statement list of the\n MechLinker.\n " ]
Please provide a description of the function:def reduce_activities(self): for stmt in self.statements: agents = stmt.agent_list() for agent in agents: if agent is not None and agent.activity is not None: agent_base = self._get_base(agent) act_red = agent_base.get_activity_reduction( agent.activity.activity_type) if act_red is not None: agent.activity.activity_type = act_red if isinstance(stmt, RegulateActivity): if stmt.obj is not None: obj_base = self._get_base(stmt.obj) act_red = \ obj_base.get_activity_reduction(stmt.obj_activity) if act_red is not None: stmt.obj_activity = act_red elif isinstance(stmt, ActiveForm): agent_base = self._get_base(stmt.agent) act_red = agent_base.get_activity_reduction(stmt.activity) if act_red is not None: stmt.activity = act_red
[ "Rewrite the activity types referenced in Statements for consistency.\n\n Activity types are reduced to the most specific form whenever possible.\n For instance, if 'kinase' is the only specific activity type known\n for the BaseAgent of BRAF, its generic 'activity' forms are rewritten\n to 'kinase'.\n " ]
Please provide a description of the function:
def infer_complexes(stmts):
    interact_stmts = _get_statements_by_type(stmts, Modification)
    linked_stmts = []
    for mstmt in interact_stmts:
        if mstmt.enz is None:
            continue
        st = Complex([mstmt.enz, mstmt.sub], evidence=mstmt.evidence)
        linked_stmts.append(st)
    return linked_stmts
[ "Return inferred Complex from Statements implying physical interaction.\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of Statements to infer Complexes from.\n\n Returns\n -------\n linked_stmts : list[indra.mechlinker.LinkedStatement]\n A list of LinkedStatements representing the inferred Statements.\n " ]
Please provide a description of the function:def infer_activations(stmts): linked_stmts = [] af_stmts = _get_statements_by_type(stmts, ActiveForm) mod_stmts = _get_statements_by_type(stmts, Modification) for af_stmt, mod_stmt in itertools.product(*(af_stmts, mod_stmts)): # There has to be an enzyme and the substrate and the # agent of the active form have to match if mod_stmt.enz is None or \ (not af_stmt.agent.entity_matches(mod_stmt.sub)): continue # We now check the modifications to make sure they are consistent if not af_stmt.agent.mods: continue found = False for mc in af_stmt.agent.mods: if mc.mod_type == modclass_to_modtype[mod_stmt.__class__] and \ mc.residue == mod_stmt.residue and \ mc.position == mod_stmt.position: found = True if not found: continue # Collect evidence ev = mod_stmt.evidence # Finally, check the polarity of the ActiveForm if af_stmt.is_active: st = Activation(mod_stmt.enz, mod_stmt.sub, af_stmt.activity, evidence=ev) else: st = Inhibition(mod_stmt.enz, mod_stmt.sub, af_stmt.activity, evidence=ev) linked_stmts.append(LinkedStatement([af_stmt, mod_stmt], st)) return linked_stmts
[ "Return inferred RegulateActivity from Modification + ActiveForm.\n\n This function looks for combinations of Modification and ActiveForm\n Statements and infers Activation/Inhibition Statements from them.\n For example, if we know that A phosphorylates B, and the\n phosphorylated form of B is active, then we can infer that\n A activates B. This can also be viewed as having \"explained\" a given\n Activation/Inhibition Statement with a combination of more mechanistic\n Modification + ActiveForm Statements.\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of Statements to infer RegulateActivity from.\n\n Returns\n -------\n linked_stmts : list[indra.mechlinker.LinkedStatement]\n A list of LinkedStatements representing the inferred Statements.\n " ]
Please provide a description of the function:def infer_active_forms(stmts): linked_stmts = [] for act_stmt in _get_statements_by_type(stmts, RegulateActivity): # TODO: revise the conditions here if not (act_stmt.subj.activity is not None and act_stmt.subj.activity.activity_type == 'kinase' and act_stmt.subj.activity.is_active): continue matching = [] ev = act_stmt.evidence for mod_stmt in _get_statements_by_type(stmts, Modification): if mod_stmt.enz is not None: if mod_stmt.enz.entity_matches(act_stmt.subj) and \ mod_stmt.sub.entity_matches(act_stmt.obj): matching.append(mod_stmt) ev.extend(mod_stmt.evidence) if not matching: continue mods = [] for mod_stmt in matching: mod_type_name = mod_stmt.__class__.__name__.lower() if isinstance(mod_stmt, AddModification): is_modified = True else: is_modified = False mod_type_name = mod_type_name[2:] mc = ModCondition(mod_type_name, mod_stmt.residue, mod_stmt.position, is_modified) mods.append(mc) source_stmts = [act_stmt] + [m for m in matching] st = ActiveForm(Agent(act_stmt.obj.name, mods=mods, db_refs=act_stmt.obj.db_refs), act_stmt.obj_activity, act_stmt.is_activation, evidence=ev) linked_stmts.append(LinkedStatement(source_stmts, st)) logger.info('inferred: %s' % st) return linked_stmts
[ "Return inferred ActiveForm from RegulateActivity + Modification.\n\n This function looks for combinations of Activation/Inhibition\n Statements and Modification Statements, and infers an ActiveForm\n from them. For example, if we know that A activates B and\n A phosphorylates B, then we can infer that the phosphorylated form\n of B is active.\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of Statements to infer ActiveForms from.\n\n Returns\n -------\n linked_stmts : list[indra.mechlinker.LinkedStatement]\n A list of LinkedStatements representing the inferred Statements.\n " ]
Please provide a description of the function:def infer_modifications(stmts): linked_stmts = [] for act_stmt in _get_statements_by_type(stmts, RegulateActivity): for af_stmt in _get_statements_by_type(stmts, ActiveForm): if not af_stmt.agent.entity_matches(act_stmt.obj): continue mods = af_stmt.agent.mods # Make sure the ActiveForm only involves modified sites if af_stmt.agent.mutations or \ af_stmt.agent.bound_conditions or \ af_stmt.agent.location: continue if not af_stmt.agent.mods: continue for mod in af_stmt.agent.mods: evs = act_stmt.evidence + af_stmt.evidence for ev in evs: ev.epistemics['direct'] = False if mod.is_modified: mod_type_name = mod.mod_type else: mod_type_name = modtype_to_inverse[mod.mod_type] mod_class = modtype_to_modclass[mod_type_name] if not mod_class: continue st = mod_class(act_stmt.subj, act_stmt.obj, mod.residue, mod.position, evidence=evs) ls = LinkedStatement([act_stmt, af_stmt], st) linked_stmts.append(ls) logger.info('inferred: %s' % st) return linked_stmts
[ "Return inferred Modification from RegulateActivity + ActiveForm.\n\n This function looks for combinations of Activation/Inhibition Statements\n and ActiveForm Statements that imply a Modification Statement.\n For example, if we know that A activates B, and phosphorylated B is\n active, then we can infer that A leads to the phosphorylation of B.\n An additional requirement when making this assumption is that the\n activity of B should only be dependent on the modified state and not\n other context - otherwise the inferred Modification is not necessarily\n warranted.\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of Statements to infer Modifications from.\n\n Returns\n -------\n linked_stmts : list[indra.mechlinker.LinkedStatement]\n A list of LinkedStatements representing the inferred Statements.\n " ]
Please provide a description of the function:def replace_complexes(self, linked_stmts=None): if linked_stmts is None: linked_stmts = self.infer_complexes(self.statements) new_stmts = [] for stmt in self.statements: if not isinstance(stmt, Complex): new_stmts.append(stmt) continue found = False for linked_stmt in linked_stmts: if linked_stmt.refinement_of(stmt, hierarchies): found = True if not found: new_stmts.append(stmt) else: logger.info('Removing complex: %s' % stmt) self.statements = new_stmts
[ "Remove Complex Statements that can be inferred out.\n\n This function iterates over self.statements and looks for Complex\n Statements that either match or are refined by inferred Complex\n Statements that were linked (provided as the linked_stmts argument).\n It removes Complex Statements from self.statements that can be\n explained by the linked statements.\n\n Parameters\n ----------\n linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]\n A list of linked statements, optionally passed from outside.\n If None is passed, the MechLinker runs self.infer_complexes to\n infer Complexes and obtain a list of LinkedStatements that are\n then used for removing existing Complexes in self.statements.\n " ]
Please provide a description of the function:def replace_activations(self, linked_stmts=None): if linked_stmts is None: linked_stmts = self.infer_activations(self.statements) new_stmts = [] for stmt in self.statements: if not isinstance(stmt, RegulateActivity): new_stmts.append(stmt) continue found = False for linked_stmt in linked_stmts: inferred_stmt = linked_stmt.inferred_stmt if stmt.is_activation == inferred_stmt.is_activation and \ stmt.subj.entity_matches(inferred_stmt.subj) and \ stmt.obj.entity_matches(inferred_stmt.obj): found = True if not found: new_stmts.append(stmt) else: logger.info('Removing regulate activity: %s' % stmt) self.statements = new_stmts
[ "Remove RegulateActivity Statements that can be inferred out.\n\n This function iterates over self.statements and looks for\n RegulateActivity Statements that either match or are refined by\n inferred RegulateActivity Statements that were linked\n (provided as the linked_stmts argument).\n It removes RegulateActivity Statements from self.statements that can be\n explained by the linked statements.\n\n Parameters\n ----------\n linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]\n A list of linked statements, optionally passed from outside.\n If None is passed, the MechLinker runs self.infer_activations to\n infer RegulateActivities and obtain a list of LinkedStatements\n that are then used for removing existing Complexes\n in self.statements.\n " ]
Please provide a description of the function:
def get_create_base_agent(self, agent):
    try:
        base_agent = self.agents[agent.name]
    except KeyError:
        base_agent = BaseAgent(agent.name)
        self.agents[agent.name] = base_agent
    return base_agent
[ "Return BaseAgent from an Agent, creating it if needed.\n\n Parameters\n ----------\n agent : indra.statements.Agent\n\n Returns\n -------\n base_agent : indra.mechlinker.BaseAgent\n " ]
Please provide a description of the function:
def apply_to(self, agent):
    agent.bound_conditions = self.bound_conditions
    agent.mods = self.mods
    agent.mutations = self.mutations
    agent.location = self.location
    return self.evidence
[ "Apply this object's state to an Agent.\n\n Parameters\n ----------\n agent : indra.statements.Agent\n The agent to which the state should be applied\n " ]
Please provide a description of the function:
def submit_curation():
    if request.json is None:
        abort(Response('Missing application/json header.', 415))
    # Get input parameters
    corpus_id = request.json.get('corpus_id')
    curations = request.json.get('curations', {})
    try:
        curator.submit_curation(corpus_id, curations)
    except InvalidCorpusError:
        abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
        return
    return jsonify({})
[ "Submit curations for a given corpus.\n\n The submitted curations are handled to update the probability model but\n there is no return value here. The update_belief function can be called\n separately to calculate update belief scores.\n\n Parameters\n ----------\n corpus_id : str\n The ID of the corpus for which the curation is submitted.\n curations : dict\n A set of curations where each key is a Statement UUID in the given\n corpus and each key is 0 or 1 with 0 corresponding to incorrect and\n 1 corresponding to correct.\n " ]
Please provide a description of the function:
def update_beliefs():
    if request.json is None:
        abort(Response('Missing application/json header.', 415))
    # Get input parameters
    corpus_id = request.json.get('corpus_id')
    try:
        belief_dict = curator.update_beliefs(corpus_id)
    except InvalidCorpusError:
        abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
        return
    return jsonify(belief_dict)
[ "Return updated beliefs based on current probability model." ]
Please provide a description of the function:
def reset_scorer(self):
    self.scorer = get_eidos_bayesian_scorer()
    for corpus_id, corpus in self.corpora.items():
        corpus.curations = {}
[ "Reset the scorer used for couration." ]
Please provide a description of the function:
def get_corpus(self, corpus_id):
    try:
        corpus = self.corpora[corpus_id]
        return corpus
    except KeyError:
        raise InvalidCorpusError
[ "Return a corpus given an ID.\n\n If the corpus ID cannot be found, an InvalidCorpusError is raised.\n\n Parameters\n ----------\n corpus_id : str\n The ID of the corpus to return.\n\n Returns\n -------\n Corpus\n The corpus with the given ID.\n " ]
Please provide a description of the function:def submit_curation(self, corpus_id, curations): corpus = self.get_corpus(corpus_id) # Start tabulating the curation counts prior_counts = {} subtype_counts = {} # Take each curation from the input for uuid, correct in curations.items(): # Save the curation in the corpus # TODO: handle already existing curation stmt = corpus.statements.get(uuid) if stmt is None: logger.warning('%s is not in the corpus.' % uuid) continue corpus.curations[uuid] = correct # Now take all the evidences of the statement and assume that # they follow the correctness of the curation and contribute to # counts for their sources for ev in stmt.evidence: # Make the index in the curation count list idx = 0 if correct else 1 extraction_rule = ev.annotations.get('found_by') # If there is no extraction rule then we just score the source if not extraction_rule: try: prior_counts[ev.source_api][idx] += 1 except KeyError: prior_counts[ev.source_api] = [0, 0] prior_counts[ev.source_api][idx] += 1 # Otherwise we score the specific extraction rule else: try: subtype_counts[ev.source_api][extraction_rule][idx] \ += 1 except KeyError: if ev.source_api not in subtype_counts: subtype_counts[ev.source_api] = {} subtype_counts[ev.source_api][extraction_rule] = [0, 0] subtype_counts[ev.source_api][extraction_rule][idx] \ += 1 # Finally, we update the scorer with the new curation counts self.scorer.update_counts(prior_counts, subtype_counts)
[ "Submit correct/incorrect curations fo a given corpus.\n\n Parameters\n ----------\n corpus_id : str\n The ID of the corpus to which the curations apply.\n curations : dict\n A dict of curations with keys corresponding to Statement UUIDs and\n values corresponding to correct/incorrect feedback.\n " ]
Please provide a description of the function:
def update_beliefs(self, corpus_id):
    corpus = self.get_corpus(corpus_id)
    be = BeliefEngine(self.scorer)
    stmts = list(corpus.statements.values())
    be.set_prior_probs(stmts)
    # Here we set beliefs based on actual curation
    for uuid, correct in corpus.curations.items():
        stmt = corpus.statements.get(uuid)
        if stmt is None:
            logger.warning('%s is not in the corpus.' % uuid)
            continue
        stmt.belief = correct
    belief_dict = {st.uuid: st.belief for st in stmts}
    return belief_dict
[ "Return updated belief scores for a given corpus.\n\n Parameters\n ----------\n corpus_id : str\n The ID of the corpus for which beliefs are to be updated.\n\n Returns\n -------\n dict\n A dictionary of belief scores with keys corresponding to Statement\n UUIDs and values to new belief scores.\n " ]
Please provide a description of the function:
def get_python_list(scala_list):
    python_list = []
    for i in range(scala_list.length()):
        python_list.append(scala_list.apply(i))
    return python_list
[ "Return list from elements of scala.collection.immutable.List" ]
Please provide a description of the function:
def get_python_dict(scala_map):
    python_dict = {}
    keys = get_python_list(scala_map.keys().toList())
    for key in keys:
        python_dict[key] = scala_map.apply(key)
    return python_dict
[ "Return a dict from entries in a scala.collection.immutable.Map" ]
Please provide a description of the function:def get_python_json(scala_json): def convert_node(node): if node.__class__.__name__ in ('org.json4s.JsonAST$JValue', 'org.json4s.JsonAST$JObject'): # Make a dictionary and then convert each value values_raw = get_python_dict(node.values()) values = {} for k, v in values_raw.items(): values[k] = convert_node(v) return values elif node.__class__.__name__.startswith('scala.collection.immutable.Map') or \ node.__class__.__name__ == \ 'scala.collection.immutable.HashMap$HashTrieMap': values_raw = get_python_dict(node) values = {} for k, v in values_raw.items(): values[k] = convert_node(v) return values elif node.__class__.__name__ == 'org.json4s.JsonAST$JArray': entries_raw = get_python_list(node.values()) entries = [] for entry in entries_raw: entries.append(convert_node(entry)) return entries elif node.__class__.__name__ == 'scala.collection.immutable.$colon$colon': entries_raw = get_python_list(node) entries = [] for entry in entries_raw: entries.append(convert_node(entry)) return entries elif node.__class__.__name__ == 'scala.math.BigInt': return node.intValue() elif node.__class__.__name__ == 'scala.None$': return None elif node.__class__.__name__ == 'scala.collection.immutable.Nil$': return [] elif isinstance(node, (str, int, float)): return node else: logger.error('Cannot convert %s into Python' % node.__class__.__name__) return node.__class__.__name__ python_json = convert_node(scala_json) return python_json
[ "Return a JSON dict from a org.json4s.JsonAST" ]
Please provide a description of the function:
def get_heat_kernel(network_id):
    url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
    res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
    if res is None:
        logger.error('Could not get heat kernel for network %s.' % network_id)
        return None
    kernel_id = res.get('kernel_id')
    if kernel_id is None:
        logger.error('Could not get heat kernel for network %s.' % network_id)
        return None
    return kernel_id
[ "Return the identifier of a heat kernel calculated for a given network.\n\n Parameters\n ----------\n network_id : str\n The UUID of the network in NDEx.\n\n Returns\n -------\n kernel_id : str\n The identifier of the heat kernel calculated for the given network.\n " ]
Please provide a description of the function:def get_relevant_nodes(network_id, query_nodes): url = ndex_relevance + '/rank_entities' kernel_id = get_heat_kernel(network_id) if kernel_id is None: return None if isinstance(query_nodes, basestring): query_nodes = [query_nodes] params = {'identifier_set': query_nodes, 'kernel_id': kernel_id} res = ndex_client.send_request(url, params, is_json=True) if res is None: logger.error("ndex_client.send_request returned None.") return None ranked_entities = res.get('ranked_entities') if ranked_entities is None: logger.error('Could not get ranked entities.') return None return ranked_entities
[ "Return a set of network nodes relevant to a given query set.\n\n A heat diffusion algorithm is used on a pre-computed heat kernel for the\n given network which starts from the given query nodes. The nodes\n in the network are ranked according to heat score which is a measure\n of relevance with respect to the query nodes.\n\n Parameters\n ----------\n network_id : str\n The UUID of the network in NDEx.\n query_nodes : list[str]\n A list of node names with respect to which relevance is queried.\n\n Returns\n -------\n ranked_entities : list[(str, float)]\n A list containing pairs of node names and their relevance scores.\n " ]
Please provide a description of the function:
def _get_belief_package(stmt):
    # This list will contain the belief packages for the given statement
    belief_packages = []
    # Iterate over all the support parents
    for st in stmt.supports:
        # Recursively get all the belief packages of the parent
        parent_packages = _get_belief_package(st)
        package_stmt_keys = [pkg.statement_key for pkg in belief_packages]
        for package in parent_packages:
            # Only add this belief package if it hasn't already been added
            if package.statement_key not in package_stmt_keys:
                belief_packages.append(package)
    # Now make the Statement's own belief package and append it to the list
    belief_package = BeliefPackage(stmt.matches_key(), stmt.evidence)
    belief_packages.append(belief_package)
    return belief_packages
[ "Return the belief packages of a given statement recursively." ]
Please provide a description of the function:
def sample_statements(stmts, seed=None):
    if seed:
        numpy.random.seed(seed)
    new_stmts = []
    r = numpy.random.rand(len(stmts))
    for i, stmt in enumerate(stmts):
        if r[i] < stmt.belief:
            new_stmts.append(stmt)
    return new_stmts
[ "Return statements sampled according to belief.\n\n Statements are sampled independently according to their\n belief scores. For instance, a Staement with a belief\n score of 0.7 will end up in the returned Statement list\n with probability 0.7.\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of INDRA Statements to sample.\n seed : Optional[int]\n A seed for the random number generator used for sampling.\n\n Returns\n -------\n new_stmts : list[indra.statements.Statement]\n A list of INDRA Statements that were chosen by random sampling\n according to their respective belief scores.\n " ]
Please provide a description of the function:
def evidence_random_noise_prior(evidence, type_probs, subtype_probs):
    (stype, subtype) = tag_evidence_subtype(evidence)  # Get the subtype, if available
    # Return the subtype random noise prior, if available
    if subtype_probs is not None:
        if stype in subtype_probs:
            if subtype in subtype_probs[stype]:
                return subtype_probs[stype][subtype]
    # Fallback to just returning the overall evidence type random noise prior
    return type_probs[stype]
[ "Determines the random-noise prior probability for this evidence.\n\n If the evidence corresponds to a subtype, and that subtype has a curated\n prior noise probability, use that.\n\n Otherwise, gives the random-noise prior for the overall rule type.\n " ]
Please provide a description of the function:def score_evidence_list(self, evidences): def _score(evidences): if not evidences: return 0 # Collect all unique sources sources = [ev.source_api for ev in evidences] uniq_sources = numpy.unique(sources) # Calculate the systematic error factors given unique sources syst_factors = {s: self.prior_probs['syst'][s] for s in uniq_sources} # Calculate the radom error factors for each source rand_factors = {k: [] for k in uniq_sources} for ev in evidences: rand_factors[ev.source_api].append( evidence_random_noise_prior( ev, self.prior_probs['rand'], self.subtype_probs)) # The probability of incorrectness is the product of the # source-specific probabilities neg_prob_prior = 1 for s in uniq_sources: neg_prob_prior *= (syst_factors[s] + numpy.prod(rand_factors[s])) # Finally, the probability of correctness is one minus incorrect prob_prior = 1 - neg_prob_prior return prob_prior pos_evidence = [ev for ev in evidences if not ev.epistemics.get('negated')] neg_evidence = [ev for ev in evidences if ev.epistemics.get('negated')] pp = _score(pos_evidence) np = _score(neg_evidence) # The basic assumption is that the positive and negative evidence # can't simultaneously be correct. # There are two cases to consider. (1) If the positive evidence is # incorrect then there is no Statement and the belief should be 0, # irrespective of the negative evidence. # (2) If the positive evidence is correct and the negative evidence # is incorrect. # This amounts to the following formula: # 0 * (1-pp) + 1 * (pp * (1-np)) which we simplify below score = pp * (1 - np) return score
[ "Return belief score given a list of supporting evidences." ]
Please provide a description of the function:
def score_statement(self, st, extra_evidence=None):
    if extra_evidence is None:
        extra_evidence = []
    all_evidence = st.evidence + extra_evidence
    return self.score_evidence_list(all_evidence)
[ "Computes the prior belief probability for an INDRA Statement.\n\n The Statement is assumed to be de-duplicated. In other words,\n the Statement is assumed to have\n a list of Evidence objects that supports it. The prior probability of\n the Statement is calculated based on the number of Evidences it has\n and their sources.\n\n Parameters\n ----------\n st : indra.statements.Statement\n An INDRA Statements whose belief scores are to\n be calculated.\n extra_evidence : list[indra.statements.Evidence]\n A list of Evidences that are supporting the Statement (that aren't\n already included in the Statement's own evidence list.\n\n Returns\n -------\n belief_score : float\n The computed prior probability for the statement\n " ]
Please provide a description of the function:
def check_prior_probs(self, statements):
    sources = set()
    for stmt in statements:
        sources |= set([ev.source_api for ev in stmt.evidence])
    for err_type in ('rand', 'syst'):
        for source in sources:
            if source not in self.prior_probs[err_type]:
                msg = 'BeliefEngine missing probability parameter' + \
                    ' for source: %s' % source
                raise Exception(msg)
[ "Throw Exception if BeliefEngine parameter is missing.\n\n Make sure the scorer has all the information needed to compute\n belief scores of each statement in the provided list, and raises an\n exception otherwise.\n\n Parameters\n ----------\n statements : list[indra.statements.Statement]\n List of statements to check\n " ]
Please provide a description of the function:
def update_probs(self):
    # We deal with the prior probs first
    # This is a fixed assumed value for systematic error
    syst_error = 0.05
    prior_probs = {'syst': {}, 'rand': {}}
    for source, (p, n) in self.prior_counts.items():
        # Skip if there are no actual counts
        if n + p == 0:
            continue
        prior_probs['syst'][source] = syst_error
        prior_probs['rand'][source] = \
            1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
    # Next we deal with subtype probs based on counts
    subtype_probs = {}
    for source, entry in self.subtype_counts.items():
        for rule, (p, n) in entry.items():
            # Skip if there are no actual counts
            if n + p == 0:
                continue
            if source not in subtype_probs:
                subtype_probs[source] = {}
            subtype_probs[source][rule] = \
                1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
    # Finally we propagate this into the full probability
    # data structures of the parent class
    super(BayesianScorer, self).update_probs(prior_probs, subtype_probs)
[ "Update the internal probability values given the counts." ]
Please provide a description of the function:
def update_counts(self, prior_counts, subtype_counts):
    for source, (pos, neg) in prior_counts.items():
        if source not in self.prior_counts:
            self.prior_counts[source] = [0, 0]
        self.prior_counts[source][0] += pos
        self.prior_counts[source][1] += neg
    for source, subtype_dict in subtype_counts.items():
        if source not in self.subtype_counts:
            self.subtype_counts[source] = {}
        for subtype, (pos, neg) in subtype_dict.items():
            if subtype not in self.subtype_counts[source]:
                self.subtype_counts[source][subtype] = [0, 0]
            self.subtype_counts[source][subtype][0] += pos
            self.subtype_counts[source][subtype][1] += neg
    self.update_probs()
[ "Update the internal counts based on given new counts.\n\n Parameters\n ----------\n prior_counts : dict\n A dictionary of counts of the form [pos, neg] for\n each source.\n subtype_counts : dict\n A dictionary of counts of the form [pos, neg] for\n each subtype within a source.\n " ]
Please provide a description of the function:
def set_prior_probs(self, statements):
    self.scorer.check_prior_probs(statements)
    for st in statements:
        st.belief = self.scorer.score_statement(st)
[ "Sets the prior belief probabilities for a list of INDRA Statements.\n\n The Statements are assumed to be de-duplicated. In other words,\n each Statement in the list passed to this function is assumed to have\n a list of Evidence objects that support it. The prior probability of\n each Statement is calculated based on the number of Evidences it has\n and their sources.\n\n Parameters\n ----------\n statements : list[indra.statements.Statement]\n A list of INDRA Statements whose belief scores are to\n be calculated. Each Statement object's belief attribute is updated\n by this function.\n " ]
Please provide a description of the function:def set_hierarchy_probs(self, statements): def build_hierarchy_graph(stmts): g = networkx.DiGraph() for st1 in stmts: g.add_node(st1.matches_key(), stmt=st1) for st2 in st1.supported_by: g.add_node(st2.matches_key(), stmt=st2) g.add_edge(st2.matches_key(), st1.matches_key()) return g def get_ranked_stmts(g): node_ranks = networkx.algorithms.dag.topological_sort(g) node_ranks = reversed(list(node_ranks)) stmts = [g.node[n]['stmt'] for n in node_ranks] return stmts def assert_no_cycle(g): try: cyc = networkx.algorithms.cycles.find_cycle(g) except networkx.exception.NetworkXNoCycle: return msg = 'Cycle found in hierarchy graph: %s' % cyc assert False, msg g = build_hierarchy_graph(statements) assert_no_cycle(g) ranked_stmts = get_ranked_stmts(g) for st in ranked_stmts: bps = _get_belief_package(st) supporting_evidences = [] # NOTE: the last belief package in the list is this statement's own for bp in bps[:-1]: # Iterate over all the parent evidences and add only # non-negated ones for ev in bp.evidences: if not ev.epistemics.get('negated'): supporting_evidences.append(ev) # Now add the Statement's own evidence # Now score all the evidences belief = self.scorer.score_statement(st, supporting_evidences) st.belief = belief
[ "Sets hierarchical belief probabilities for INDRA Statements.\n\n The Statements are assumed to be in a hierarchical relation graph with\n the supports and supported_by attribute of each Statement object having\n been set.\n The hierarchical belief probability of each Statement is calculated\n based on its prior probability and the probabilities propagated from\n Statements supporting it in the hierarchy graph.\n\n Parameters\n ----------\n statements : list[indra.statements.Statement]\n A list of INDRA Statements whose belief scores are to\n be calculated. Each Statement object's belief attribute is updated\n by this function.\n ", "Return a DiGraph based on matches keys and Statement supports", "Return a topological sort of statement matches keys from a graph.\n ", "If the graph has cycles, throws AssertionError." ]
Please provide a description of the function:
def set_linked_probs(self, linked_statements):
    for st in linked_statements:
        source_probs = [s.belief for s in st.source_stmts]
        st.inferred_stmt.belief = numpy.prod(source_probs)
[ "Sets the belief probabilities for a list of linked INDRA Statements.\n\n The list of LinkedStatement objects is assumed to come from the\n MechanismLinker. The belief probability of the inferred Statement is\n assigned the joint probability of its source Statements.\n\n Parameters\n ----------\n linked_statements : list[indra.mechlinker.LinkedStatement]\n A list of INDRA LinkedStatements whose belief scores are to\n be calculated. The belief attribute of the inferred Statement in\n the LinkedStatement object is updated by this function.\n " ]
Please provide a description of the function:def get_agent_from_entity_info(entity_info): # This will be the default name. If we get a gene name, it will # override this rawtext name. raw_text = entity_info['entityText'] name = raw_text # Get the db refs. refs = {'TEXT': raw_text} ref_counts = Counter([entry['source'] for entry in entity_info['entityId']]) for source, count in ref_counts.items(): if source in ('Entrez', 'UniProt') and count > 1: logger.info('%s has %d entries for %s, skipping' % (raw_text, count, source)) return None, None muts = [] for id_dict in entity_info['entityId']: if id_dict['source'] == 'Entrez': refs['EGID'] = id_dict['idString'] hgnc_id = hgnc_client.get_hgnc_from_entrez(id_dict['idString']) if hgnc_id is not None: # Check against what we may have already inferred from # UniProt. If it disagrees with this, let it be. Inference # from Entrez isn't as reliable. if 'HGNC' in refs.keys(): if refs['HGNC'] != hgnc_id: msg = ('HGNC:%s previously set does not' ' match HGNC:%s from EGID:%s') % \ (refs['HGNC'], hgnc_id, refs['EGID']) logger.info(msg) else: refs['HGNC'] = hgnc_id elif id_dict['source'] == 'UniProt': refs['UP'] = id_dict['idString'] gene_name = uniprot_client.get_gene_name(id_dict['idString']) if gene_name is not None: name = gene_name hgnc_id = hgnc_client.get_hgnc_id(gene_name) if hgnc_id is not None: # Check to see if we have a conflict with an HGNC id # found from the Entrez id. If so, overwrite with this # one, in which we have greater faith. if 'HGNC' in refs.keys() and refs['HGNC'] != hgnc_id: msg = ('Inferred HGNC:%s from UP:%s does not' ' match HGNC:%s from EGID:%s') % \ (refs['HGNC'], refs['UP'], hgnc_id, refs['EGID']) logger.info(msg) refs['HGNC'] = hgnc_id elif id_dict['source'] in ('Tax', 'NCBI'): refs['TAX'] = id_dict['idString'] elif id_dict['source'] == 'CHEBI': refs['CHEBI'] = 'CHEBI:%s' % id_dict['idString'] # These we take as is elif id_dict['source'] in ('MESH', 'OMIM', 'CTD'): refs[id_dict['source']] = id_dict['idString'] # Handle mutations elif id_dict['source'] == 'Unk' and \ id_dict['entityType'] == 'ProteinMutation': # {'idString': 'p|SUB|Y|268|A', 'source': 'Unk', # 'tool': 'PubTator', 'entityType': 'ProteinMutation'} # Mpk1(Y268A)' if id_dict['idString'].startswith('p|SUB|'): try: # Handle special cases like p|SUB|A|30|P;RS#:104893878 parts = id_dict['idString'].split(';')[0].split('|') residue_from, pos, residue_to = parts[2:5] mut = MutCondition(pos, residue_from, residue_to) muts.append(mut) except Exception as e: logger.info('Could not process mutation %s' % id_dict['idString']) else: logger.info('Unhandled mutation: %s' % id_dict['idString']) else: logger.warning("Unhandled id type: {source}={idString}" .format(**id_dict)) raw_coords = (entity_info['charStart'], entity_info['charEnd']) return Agent(name, db_refs=refs, mutations=muts), raw_coords
[ "Return an INDRA Agent by processing an entity_info dict." ]
Please provide a description of the function:
def extract_statements(self):
    for p_info in self._json:
        para = RlimspParagraph(p_info, self.doc_id_type)
        self.statements.extend(para.get_statements())
    return
[ "Extract the statements from the json." ]
Please provide a description of the function:
def _get_agent(self, entity_id):
    if entity_id is None:
        return None
    entity_info = self._entity_dict.get(entity_id)
    if entity_info is None:
        logger.warning("Entity key did not resolve to entity.")
        return None
    return get_agent_from_entity_info(entity_info)
[ "Convert the entity dictionary into an INDRA Agent." ]
Please provide a description of the function:def _get_evidence(self, trigger_id, args, agent_coords, site_coords): trigger_info = self._entity_dict[trigger_id] # Get the sentence index from the trigger word. s_idx_set = {self._entity_dict[eid]['sentenceIndex'] for eid in args.values() if 'sentenceIndex' in self._entity_dict[eid]} if s_idx_set: i_min = min(s_idx_set) i_max = max(s_idx_set) text = '. '.join(self._sentences[i_min:(i_max+1)]) + '.' s_start = self._sentence_starts[i_min] annotations = { 'agents': {'coords': [_fix_coords(coords, s_start) for coords in agent_coords]}, 'trigger': {'coords': _fix_coords([trigger_info['charStart'], trigger_info['charEnd']], s_start)} } else: logger.info('Unable to get sentence index') annotations = {} text = None if site_coords: annotations['site'] = {'coords': _fix_coords(site_coords, s_start)} return Evidence(text_refs=self._text_refs.copy(), text=text, source_api='rlimsp', pmid=self._text_refs.get('PMID'), annotations=annotations)
[ "Get the evidence using the info in the trigger entity." ]
Please provide a description of the function:
def get_reader_classes(parent=Reader):
    children = parent.__subclasses__()
    descendants = children[:]
    for child in children:
        grandchildren = get_reader_classes(child)
        if grandchildren:
            descendants.remove(child)
            descendants.extend(grandchildren)
    return descendants
[ "Get all childless the descendants of a parent class, recursively." ]
Please provide a description of the function:
def get_reader_class(reader_name):
    for reader_class in get_reader_classes():
        if reader_class.name.lower() == reader_name.lower():
            return reader_class
    else:
        logger.error("No such reader: %s" % reader_name)
        return None
[ "Get a particular reader class by name." ]
Please provide a description of the function:
def from_file(cls, file_path, compressed=False, encoded=False):
    file_id = '.'.join(path.basename(file_path).split('.')[:-1])
    file_format = file_path.split('.')[-1]
    content = cls(file_id, file_format, compressed, encoded)
    content.file_exists = True
    content._location = path.dirname(file_path)
    return content
[ "Create a content object from a file path." ]
Please provide a description of the function:
def from_string(cls, id, format, raw_content, compressed=False, encoded=False):
    content = cls(id, format, compressed, encoded)
    content._raw_content = raw_content
    return content
[ "Create a Content object from string/bytes content." ]
Please provide a description of the function:
def change_id(self, new_id):
    self._load_raw_content()
    self._id = new_id
    self.get_filename(renew=True)
    self.get_filepath(renew=True)
    return
[ "Change the id of this content." ]
Please provide a description of the function:
def change_format(self, new_format):
    self._load_raw_content()
    self._format = new_format
    self.get_filename(renew=True)
    self.get_filepath(renew=True)
    return
[ "Change the format label of this content.\n\n Note that this does NOT actually alter the format of the content, only\n the label.\n " ]
Please provide a description of the function:
def set_location(self, new_location):
    self._load_raw_content()
    self._location = new_location
    self.get_filepath(renew=True)
    return
[ "Set/change the location of this content.\n\n Note that this does NOT change the actual location of the file. To do\n so, use the `copy_to` method.\n " ]
Please provide a description of the function:
def get_text(self):
    self._load_raw_content()
    if self._text is None:
        assert self._raw_content is not None
        ret_cont = self._raw_content
        if self.compressed:
            ret_cont = zlib.decompress(ret_cont, zlib.MAX_WBITS+16)
        if self.encoded:
            ret_cont = ret_cont.decode('utf-8')
        self._text = ret_cont
    assert self._text is not None
    return self._text
[ "Get the loaded, decompressed, and decoded text of this content." ]
Please provide a description of the function:
def get_filename(self, renew=False):
    if self._fname is None or renew:
        self._fname = '%s.%s' % (self._id, self._format)
    return self._fname
[ "Get the filename of this content.\n\n If the file name doesn't already exist, we created it as {id}.{format}.\n " ]
Please provide a description of the function:
def get_filepath(self, renew=False):
    if self._location is None or renew:
        self._location = '.'
    return path.join(self._location, self.get_filename())
[ "Get the file path, joining the name and location for this file.\n\n If no location is given, it is assumed to be \"here\", e.g. \".\".\n " ]
Please provide a description of the function:def get_statements(self, reprocess=False): if self._statements is None or reprocess: # Handle the case that there is no content. if self.content is None: self._statements = [] return [] # Map to the different processors. if self.reader == ReachReader.name: if self.format == formats.JSON: # Process the reach json into statements. json_str = json.dumps(self.content) processor = reach.process_json_str(json_str) else: raise ReadingError("Incorrect format for Reach output: %s." % self.format) elif self.reader == SparserReader.name: if self.format == formats.JSON: # Process the sparser content into statements processor = sparser.process_json_dict(self.content) if processor is not None: processor.set_statements_pmid(None) else: raise ReadingError("Sparser should only ever be JSON, not " "%s." % self.format) elif self.reader == TripsReader.name: processor = trips.process_xml(self.content) else: raise ReadingError("Unknown reader: %s." % self.reader) # Get the statements from the processor, if it was resolved. if processor is None: logger.error("Production of statements from %s failed for %s." % (self.reader, self.content_id)) stmts = [] else: stmts = processor.statements self._statements = stmts[:] else: stmts = self._statements[:] return stmts
[ "General method to create statements." ]
Please provide a description of the function:
def add_result(self, content_id, content, **kwargs):
    result_object = self.ResultClass(content_id, self.name, self.version,
                                     formats.JSON, content, **kwargs)
    self.results.append(result_object)
    return
[ "\"Add a result to the list of results." ]
Please provide a description of the function:
def _check_content(self, content_str):
    if self.do_content_check:
        space_ratio = float(content_str.count(' '))/len(content_str)
        if space_ratio > self.max_space_ratio:
            return "space-ratio: %f > %f" % (space_ratio,
                                             self.max_space_ratio)
        if len(content_str) > self.input_character_limit:
            return "too long: %d > %d" % (len(content_str),
                                          self.input_character_limit)
    return None
[ "Check if the content is likely to be successfully read." ]
Please provide a description of the function:def _join_json_files(cls, prefix, clear=False): filetype_list = ['entities', 'events', 'sentences'] json_dict = {} try: for filetype in filetype_list: fname = prefix + '.uaz.' + filetype + '.json' with open(fname, 'rt') as f: json_dict[filetype] = json.load(f) if clear: remove(fname) logger.debug("Removed %s." % fname) except IOError as e: logger.error( 'Failed to open JSON files for %s; REACH error?' % prefix ) logger.exception(e) return None return json_dict
[ "Join different REACH output JSON files into a single JSON object.\n\n The output of REACH is broken into three files that need to be joined\n before processing. Specifically, there will be three files of the form:\n `<prefix>.uaz.<subcategory>.json`.\n\n Parameters\n ----------\n prefix : str\n The absolute path up to the extensions that reach will add.\n clear : bool\n Default False - if True, delete the files as soon as they are\n loaded.\n\n Returns\n -------\n json_obj : dict\n The result of joining the files, keyed by the three subcategories.\n " ]
Please provide a description of the function:def _check_reach_env(): # Get the path to the REACH JAR path_to_reach = get_config('REACHPATH') if path_to_reach is None: path_to_reach = environ.get('REACHPATH', None) if path_to_reach is None or not path.exists(path_to_reach): raise ReachError( 'Reach path unset or invalid. Check REACHPATH environment var ' 'and/or config file.' ) logger.debug('Using REACH jar at: %s' % path_to_reach) # Get the reach version. reach_version = get_config('REACH_VERSION') if reach_version is None: reach_version = environ.get('REACH_VERSION', None) if reach_version is None: logger.debug('REACH version not set in REACH_VERSION') m = re.match('reach-(.*?)\.jar', path.basename(path_to_reach)) reach_version = re.sub('-SNAP.*?$', '', m.groups()[0]) logger.debug('Using REACH version: %s' % reach_version) return path_to_reach, reach_version
[ "Check that the environment supports runnig reach." ]
Please provide a description of the function:def prep_input(self, read_list): logger.info("Prepping input.") i = 0 for content in read_list: # Check the quality of the text, and skip if there are any issues. quality_issue = self._check_content(content.get_text()) if quality_issue is not None: logger.warning("Skipping %d due to: %s" % (content.get_id(), quality_issue)) continue # Look for things that are more like file names, rather than ids. cid = content.get_id() if isinstance(cid, str) and re.match('^\w*?\d+$', cid) is None: new_id = 'FILE%06d' % i i += 1 self.id_maps[new_id] = cid content.change_id(new_id) new_fpath = content.copy_to(self.input_dir) else: # Put the content in the appropriate directory. new_fpath = content.copy_to(self.input_dir) self.num_input += 1 logger.debug('%s saved for reading by reach.' % new_fpath) return
[ "Apply the readers to the content." ]
Please provide a description of the function:def get_output(self): logger.info("Getting outputs.") # Get the set of prefixes (each will correspond to three json files.) json_files = glob.glob(path.join(self.output_dir, '*.json')) json_prefixes = set() for json_file in json_files: # Remove .uaz.<subfile type>.json prefix = '.'.join(path.basename(json_file).split('.')[:-3]) json_prefixes.add(path.join(self.output_dir, prefix)) # Join each set of json files and store the json dict. for prefix in json_prefixes: base_prefix = path.basename(prefix) if base_prefix.isdecimal(): base_prefix = int(base_prefix) elif base_prefix in self.id_maps.keys(): base_prefix = self.id_maps[base_prefix] try: content = self._join_json_files(prefix, clear=True) except Exception as e: logger.exception(e) logger.error("Could not load result for prefix %s." % prefix) content = None self.add_result(base_prefix, content) logger.debug('Joined files for prefix %s.' % base_prefix) return self.results
[ "Get the output of a reading job as a list of filenames." ]
Please provide a description of the function:def clear_input(self): for item in listdir(self.input_dir): item_path = path.join(self.input_dir, item) if path.isfile(item_path): remove(item_path) logger.debug('Removed input %s.' % item_path) return
[ "Remove all the input files (at the end of a reading)." ]
Please provide a description of the function:def read(self, read_list, verbose=False, log=False): ret = [] mem_tot = _get_mem_total() if mem_tot is not None and mem_tot <= self.REACH_MEM + self.MEM_BUFFER: logger.error( "Too little memory to run reach. At least %s required." % (self.REACH_MEM + self.MEM_BUFFER) ) logger.info("REACH not run.") return ret # Prep the content self.prep_input(read_list) if self.num_input > 0: # Run REACH! logger.info("Beginning reach.") args = [ 'java', '-Dconfig.file=%s' % self.conf_file_path, '-jar', self.exec_path ] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) log_file_str = '' for line in iter(p.stdout.readline, b''): log_line = 'REACH: ' + line.strip().decode('utf8') if verbose: logger.info(log_line) if log: log_file_str += log_line + '\n' if log: with open('reach_run.log', 'ab') as f: f.write(log_file_str.encode('utf8')) p_out, p_err = p.communicate() if p.returncode: logger.error('Problem running REACH:') logger.error('Stdout: %s' % p_out.decode('utf-8')) logger.error('Stderr: %s' % p_err.decode('utf-8')) raise ReachError("Problem running REACH") logger.info("Reach finished.") ret = self.get_output() self.clear_input() return ret
[ "Read the content, returning a list of ReadingData objects." ]
Please provide a description of the function:def prep_input(self, read_list): "Prepare the list of files or text content objects to be read." logger.info('Prepping input for sparser.') self.file_list = [] for content in read_list: quality_issue = self._check_content(content.get_text()) if quality_issue is not None: logger.warning("Skipping %d due to: %s" % (content.get_id(), quality_issue)) continue if content.is_format('nxml'): # If it is already an nxml, we just need to adjust the # name a bit, if anything. if not content.get_filename().startswith('PMC'): content.change_id('PMC' + str(content.get_id())) fpath = content.copy_to(self.tmp_dir) self.file_list.append(fpath) elif content.is_format('txt', 'text'): # Otherwise we need to frame the content in xml and put it # in a new file with the appropriate name. nxml_str = sparser.make_nxml_from_text(content.get_text()) new_content = Content.from_string('PMC' + str(content.get_id()), 'nxml', nxml_str) fpath = new_content.copy_to(self.tmp_dir) self.file_list.append(fpath) else: raise SparserError("Unrecognized format %s." % content.format) return
[ "Prepare the list of files or text content objects to be read." ]
Please provide a description of the function:def get_output(self, output_files, clear=True): "Get the output files as an id indexed dict." patt = re.compile(r'(.*?)-semantics.*?') for outpath in output_files: if outpath is None: logger.warning("Found outpath with value None. Skipping.") continue re_out = patt.match(path.basename(outpath)) if re_out is None: raise SparserError("Could not get prefix from output path %s." % outpath) prefix = re_out.groups()[0] if prefix.startswith('PMC'): prefix = prefix[3:] if prefix.isdecimal(): # In this case we assume the prefix is a tcid. prefix = int(prefix) try: with open(outpath, 'rt') as f: content = json.load(f) except Exception as e: logger.exception(e) logger.error("Could not load reading content from %s." % outpath) content = None self.add_result(prefix, content) if clear: input_path = outpath.replace('-semantics.json', '.nxml') try: remove(outpath) remove(input_path) except Exception as e: logger.exception(e) logger.error("Could not remove sparser files %s and %s." % (outpath, input_path)) return self.results
[ "Get the output files as an id indexed dict." ]
Please provide a description of the function:def read_some(self, fpath_list, outbuf=None, verbose=False): "Perform a few readings." outpath_list = [] for fpath in fpath_list: output, outbuf = self.read_one(fpath, outbuf, verbose) if output is not None: outpath_list.append(output) return outpath_list, outbuf
[ "Perform a few readings." ]
Please provide a description of the function:def read(self, read_list, verbose=False, log=False, n_per_proc=None):
    "Perform the actual reading."
    ret = []
    self.prep_input(read_list)
    L = len(self.file_list)
    if L == 0:
        return ret
    logger.info("Beginning to run sparser.")
    output_file_list = []
    if log:
        log_name = 'sparser_run_%s.log' % _time_stamp()
        outbuf = open(log_name, 'wb')
    else:
        outbuf = None
    try:
        if self.n_proc == 1:
            for fpath in self.file_list:
                outpath, _ = self.read_one(fpath, outbuf, verbose)
                if outpath is not None:
                    output_file_list.append(outpath)
        else:
            if n_per_proc is None:
                n_per_proc = max(1, min(1000, L//self.n_proc//2))
            pool = None
            try:
                pool = Pool(self.n_proc)
                if n_per_proc != 1:
                    batches = [self.file_list[n*n_per_proc:(n+1)*n_per_proc]
                               for n in range(L//n_per_proc + 1)]
                    out_lists_and_buffs = pool.map(self.read_some, batches)
                else:
                    out_files_and_buffs = pool.map(self.read_one,
                                                   self.file_list)
                    out_lists_and_buffs = [([out_files], buffs)
                                           for out_files, buffs
                                           in out_files_and_buffs]
            finally:
                if pool is not None:
                    pool.close()
                    pool.join()
            for i, (out_list, buff) in enumerate(out_lists_and_buffs):
                if out_list is not None:
                    output_file_list += out_list
                if log:
                    outbuf.write(b'Log for producing output %d/%d.\n'
                                 % (i, len(out_lists_and_buffs)))
                    if buff is not None:
                        buff.seek(0)
                        outbuf.write(buff.read() + b'\n')
                    else:
                        outbuf.write(b'ERROR: output buffer was None. '
                                     b'No logs available.\n')
                    outbuf.flush()
    finally:
        if log:
            outbuf.close()
            if verbose:
                logger.info("Sparser logs may be found at %s." % log_name)
    ret = self.get_output(output_file_list)
    return ret
[ "Perform the actual reading." ]
Please provide a description of the function:def process_text(text, pmid=None, cleanup=True, add_grounding=True): # Create a temporary directory to store the proprocessed input pp_dir = tempfile.mkdtemp('indra_isi_pp_output') pp = IsiPreprocessor(pp_dir) extra_annotations = {} pp.preprocess_plain_text_string(text, pmid, extra_annotations) # Run the ISI reader and extract statements ip = process_preprocessed(pp) if add_grounding: ip.add_grounding() if cleanup: # Remove temporary directory with processed input shutil.rmtree(pp_dir) else: logger.info('Not cleaning up %s' % pp_dir) return ip
[ "Process a string using the ISI reader and extract INDRA statements.\n\n Parameters\n ----------\n text : str\n A text string to process\n pmid : Optional[str]\n The PMID associated with this text (or None if not specified)\n cleanup : Optional[bool]\n If True, the temporary folders created for preprocessed reading input\n and output are removed. Default: True\n add_grounding : Optional[bool]\n If True the extracted Statements' grounding is mapped\n\n Returns\n -------\n ip : indra.sources.isi.processor.IsiProcessor\n A processor containing statements\n " ]
Please provide a description of the function:def process_nxml(nxml_filename, pmid=None, extra_annotations=None, cleanup=True, add_grounding=True):
    if extra_annotations is None:
        extra_annotations = {}
    # Create a temporary directory to store the preprocessed input
    pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
    pp = IsiPreprocessor(pp_dir)
    # Pass the caller-provided extra annotations through to the preprocessor
    pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)
    # Run the ISI reader and extract statements
    ip = process_preprocessed(pp)
    if add_grounding:
        ip.add_grounding()
    if cleanup:
        # Remove temporary directory with processed input
        shutil.rmtree(pp_dir)
    else:
        logger.info('Not cleaning up %s' % pp_dir)
    return ip
[ "Process an NXML file using the ISI reader\n\n First converts NXML to plain text and preprocesses it, then runs the ISI\n reader, and processes the output to extract INDRA Statements.\n\n Parameters\n ----------\n nxml_filename : str\n nxml file to process\n pmid : Optional[str]\n pmid of this nxml file, to be added to the Evidence object of the\n extracted INDRA statements\n extra_annotations : Optional[dict]\n Additional annotations to add to the Evidence object of all extracted\n INDRA statements. Extra annotations called 'interaction' are ignored\n since this is used by the processor to store the corresponding\n raw ISI output.\n cleanup : Optional[bool]\n If True, the temporary folders created for preprocessed reading input\n and output are removed. Default: True\n add_grounding : Optional[bool]\n If True the extracted Statements' grounding is mapped\n\n Returns\n -------\n ip : indra.sources.isi.processor.IsiProcessor\n A processor containing extracted Statements\n " ]
Please provide a description of the function:def process_preprocessed(isi_preprocessor, num_processes=1, output_dir=None, cleanup=True, add_grounding=True):
    # Track whether the caller supplied an output directory; if not, we
    # create a temporary one that should be cleaned up at the end
    user_output_dir = output_dir is not None
    if output_dir is None:
        output_dir = tempfile.mkdtemp('indra_isi_processor_output')
    else:
        output_dir = os.path.abspath(output_dir)
    tmp_dir = tempfile.mkdtemp('indra_isi_processor_tmp')
    # Form the command to invoke the ISI reader via Docker
    dir_name = isi_preprocessor.preprocessed_dir
    # We call realpath on all these paths so that any symbolic links
    # are generated out - this is needed on Mac
    input_binding = os.path.realpath(dir_name) + ':/input:ro'
    output_binding = os.path.realpath(output_dir) + ':/output:rw'
    tmp_binding = os.path.realpath(tmp_dir) + ':/temp:rw'
    command = ['docker', 'run', '-it', '--rm',
               '-v', input_binding, '-v', output_binding, '-v', tmp_binding,
               'sahilgar/bigmechisi', './myprocesspapers.sh',
               '-c', str(num_processes)]
    # Invoke the ISI reader
    logger.info('Running command:')
    logger.info(' '.join(command))
    ret = subprocess.call(command)
    if ret != 0:
        logger.error('Docker returned non-zero status code')
    ips = []
    for basename, pmid in isi_preprocessor.pmids.items():
        fname = os.path.join(output_dir, '%s.json' % basename)
        ip = process_json_file(fname, pmid=pmid,
            extra_annotations=isi_preprocessor.extra_annotations.get(fname, {}),
            add_grounding=False)
        ips.append(ip)
    # Remove the temporary output directory if we created it ourselves
    if not user_output_dir:
        if cleanup:
            shutil.rmtree(output_dir)
        else:
            logger.info('Not cleaning up %s' % output_dir)
    if cleanup:
        shutil.rmtree(tmp_dir)
    else:
        logger.info('Not cleaning up %s' % tmp_dir)
    if len(ips) > 1:
        for ip in ips[1:]:
            ips[0].statements += ip.statements
    if ips:
        if add_grounding:
            ips[0].add_grounding()
        return ips[0]
    else:
        return None
[ "Process a directory of abstracts and/or papers preprocessed using the\n specified IsiPreprocessor, to produce a list of extracted INDRA statements.\n\n Parameters\n ----------\n isi_preprocessor : indra.sources.isi.preprocessor.IsiPreprocessor\n Preprocessor object that has already preprocessed the documents we\n want to read and process with the ISI reader\n num_processes : Optional[int]\n Number of processes to parallelize over\n output_dir : Optional[str]\n The directory into which to put reader output; if omitted or None,\n uses a temporary directory.\n cleanup : Optional[bool]\n If True, the temporary folders created for preprocessed reading input\n and output are removed. Default: True\n add_grounding : Optional[bool]\n If True the extracted Statements' grounding is mapped\n\n Returns\n -------\n ip : indra.sources.isi.processor.IsiProcessor\n A processor containing extracted statements\n " ]
Please provide a description of the function:def process_output_folder(folder_path, pmids=None, extra_annotations=None, add_grounding=True): pmids = pmids if pmids is not None else {} extra_annotations = extra_annotations if \ extra_annotations is not None else {} ips = [] for entry in glob.glob(os.path.join(folder_path, '*.json')): entry_key = os.path.splitext(os.path.basename(entry))[0] # Extract the corresponding file id pmid = pmids.get(entry_key) extra_annotation = extra_annotations.get(entry_key) ip = process_json_file(entry, pmid, extra_annotation, False) ips.append(ip) if len(ips) > 1: for ip in ips[1:]: ips[0].statements += ip.statements if ips: if add_grounding: ips[0].add_grounding() return ips[0] else: return None
[ "Recursively extracts statements from all ISI output files in the\n given directory and subdirectories.\n\n Parameters\n ----------\n folder_path : str\n The directory to traverse\n pmids : Optional[str]\n PMID mapping to be added to the Evidence of the extracted INDRA\n Statements\n extra_annotations : Optional[dict]\n Additional annotations to add to the Evidence object of all extracted\n INDRA statements. Extra annotations called 'interaction' are ignored\n since this is used by the processor to store the corresponding\n raw ISI output.\n add_grounding : Optional[bool]\n If True the extracted Statements' grounding is mapped\n " ]
Please provide a description of the function:def process_json_file(file_path, pmid=None, extra_annotations=None, add_grounding=True): logger.info('Extracting from %s' % file_path) with open(file_path, 'rb') as fh: jd = json.load(fh) ip = IsiProcessor(jd, pmid, extra_annotations) ip.get_statements() if add_grounding: ip.add_grounding() return ip
[ "Extracts statements from the given ISI output file.\n\n Parameters\n ----------\n file_path : str\n The ISI output file from which to extract statements\n pmid : int\n The PMID of the document being preprocessed, or None if not\n specified\n extra_annotations : dict\n Extra annotations to be added to each statement from this document\n (can be the empty dictionary)\n add_grounding : Optional[bool]\n If True the extracted Statements' grounding is mapped\n " ]
Please provide a description of the function:def process_text(text, save_xml='cwms_output.xml'): xml = client.send_query(text, 'cwmsreader') # There are actually two EKBs in the xml document. Extract the second. first_end = xml.find('</ekb>') # End of first EKB second_start = xml.find('<ekb', first_end) # Start of second EKB second_end = xml.find('</ekb>', second_start) # End of second EKB second_ekb = xml[second_start:second_end+len('</ekb>')] # second EKB if save_xml: with open(save_xml, 'wb') as fh: fh.write(second_ekb.encode('utf-8')) return process_ekb(second_ekb)
[ "Processes text using the CWMS web service.\n\n Parameters\n ----------\n text : str\n Text to process\n\n Returns\n -------\n cp : indra.sources.cwms.CWMSProcessor\n A CWMSProcessor, which contains a list of INDRA statements in its\n statements attribute.\n " ]
Please provide a description of the function:def process_ekb_file(fname): # Process EKB XML file into statements with open(fname, 'rb') as fh: ekb_str = fh.read().decode('utf-8') return process_ekb(ekb_str)
[ "Processes an EKB file produced by CWMS.\n\n Parameters\n ----------\n fname : str\n Path to the EKB file to process.\n\n Returns\n -------\n cp : indra.sources.cwms.CWMSProcessor\n A CWMSProcessor, which contains a list of INDRA statements in its\n statements attribute.\n " ]
Please provide a description of the function:def im_json_to_graph(im_json): imap_data = im_json['influence map']['map'] # Initialize the graph graph = MultiDiGraph() id_node_dict = {} # Add each node to the graph for node_dict in imap_data['nodes']: # There is always just one entry here with the node type e.g. "rule" # as key, and all the node data as the value node_type, node = list(node_dict.items())[0] # Add the node to the graph with its label and type attrs = {'fillcolor': '#b7d2ff' if node_type == 'rule' else '#cdffc9', 'shape': 'box' if node_type == 'rule' else 'oval', 'style': 'filled'} graph.add_node(node['label'], node_type=node_type, **attrs) # Save the key of the node to refer to it later new_key = '%s%s' % (node_type, node['id']) id_node_dict[new_key] = node['label'] def add_edges(link_list, edge_sign): attrs = {'sign': edge_sign, 'color': 'green' if edge_sign == 1 else 'red', 'arrowhead': 'normal' if edge_sign == 1 else 'tee'} for link_dict in link_list: source = link_dict['source'] for target_dict in link_dict['target map']: target = target_dict['target'] src_id = '%s%s' % list(source.items())[0] tgt_id = '%s%s' % list(target.items())[0] graph.add_edge(id_node_dict[src_id], id_node_dict[tgt_id], **attrs) # Add all the edges from the positive and negative influences add_edges(imap_data['wake-up map'], 1) add_edges(imap_data['inhibition map'], -1) return graph
[ "Return networkx graph from Kappy's influence map JSON.\n\n Parameters\n ----------\n im_json : dict\n A JSON dict which contains an influence map generated by Kappy.\n\n Returns\n -------\n graph : networkx.MultiDiGraph\n A graph representing the influence map.\n " ]
Please provide a description of the function:def cm_json_to_graph(im_json): cmap_data = im_json['contact map']['map'] # Initialize the graph graph = AGraph() # In this loop we add sites as nodes and clusters around sites to the # graph. We also collect edges to be added between sites later. edges = [] for node_idx, node in enumerate(cmap_data): sites_in_node = [] for site_idx, site in enumerate(node['node_sites']): # We map the unique ID of the site to its name site_key = (node_idx, site_idx) sites_in_node.append(site_key) graph.add_node(site_key, label=site['site_name'], style='filled', shape='ellipse') # Each port link is an edge from the current site to the # specified site if not site['site_type'] or not site['site_type'][0] == 'port': continue for port_link in site['site_type'][1]['port_links']: edge = (site_key, tuple(port_link)) edges.append(edge) graph.add_subgraph(sites_in_node, name='cluster_%s' % node['node_type'], label=node['node_type']) # Finally we add the edges between the sites for source, target in edges: graph.add_edge(source, target) return graph
[ "Return pygraphviz Agraph from Kappy's contact map JSON.\n\n Parameters\n ----------\n im_json : dict\n A JSON dict which contains a contact map generated by Kappy.\n\n Returns\n -------\n graph : pygraphviz.Agraph\n A graph representing the contact map.\n " ]
Please provide a description of the function:def fetch_email(M, msg_id): res, data = M.fetch(msg_id, '(RFC822)') if res == 'OK': # Data here is a list with 1 element containing a tuple # whose 2nd element is a long string containing the email # The content is a bytes that must be decoded raw_msg_txt = data[0][1] # In Python3, we call message_from_bytes, but this function doesn't # exist in Python 2. try: msg = email.message_from_bytes(raw_msg_txt) except AttributeError: msg = email.message_from_string(raw_msg_txt) # At this point, we have a message containing bytes (not unicode) # fields that will still need to be decoded, ideally according to the # character set specified in the message. return msg else: return None
[ "Returns the given email message as a unicode string." ]
Please provide a description of the function:def get_headers(msg): headers = {} for k in msg.keys(): # decode_header decodes header but does not convert charset, so these # may still be bytes, even in Python 3. However, if it's ASCII # only (hence unambiguous encoding), the header fields come back # as str (unicode) in Python 3. (header_txt, charset) = email.header.decode_header(msg[k])[0] if charset is not None: header_txt = header_txt.decode(charset) headers[k] = header_txt return headers
[ "Takes email.message.Message object initialized from unicode string,\n returns dict with header fields." ]
Please provide a description of the function:def populate_config_dict(config_path): try: config_dict = {} parser = RawConfigParser() parser.optionxform = lambda x: x parser.read(config_path) sections = parser.sections() for section in sections: options = parser.options(section) for option in options: config_dict[option] = str(parser.get(section, option)) except Exception as e: logger.warning("Could not load configuration file due to exception. " "Only environment variable equivalents will be used.") return None for key in config_dict.keys(): if config_dict[key] == '': config_dict[key] = None elif isinstance(config_dict[key], str): config_dict[key] = os.path.expanduser(config_dict[key]) return config_dict
[ "Load the configuration file into the config_file dictionary\n\n A ConfigParser-style configuration file can have multiple sections, but\n we ignore the section distinction and load the key/value pairs from all\n sections into a single key/value list.\n " ]
Please provide a description of the function:def get_config(key, failure_ok=True): err_msg = "Key %s not in environment or config file." % key if key in os.environ: return os.environ[key] elif key in CONFIG_DICT: val = CONFIG_DICT[key] # We interpret an empty value in the config file as a failure if val is None and not failure_ok: msg = 'Key %s is set to an empty value in config file.' % key raise IndraConfigError(msg) else: return val elif not failure_ok: raise IndraConfigError(err_msg) else: logger.warning(err_msg) return None
[ "Get value by key from config file or environment.\n\n Returns the configuration value, first checking the environment\n variables and then, if it's not present there, checking the configuration\n file.\n\n Parameters\n ----------\n key : str\n The key for the configuration value to fetch\n failure_ok : Optional[bool]\n If False and the configuration is missing, an IndraConfigError is\n raised. If True, None is returned and no error is raised in case\n of a missing configuration. Default: True\n\n Returns\n -------\n value : str or None\n The configuration value or None if the configuration value doesn't\n exist and failure_ok is set to True.\n " ]
Please provide a description of the function:def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator='\n', encoding='utf-8', skiprows=0): # Python 3 version if sys.version_info[0] >= 3: # Next, get the csv reader, with unicode delimiter and quotechar csv_reader = csv.reader(fileobj, delimiter=delimiter, quotechar=quotechar, quoting=quoting, lineterminator=lineterminator) # Now, return the (already decoded) unicode csv_reader generator # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield row # Python 2 version else: # Next, get the csv reader, passing delimiter and quotechar as # bytestrings rather than unicode csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding), quotechar=quotechar.encode(encoding), quoting=quoting, lineterminator=lineterminator) # Iterate over the file and decode each string into unicode # Skip rows if necessary for skip_ix in range(skiprows): next(csv_reader) for row in csv_reader: yield [cell.decode(encoding) for cell in row]
[ "fileobj can be a StringIO in Py3, but should be a BytesIO in Py2." ]
Please provide a description of the function:def fast_deepcopy(obj): with BytesIO() as buf: pickle.dump(obj, buf) buf.seek(0) obj_new = pickle.load(buf) return obj_new
[ "This is a faster implementation of deepcopy via pickle.\n\n It is meant primarily for sets of Statements with complex hierarchies\n but can be used for any object.\n " ]
Please provide a description of the function:def flatten(l): return sum(map(flatten, l), []) \ if isinstance(l, list) or isinstance(l, tuple) else [l]
[ "Flatten a nested list." ]
Please provide a description of the function:def batch_iter(iterator, batch_size, return_func=None, padding=None): for batch in zip_longest(*[iter(iterator)]*batch_size, fillvalue=padding): gen = (thing for thing in batch if thing is not padding) if return_func is None: yield gen else: yield return_func(gen)
[ "Break an iterable into batches of size batch_size\n\n Note that `padding` should be set to something (anything) which is NOT a\n valid member of the iterator. For example, None works for [0,1,2,...10], but\n not for ['a', None, 'c', 'd'].\n\n Parameters\n ----------\n iterator : iterable\n A python object which is iterable.\n batch_size : int\n The size of batches you wish to produce from the iterator.\n return_func : executable or None\n Pass a function that takes a generator and returns an iterable (e.g.\n `list` or `set`). If None, a generator will be returned.\n padding : anything\n This is used internally to ensure that the remainder of the list is\n included. This MUST NOT be a valid element of the iterator.\n\n Returns\n -------\n An iterator over lists or generators, depending on `return_lists`.\n " ]
Please provide a description of the function:def read_pmid_sentences(pmid_sentences, **drum_args): def _set_pmid(statements, pmid): for stmt in statements: for evidence in stmt.evidence: evidence.pmid = pmid # See if we need to start DRUM as a subprocess run_drum = drum_args.get('run_drum', False) drum_process = None all_statements = {} # Iterate over all the keys and sentences to read for pmid, sentences in pmid_sentences.items(): logger.info('================================') logger.info('Processing %d sentences for %s' % (len(sentences), pmid)) ts = time.time() # Make a DrumReader instance drum_args['name'] = 'DrumReader%s' % pmid dr = DrumReader(**drum_args) time.sleep(3) # If there is no DRUM process set yet, we get the one that was # just started by the DrumReader if run_drum and drum_process is None: drum_args.pop('run_drum', None) drum_process = dr.drum_system # By setting this, we ensuer that the reference to the # process is passed in to all future DrumReaders drum_args['drum_system'] = drum_process # Now read each sentence for this key for sentence in sentences: dr.read_text(sentence) # Start receiving results and exit when done try: dr.start() except SystemExit: pass statements = [] # Process all the extractions into INDRA Statements for extraction in dr.extractions: # Sometimes we get nothing back if not extraction: continue tp = process_xml(extraction) statements += tp.statements # Set the PMIDs for the evidences of the Statements _set_pmid(statements, pmid) te = time.time() logger.info('Reading took %d seconds and produced %d Statements.' % (te-ts, len(statements))) all_statements[pmid] = statements # If we were running a DRUM process, we should kill it if drum_process and dr.drum_system: dr._kill_drum() return all_statements
[ "Read sentences from a PMID-keyed dictonary and return all Statements\n\n Parameters\n ----------\n pmid_sentences : dict[str, list[str]]\n A dictonary where each key is a PMID pointing to a list of sentences\n to be read.\n\n **drum_args\n Keyword arguments passed directly to the DrumReader. Typical\n things to specify are `host` and `port`. If `run_drum` is specified\n as True, this process will internally run the DRUM reading system\n as a subprocess. Otherwise, DRUM is expected to be running\n independently.\n\n Returns\n -------\n all_statements : list[indra.statement.Statement]\n A list of INDRA Statements resulting from the reading\n " ]
Please provide a description of the function:def graph_query(kind, source, target=None, neighbor_limit=1, database_filter=None): default_databases = ['wp', 'smpdb', 'reconx', 'reactome', 'psp', 'pid', 'panther', 'netpath', 'msigdb', 'mirtarbase', 'kegg', 'intact', 'inoh', 'humancyc', 'hprd', 'drugbank', 'dip', 'corum'] if not database_filter: query_databases = default_databases else: query_databases = database_filter # excluded: ctd params = {} params['format'] = 'BIOPAX' params['organism'] = '9606' params['datasource'] = query_databases # Get the "kind" string kind_str = kind.lower() if kind not in ['neighborhood', 'pathsbetween', 'pathsfromto']: logger.warn('Invalid query type %s' % kind_str) return None params['kind'] = kind_str # Get the source string if isinstance(source, basestring): source_str = source else: source_str = ','.join(source) params['source'] = source_str try: neighbor_limit = int(neighbor_limit) params['limit'] = neighbor_limit except (TypeError, ValueError): logger.warn('Invalid neighborhood limit %s' % neighbor_limit) return None if target is not None: if isinstance(target, basestring): target_str = target else: target_str = ','.join(target) params['target'] = target_str logger.info('Sending Pathway Commons query with parameters: ') for k, v in params.items(): logger.info(' %s: %s' % (k, v)) logger.info('Sending Pathway Commons query...') res = requests.get(pc2_url + 'graph', params=params) if not res.status_code == 200: logger.error('Response is HTTP code %d.' % res.status_code) if res.status_code == 500: logger.error('Note: HTTP code 500 can mean empty ' 'results for a valid query.') return None # We don't decode to Unicode here because owl_str_to_model expects # a byte stream model = owl_str_to_model(res.content) if model is not None: logger.info('Pathway Commons query returned a model...') return model
[ "Perform a graph query on PathwayCommons.\n\n For more information on these queries, see\n http://www.pathwaycommons.org/pc2/#graph\n\n Parameters\n ----------\n kind : str\n The kind of graph query to perform. Currently 3 options are\n implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'.\n source : list[str]\n A list of gene names which are the source set for the graph query.\n target : Optional[list[str]]\n A list of gene names which are the target set for the graph query.\n Only needed for 'pathsfromto' queries.\n neighbor_limit : Optional[int]\n This limits the length of the longest path considered in\n the graph query. Default: 1\n\n Returns\n -------\n model : org.biopax.paxtools.model.Model\n A BioPAX model (java object).\n " ]
Please provide a description of the function:def owl_str_to_model(owl_str): io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler') io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3) bais = autoclass('java.io.ByteArrayInputStream') scs = autoclass('java.nio.charset.StandardCharsets') jstr = autoclass('java.lang.String') istream = bais(owl_str) biopax_model = io.convertFromOWL(istream) return biopax_model
[ "Return a BioPAX model object from an OWL string.\n\n Parameters\n ----------\n owl_str : str\n The model as an OWL string.\n\n Returns\n -------\n biopax_model : org.biopax.paxtools.model.Model\n A BioPAX model object (java object).\n " ]
Please provide a description of the function:def owl_to_model(fname): io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler') io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3) try: file_is = autoclass('java.io.FileInputStream')(fname) except JavaException: logger.error('Could not open data file %s' % fname) return try: biopax_model = io.convertFromOWL(file_is) except JavaException as e: logger.error('Could not convert data file %s to BioPax model' % fname) logger.error(e) return file_is.close() return biopax_model
[ "Return a BioPAX model object from an OWL file.\n\n Parameters\n ----------\n fname : str\n The name of the OWL file containing the model.\n\n Returns\n -------\n biopax_model : org.biopax.paxtools.model.Model\n A BioPAX model object (java object).\n " ]
Please provide a description of the function:def model_to_owl(model, fname): io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler') io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3) try: fileOS = autoclass('java.io.FileOutputStream')(fname) except JavaException: logger.error('Could not open data file %s' % fname) return l3_factory = autoclass('org.biopax.paxtools.model.BioPAXLevel').L3.getDefaultFactory() model_out = l3_factory.createModel() for r in model.getObjects().toArray(): model_out.add(r) io.convertToOWL(model_out, fileOS) fileOS.close()
[ "Save a BioPAX model object as an OWL file.\n\n Parameters\n ----------\n model : org.biopax.paxtools.model.Model\n A BioPAX model object (java object).\n fname : str\n The name of the OWL file to save the model in.\n " ]
Please provide a description of the function:def make_model(self, *args, **kwargs): for stmt in self.statements: if isinstance(stmt, RegulateActivity): self._add_regulate_activity(stmt) elif isinstance(stmt, RegulateAmount): self._add_regulate_amount(stmt) elif isinstance(stmt, Modification): self._add_modification(stmt) elif isinstance(stmt, SelfModification): self._add_selfmodification(stmt) elif isinstance(stmt, Gef): self._add_gef(stmt) elif isinstance(stmt, Gap): self._add_gap(stmt) elif isinstance(stmt, Complex): self._add_complex(stmt) else: logger.warning('Unhandled statement type: %s' % stmt.__class__.__name__) if kwargs.get('grouping'): self._group_nodes() self._group_edges() return self.print_cyjs_graph()
[ "Assemble a Cytoscape JS network from INDRA Statements.\n\n This method assembles a Cytoscape JS network from the set of INDRA\n Statements added to the assembler.\n\n Parameters\n ----------\n grouping : bool\n If True, the nodes with identical incoming and outgoing edges\n are grouped and the corresponding edges are merged.\n\n Returns\n -------\n cyjs_str : str\n The json serialized Cytoscape JS model.\n " ]
Please provide a description of the function:def get_gene_names(self): # Collect all gene names in network gene_names = [] for node in self._nodes: members = node['data'].get('members') if members: gene_names += list(members.keys()) else: if node['data']['name'].startswith('Group'): continue gene_names.append(node['data']['name']) self._gene_names = gene_names
[ "Gather gene names of all nodes and node members" ]
Please provide a description of the function:def set_CCLE_context(self, cell_types): self.get_gene_names() # Get expression and mutations from context client exp_values = \ context_client.get_protein_expression(self._gene_names, cell_types) mut_values = \ context_client.get_mutations(self._gene_names, cell_types) # Make a dict of presence/absence of mutations muts = {cell_line: {} for cell_line in cell_types} for cell_line, entries in mut_values.items(): if entries is not None: for gene, mutations in entries.items(): if mutations: muts[cell_line][gene] = 1 else: muts[cell_line][gene] = 0 # Create bins for the exp values # because colorbrewer only does 3-9 bins and I don't feel like # reinventing color scheme theory, this will only bin 3-9 bins def bin_exp(expression_dict): d = expression_dict exp_values = [] for line in d: for gene in d[line]: val = d[line][gene] if val is not None: exp_values.append(val) thr_dict = {} for n_bins in range(3, 10): bin_thr = np.histogram(np.log10(exp_values), n_bins)[1][1:] thr_dict[n_bins] = bin_thr # this dict isn't yet binned, that happens in the loop binned_dict = {x: deepcopy(expression_dict) for x in range(3, 10)} for n_bins in binned_dict: for line in binned_dict[n_bins]: for gene in binned_dict[n_bins][line]: # last bin is reserved for None if binned_dict[n_bins][line][gene] is None: binned_dict[n_bins][line][gene] = n_bins else: val = np.log10(binned_dict[n_bins][line][gene]) for thr_idx, thr in enumerate(thr_dict[n_bins]): if val <= thr: binned_dict[n_bins][line][gene] = thr_idx break return binned_dict binned_exp = bin_exp(exp_values) context = {'bin_expression': binned_exp, 'mutation': muts} self._context['CCLE'] = context
[ "Set context of all nodes and node members from CCLE." ]
Please provide a description of the function:def print_cyjs_graph(self): cyjs_dict = {'edges': self._edges, 'nodes': self._nodes} cyjs_str = json.dumps(cyjs_dict, indent=1, sort_keys=True) return cyjs_str
[ "Return the assembled Cytoscape JS network as a json string.\n\n Returns\n -------\n cyjs_str : str\n A json string representation of the Cytoscape JS network.\n " ]
Please provide a description of the function:def print_cyjs_context(self): context = self._context context_str = json.dumps(context, indent=1, sort_keys=True) return context_str
[ "Return a list of node names and their respective context.\n\n Returns\n -------\n cyjs_str_context : str\n A json string of the context dictionary. e.g. -\n {'CCLE' : {'bin_expression' : {'cell_line1' : {'gene1':'val1'} },\n 'bin_expression' : {'cell_line' : {'gene1':'val1'} }\n }}\n " ]
Please provide a description of the function:def save_json(self, fname_prefix='model'): cyjs_str = self.print_cyjs_graph() # outputs the graph with open(fname_prefix + '.json', 'wb') as fh: fh.write(cyjs_str.encode('utf-8')) # outputs the context of graph nodes context_str = self.print_cyjs_context() with open(fname_prefix + '_context.json', 'wb') as fh: fh.write(context_str.encode('utf-8'))
[ "Save the assembled Cytoscape JS network in a json file.\n\n This method saves two files based on the file name prefix given.\n It saves one json file with the graph itself, and another json\n file with the context.\n\n Parameters\n ----------\n fname_prefix : Optional[str]\n The prefix of the files to save the Cytoscape JS network and\n context to.\n Default: model\n " ]
Please provide a description of the function:def save_model(self, fname='model.js'): exp_colorscale_str = json.dumps(self._exp_colorscale) mut_colorscale_str = json.dumps(self._mut_colorscale) cyjs_dict = {'edges': self._edges, 'nodes': self._nodes} model_str = json.dumps(cyjs_dict, indent=1, sort_keys=True) model_dict = {'exp_colorscale_str': exp_colorscale_str, 'mut_colorscale_str': mut_colorscale_str, 'model_elements_str': model_str} s = '' s += 'var exp_colorscale = %s;\n' % model_dict['exp_colorscale_str'] s += 'var mut_colorscale = %s;\n' % model_dict['mut_colorscale_str'] s += 'var model_elements = %s;\n' % model_dict['model_elements_str'] with open(fname, 'wb') as fh: fh.write(s.encode('utf-8'))
[ "Save the assembled Cytoscape JS network in a js file.\n\n Parameters\n ----------\n file_name : Optional[str]\n The name of the file to save the Cytoscape JS network to.\n Default: model.js\n " ]
Please provide a description of the function:def _get_edge_dict(self): edge_dict = collections.defaultdict(lambda: []) if len(self._edges) > 0: for e in self._edges: data = e['data'] key = tuple([data['i'], data['source'], data['target'], data['polarity']]) edge_dict[key] = data['id'] return edge_dict
[ "Return a dict of edges.\n\n Keyed tuples of (i, source, target, polarity)\n with lists of edge ids [id1, id2, ...]\n " ]
Please provide a description of the function:def _get_node_key(self, node_dict_item): s = tuple(sorted(node_dict_item['sources'])) t = tuple(sorted(node_dict_item['targets'])) return (s, t)
[ "Return a tuple of sorted sources and targets given a node dict." ]
Please provide a description of the function:def _get_node_groups(self): node_dict = {node['data']['id']: {'sources': [], 'targets': []} for node in self._nodes} for edge in self._edges: # Add edge as a source for its target node edge_data = (edge['data']['i'], edge['data']['polarity'], edge['data']['source']) node_dict[edge['data']['target']]['sources'].append(edge_data) # Add edge as target for its source node edge_data = (edge['data']['i'], edge['data']['polarity'], edge['data']['target']) node_dict[edge['data']['source']]['targets'].append(edge_data) # Make a dictionary of nodes based on source/target as a key node_key_dict = collections.defaultdict(lambda: []) for node_id, node_d in node_dict.items(): key = self._get_node_key(node_d) node_key_dict[key].append(node_id) # Constrain the groups to ones that have more than 1 member node_groups = [g for g in node_key_dict.values() if (len(g) > 1)] return node_groups
[ "Return a list of node id lists that are topologically identical.\n\n First construct a node_dict which is keyed to the node id and\n has a value which is a dict with keys 'sources' and 'targets'.\n The 'sources' and 'targets' each contain a list of tuples\n (i, polarity, source) edge of the node. node_dict is then processed\n by _get_node_key() which returns a tuple of (s,t) where s,t are\n sorted tuples of the ids for the source and target nodes. (s,t) is\n then used as a key in node_key_dict where the values are the node\n ids. node_groups is restricted to groups greater than 1 node.\n " ]
Please provide a description of the function:def _group_edges(self): # edit edges on parent nodes and make new edges for them edges_to_add = [[], []] # [group_edges, uuid_lists] for e in self._edges: new_edge = deepcopy(e) new_edge['data'].pop('id', None) uuid_list = new_edge['data'].pop('uuid_list', []) # Check if edge source or target are contained in a parent # If source or target in parent edit edge # Nodes may only point within their container source = e['data']['source'] target = e['data']['target'] source_node = [x for x in self._nodes if x['data']['id'] == source][0] target_node = [x for x in self._nodes if x['data']['id'] == target][0] # If the source node is in a group, we change the source of this # edge to the group if source_node['data']['parent'] != '': new_edge['data']['source'] = source_node['data']['parent'] e['data']['i'] = 'Virtual' # If the targete node is in a group, we change the target of this # edge to the group if target_node['data']['parent'] != '': new_edge['data']['target'] = target_node['data']['parent'] e['data']['i'] = 'Virtual' if e['data']['i'] == 'Virtual': if new_edge not in edges_to_add[0]: edges_to_add[0].append(new_edge) edges_to_add[1].append(uuid_list) else: idx = edges_to_add[0].index(new_edge) edges_to_add[1][idx] += uuid_list edges_to_add[1][idx] = list(set(edges_to_add[1][idx])) for ze in zip(*edges_to_add): edge = ze[0] edge['data']['id'] = self._get_new_id() edge['data']['uuid_list'] = ze[1] self._edges.append(edge)
[ "Group all edges that are topologically identical.\n\n This means that (i, source, target, polarity) are the same, then sets\n edges on parent (i.e. - group) nodes to 'Virtual' and creates a new\n edge to represent all of them.\n " ]