repo stringlengths 7-55 | path stringlengths 4-127 | func_name stringlengths 1-88 | original_string stringlengths 75-19.8k | language stringclasses 1 value | code stringlengths 75-19.8k | code_tokens list | docstring stringlengths 3-17.3k | docstring_tokens list | sha stringlengths 40-40 | url stringlengths 87-242 | partition stringclasses 1 value |
---|---|---|---|---|---|---|---|---|---|---|---|
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.gather_implicit_activities | def gather_implicit_activities(self):
"""Aggregate all implicit activities and active forms of Agents.
Iterate over self.statements and collect the implied activities
and active forms of Agents that appear in the Statements.
Note that using this function to collect implied Agent activities can
be risky. Assume, for instance, that a Statement from a reading
system states that EGF bound to EGFR phosphorylates ERK. This would
be interpreted as implicit evidence for the EGFR-bound form of EGF
to have 'kinase' activity, which is clearly incorrect.
In contrast the alternative pair of this function:
gather_explicit_activities collects only explicitly stated activities.
"""
for stmt in self.statements:
if isinstance(stmt, Phosphorylation) or \
isinstance(stmt, Transphosphorylation) or \
isinstance(stmt, Autophosphorylation):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('kinase')
enz_base.add_active_state('kinase', stmt.enz.mods)
elif isinstance(stmt, Dephosphorylation):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('phosphatase')
enz_base.add_active_state('phosphatase', stmt.enz.mods)
elif isinstance(stmt, Modification):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('catalytic')
enz_base.add_active_state('catalytic', stmt.enz.mods)
elif isinstance(stmt, SelfModification):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('catalytic')
enz_base.add_active_state('catalytic', stmt.enz.mods)
elif isinstance(stmt, Gef):
if stmt.gef is not None:
gef_base = self._get_base(stmt.gef)
gef_base.add_activity('gef')
if stmt.gef.activity is not None:
act = stmt.gef.activity.activity_type
else:
act = 'activity'
gef_base.add_active_state(act, stmt.gef.mods)
elif isinstance(stmt, Gap):
if stmt.gap is not None:
gap_base = self._get_base(stmt.gap)
gap_base.add_activity('gap')
if stmt.gap.activity is not None:
act = stmt.gap.activity.activity_type
else:
act = 'activity'
gap_base.add_active_state(act, stmt.gap.mods)
elif isinstance(stmt, RegulateActivity):
if stmt.subj is not None:
subj_base = self._get_base(stmt.subj)
subj_base.add_activity(stmt.j) | python | def gather_implicit_activities(self):
"""Aggregate all implicit activities and active forms of Agents.
Iterate over self.statements and collect the implied activities
and active forms of Agents that appear in the Statements.
Note that using this function to collect implied Agent activities can
be risky. Assume, for instance, that a Statement from a reading
system states that EGF bound to EGFR phosphorylates ERK. This would
be interpreted as implicit evidence for the EGFR-bound form of EGF
to have 'kinase' activity, which is clearly incorrect.
In contrast the alternative pair of this function:
gather_explicit_activities collects only explicitly stated activities.
"""
for stmt in self.statements:
if isinstance(stmt, Phosphorylation) or \
isinstance(stmt, Transphosphorylation) or \
isinstance(stmt, Autophosphorylation):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('kinase')
enz_base.add_active_state('kinase', stmt.enz.mods)
elif isinstance(stmt, Dephosphorylation):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('phosphatase')
enz_base.add_active_state('phosphatase', stmt.enz.mods)
elif isinstance(stmt, Modification):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('catalytic')
enz_base.add_active_state('catalytic', stmt.enz.mods)
elif isinstance(stmt, SelfModification):
if stmt.enz is not None:
enz_base = self._get_base(stmt.enz)
enz_base.add_activity('catalytic')
enz_base.add_active_state('catalytic', stmt.enz.mods)
elif isinstance(stmt, Gef):
if stmt.gef is not None:
gef_base = self._get_base(stmt.gef)
gef_base.add_activity('gef')
if stmt.gef.activity is not None:
act = stmt.gef.activity.activity_type
else:
act = 'activity'
gef_base.add_active_state(act, stmt.gef.mods)
elif isinstance(stmt, Gap):
if stmt.gap is not None:
gap_base = self._get_base(stmt.gap)
gap_base.add_activity('gap')
if stmt.gap.activity is not None:
act = stmt.gap.activity.activity_type
else:
act = 'activity'
gap_base.add_active_state(act, stmt.gap.mods)
elif isinstance(stmt, RegulateActivity):
if stmt.subj is not None:
subj_base = self._get_base(stmt.subj)
subj_base.add_activity(stmt.j) | [
"def",
"gather_implicit_activities",
"(",
"self",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"Phosphorylation",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"Transphosphorylation",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"Autophosphorylation",
")",
":",
"if",
"stmt",
".",
"enz",
"is",
"not",
"None",
":",
"enz_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"enz",
")",
"enz_base",
".",
"add_activity",
"(",
"'kinase'",
")",
"enz_base",
".",
"add_active_state",
"(",
"'kinase'",
",",
"stmt",
".",
"enz",
".",
"mods",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Dephosphorylation",
")",
":",
"if",
"stmt",
".",
"enz",
"is",
"not",
"None",
":",
"enz_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"enz",
")",
"enz_base",
".",
"add_activity",
"(",
"'phosphatase'",
")",
"enz_base",
".",
"add_active_state",
"(",
"'phosphatase'",
",",
"stmt",
".",
"enz",
".",
"mods",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Modification",
")",
":",
"if",
"stmt",
".",
"enz",
"is",
"not",
"None",
":",
"enz_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"enz",
")",
"enz_base",
".",
"add_activity",
"(",
"'catalytic'",
")",
"enz_base",
".",
"add_active_state",
"(",
"'catalytic'",
",",
"stmt",
".",
"enz",
".",
"mods",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"SelfModification",
")",
":",
"if",
"stmt",
".",
"enz",
"is",
"not",
"None",
":",
"enz_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"enz",
")",
"enz_base",
".",
"add_activity",
"(",
"'catalytic'",
")",
"enz_base",
".",
"add_active_state",
"(",
"'catalytic'",
",",
"stmt",
".",
"enz",
".",
"mods",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Gef",
")",
":",
"if",
"stmt",
".",
"gef",
"is",
"not",
"None",
":",
"gef_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"gef",
")",
"gef_base",
".",
"add_activity",
"(",
"'gef'",
")",
"if",
"stmt",
".",
"gef",
".",
"activity",
"is",
"not",
"None",
":",
"act",
"=",
"stmt",
".",
"gef",
".",
"activity",
".",
"activity_type",
"else",
":",
"act",
"=",
"'activity'",
"gef_base",
".",
"add_active_state",
"(",
"act",
",",
"stmt",
".",
"gef",
".",
"mods",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Gap",
")",
":",
"if",
"stmt",
".",
"gap",
"is",
"not",
"None",
":",
"gap_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"gap",
")",
"gap_base",
".",
"add_activity",
"(",
"'gap'",
")",
"if",
"stmt",
".",
"gap",
".",
"activity",
"is",
"not",
"None",
":",
"act",
"=",
"stmt",
".",
"gap",
".",
"activity",
".",
"activity_type",
"else",
":",
"act",
"=",
"'activity'",
"gap_base",
".",
"add_active_state",
"(",
"'act'",
",",
"stmt",
".",
"gap",
".",
"mods",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"RegulateActivity",
")",
":",
"if",
"stmt",
".",
"subj",
"is",
"not",
"None",
":",
"subj_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"subj",
")",
"subj_base",
".",
"add_activity",
"(",
"stmt",
".",
"j",
")"
]
| Aggregate all implicit activities and active forms of Agents.
Iterate over self.statements and collect the implied activities
and active forms of Agents that appear in the Statements.
Note that using this function to collect implied Agent activities can
be risky. Assume, for instance, that a Statement from a reading
system states that EGF bound to EGFR phosphorylates ERK. This would
be interpreted as implicit evidence for the EGFR-bound form of EGF
to have 'kinase' activity, which is clearly incorrect.
In contrast the alternative pair of this function:
gather_explicit_activities collects only explicitly stated activities. | [
"Aggregate",
"all",
"implicit",
"activities",
"and",
"active",
"forms",
"of",
"Agents",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L68-L127 | train |
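The entry above collects implied activities from Statements. A minimal usage sketch, assuming INDRA is importable and that MechLinker is constructed directly from a list of Statements as the method bodies above suggest:

```python
from indra.statements import Agent, Phosphorylation
from indra.mechlinker import MechLinker

# "MAP2K1 phosphorylates MAPK1" implies MAP2K1 has kinase activity.
stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')
ml = MechLinker([stmt])
ml.gather_implicit_activities()
# The BaseAgent for MAP2K1 should now carry an implied 'kinase' activity,
# subject to the over-interpretation caveat noted in the docstring.
```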
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.require_active_forms | def require_active_forms(self):
"""Rewrites Statements with Agents' active forms in active positions.
As an example, the enzyme in a Modification Statement can be expected
to be in an active state. Similarly, subjects of RegulateAmount and
RegulateActivity Statements can be expected to be in an active form.
This function takes the collected active states of Agents in their
corresponding BaseAgents and then rewrites other Statements to apply
the active Agent states to them.
Returns
-------
new_stmts : list[indra.statements.Statement]
A list of Statements which includes the newly rewritten Statements.
This list is also set as the internal Statement list of the
MechLinker.
"""
logger.info('Setting required active forms on %d statements...' %
len(self.statements))
new_stmts = []
for stmt in self.statements:
if isinstance(stmt, Modification):
if stmt.enz is None:
new_stmts.append(stmt)
continue
enz_base = self._get_base(stmt.enz)
active_forms = enz_base.get_active_forms()
if not active_forms:
new_stmts.append(stmt)
else:
for af in active_forms:
new_stmt = fast_deepcopy(stmt)
new_stmt.uuid = str(uuid.uuid4())
evs = af.apply_to(new_stmt.enz)
new_stmt.partial_evidence = evs
new_stmts.append(new_stmt)
elif isinstance(stmt, RegulateAmount) or \
isinstance(stmt, RegulateActivity):
if stmt.subj is None:
new_stmts.append(stmt)
continue
subj_base = self._get_base(stmt.subj)
active_forms = subj_base.get_active_forms()
if not active_forms:
new_stmts.append(stmt)
else:
for af in active_forms:
new_stmt = fast_deepcopy(stmt)
new_stmt.uuid = str(uuid.uuid4())
evs = af.apply_to(new_stmt.subj)
new_stmt.partial_evidence = evs
new_stmts.append(new_stmt)
else:
new_stmts.append(stmt)
self.statements = new_stmts
return new_stmts | python | def require_active_forms(self):
"""Rewrites Statements with Agents' active forms in active positions.
As an example, the enzyme in a Modification Statement can be expected
to be in an active state. Similarly, subjects of RegulateAmount and
RegulateActivity Statements can be expected to be in an active form.
This function takes the collected active states of Agents in their
corresponding BaseAgents and then rewrites other Statements to apply
the active Agent states to them.
Returns
-------
new_stmts : list[indra.statements.Statement]
A list of Statements which includes the newly rewritten Statements.
This list is also set as the internal Statement list of the
MechLinker.
"""
logger.info('Setting required active forms on %d statements...' %
len(self.statements))
new_stmts = []
for stmt in self.statements:
if isinstance(stmt, Modification):
if stmt.enz is None:
new_stmts.append(stmt)
continue
enz_base = self._get_base(stmt.enz)
active_forms = enz_base.get_active_forms()
if not active_forms:
new_stmts.append(stmt)
else:
for af in active_forms:
new_stmt = fast_deepcopy(stmt)
new_stmt.uuid = str(uuid.uuid4())
evs = af.apply_to(new_stmt.enz)
new_stmt.partial_evidence = evs
new_stmts.append(new_stmt)
elif isinstance(stmt, RegulateAmount) or \
isinstance(stmt, RegulateActivity):
if stmt.subj is None:
new_stmts.append(stmt)
continue
subj_base = self._get_base(stmt.subj)
active_forms = subj_base.get_active_forms()
if not active_forms:
new_stmts.append(stmt)
else:
for af in active_forms:
new_stmt = fast_deepcopy(stmt)
new_stmt.uuid = str(uuid.uuid4())
evs = af.apply_to(new_stmt.subj)
new_stmt.partial_evidence = evs
new_stmts.append(new_stmt)
else:
new_stmts.append(stmt)
self.statements = new_stmts
return new_stmts | [
"def",
"require_active_forms",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Setting required active forms on %d statements...'",
"%",
"len",
"(",
"self",
".",
"statements",
")",
")",
"new_stmts",
"=",
"[",
"]",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"Modification",
")",
":",
"if",
"stmt",
".",
"enz",
"is",
"None",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"continue",
"enz_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"enz",
")",
"active_forms",
"=",
"enz_base",
".",
"get_active_forms",
"(",
")",
"if",
"not",
"active_forms",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"else",
":",
"for",
"af",
"in",
"active_forms",
":",
"new_stmt",
"=",
"fast_deepcopy",
"(",
"stmt",
")",
"new_stmt",
".",
"uuid",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"evs",
"=",
"af",
".",
"apply_to",
"(",
"new_stmt",
".",
"enz",
")",
"new_stmt",
".",
"partial_evidence",
"=",
"evs",
"new_stmts",
".",
"append",
"(",
"new_stmt",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"RegulateAmount",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"RegulateActivity",
")",
":",
"if",
"stmt",
".",
"subj",
"is",
"None",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"continue",
"subj_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"subj",
")",
"active_forms",
"=",
"subj_base",
".",
"get_active_forms",
"(",
")",
"if",
"not",
"active_forms",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"else",
":",
"for",
"af",
"in",
"active_forms",
":",
"new_stmt",
"=",
"fast_deepcopy",
"(",
"stmt",
")",
"new_stmt",
".",
"uuid",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"evs",
"=",
"af",
".",
"apply_to",
"(",
"new_stmt",
".",
"subj",
")",
"new_stmt",
".",
"partial_evidence",
"=",
"evs",
"new_stmts",
".",
"append",
"(",
"new_stmt",
")",
"else",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"self",
".",
"statements",
"=",
"new_stmts",
"return",
"new_stmts"
]
| Rewrites Statements with Agents' active forms in active positions.
As an example, the enzyme in a Modification Statement can be expected
to be in an active state. Similarly, subjects of RegulateAmount and
RegulateActivity Statements can be expected to be in an active form.
This function takes the collected active states of Agents in their
corresponding BaseAgents and then rewrites other Statements to apply
the active Agent states to them.
Returns
-------
new_stmts : list[indra.statements.Statement]
A list of Statements which includes the newly rewritten Statements.
This list is also set as the internal Statement list of the
MechLinker. | [
"Rewrites",
"Statements",
"with",
"Agents",
"active",
"forms",
"in",
"active",
"positions",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L166-L221 | train |
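A hedged sketch of the rewriting step described above; gather_explicit_activities is the counterpart method referenced in the first entry's docstring and is assumed to populate the BaseAgents' active forms:

```python
from indra.statements import Agent, ModCondition, ActiveForm, Phosphorylation
from indra.mechlinker import MechLinker

# Phospho-S218 MAP2K1 is explicitly stated to be kinase-active.
mek_active = Agent('MAP2K1', mods=[ModCondition('phosphorylation', 'S', '218')])
stmts = [ActiveForm(mek_active, 'kinase', True),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')]

ml = MechLinker(stmts)
ml.gather_explicit_activities()      # collect the stated active form
new_stmts = ml.require_active_forms()
# The MAP2K1 -> MAPK1 phosphorylation is expected to be rewritten so that
# its enzyme carries the phospho-S218 condition.
```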
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.reduce_activities | def reduce_activities(self):
"""Rewrite the activity types referenced in Statements for consistency.
Activity types are reduced to the most specific form whenever possible.
For instance, if 'kinase' is the only specific activity type known
for the BaseAgent of BRAF, its generic 'activity' forms are rewritten
to 'kinase'.
"""
for stmt in self.statements:
agents = stmt.agent_list()
for agent in agents:
if agent is not None and agent.activity is not None:
agent_base = self._get_base(agent)
act_red = agent_base.get_activity_reduction(
agent.activity.activity_type)
if act_red is not None:
agent.activity.activity_type = act_red
if isinstance(stmt, RegulateActivity):
if stmt.obj is not None:
obj_base = self._get_base(stmt.obj)
act_red = \
obj_base.get_activity_reduction(stmt.obj_activity)
if act_red is not None:
stmt.obj_activity = act_red
elif isinstance(stmt, ActiveForm):
agent_base = self._get_base(stmt.agent)
act_red = agent_base.get_activity_reduction(stmt.activity)
if act_red is not None:
stmt.activity = act_red | python | def reduce_activities(self):
"""Rewrite the activity types referenced in Statements for consistency.
Activity types are reduced to the most specific form whenever possible.
For instance, if 'kinase' is the only specific activity type known
for the BaseAgent of BRAF, its generic 'activity' forms are rewritten
to 'kinase'.
"""
for stmt in self.statements:
agents = stmt.agent_list()
for agent in agents:
if agent is not None and agent.activity is not None:
agent_base = self._get_base(agent)
act_red = agent_base.get_activity_reduction(
agent.activity.activity_type)
if act_red is not None:
agent.activity.activity_type = act_red
if isinstance(stmt, RegulateActivity):
if stmt.obj is not None:
obj_base = self._get_base(stmt.obj)
act_red = \
obj_base.get_activity_reduction(stmt.obj_activity)
if act_red is not None:
stmt.obj_activity = act_red
elif isinstance(stmt, ActiveForm):
agent_base = self._get_base(stmt.agent)
act_red = agent_base.get_activity_reduction(stmt.activity)
if act_red is not None:
stmt.activity = act_red | [
"def",
"reduce_activities",
"(",
"self",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"agents",
"=",
"stmt",
".",
"agent_list",
"(",
")",
"for",
"agent",
"in",
"agents",
":",
"if",
"agent",
"is",
"not",
"None",
"and",
"agent",
".",
"activity",
"is",
"not",
"None",
":",
"agent_base",
"=",
"self",
".",
"_get_base",
"(",
"agent",
")",
"act_red",
"=",
"agent_base",
".",
"get_activity_reduction",
"(",
"agent",
".",
"activity",
".",
"activity_type",
")",
"if",
"act_red",
"is",
"not",
"None",
":",
"agent",
".",
"activity",
".",
"activity_type",
"=",
"act_red",
"if",
"isinstance",
"(",
"stmt",
",",
"RegulateActivity",
")",
":",
"if",
"stmt",
".",
"obj",
"is",
"not",
"None",
":",
"obj_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"obj",
")",
"act_red",
"=",
"obj_base",
".",
"get_activity_reduction",
"(",
"stmt",
".",
"obj_activity",
")",
"if",
"act_red",
"is",
"not",
"None",
":",
"stmt",
".",
"obj_activity",
"=",
"act_red",
"elif",
"isinstance",
"(",
"stmt",
",",
"ActiveForm",
")",
":",
"agent_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"agent",
")",
"act_red",
"=",
"agent_base",
".",
"get_activity_reduction",
"(",
"stmt",
".",
"activity",
")",
"if",
"act_red",
"is",
"not",
"None",
":",
"stmt",
".",
"activity",
"=",
"act_red"
]
| Rewrite the activity types referenced in Statements for consistency.
Activity types are reduced to the most specific form whenever possible.
For instance, if 'kinase' is the only specific activity type known
for the BaseAgent of BRAF, its generic 'activity' forms are rewritten
to 'kinase'. | [
"Rewrite",
"the",
"activity",
"types",
"referenced",
"in",
"Statements",
"for",
"consistency",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L223-L251 | train |
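A call-pattern sketch for the reduction described above. Whether the generic 'activity' is actually rewritten to 'kinase' depends on the activity hierarchy built during the gather step, which is not shown in this entry, so the expected outcome in the final comment is an assumption; ActivityCondition is assumed from the indra.statements API.

```python
from indra.statements import Agent, ActivityCondition, Activation, Phosphorylation
from indra.mechlinker import MechLinker

stmts = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1'), 'S', '218'),
         Activation(Agent('BRAF', activity=ActivityCondition('activity', True)),
                    Agent('MAP2K1'))]
ml = MechLinker(stmts)
ml.gather_implicit_activities()
ml.reduce_activities()
# If 'kinase' ends up as the only specific activity known for BRAF, the generic
# 'activity' condition on the Activation subject is rewritten to 'kinase'.
```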
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.infer_complexes | def infer_complexes(stmts):
"""Return inferred Complex from Statements implying physical interaction.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Complexes from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
interact_stmts = _get_statements_by_type(stmts, Modification)
linked_stmts = []
for mstmt in interact_stmts:
if mstmt.enz is None:
continue
st = Complex([mstmt.enz, mstmt.sub], evidence=mstmt.evidence)
linked_stmts.append(st)
return linked_stmts | python | def infer_complexes(stmts):
"""Return inferred Complex from Statements implying physical interaction.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Complexes from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
interact_stmts = _get_statements_by_type(stmts, Modification)
linked_stmts = []
for mstmt in interact_stmts:
if mstmt.enz is None:
continue
st = Complex([mstmt.enz, mstmt.sub], evidence=mstmt.evidence)
linked_stmts.append(st)
return linked_stmts | [
"def",
"infer_complexes",
"(",
"stmts",
")",
":",
"interact_stmts",
"=",
"_get_statements_by_type",
"(",
"stmts",
",",
"Modification",
")",
"linked_stmts",
"=",
"[",
"]",
"for",
"mstmt",
"in",
"interact_stmts",
":",
"if",
"mstmt",
".",
"enz",
"is",
"None",
":",
"continue",
"st",
"=",
"Complex",
"(",
"[",
"mstmt",
".",
"enz",
",",
"mstmt",
".",
"sub",
"]",
",",
"evidence",
"=",
"mstmt",
".",
"evidence",
")",
"linked_stmts",
".",
"append",
"(",
"st",
")",
"return",
"linked_stmts"
]
| Return inferred Complex from Statements implying physical interaction.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Complexes from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements. | [
"Return",
"inferred",
"Complex",
"from",
"Statements",
"implying",
"physical",
"interaction",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L254-L274 | train |
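A short sketch of the inference above, assuming infer_complexes can be called as a static method as its self-less signature suggests:

```python
from indra.statements import Agent, Phosphorylation
from indra.mechlinker import MechLinker

phos = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))
inferred = MechLinker.infer_complexes([phos])
print(inferred[0])  # a Complex between MAP2K1 and MAPK1, per the code above
```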
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.infer_activations | def infer_activations(stmts):
"""Return inferred RegulateActivity from Modification + ActiveForm.
This function looks for combinations of Modification and ActiveForm
Statements and infers Activation/Inhibition Statements from them.
For example, if we know that A phosphorylates B, and the
phosphorylated form of B is active, then we can infer that
A activates B. This can also be viewed as having "explained" a given
Activation/Inhibition Statement with a combination of more mechanistic
Modification + ActiveForm Statements.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer RegulateActivity from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
linked_stmts = []
af_stmts = _get_statements_by_type(stmts, ActiveForm)
mod_stmts = _get_statements_by_type(stmts, Modification)
for af_stmt, mod_stmt in itertools.product(*(af_stmts, mod_stmts)):
# There has to be an enzyme and the substrate and the
# agent of the active form have to match
if mod_stmt.enz is None or \
(not af_stmt.agent.entity_matches(mod_stmt.sub)):
continue
# We now check the modifications to make sure they are consistent
if not af_stmt.agent.mods:
continue
found = False
for mc in af_stmt.agent.mods:
if mc.mod_type == modclass_to_modtype[mod_stmt.__class__] and \
mc.residue == mod_stmt.residue and \
mc.position == mod_stmt.position:
found = True
if not found:
continue
# Collect evidence
ev = mod_stmt.evidence
# Finally, check the polarity of the ActiveForm
if af_stmt.is_active:
st = Activation(mod_stmt.enz, mod_stmt.sub, af_stmt.activity,
evidence=ev)
else:
st = Inhibition(mod_stmt.enz, mod_stmt.sub, af_stmt.activity,
evidence=ev)
linked_stmts.append(LinkedStatement([af_stmt, mod_stmt], st))
return linked_stmts | python | def infer_activations(stmts):
"""Return inferred RegulateActivity from Modification + ActiveForm.
This function looks for combinations of Modification and ActiveForm
Statements and infers Activation/Inhibition Statements from them.
For example, if we know that A phosphorylates B, and the
phosphorylated form of B is active, then we can infer that
A activates B. This can also be viewed as having "explained" a given
Activation/Inhibition Statement with a combination of more mechanistic
Modification + ActiveForm Statements.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer RegulateActivity from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
linked_stmts = []
af_stmts = _get_statements_by_type(stmts, ActiveForm)
mod_stmts = _get_statements_by_type(stmts, Modification)
for af_stmt, mod_stmt in itertools.product(*(af_stmts, mod_stmts)):
# There has to be an enzyme and the substrate and the
# agent of the active form have to match
if mod_stmt.enz is None or \
(not af_stmt.agent.entity_matches(mod_stmt.sub)):
continue
# We now check the modifications to make sure they are consistent
if not af_stmt.agent.mods:
continue
found = False
for mc in af_stmt.agent.mods:
if mc.mod_type == modclass_to_modtype[mod_stmt.__class__] and \
mc.residue == mod_stmt.residue and \
mc.position == mod_stmt.position:
found = True
if not found:
continue
# Collect evidence
ev = mod_stmt.evidence
# Finally, check the polarity of the ActiveForm
if af_stmt.is_active:
st = Activation(mod_stmt.enz, mod_stmt.sub, af_stmt.activity,
evidence=ev)
else:
st = Inhibition(mod_stmt.enz, mod_stmt.sub, af_stmt.activity,
evidence=ev)
linked_stmts.append(LinkedStatement([af_stmt, mod_stmt], st))
return linked_stmts | [
"def",
"infer_activations",
"(",
"stmts",
")",
":",
"linked_stmts",
"=",
"[",
"]",
"af_stmts",
"=",
"_get_statements_by_type",
"(",
"stmts",
",",
"ActiveForm",
")",
"mod_stmts",
"=",
"_get_statements_by_type",
"(",
"stmts",
",",
"Modification",
")",
"for",
"af_stmt",
",",
"mod_stmt",
"in",
"itertools",
".",
"product",
"(",
"*",
"(",
"af_stmts",
",",
"mod_stmts",
")",
")",
":",
"# There has to be an enzyme and the substrate and the",
"# agent of the active form have to match",
"if",
"mod_stmt",
".",
"enz",
"is",
"None",
"or",
"(",
"not",
"af_stmt",
".",
"agent",
".",
"entity_matches",
"(",
"mod_stmt",
".",
"sub",
")",
")",
":",
"continue",
"# We now check the modifications to make sure they are consistent",
"if",
"not",
"af_stmt",
".",
"agent",
".",
"mods",
":",
"continue",
"found",
"=",
"False",
"for",
"mc",
"in",
"af_stmt",
".",
"agent",
".",
"mods",
":",
"if",
"mc",
".",
"mod_type",
"==",
"modclass_to_modtype",
"[",
"mod_stmt",
".",
"__class__",
"]",
"and",
"mc",
".",
"residue",
"==",
"mod_stmt",
".",
"residue",
"and",
"mc",
".",
"position",
"==",
"mod_stmt",
".",
"position",
":",
"found",
"=",
"True",
"if",
"not",
"found",
":",
"continue",
"# Collect evidence",
"ev",
"=",
"mod_stmt",
".",
"evidence",
"# Finally, check the polarity of the ActiveForm",
"if",
"af_stmt",
".",
"is_active",
":",
"st",
"=",
"Activation",
"(",
"mod_stmt",
".",
"enz",
",",
"mod_stmt",
".",
"sub",
",",
"af_stmt",
".",
"activity",
",",
"evidence",
"=",
"ev",
")",
"else",
":",
"st",
"=",
"Inhibition",
"(",
"mod_stmt",
".",
"enz",
",",
"mod_stmt",
".",
"sub",
",",
"af_stmt",
".",
"activity",
",",
"evidence",
"=",
"ev",
")",
"linked_stmts",
".",
"append",
"(",
"LinkedStatement",
"(",
"[",
"af_stmt",
",",
"mod_stmt",
"]",
",",
"st",
")",
")",
"return",
"linked_stmts"
]
| Return inferred RegulateActivity from Modification + ActiveForm.
This function looks for combinations of Modification and ActiveForm
Statements and infers Activation/Inhibition Statements from them.
For example, if we know that A phosphorylates B, and the
phosphorylated form of B is active, then we can infer that
A activates B. This can also be viewed as having "explained" a given
Activation/Inhibition Statement with a combination of more mechanistic
Modification + ActiveForm Statements.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer RegulateActivity from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements. | [
"Return",
"inferred",
"RegulateActivity",
"from",
"Modification",
"+",
"ActiveForm",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L277-L328 | train |
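A sketch of the "A phosphorylates B, phospho-B is active, therefore A activates B" example from the docstring above (statement constructors assumed from the indra.statements API; infer_activations assumed static):

```python
from indra.statements import Agent, ModCondition, ActiveForm, Phosphorylation
from indra.mechlinker import MechLinker

phos = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')
erk_p = Agent('MAPK1', mods=[ModCondition('phosphorylation', 'T', '185')])
af = ActiveForm(erk_p, 'kinase', True)

for ls in MechLinker.infer_activations([af, phos]):
    print(ls.inferred_stmt)  # expected: Activation of MAPK1 'kinase' by MAP2K1
```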
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.infer_active_forms | def infer_active_forms(stmts):
"""Return inferred ActiveForm from RegulateActivity + Modification.
This function looks for combinations of Activation/Inhibition
Statements and Modification Statements, and infers an ActiveForm
from them. For example, if we know that A activates B and
A phosphorylates B, then we can infer that the phosphorylated form
of B is active.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer ActiveForms from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
linked_stmts = []
for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
# TODO: revise the conditions here
if not (act_stmt.subj.activity is not None and
act_stmt.subj.activity.activity_type == 'kinase' and
act_stmt.subj.activity.is_active):
continue
matching = []
ev = act_stmt.evidence
for mod_stmt in _get_statements_by_type(stmts, Modification):
if mod_stmt.enz is not None:
if mod_stmt.enz.entity_matches(act_stmt.subj) and \
mod_stmt.sub.entity_matches(act_stmt.obj):
matching.append(mod_stmt)
ev.extend(mod_stmt.evidence)
if not matching:
continue
mods = []
for mod_stmt in matching:
mod_type_name = mod_stmt.__class__.__name__.lower()
if isinstance(mod_stmt, AddModification):
is_modified = True
else:
is_modified = False
mod_type_name = mod_type_name[2:]
mc = ModCondition(mod_type_name, mod_stmt.residue,
mod_stmt.position, is_modified)
mods.append(mc)
source_stmts = [act_stmt] + [m for m in matching]
st = ActiveForm(Agent(act_stmt.obj.name, mods=mods,
db_refs=act_stmt.obj.db_refs),
act_stmt.obj_activity, act_stmt.is_activation,
evidence=ev)
linked_stmts.append(LinkedStatement(source_stmts, st))
logger.info('inferred: %s' % st)
return linked_stmts | python | def infer_active_forms(stmts):
"""Return inferred ActiveForm from RegulateActivity + Modification.
This function looks for combinations of Activation/Inhibition
Statements and Modification Statements, and infers an ActiveForm
from them. For example, if we know that A activates B and
A phosphorylates B, then we can infer that the phosphorylated form
of B is active.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer ActiveForms from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
linked_stmts = []
for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
# TODO: revise the conditions here
if not (act_stmt.subj.activity is not None and
act_stmt.subj.activity.activity_type == 'kinase' and
act_stmt.subj.activity.is_active):
continue
matching = []
ev = act_stmt.evidence
for mod_stmt in _get_statements_by_type(stmts, Modification):
if mod_stmt.enz is not None:
if mod_stmt.enz.entity_matches(act_stmt.subj) and \
mod_stmt.sub.entity_matches(act_stmt.obj):
matching.append(mod_stmt)
ev.extend(mod_stmt.evidence)
if not matching:
continue
mods = []
for mod_stmt in matching:
mod_type_name = mod_stmt.__class__.__name__.lower()
if isinstance(mod_stmt, AddModification):
is_modified = True
else:
is_modified = False
mod_type_name = mod_type_name[2:]
mc = ModCondition(mod_type_name, mod_stmt.residue,
mod_stmt.position, is_modified)
mods.append(mc)
source_stmts = [act_stmt] + [m for m in matching]
st = ActiveForm(Agent(act_stmt.obj.name, mods=mods,
db_refs=act_stmt.obj.db_refs),
act_stmt.obj_activity, act_stmt.is_activation,
evidence=ev)
linked_stmts.append(LinkedStatement(source_stmts, st))
logger.info('inferred: %s' % st)
return linked_stmts | [
"def",
"infer_active_forms",
"(",
"stmts",
")",
":",
"linked_stmts",
"=",
"[",
"]",
"for",
"act_stmt",
"in",
"_get_statements_by_type",
"(",
"stmts",
",",
"RegulateActivity",
")",
":",
"# TODO: revise the conditions here",
"if",
"not",
"(",
"act_stmt",
".",
"subj",
".",
"activity",
"is",
"not",
"None",
"and",
"act_stmt",
".",
"subj",
".",
"activity",
".",
"activity_type",
"==",
"'kinase'",
"and",
"act_stmt",
".",
"subj",
".",
"activity",
".",
"is_active",
")",
":",
"continue",
"matching",
"=",
"[",
"]",
"ev",
"=",
"act_stmt",
".",
"evidence",
"for",
"mod_stmt",
"in",
"_get_statements_by_type",
"(",
"stmts",
",",
"Modification",
")",
":",
"if",
"mod_stmt",
".",
"enz",
"is",
"not",
"None",
":",
"if",
"mod_stmt",
".",
"enz",
".",
"entity_matches",
"(",
"act_stmt",
".",
"subj",
")",
"and",
"mod_stmt",
".",
"sub",
".",
"entity_matches",
"(",
"act_stmt",
".",
"obj",
")",
":",
"matching",
".",
"append",
"(",
"mod_stmt",
")",
"ev",
".",
"extend",
"(",
"mod_stmt",
".",
"evidence",
")",
"if",
"not",
"matching",
":",
"continue",
"mods",
"=",
"[",
"]",
"for",
"mod_stmt",
"in",
"matching",
":",
"mod_type_name",
"=",
"mod_stmt",
".",
"__class__",
".",
"__name__",
".",
"lower",
"(",
")",
"if",
"isinstance",
"(",
"mod_stmt",
",",
"AddModification",
")",
":",
"is_modified",
"=",
"True",
"else",
":",
"is_modified",
"=",
"False",
"mod_type_name",
"=",
"mod_type_name",
"[",
"2",
":",
"]",
"mc",
"=",
"ModCondition",
"(",
"mod_type_name",
",",
"mod_stmt",
".",
"residue",
",",
"mod_stmt",
".",
"position",
",",
"is_modified",
")",
"mods",
".",
"append",
"(",
"mc",
")",
"source_stmts",
"=",
"[",
"act_stmt",
"]",
"+",
"[",
"m",
"for",
"m",
"in",
"matching",
"]",
"st",
"=",
"ActiveForm",
"(",
"Agent",
"(",
"act_stmt",
".",
"obj",
".",
"name",
",",
"mods",
"=",
"mods",
",",
"db_refs",
"=",
"act_stmt",
".",
"obj",
".",
"db_refs",
")",
",",
"act_stmt",
".",
"obj_activity",
",",
"act_stmt",
".",
"is_activation",
",",
"evidence",
"=",
"ev",
")",
"linked_stmts",
".",
"append",
"(",
"LinkedStatement",
"(",
"source_stmts",
",",
"st",
")",
")",
"logger",
".",
"info",
"(",
"'inferred: %s'",
"%",
"st",
")",
"return",
"linked_stmts"
]
| Return inferred ActiveForm from RegulateActivity + Modification.
This function looks for combinations of Activation/Inhibition
Statements and Modification Statements, and infers an ActiveForm
from them. For example, if we know that A activates B and
A phosphorylates B, then we can infer that the phosphorylated form
of B is active.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer ActiveForms from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements. | [
"Return",
"inferred",
"ActiveForm",
"from",
"RegulateActivity",
"+",
"Modification",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L331-L385 | train |
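A sketch of the converse inference above. Note that the code requires the regulating subject to carry an active 'kinase' condition; ActivityCondition is assumed to be available from indra.statements:

```python
from indra.statements import (Agent, ActivityCondition, Activation,
                              Phosphorylation)
from indra.mechlinker import MechLinker

mek = Agent('MAP2K1', activity=ActivityCondition('kinase', True))
stmts = [Activation(mek, Agent('MAPK1')),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')]

for ls in MechLinker.infer_active_forms(stmts):
    print(ls.inferred_stmt)  # expected: ActiveForm of phospho-T185 MAPK1
```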
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.infer_modifications | def infer_modifications(stmts):
"""Return inferred Modification from RegulateActivity + ActiveForm.
This function looks for combinations of Activation/Inhibition Statements
and ActiveForm Statements that imply a Modification Statement.
For example, if we know that A activates B, and phosphorylated B is
active, then we can infer that A leads to the phosphorylation of B.
An additional requirement when making this assumption is that the
activity of B should only be dependent on the modified state and not
other context - otherwise the inferred Modification is not necessarily
warranted.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Modifications from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
linked_stmts = []
for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
for af_stmt in _get_statements_by_type(stmts, ActiveForm):
if not af_stmt.agent.entity_matches(act_stmt.obj):
continue
mods = af_stmt.agent.mods
# Make sure the ActiveForm only involves modified sites
if af_stmt.agent.mutations or \
af_stmt.agent.bound_conditions or \
af_stmt.agent.location:
continue
if not af_stmt.agent.mods:
continue
for mod in af_stmt.agent.mods:
evs = act_stmt.evidence + af_stmt.evidence
for ev in evs:
ev.epistemics['direct'] = False
if mod.is_modified:
mod_type_name = mod.mod_type
else:
mod_type_name = modtype_to_inverse[mod.mod_type]
mod_class = modtype_to_modclass[mod_type_name]
if not mod_class:
continue
st = mod_class(act_stmt.subj,
act_stmt.obj,
mod.residue, mod.position,
evidence=evs)
ls = LinkedStatement([act_stmt, af_stmt], st)
linked_stmts.append(ls)
logger.info('inferred: %s' % st)
return linked_stmts | python | def infer_modifications(stmts):
"""Return inferred Modification from RegulateActivity + ActiveForm.
This function looks for combinations of Activation/Inhibition Statements
and ActiveForm Statements that imply a Modification Statement.
For example, if we know that A activates B, and phosphorylated B is
active, then we can infer that A leads to the phosphorylation of B.
An additional requirement when making this assumption is that the
activity of B should only be dependent on the modified state and not
other context - otherwise the inferred Modification is not necessarily
warranted.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Modifications from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
linked_stmts = []
for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
for af_stmt in _get_statements_by_type(stmts, ActiveForm):
if not af_stmt.agent.entity_matches(act_stmt.obj):
continue
mods = af_stmt.agent.mods
# Make sure the ActiveForm only involves modified sites
if af_stmt.agent.mutations or \
af_stmt.agent.bound_conditions or \
af_stmt.agent.location:
continue
if not af_stmt.agent.mods:
continue
for mod in af_stmt.agent.mods:
evs = act_stmt.evidence + af_stmt.evidence
for ev in evs:
ev.epistemics['direct'] = False
if mod.is_modified:
mod_type_name = mod.mod_type
else:
mod_type_name = modtype_to_inverse[mod.mod_type]
mod_class = modtype_to_modclass[mod_type_name]
if not mod_class:
continue
st = mod_class(act_stmt.subj,
act_stmt.obj,
mod.residue, mod.position,
evidence=evs)
ls = LinkedStatement([act_stmt, af_stmt], st)
linked_stmts.append(ls)
logger.info('inferred: %s' % st)
return linked_stmts | [
"def",
"infer_modifications",
"(",
"stmts",
")",
":",
"linked_stmts",
"=",
"[",
"]",
"for",
"act_stmt",
"in",
"_get_statements_by_type",
"(",
"stmts",
",",
"RegulateActivity",
")",
":",
"for",
"af_stmt",
"in",
"_get_statements_by_type",
"(",
"stmts",
",",
"ActiveForm",
")",
":",
"if",
"not",
"af_stmt",
".",
"agent",
".",
"entity_matches",
"(",
"act_stmt",
".",
"obj",
")",
":",
"continue",
"mods",
"=",
"af_stmt",
".",
"agent",
".",
"mods",
"# Make sure the ActiveForm only involves modified sites",
"if",
"af_stmt",
".",
"agent",
".",
"mutations",
"or",
"af_stmt",
".",
"agent",
".",
"bound_conditions",
"or",
"af_stmt",
".",
"agent",
".",
"location",
":",
"continue",
"if",
"not",
"af_stmt",
".",
"agent",
".",
"mods",
":",
"continue",
"for",
"mod",
"in",
"af_stmt",
".",
"agent",
".",
"mods",
":",
"evs",
"=",
"act_stmt",
".",
"evidence",
"+",
"af_stmt",
".",
"evidence",
"for",
"ev",
"in",
"evs",
":",
"ev",
".",
"epistemics",
"[",
"'direct'",
"]",
"=",
"False",
"if",
"mod",
".",
"is_modified",
":",
"mod_type_name",
"=",
"mod",
".",
"mod_type",
"else",
":",
"mod_type_name",
"=",
"modtype_to_inverse",
"[",
"mod",
".",
"mod_type",
"]",
"mod_class",
"=",
"modtype_to_modclass",
"[",
"mod_type_name",
"]",
"if",
"not",
"mod_class",
":",
"continue",
"st",
"=",
"mod_class",
"(",
"act_stmt",
".",
"subj",
",",
"act_stmt",
".",
"obj",
",",
"mod",
".",
"residue",
",",
"mod",
".",
"position",
",",
"evidence",
"=",
"evs",
")",
"ls",
"=",
"LinkedStatement",
"(",
"[",
"act_stmt",
",",
"af_stmt",
"]",
",",
"st",
")",
"linked_stmts",
".",
"append",
"(",
"ls",
")",
"logger",
".",
"info",
"(",
"'inferred: %s'",
"%",
"st",
")",
"return",
"linked_stmts"
]
| Return inferred Modification from RegulateActivity + ActiveForm.
This function looks for combinations of Activation/Inhibition Statements
and ActiveForm Statements that imply a Modification Statement.
For example, if we know that A activates B, and phosphorylated B is
active, then we can infer that A leads to the phosphorylation of B.
An additional requirement when making this assumption is that the
activity of B should only be dependent on the modified state and not
other context - otherwise the inferred Modification is not necessarily
warranted.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Modifications from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements. | [
"Return",
"inferred",
"Modification",
"from",
"RegulateActivity",
"+",
"ActiveForm",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L388-L441 | train |
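A sketch of the activation-plus-active-form case described above; per the code, the ActiveForm agent may only carry modification conditions (no mutations, bound conditions, or location) for the inference to apply:

```python
from indra.statements import Agent, ModCondition, ActiveForm, Activation
from indra.mechlinker import MechLinker

act = Activation(Agent('MAP2K1'), Agent('MAPK1'))
erk_p = Agent('MAPK1', mods=[ModCondition('phosphorylation', 'T', '185')])
af = ActiveForm(erk_p, 'kinase', True)

for ls in MechLinker.infer_modifications([act, af]):
    print(ls.inferred_stmt)  # expected: Phosphorylation of MAPK1 at T185 by MAP2K1
```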
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.replace_complexes | def replace_complexes(self, linked_stmts=None):
"""Remove Complex Statements that can be inferred out.
This function iterates over self.statements and looks for Complex
Statements that either match or are refined by inferred Complex
Statements that were linked (provided as the linked_stmts argument).
It removes Complex Statements from self.statements that can be
explained by the linked statements.
Parameters
----------
linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]
A list of linked statements, optionally passed from outside.
If None is passed, the MechLinker runs self.infer_complexes to
infer Complexes and obtain a list of LinkedStatements that are
then used for removing existing Complexes in self.statements.
"""
if linked_stmts is None:
linked_stmts = self.infer_complexes(self.statements)
new_stmts = []
for stmt in self.statements:
if not isinstance(stmt, Complex):
new_stmts.append(stmt)
continue
found = False
for linked_stmt in linked_stmts:
if linked_stmt.refinement_of(stmt, hierarchies):
found = True
if not found:
new_stmts.append(stmt)
else:
logger.info('Removing complex: %s' % stmt)
self.statements = new_stmts | python | def replace_complexes(self, linked_stmts=None):
"""Remove Complex Statements that can be inferred out.
This function iterates over self.statements and looks for Complex
Statements that either match or are refined by inferred Complex
Statements that were linked (provided as the linked_stmts argument).
It removes Complex Statements from self.statements that can be
explained by the linked statements.
Parameters
----------
linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]
A list of linked statements, optionally passed from outside.
If None is passed, the MechLinker runs self.infer_complexes to
infer Complexes and obtain a list of LinkedStatements that are
then used for removing existing Complexes in self.statements.
"""
if linked_stmts is None:
linked_stmts = self.infer_complexes(self.statements)
new_stmts = []
for stmt in self.statements:
if not isinstance(stmt, Complex):
new_stmts.append(stmt)
continue
found = False
for linked_stmt in linked_stmts:
if linked_stmt.refinement_of(stmt, hierarchies):
found = True
if not found:
new_stmts.append(stmt)
else:
logger.info('Removing complex: %s' % stmt)
self.statements = new_stmts | [
"def",
"replace_complexes",
"(",
"self",
",",
"linked_stmts",
"=",
"None",
")",
":",
"if",
"linked_stmts",
"is",
"None",
":",
"linked_stmts",
"=",
"self",
".",
"infer_complexes",
"(",
"self",
".",
"statements",
")",
"new_stmts",
"=",
"[",
"]",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"not",
"isinstance",
"(",
"stmt",
",",
"Complex",
")",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"continue",
"found",
"=",
"False",
"for",
"linked_stmt",
"in",
"linked_stmts",
":",
"if",
"linked_stmt",
".",
"refinement_of",
"(",
"stmt",
",",
"hierarchies",
")",
":",
"found",
"=",
"True",
"if",
"not",
"found",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Removing complex: %s'",
"%",
"stmt",
")",
"self",
".",
"statements",
"=",
"new_stmts"
]
| Remove Complex Statements that can be inferred out.
This function iterates over self.statements and looks for Complex
Statements that either match or are refined by inferred Complex
Statements that were linked (provided as the linked_stmts argument).
It removes Complex Statements from self.statements that can be
explained by the linked statements.
Parameters
----------
linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]
A list of linked statements, optionally passed from outside.
If None is passed, the MechLinker runs self.infer_complexes to
infer Complexes and obtain a list of LinkedStatements that are
then used for removing existing Complexes in self.statements. | [
"Remove",
"Complex",
"Statements",
"that",
"can",
"be",
"inferred",
"out",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L443-L475 | train |
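A sketch of pruning an explicit Complex that is explained by a Modification, relying on infer_complexes shown above and INDRA's refinement checking:

```python
from indra.statements import Agent, Complex, Phosphorylation
from indra.mechlinker import MechLinker

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1')),
         Complex([Agent('MAP2K1'), Agent('MAPK1')])]
ml = MechLinker(stmts)
ml.replace_complexes()
# ml.statements is expected to retain only the Phosphorylation, since the
# explicit Complex is implied by it.
```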
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.replace_activations | def replace_activations(self, linked_stmts=None):
"""Remove RegulateActivity Statements that can be inferred out.
This function iterates over self.statements and looks for
RegulateActivity Statements that either match or are refined by
inferred RegulateActivity Statements that were linked
(provided as the linked_stmts argument).
It removes RegulateActivity Statements from self.statements that can be
explained by the linked statements.
Parameters
----------
linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]
A list of linked statements, optionally passed from outside.
If None is passed, the MechLinker runs self.infer_activations to
infer RegulateActivities and obtain a list of LinkedStatements
that are then used for removing existing Complexes
in self.statements.
"""
if linked_stmts is None:
linked_stmts = self.infer_activations(self.statements)
new_stmts = []
for stmt in self.statements:
if not isinstance(stmt, RegulateActivity):
new_stmts.append(stmt)
continue
found = False
for linked_stmt in linked_stmts:
inferred_stmt = linked_stmt.inferred_stmt
if stmt.is_activation == inferred_stmt.is_activation and \
stmt.subj.entity_matches(inferred_stmt.subj) and \
stmt.obj.entity_matches(inferred_stmt.obj):
found = True
if not found:
new_stmts.append(stmt)
else:
logger.info('Removing regulate activity: %s' % stmt)
self.statements = new_stmts | python | def replace_activations(self, linked_stmts=None):
"""Remove RegulateActivity Statements that can be inferred out.
This function iterates over self.statements and looks for
RegulateActivity Statements that either match or are refined by
inferred RegulateActivity Statements that were linked
(provided as the linked_stmts argument).
It removes RegulateActivity Statements from self.statements that can be
explained by the linked statements.
Parameters
----------
linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]
A list of linked statements, optionally passed from outside.
If None is passed, the MechLinker runs self.infer_activations to
infer RegulateActivities and obtain a list of LinkedStatements
that are then used for removing existing Complexes
in self.statements.
"""
if linked_stmts is None:
linked_stmts = self.infer_activations(self.statements)
new_stmts = []
for stmt in self.statements:
if not isinstance(stmt, RegulateActivity):
new_stmts.append(stmt)
continue
found = False
for linked_stmt in linked_stmts:
inferred_stmt = linked_stmt.inferred_stmt
if stmt.is_activation == inferred_stmt.is_activation and \
stmt.subj.entity_matches(inferred_stmt.subj) and \
stmt.obj.entity_matches(inferred_stmt.obj):
found = True
if not found:
new_stmts.append(stmt)
else:
logger.info('Removing regulate activity: %s' % stmt)
self.statements = new_stmts | [
"def",
"replace_activations",
"(",
"self",
",",
"linked_stmts",
"=",
"None",
")",
":",
"if",
"linked_stmts",
"is",
"None",
":",
"linked_stmts",
"=",
"self",
".",
"infer_activations",
"(",
"self",
".",
"statements",
")",
"new_stmts",
"=",
"[",
"]",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"not",
"isinstance",
"(",
"stmt",
",",
"RegulateActivity",
")",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"continue",
"found",
"=",
"False",
"for",
"linked_stmt",
"in",
"linked_stmts",
":",
"inferred_stmt",
"=",
"linked_stmt",
".",
"inferred_stmt",
"if",
"stmt",
".",
"is_activation",
"==",
"inferred_stmt",
".",
"is_activation",
"and",
"stmt",
".",
"subj",
".",
"entity_matches",
"(",
"inferred_stmt",
".",
"subj",
")",
"and",
"stmt",
".",
"obj",
".",
"entity_matches",
"(",
"inferred_stmt",
".",
"obj",
")",
":",
"found",
"=",
"True",
"if",
"not",
"found",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Removing regulate activity: %s'",
"%",
"stmt",
")",
"self",
".",
"statements",
"=",
"new_stmts"
]
| Remove RegulateActivity Statements that can be inferred out.
This function iterates over self.statements and looks for
RegulateActivity Statements that either match or are refined by
inferred RegulateActivity Statements that were linked
(provided as the linked_stmts argument).
It removes RegulateActivity Statements from self.statements that can be
explained by the linked statements.
Parameters
----------
linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]]
A list of linked statements, optionally passed from outside.
If None is passed, the MechLinker runs self.infer_activations to
infer RegulateActivities and obtain a list of LinkedStatements
that are then used for removing existing Complexes
in self.statements. | [
"Remove",
"RegulateActivity",
"Statements",
"that",
"can",
"be",
"inferred",
"out",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L477-L514 | train |
sorgerlab/indra | indra/mechlinker/__init__.py | BaseAgentSet.get_create_base_agent | def get_create_base_agent(self, agent):
"""Return BaseAgent from an Agent, creating it if needed.
Parameters
----------
agent : indra.statements.Agent
Returns
-------
base_agent : indra.mechlinker.BaseAgent
"""
try:
base_agent = self.agents[agent.name]
except KeyError:
base_agent = BaseAgent(agent.name)
self.agents[agent.name] = base_agent
return base_agent | python | def get_create_base_agent(self, agent):
"""Return BaseAgent from an Agent, creating it if needed.
Parameters
----------
agent : indra.statements.Agent
Returns
-------
base_agent : indra.mechlinker.BaseAgent
"""
try:
base_agent = self.agents[agent.name]
except KeyError:
base_agent = BaseAgent(agent.name)
self.agents[agent.name] = base_agent
return base_agent | [
"def",
"get_create_base_agent",
"(",
"self",
",",
"agent",
")",
":",
"try",
":",
"base_agent",
"=",
"self",
".",
"agents",
"[",
"agent",
".",
"name",
"]",
"except",
"KeyError",
":",
"base_agent",
"=",
"BaseAgent",
"(",
"agent",
".",
"name",
")",
"self",
".",
"agents",
"[",
"agent",
".",
"name",
"]",
"=",
"base_agent",
"return",
"base_agent"
]
| Return BaseAgent from an Agent, creating it if needed.
Parameters
----------
agent : indra.statements.Agent
Returns
-------
base_agent : indra.mechlinker.BaseAgent | [
"Return",
"BaseAgent",
"from",
"an",
"Agent",
"creating",
"it",
"if",
"needed",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L540-L557 | train |
sorgerlab/indra | indra/mechlinker/__init__.py | AgentState.apply_to | def apply_to(self, agent):
"""Apply this object's state to an Agent.
Parameters
----------
agent : indra.statements.Agent
The agent to which the state should be applied
"""
agent.bound_conditions = self.bound_conditions
agent.mods = self.mods
agent.mutations = self.mutations
agent.location = self.location
return self.evidence | python | def apply_to(self, agent):
"""Apply this object's state to an Agent.
Parameters
----------
agent : indra.statements.Agent
The agent to which the state should be applied
"""
agent.bound_conditions = self.bound_conditions
agent.mods = self.mods
agent.mutations = self.mutations
agent.location = self.location
return self.evidence | [
"def",
"apply_to",
"(",
"self",
",",
"agent",
")",
":",
"agent",
".",
"bound_conditions",
"=",
"self",
".",
"bound_conditions",
"agent",
".",
"mods",
"=",
"self",
".",
"mods",
"agent",
".",
"mutations",
"=",
"self",
".",
"mutations",
"agent",
".",
"location",
"=",
"self",
".",
"location",
"return",
"self",
".",
"evidence"
]
| Apply this object's state to an Agent.
Parameters
----------
agent : indra.statements.Agent
The agent to which the state should be applied | [
"Apply",
"this",
"object",
"s",
"state",
"to",
"an",
"Agent",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L713-L725 | train |
sorgerlab/indra | indra/tools/live_curation.py | submit_curation | def submit_curation():
"""Submit curations for a given corpus.
The submitted curations are handled to update the probability model but
there is no return value here. The update_belief function can be called
separately to calculate update belief scores.
Parameters
----------
corpus_id : str
The ID of the corpus for which the curation is submitted.
curations : dict
A set of curations where each key is a Statement UUID in the given
corpus and each key is 0 or 1 with 0 corresponding to incorrect and
1 corresponding to correct.
"""
if request.json is None:
abort(Response('Missing application/json header.', 415))
# Get input parameters
corpus_id = request.json.get('corpus_id')
curations = request.json.get('curations', {})
try:
curator.submit_curation(corpus_id, curations)
except InvalidCorpusError:
abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
return
return jsonify({}) | python | def submit_curation():
"""Submit curations for a given corpus.
The submitted curations are handled to update the probability model but
there is no return value here. The update_belief function can be called
separately to calculate update belief scores.
Parameters
----------
corpus_id : str
The ID of the corpus for which the curation is submitted.
curations : dict
A set of curations where each key is a Statement UUID in the given
corpus and each key is 0 or 1 with 0 corresponding to incorrect and
1 corresponding to correct.
"""
if request.json is None:
abort(Response('Missing application/json header.', 415))
# Get input parameters
corpus_id = request.json.get('corpus_id')
curations = request.json.get('curations', {})
try:
curator.submit_curation(corpus_id, curations)
except InvalidCorpusError:
abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
return
return jsonify({}) | [
"def",
"submit_curation",
"(",
")",
":",
"if",
"request",
".",
"json",
"is",
"None",
":",
"abort",
"(",
"Response",
"(",
"'Missing application/json header.'",
",",
"415",
")",
")",
"# Get input parameters",
"corpus_id",
"=",
"request",
".",
"json",
".",
"get",
"(",
"'corpus_id'",
")",
"curations",
"=",
"request",
".",
"json",
".",
"get",
"(",
"'curations'",
",",
"{",
"}",
")",
"try",
":",
"curator",
".",
"submit_curation",
"(",
"corpus_id",
",",
"curations",
")",
"except",
"InvalidCorpusError",
":",
"abort",
"(",
"Response",
"(",
"'The corpus_id \"%s\" is unknown.'",
"%",
"corpus_id",
",",
"400",
")",
")",
"return",
"return",
"jsonify",
"(",
"{",
"}",
")"
]
| Submit curations for a given corpus.
The submitted curations are handled to update the probability model but
there is no return value here. The update_belief function can be called
separately to calculate update belief scores.
Parameters
----------
corpus_id : str
The ID of the corpus for which the curation is submitted.
curations : dict
A set of curations where each key is a Statement UUID in the given
corpus and each key is 0 or 1 with 0 corresponding to incorrect and
1 corresponding to correct. | [
"Submit",
"curations",
"for",
"a",
"given",
"corpus",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/live_curation.py#L239-L265 | train |
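A hedged client-side sketch for the endpoint above. The host, port, and route path are assumptions (the Flask route decorator is not included in this entry); the JSON fields come from the docstring.

```python
import requests

payload = {'corpus_id': 'my_corpus',
           'curations': {'statement-uuid-1': 1,   # correct
                         'statement-uuid-2': 0}}  # incorrect
resp = requests.post('http://localhost:8001/submit_curation', json=payload)
resp.raise_for_status()  # the endpoint returns an empty JSON object on success
```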
sorgerlab/indra | indra/tools/live_curation.py | update_beliefs | def update_beliefs():
"""Return updated beliefs based on current probability model."""
if request.json is None:
abort(Response('Missing application/json header.', 415))
# Get input parameters
corpus_id = request.json.get('corpus_id')
try:
belief_dict = curator.update_beliefs(corpus_id)
except InvalidCorpusError:
abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
return
return jsonify(belief_dict) | python | def update_beliefs():
"""Return updated beliefs based on current probability model."""
if request.json is None:
abort(Response('Missing application/json header.', 415))
# Get input parameters
corpus_id = request.json.get('corpus_id')
try:
belief_dict = curator.update_beliefs(corpus_id)
except InvalidCorpusError:
abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
return
return jsonify(belief_dict) | [
"def",
"update_beliefs",
"(",
")",
":",
"if",
"request",
".",
"json",
"is",
"None",
":",
"abort",
"(",
"Response",
"(",
"'Missing application/json header.'",
",",
"415",
")",
")",
"# Get input parameters",
"corpus_id",
"=",
"request",
".",
"json",
".",
"get",
"(",
"'corpus_id'",
")",
"try",
":",
"belief_dict",
"=",
"curator",
".",
"update_beliefs",
"(",
"corpus_id",
")",
"except",
"InvalidCorpusError",
":",
"abort",
"(",
"Response",
"(",
"'The corpus_id \"%s\" is unknown.'",
"%",
"corpus_id",
",",
"400",
")",
")",
"return",
"return",
"jsonify",
"(",
"belief_dict",
")"
]
| Return updated beliefs based on current probability model. | [
"Return",
"updated",
"beliefs",
"based",
"on",
"current",
"probability",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/live_curation.py#L269-L280 | train |
sorgerlab/indra | indra/tools/live_curation.py | LiveCurator.reset_scorer | def reset_scorer(self):
    """Reset the scorer used for curation."""
self.scorer = get_eidos_bayesian_scorer()
for corpus_id, corpus in self.corpora.items():
        corpus.curations = {} | python | def reset_scorer(self):
    """Reset the scorer used for curation."""
self.scorer = get_eidos_bayesian_scorer()
for corpus_id, corpus in self.corpora.items():
corpus.curations = {} | [
"def",
"reset_scorer",
"(",
"self",
")",
":",
"self",
".",
"scorer",
"=",
"get_eidos_bayesian_scorer",
"(",
")",
"for",
"corpus_id",
",",
"corpus",
"in",
"self",
".",
"corpora",
".",
"items",
"(",
")",
":",
"corpus",
".",
"curations",
"=",
"{",
"}"
]
 | Reset the scorer used for curation. | [
    "Reset",
    "the",
    "scorer",
    "used",
    "for",
    "curation",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/live_curation.py#L95-L99 | train |
sorgerlab/indra | indra/tools/live_curation.py | LiveCurator.get_corpus | def get_corpus(self, corpus_id):
"""Return a corpus given an ID.
If the corpus ID cannot be found, an InvalidCorpusError is raised.
Parameters
----------
corpus_id : str
The ID of the corpus to return.
Returns
-------
Corpus
The corpus with the given ID.
"""
try:
corpus = self.corpora[corpus_id]
return corpus
except KeyError:
raise InvalidCorpusError | python | def get_corpus(self, corpus_id):
"""Return a corpus given an ID.
If the corpus ID cannot be found, an InvalidCorpusError is raised.
Parameters
----------
corpus_id : str
The ID of the corpus to return.
Returns
-------
Corpus
The corpus with the given ID.
"""
try:
corpus = self.corpora[corpus_id]
return corpus
except KeyError:
raise InvalidCorpusError | [
"def",
"get_corpus",
"(",
"self",
",",
"corpus_id",
")",
":",
"try",
":",
"corpus",
"=",
"self",
".",
"corpora",
"[",
"corpus_id",
"]",
"return",
"corpus",
"except",
"KeyError",
":",
"raise",
"InvalidCorpusError"
]
| Return a corpus given an ID.
If the corpus ID cannot be found, an InvalidCorpusError is raised.
Parameters
----------
corpus_id : str
The ID of the corpus to return.
Returns
-------
Corpus
The corpus with the given ID. | [
"Return",
"a",
"corpus",
"given",
"an",
"ID",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/live_curation.py#L101-L120 | train |
sorgerlab/indra | indra/tools/live_curation.py | LiveCurator.update_beliefs | def update_beliefs(self, corpus_id):
"""Return updated belief scores for a given corpus.
Parameters
----------
corpus_id : str
The ID of the corpus for which beliefs are to be updated.
Returns
-------
dict
A dictionary of belief scores with keys corresponding to Statement
UUIDs and values to new belief scores.
"""
corpus = self.get_corpus(corpus_id)
be = BeliefEngine(self.scorer)
stmts = list(corpus.statements.values())
be.set_prior_probs(stmts)
# Here we set beliefs based on actual curation
for uuid, correct in corpus.curations.items():
stmt = corpus.statements.get(uuid)
if stmt is None:
logger.warning('%s is not in the corpus.' % uuid)
continue
stmt.belief = correct
belief_dict = {st.uuid: st.belief for st in stmts}
return belief_dict | python | def update_beliefs(self, corpus_id):
"""Return updated belief scores for a given corpus.
Parameters
----------
corpus_id : str
The ID of the corpus for which beliefs are to be updated.
Returns
-------
dict
A dictionary of belief scores with keys corresponding to Statement
UUIDs and values to new belief scores.
"""
corpus = self.get_corpus(corpus_id)
be = BeliefEngine(self.scorer)
stmts = list(corpus.statements.values())
be.set_prior_probs(stmts)
# Here we set beliefs based on actual curation
for uuid, correct in corpus.curations.items():
stmt = corpus.statements.get(uuid)
if stmt is None:
logger.warning('%s is not in the corpus.' % uuid)
continue
stmt.belief = correct
belief_dict = {st.uuid: st.belief for st in stmts}
return belief_dict | [
"def",
"update_beliefs",
"(",
"self",
",",
"corpus_id",
")",
":",
"corpus",
"=",
"self",
".",
"get_corpus",
"(",
"corpus_id",
")",
"be",
"=",
"BeliefEngine",
"(",
"self",
".",
"scorer",
")",
"stmts",
"=",
"list",
"(",
"corpus",
".",
"statements",
".",
"values",
"(",
")",
")",
"be",
".",
"set_prior_probs",
"(",
"stmts",
")",
"# Here we set beliefs based on actual curation",
"for",
"uuid",
",",
"correct",
"in",
"corpus",
".",
"curations",
".",
"items",
"(",
")",
":",
"stmt",
"=",
"corpus",
".",
"statements",
".",
"get",
"(",
"uuid",
")",
"if",
"stmt",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"'%s is not in the corpus.'",
"%",
"uuid",
")",
"continue",
"stmt",
".",
"belief",
"=",
"correct",
"belief_dict",
"=",
"{",
"st",
".",
"uuid",
":",
"st",
".",
"belief",
"for",
"st",
"in",
"stmts",
"}",
"return",
"belief_dict"
]
| Return updated belief scores for a given corpus.
Parameters
----------
corpus_id : str
The ID of the corpus for which beliefs are to be updated.
Returns
-------
dict
A dictionary of belief scores with keys corresponding to Statement
UUIDs and values to new belief scores. | [
"Return",
"updated",
"belief",
"scores",
"for",
"a",
"given",
"corpus",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/live_curation.py#L174-L200 | train |
sorgerlab/indra | indra/sources/eidos/scala_utils.py | get_python_list | def get_python_list(scala_list):
"""Return list from elements of scala.collection.immutable.List"""
python_list = []
for i in range(scala_list.length()):
python_list.append(scala_list.apply(i))
return python_list | python | def get_python_list(scala_list):
"""Return list from elements of scala.collection.immutable.List"""
python_list = []
for i in range(scala_list.length()):
python_list.append(scala_list.apply(i))
return python_list | [
"def",
"get_python_list",
"(",
"scala_list",
")",
":",
"python_list",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"scala_list",
".",
"length",
"(",
")",
")",
":",
"python_list",
".",
"append",
"(",
"scala_list",
".",
"apply",
"(",
"i",
")",
")",
"return",
"python_list"
]
| Return list from elements of scala.collection.immutable.List | [
"Return",
"list",
"from",
"elements",
"of",
"scala",
".",
"collection",
".",
"immutable",
".",
"List"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/scala_utils.py#L7-L12 | train |
sorgerlab/indra | indra/sources/eidos/scala_utils.py | get_python_dict | def get_python_dict(scala_map):
"""Return a dict from entries in a scala.collection.immutable.Map"""
python_dict = {}
keys = get_python_list(scala_map.keys().toList())
for key in keys:
python_dict[key] = scala_map.apply(key)
return python_dict | python | def get_python_dict(scala_map):
"""Return a dict from entries in a scala.collection.immutable.Map"""
python_dict = {}
keys = get_python_list(scala_map.keys().toList())
for key in keys:
python_dict[key] = scala_map.apply(key)
return python_dict | [
"def",
"get_python_dict",
"(",
"scala_map",
")",
":",
"python_dict",
"=",
"{",
"}",
"keys",
"=",
"get_python_list",
"(",
"scala_map",
".",
"keys",
"(",
")",
".",
"toList",
"(",
")",
")",
"for",
"key",
"in",
"keys",
":",
"python_dict",
"[",
"key",
"]",
"=",
"scala_map",
".",
"apply",
"(",
"key",
")",
"return",
"python_dict"
]
| Return a dict from entries in a scala.collection.immutable.Map | [
"Return",
"a",
"dict",
"from",
"entries",
"in",
"a",
"scala",
".",
"collection",
".",
"immutable",
".",
"Map"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/scala_utils.py#L15-L21 | train |
sorgerlab/indra | indra/sources/eidos/scala_utils.py | get_python_json | def get_python_json(scala_json):
"""Return a JSON dict from a org.json4s.JsonAST"""
def convert_node(node):
if node.__class__.__name__ in ('org.json4s.JsonAST$JValue',
'org.json4s.JsonAST$JObject'):
# Make a dictionary and then convert each value
values_raw = get_python_dict(node.values())
values = {}
for k, v in values_raw.items():
values[k] = convert_node(v)
return values
elif node.__class__.__name__.startswith('scala.collection.immutable.Map') or \
node.__class__.__name__ == \
'scala.collection.immutable.HashMap$HashTrieMap':
values_raw = get_python_dict(node)
values = {}
for k, v in values_raw.items():
values[k] = convert_node(v)
return values
elif node.__class__.__name__ == 'org.json4s.JsonAST$JArray':
entries_raw = get_python_list(node.values())
entries = []
for entry in entries_raw:
entries.append(convert_node(entry))
return entries
elif node.__class__.__name__ == 'scala.collection.immutable.$colon$colon':
entries_raw = get_python_list(node)
entries = []
for entry in entries_raw:
entries.append(convert_node(entry))
return entries
elif node.__class__.__name__ == 'scala.math.BigInt':
return node.intValue()
elif node.__class__.__name__ == 'scala.None$':
return None
elif node.__class__.__name__ == 'scala.collection.immutable.Nil$':
return []
elif isinstance(node, (str, int, float)):
return node
else:
logger.error('Cannot convert %s into Python' %
node.__class__.__name__)
return node.__class__.__name__
python_json = convert_node(scala_json)
return python_json | python | def get_python_json(scala_json):
"""Return a JSON dict from a org.json4s.JsonAST"""
def convert_node(node):
if node.__class__.__name__ in ('org.json4s.JsonAST$JValue',
'org.json4s.JsonAST$JObject'):
# Make a dictionary and then convert each value
values_raw = get_python_dict(node.values())
values = {}
for k, v in values_raw.items():
values[k] = convert_node(v)
return values
elif node.__class__.__name__.startswith('scala.collection.immutable.Map') or \
node.__class__.__name__ == \
'scala.collection.immutable.HashMap$HashTrieMap':
values_raw = get_python_dict(node)
values = {}
for k, v in values_raw.items():
values[k] = convert_node(v)
return values
elif node.__class__.__name__ == 'org.json4s.JsonAST$JArray':
entries_raw = get_python_list(node.values())
entries = []
for entry in entries_raw:
entries.append(convert_node(entry))
return entries
elif node.__class__.__name__ == 'scala.collection.immutable.$colon$colon':
entries_raw = get_python_list(node)
entries = []
for entry in entries_raw:
entries.append(convert_node(entry))
return entries
elif node.__class__.__name__ == 'scala.math.BigInt':
return node.intValue()
elif node.__class__.__name__ == 'scala.None$':
return None
elif node.__class__.__name__ == 'scala.collection.immutable.Nil$':
return []
elif isinstance(node, (str, int, float)):
return node
else:
logger.error('Cannot convert %s into Python' %
node.__class__.__name__)
return node.__class__.__name__
python_json = convert_node(scala_json)
return python_json | [
"def",
"get_python_json",
"(",
"scala_json",
")",
":",
"def",
"convert_node",
"(",
"node",
")",
":",
"if",
"node",
".",
"__class__",
".",
"__name__",
"in",
"(",
"'org.json4s.JsonAST$JValue'",
",",
"'org.json4s.JsonAST$JObject'",
")",
":",
"# Make a dictionary and then convert each value",
"values_raw",
"=",
"get_python_dict",
"(",
"node",
".",
"values",
"(",
")",
")",
"values",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"values_raw",
".",
"items",
"(",
")",
":",
"values",
"[",
"k",
"]",
"=",
"convert_node",
"(",
"v",
")",
"return",
"values",
"elif",
"node",
".",
"__class__",
".",
"__name__",
".",
"startswith",
"(",
"'scala.collection.immutable.Map'",
")",
"or",
"node",
".",
"__class__",
".",
"__name__",
"==",
"'scala.collection.immutable.HashMap$HashTrieMap'",
":",
"values_raw",
"=",
"get_python_dict",
"(",
"node",
")",
"values",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"values_raw",
".",
"items",
"(",
")",
":",
"values",
"[",
"k",
"]",
"=",
"convert_node",
"(",
"v",
")",
"return",
"values",
"elif",
"node",
".",
"__class__",
".",
"__name__",
"==",
"'org.json4s.JsonAST$JArray'",
":",
"entries_raw",
"=",
"get_python_list",
"(",
"node",
".",
"values",
"(",
")",
")",
"entries",
"=",
"[",
"]",
"for",
"entry",
"in",
"entries_raw",
":",
"entries",
".",
"append",
"(",
"convert_node",
"(",
"entry",
")",
")",
"return",
"entries",
"elif",
"node",
".",
"__class__",
".",
"__name__",
"==",
"'scala.collection.immutable.$colon$colon'",
":",
"entries_raw",
"=",
"get_python_list",
"(",
"node",
")",
"entries",
"=",
"[",
"]",
"for",
"entry",
"in",
"entries_raw",
":",
"entries",
".",
"append",
"(",
"convert_node",
"(",
"entry",
")",
")",
"return",
"entries",
"elif",
"node",
".",
"__class__",
".",
"__name__",
"==",
"'scala.math.BigInt'",
":",
"return",
"node",
".",
"intValue",
"(",
")",
"elif",
"node",
".",
"__class__",
".",
"__name__",
"==",
"'scala.None$'",
":",
"return",
"None",
"elif",
"node",
".",
"__class__",
".",
"__name__",
"==",
"'scala.collection.immutable.Nil$'",
":",
"return",
"[",
"]",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"str",
",",
"int",
",",
"float",
")",
")",
":",
"return",
"node",
"else",
":",
"logger",
".",
"error",
"(",
"'Cannot convert %s into Python'",
"%",
"node",
".",
"__class__",
".",
"__name__",
")",
"return",
"node",
".",
"__class__",
".",
"__name__",
"python_json",
"=",
"convert_node",
"(",
"scala_json",
")",
"return",
"python_json"
]
| Return a JSON dict from a org.json4s.JsonAST | [
"Return",
"a",
"JSON",
"dict",
"from",
"a",
"org",
".",
"json4s",
".",
"JsonAST"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/scala_utils.py#L24-L69 | train |
sorgerlab/indra | indra/databases/relevance_client.py | get_heat_kernel | def get_heat_kernel(network_id):
"""Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network.
"""
url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
if res is None:
logger.error('Could not get heat kernel for network %s.' % network_id)
return None
kernel_id = res.get('kernel_id')
if kernel_id is None:
logger.error('Could not get heat kernel for network %s.' % network_id)
return None
return kernel_id | python | def get_heat_kernel(network_id):
"""Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network.
"""
url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
if res is None:
logger.error('Could not get heat kernel for network %s.' % network_id)
return None
kernel_id = res.get('kernel_id')
if kernel_id is None:
logger.error('Could not get heat kernel for network %s.' % network_id)
return None
return kernel_id | [
"def",
"get_heat_kernel",
"(",
"network_id",
")",
":",
"url",
"=",
"ndex_relevance",
"+",
"'/%s/generate_ndex_heat_kernel'",
"%",
"network_id",
"res",
"=",
"ndex_client",
".",
"send_request",
"(",
"url",
",",
"{",
"}",
",",
"is_json",
"=",
"True",
",",
"use_get",
"=",
"True",
")",
"if",
"res",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'Could not get heat kernel for network %s.'",
"%",
"network_id",
")",
"return",
"None",
"kernel_id",
"=",
"res",
".",
"get",
"(",
"'kernel_id'",
")",
"if",
"kernel_id",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'Could not get heat kernel for network %s.'",
"%",
"network_id",
")",
"return",
"None",
"return",
"kernel_id"
]
| Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network. | [
"Return",
"the",
"identifier",
"of",
"a",
"heat",
"kernel",
"calculated",
"for",
"a",
"given",
"network",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/relevance_client.py#L17-L40 | train |
sorgerlab/indra | indra/databases/relevance_client.py | get_relevant_nodes | def get_relevant_nodes(network_id, query_nodes):
"""Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores.
"""
url = ndex_relevance + '/rank_entities'
kernel_id = get_heat_kernel(network_id)
if kernel_id is None:
return None
if isinstance(query_nodes, basestring):
query_nodes = [query_nodes]
params = {'identifier_set': query_nodes,
'kernel_id': kernel_id}
res = ndex_client.send_request(url, params, is_json=True)
if res is None:
logger.error("ndex_client.send_request returned None.")
return None
ranked_entities = res.get('ranked_entities')
if ranked_entities is None:
logger.error('Could not get ranked entities.')
return None
return ranked_entities | python | def get_relevant_nodes(network_id, query_nodes):
"""Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores.
"""
url = ndex_relevance + '/rank_entities'
kernel_id = get_heat_kernel(network_id)
if kernel_id is None:
return None
if isinstance(query_nodes, basestring):
query_nodes = [query_nodes]
params = {'identifier_set': query_nodes,
'kernel_id': kernel_id}
res = ndex_client.send_request(url, params, is_json=True)
if res is None:
logger.error("ndex_client.send_request returned None.")
return None
ranked_entities = res.get('ranked_entities')
if ranked_entities is None:
logger.error('Could not get ranked entities.')
return None
return ranked_entities | [
"def",
"get_relevant_nodes",
"(",
"network_id",
",",
"query_nodes",
")",
":",
"url",
"=",
"ndex_relevance",
"+",
"'/rank_entities'",
"kernel_id",
"=",
"get_heat_kernel",
"(",
"network_id",
")",
"if",
"kernel_id",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"query_nodes",
",",
"basestring",
")",
":",
"query_nodes",
"=",
"[",
"query_nodes",
"]",
"params",
"=",
"{",
"'identifier_set'",
":",
"query_nodes",
",",
"'kernel_id'",
":",
"kernel_id",
"}",
"res",
"=",
"ndex_client",
".",
"send_request",
"(",
"url",
",",
"params",
",",
"is_json",
"=",
"True",
")",
"if",
"res",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"ndex_client.send_request returned None.\"",
")",
"return",
"None",
"ranked_entities",
"=",
"res",
".",
"get",
"(",
"'ranked_entities'",
")",
"if",
"ranked_entities",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'Could not get ranked entities.'",
")",
"return",
"None",
"return",
"ranked_entities"
]
| Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores. | [
"Return",
"a",
"set",
"of",
"network",
"nodes",
"relevant",
"to",
"a",
"given",
"query",
"set",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/relevance_client.py#L43-L79 | train |
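A minimal usage sketch for the two relevance client functions above; the network UUID and gene names are illustrative placeholders, and the tuple unpacking follows the documented return shape:

    from indra.databases import relevance_client

    network_id = '00000000-aaaa-bbbb-cccc-000000000000'  # placeholder NDEx UUID
    ranked = relevance_client.get_relevant_nodes(network_id, ['MAPK1', 'EGFR'])
    if ranked is not None:
        for node_name, heat_score in ranked:
            print(node_name, heat_score)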
sorgerlab/indra | indra/belief/__init__.py | _get_belief_package | def _get_belief_package(stmt):
"""Return the belief packages of a given statement recursively."""
# This list will contain the belief packages for the given statement
belief_packages = []
# Iterate over all the support parents
for st in stmt.supports:
# Recursively get all the belief packages of the parent
parent_packages = _get_belief_package(st)
package_stmt_keys = [pkg.statement_key for pkg in belief_packages]
for package in parent_packages:
# Only add this belief package if it hasn't already been added
if package.statement_key not in package_stmt_keys:
belief_packages.append(package)
# Now make the Statement's own belief package and append it to the list
belief_package = BeliefPackage(stmt.matches_key(), stmt.evidence)
belief_packages.append(belief_package)
return belief_packages | python | def _get_belief_package(stmt):
"""Return the belief packages of a given statement recursively."""
# This list will contain the belief packages for the given statement
belief_packages = []
# Iterate over all the support parents
for st in stmt.supports:
# Recursively get all the belief packages of the parent
parent_packages = _get_belief_package(st)
package_stmt_keys = [pkg.statement_key for pkg in belief_packages]
for package in parent_packages:
# Only add this belief package if it hasn't already been added
if package.statement_key not in package_stmt_keys:
belief_packages.append(package)
# Now make the Statement's own belief package and append it to the list
belief_package = BeliefPackage(stmt.matches_key(), stmt.evidence)
belief_packages.append(belief_package)
return belief_packages | [
"def",
"_get_belief_package",
"(",
"stmt",
")",
":",
"# This list will contain the belief packages for the given statement",
"belief_packages",
"=",
"[",
"]",
"# Iterate over all the support parents",
"for",
"st",
"in",
"stmt",
".",
"supports",
":",
"# Recursively get all the belief packages of the parent",
"parent_packages",
"=",
"_get_belief_package",
"(",
"st",
")",
"package_stmt_keys",
"=",
"[",
"pkg",
".",
"statement_key",
"for",
"pkg",
"in",
"belief_packages",
"]",
"for",
"package",
"in",
"parent_packages",
":",
"# Only add this belief package if it hasn't already been added",
"if",
"package",
".",
"statement_key",
"not",
"in",
"package_stmt_keys",
":",
"belief_packages",
".",
"append",
"(",
"package",
")",
"# Now make the Statement's own belief package and append it to the list",
"belief_package",
"=",
"BeliefPackage",
"(",
"stmt",
".",
"matches_key",
"(",
")",
",",
"stmt",
".",
"evidence",
")",
"belief_packages",
".",
"append",
"(",
"belief_package",
")",
"return",
"belief_packages"
]
| Return the belief packages of a given statement recursively. | [
"Return",
"the",
"belief",
"packages",
"of",
"a",
"given",
"statement",
"recursively",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L415-L431 | train |
sorgerlab/indra | indra/belief/__init__.py | sample_statements | def sample_statements(stmts, seed=None):
"""Return statements sampled according to belief.
Statements are sampled independently according to their
belief scores. For instance, a Statement with a belief
score of 0.7 will end up in the returned Statement list
with probability 0.7.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of INDRA Statements to sample.
seed : Optional[int]
A seed for the random number generator used for sampling.
Returns
-------
new_stmts : list[indra.statements.Statement]
A list of INDRA Statements that were chosen by random sampling
according to their respective belief scores.
"""
if seed:
numpy.random.seed(seed)
new_stmts = []
r = numpy.random.rand(len(stmts))
for i, stmt in enumerate(stmts):
if r[i] < stmt.belief:
new_stmts.append(stmt)
return new_stmts | python | def sample_statements(stmts, seed=None):
"""Return statements sampled according to belief.
Statements are sampled independently according to their
belief scores. For instance, a Statement with a belief
score of 0.7 will end up in the returned Statement list
with probability 0.7.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of INDRA Statements to sample.
seed : Optional[int]
A seed for the random number generator used for sampling.
Returns
-------
new_stmts : list[indra.statements.Statement]
A list of INDRA Statements that were chosen by random sampling
according to their respective belief scores.
"""
if seed:
numpy.random.seed(seed)
new_stmts = []
r = numpy.random.rand(len(stmts))
for i, stmt in enumerate(stmts):
if r[i] < stmt.belief:
new_stmts.append(stmt)
return new_stmts | [
"def",
"sample_statements",
"(",
"stmts",
",",
"seed",
"=",
"None",
")",
":",
"if",
"seed",
":",
"numpy",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"new_stmts",
"=",
"[",
"]",
"r",
"=",
"numpy",
".",
"random",
".",
"rand",
"(",
"len",
"(",
"stmts",
")",
")",
"for",
"i",
",",
"stmt",
"in",
"enumerate",
"(",
"stmts",
")",
":",
"if",
"r",
"[",
"i",
"]",
"<",
"stmt",
".",
"belief",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"return",
"new_stmts"
]
| Return statements sampled according to belief.
Statements are sampled independently according to their
belief scores. For instance, a Statement with a belief
score of 0.7 will end up in the returned Statement list
with probability 0.7.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of INDRA Statements to sample.
seed : Optional[int]
A seed for the random number generator used for sampling.
Returns
-------
new_stmts : list[indra.statements.Statement]
A list of INDRA Statements that were chosen by random sampling
according to their respective belief scores. | [
"Return",
"statements",
"sampled",
"according",
"to",
"belief",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L434-L462 | train |
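A short, hedged example of sampling by belief; stmts is assumed to be a list of INDRA Statements whose belief attributes were already set by a belief engine:

    from indra.belief import sample_statements

    # With a fixed seed the same subset is drawn on every run
    sampled = sample_statements(stmts, seed=42)
    print('%d of %d statements kept' % (len(sampled), len(stmts)))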
sorgerlab/indra | indra/belief/__init__.py | evidence_random_noise_prior | def evidence_random_noise_prior(evidence, type_probs, subtype_probs):
"""Determines the random-noise prior probability for this evidence.
If the evidence corresponds to a subtype, and that subtype has a curated
prior noise probability, use that.
Otherwise, gives the random-noise prior for the overall rule type.
"""
(stype, subtype) = tag_evidence_subtype(evidence)
# Get the subtype, if available
# Return the subtype random noise prior, if available
if subtype_probs is not None:
if stype in subtype_probs:
if subtype in subtype_probs[stype]:
return subtype_probs[stype][subtype]
# Fallback to just returning the overall evidence type random noise prior
return type_probs[stype] | python | def evidence_random_noise_prior(evidence, type_probs, subtype_probs):
"""Determines the random-noise prior probability for this evidence.
If the evidence corresponds to a subtype, and that subtype has a curated
prior noise probability, use that.
Otherwise, gives the random-noise prior for the overall rule type.
"""
(stype, subtype) = tag_evidence_subtype(evidence)
# Get the subtype, if available
# Return the subtype random noise prior, if available
if subtype_probs is not None:
if stype in subtype_probs:
if subtype in subtype_probs[stype]:
return subtype_probs[stype][subtype]
# Fallback to just returning the overall evidence type random noise prior
return type_probs[stype] | [
"def",
"evidence_random_noise_prior",
"(",
"evidence",
",",
"type_probs",
",",
"subtype_probs",
")",
":",
"(",
"stype",
",",
"subtype",
")",
"=",
"tag_evidence_subtype",
"(",
"evidence",
")",
"# Get the subtype, if available",
"# Return the subtype random noise prior, if available",
"if",
"subtype_probs",
"is",
"not",
"None",
":",
"if",
"stype",
"in",
"subtype_probs",
":",
"if",
"subtype",
"in",
"subtype_probs",
"[",
"stype",
"]",
":",
"return",
"subtype_probs",
"[",
"stype",
"]",
"[",
"subtype",
"]",
"# Fallback to just returning the overall evidence type random noise prior",
"return",
"type_probs",
"[",
"stype",
"]"
]
| Determines the random-noise prior probability for this evidence.
If the evidence corresponds to a subtype, and that subtype has a curated
prior noise probability, use that.
Otherwise, gives the random-noise prior for the overall rule type. | [
"Determines",
"the",
"random",
"-",
"noise",
"prior",
"probability",
"for",
"this",
"evidence",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L465-L483 | train |
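A sketch of the dictionary shapes this helper expects, inferred from the lookups in the code above; the numeric values and the rule name are illustrative, not curated priors:

    from indra.belief import evidence_random_noise_prior

    type_probs = {'reach': 0.3, 'biopax': 0.2}            # per-source random-error priors
    subtype_probs = {'reach': {'some_reach_rule': 0.25}}  # optional per-rule overrides
    # ev is assumed to be an indra.statements.Evidence object from one of these sources
    p = evidence_random_noise_prior(ev, type_probs, subtype_probs)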
sorgerlab/indra | indra/belief/__init__.py | tag_evidence_subtype | def tag_evidence_subtype(evidence):
"""Returns the type and subtype of an evidence object as a string,
typically the extraction rule or database from which the statement
was generated.
For biopax, this is just the database name.
Parameters
----------
evidence : indra.statements.Evidence
The evidence object which we wish to subtype
Returns
-------
types: tuple
A tuple with (type, subtype), both strings
Returns (type, None) if the type of statement is not yet handled in
this function.
"""
source_api = evidence.source_api
annotations = evidence.annotations
if source_api == 'biopax':
subtype = annotations.get('source_sub_id')
elif source_api in ('reach', 'eidos'):
if 'found_by' in annotations:
from indra.sources.reach.processor import determine_reach_subtype
if source_api == 'reach':
subtype = determine_reach_subtype(annotations['found_by'])
elif source_api == 'eidos':
subtype = annotations['found_by']
else:
subtype = None
else:
logger.debug('Could not find found_by attribute in reach '
                         'statement annotations')
subtype = None
elif source_api == 'geneways':
subtype = annotations['actiontype']
else:
subtype = None
return (source_api, subtype) | python | def tag_evidence_subtype(evidence):
"""Returns the type and subtype of an evidence object as a string,
typically the extraction rule or database from which the statement
was generated.
For biopax, this is just the database name.
Parameters
----------
evidence : indra.statements.Evidence
The evidence object which we wish to subtype
Returns
-------
types: tuple
A tuple with (type, subtype), both strings
Returns (type, None) if the type of statement is not yet handled in
this function.
"""
source_api = evidence.source_api
annotations = evidence.annotations
if source_api == 'biopax':
subtype = annotations.get('source_sub_id')
elif source_api in ('reach', 'eidos'):
if 'found_by' in annotations:
from indra.sources.reach.processor import determine_reach_subtype
if source_api == 'reach':
subtype = determine_reach_subtype(annotations['found_by'])
elif source_api == 'eidos':
subtype = annotations['found_by']
else:
subtype = None
else:
logger.debug('Could not find found_by attribute in reach '
                         'statement annotations')
subtype = None
elif source_api == 'geneways':
subtype = annotations['actiontype']
else:
subtype = None
return (source_api, subtype) | [
"def",
"tag_evidence_subtype",
"(",
"evidence",
")",
":",
"source_api",
"=",
"evidence",
".",
"source_api",
"annotations",
"=",
"evidence",
".",
"annotations",
"if",
"source_api",
"==",
"'biopax'",
":",
"subtype",
"=",
"annotations",
".",
"get",
"(",
"'source_sub_id'",
")",
"elif",
"source_api",
"in",
"(",
"'reach'",
",",
"'eidos'",
")",
":",
"if",
"'found_by'",
"in",
"annotations",
":",
"from",
"indra",
".",
"sources",
".",
"reach",
".",
"processor",
"import",
"determine_reach_subtype",
"if",
"source_api",
"==",
"'reach'",
":",
"subtype",
"=",
"determine_reach_subtype",
"(",
"annotations",
"[",
"'found_by'",
"]",
")",
"elif",
"source_api",
"==",
"'eidos'",
":",
"subtype",
"=",
"annotations",
"[",
"'found_by'",
"]",
"else",
":",
"subtype",
"=",
"None",
"else",
":",
"logger",
".",
"debug",
"(",
"'Could not find found_by attribute in reach '",
"'statement annoations'",
")",
"subtype",
"=",
"None",
"elif",
"source_api",
"==",
"'geneways'",
":",
"subtype",
"=",
"annotations",
"[",
"'actiontype'",
"]",
"else",
":",
"subtype",
"=",
"None",
"return",
"(",
"source_api",
",",
"subtype",
")"
]
| Returns the type and subtype of an evidence object as a string,
typically the extraction rule or database from which the statement
was generated.
For biopax, this is just the database name.
Parameters
----------
evidence : indra.statements.Evidence
The evidence object which we wish to subtype
Returns
-------
types: tuple
A tuple with (type, subtype), both strings
Returns (type, None) if the type of statement is not yet handled in
this function. | [
"Returns",
"the",
"type",
"and",
"subtype",
"of",
"an",
"evidence",
"object",
"as",
"a",
"string",
"typically",
"the",
"extraction",
"rule",
"or",
"database",
"from",
"which",
"the",
"statement",
"was",
"generated",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L486-L528 | train |
sorgerlab/indra | indra/belief/__init__.py | SimpleScorer.score_evidence_list | def score_evidence_list(self, evidences):
"""Return belief score given a list of supporting evidences."""
def _score(evidences):
if not evidences:
return 0
# Collect all unique sources
sources = [ev.source_api for ev in evidences]
uniq_sources = numpy.unique(sources)
# Calculate the systematic error factors given unique sources
syst_factors = {s: self.prior_probs['syst'][s]
for s in uniq_sources}
            # Calculate the random error factors for each source
rand_factors = {k: [] for k in uniq_sources}
for ev in evidences:
rand_factors[ev.source_api].append(
evidence_random_noise_prior(
ev,
self.prior_probs['rand'],
self.subtype_probs))
# The probability of incorrectness is the product of the
# source-specific probabilities
neg_prob_prior = 1
for s in uniq_sources:
neg_prob_prior *= (syst_factors[s] +
numpy.prod(rand_factors[s]))
# Finally, the probability of correctness is one minus incorrect
prob_prior = 1 - neg_prob_prior
return prob_prior
pos_evidence = [ev for ev in evidences if
not ev.epistemics.get('negated')]
neg_evidence = [ev for ev in evidences if
ev.epistemics.get('negated')]
pp = _score(pos_evidence)
np = _score(neg_evidence)
# The basic assumption is that the positive and negative evidence
# can't simultaneously be correct.
# There are two cases to consider. (1) If the positive evidence is
# incorrect then there is no Statement and the belief should be 0,
# irrespective of the negative evidence.
# (2) If the positive evidence is correct and the negative evidence
# is incorrect.
# This amounts to the following formula:
# 0 * (1-pp) + 1 * (pp * (1-np)) which we simplify below
score = pp * (1 - np)
return score | python | def score_evidence_list(self, evidences):
"""Return belief score given a list of supporting evidences."""
def _score(evidences):
if not evidences:
return 0
# Collect all unique sources
sources = [ev.source_api for ev in evidences]
uniq_sources = numpy.unique(sources)
# Calculate the systematic error factors given unique sources
syst_factors = {s: self.prior_probs['syst'][s]
for s in uniq_sources}
            # Calculate the random error factors for each source
rand_factors = {k: [] for k in uniq_sources}
for ev in evidences:
rand_factors[ev.source_api].append(
evidence_random_noise_prior(
ev,
self.prior_probs['rand'],
self.subtype_probs))
# The probability of incorrectness is the product of the
# source-specific probabilities
neg_prob_prior = 1
for s in uniq_sources:
neg_prob_prior *= (syst_factors[s] +
numpy.prod(rand_factors[s]))
# Finally, the probability of correctness is one minus incorrect
prob_prior = 1 - neg_prob_prior
return prob_prior
pos_evidence = [ev for ev in evidences if
not ev.epistemics.get('negated')]
neg_evidence = [ev for ev in evidences if
ev.epistemics.get('negated')]
pp = _score(pos_evidence)
np = _score(neg_evidence)
# The basic assumption is that the positive and negative evidence
# can't simultaneously be correct.
# There are two cases to consider. (1) If the positive evidence is
# incorrect then there is no Statement and the belief should be 0,
# irrespective of the negative evidence.
# (2) If the positive evidence is correct and the negative evidence
# is incorrect.
# This amounts to the following formula:
# 0 * (1-pp) + 1 * (pp * (1-np)) which we simplify below
score = pp * (1 - np)
return score | [
"def",
"score_evidence_list",
"(",
"self",
",",
"evidences",
")",
":",
"def",
"_score",
"(",
"evidences",
")",
":",
"if",
"not",
"evidences",
":",
"return",
"0",
"# Collect all unique sources",
"sources",
"=",
"[",
"ev",
".",
"source_api",
"for",
"ev",
"in",
"evidences",
"]",
"uniq_sources",
"=",
"numpy",
".",
"unique",
"(",
"sources",
")",
"# Calculate the systematic error factors given unique sources",
"syst_factors",
"=",
"{",
"s",
":",
"self",
".",
"prior_probs",
"[",
"'syst'",
"]",
"[",
"s",
"]",
"for",
"s",
"in",
"uniq_sources",
"}",
"# Calculate the radom error factors for each source",
"rand_factors",
"=",
"{",
"k",
":",
"[",
"]",
"for",
"k",
"in",
"uniq_sources",
"}",
"for",
"ev",
"in",
"evidences",
":",
"rand_factors",
"[",
"ev",
".",
"source_api",
"]",
".",
"append",
"(",
"evidence_random_noise_prior",
"(",
"ev",
",",
"self",
".",
"prior_probs",
"[",
"'rand'",
"]",
",",
"self",
".",
"subtype_probs",
")",
")",
"# The probability of incorrectness is the product of the",
"# source-specific probabilities",
"neg_prob_prior",
"=",
"1",
"for",
"s",
"in",
"uniq_sources",
":",
"neg_prob_prior",
"*=",
"(",
"syst_factors",
"[",
"s",
"]",
"+",
"numpy",
".",
"prod",
"(",
"rand_factors",
"[",
"s",
"]",
")",
")",
"# Finally, the probability of correctness is one minus incorrect",
"prob_prior",
"=",
"1",
"-",
"neg_prob_prior",
"return",
"prob_prior",
"pos_evidence",
"=",
"[",
"ev",
"for",
"ev",
"in",
"evidences",
"if",
"not",
"ev",
".",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
"]",
"neg_evidence",
"=",
"[",
"ev",
"for",
"ev",
"in",
"evidences",
"if",
"ev",
".",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
"]",
"pp",
"=",
"_score",
"(",
"pos_evidence",
")",
"np",
"=",
"_score",
"(",
"neg_evidence",
")",
"# The basic assumption is that the positive and negative evidence",
"# can't simultaneously be correct.",
"# There are two cases to consider. (1) If the positive evidence is",
"# incorrect then there is no Statement and the belief should be 0,",
"# irrespective of the negative evidence.",
"# (2) If the positive evidence is correct and the negative evidence",
"# is incorrect.",
"# This amounts to the following formula:",
"# 0 * (1-pp) + 1 * (pp * (1-np)) which we simplify below",
"score",
"=",
"pp",
"*",
"(",
"1",
"-",
"np",
")",
"return",
"score"
]
| Return belief score given a list of supporting evidences. | [
"Return",
"belief",
"score",
"given",
"a",
"list",
"of",
"supporting",
"evidences",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L110-L154 | train |
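A worked numeric sketch of the probability combination used above, assuming two evidences from a single source with a systematic error of 0.05 and a random error of 0.3 each (made-up values):

    syst = 0.05
    rand = [0.3, 0.3]
    neg_prob = syst + 0.3 * 0.3   # 0.05 + 0.09 = 0.14, prob. the source is wrong systematically or on both evidences
    pp = 1 - neg_prob             # 0.86, prob. the positive evidence is correct
    # With no negated evidence np is 0, so the final belief is pp * (1 - 0) = 0.86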
sorgerlab/indra | indra/belief/__init__.py | SimpleScorer.score_statement | def score_statement(self, st, extra_evidence=None):
"""Computes the prior belief probability for an INDRA Statement.
The Statement is assumed to be de-duplicated. In other words,
the Statement is assumed to have
a list of Evidence objects that supports it. The prior probability of
the Statement is calculated based on the number of Evidences it has
and their sources.
Parameters
----------
st : indra.statements.Statement
An INDRA Statements whose belief scores are to
be calculated.
extra_evidence : list[indra.statements.Evidence]
A list of Evidences that support the Statement (that aren't
already included in the Statement's own evidence list).
Returns
-------
belief_score : float
The computed prior probability for the statement
"""
if extra_evidence is None:
extra_evidence = []
all_evidence = st.evidence + extra_evidence
return self.score_evidence_list(all_evidence) | python | def score_statement(self, st, extra_evidence=None):
"""Computes the prior belief probability for an INDRA Statement.
The Statement is assumed to be de-duplicated. In other words,
the Statement is assumed to have
a list of Evidence objects that supports it. The prior probability of
the Statement is calculated based on the number of Evidences it has
and their sources.
Parameters
----------
st : indra.statements.Statement
An INDRA Statements whose belief scores are to
be calculated.
extra_evidence : list[indra.statements.Evidence]
A list of Evidences that support the Statement (that aren't
already included in the Statement's own evidence list).
Returns
-------
belief_score : float
The computed prior probability for the statement
"""
if extra_evidence is None:
extra_evidence = []
all_evidence = st.evidence + extra_evidence
return self.score_evidence_list(all_evidence) | [
"def",
"score_statement",
"(",
"self",
",",
"st",
",",
"extra_evidence",
"=",
"None",
")",
":",
"if",
"extra_evidence",
"is",
"None",
":",
"extra_evidence",
"=",
"[",
"]",
"all_evidence",
"=",
"st",
".",
"evidence",
"+",
"extra_evidence",
"return",
"self",
".",
"score_evidence_list",
"(",
"all_evidence",
")"
]
| Computes the prior belief probability for an INDRA Statement.
The Statement is assumed to be de-duplicated. In other words,
the Statement is assumed to have
a list of Evidence objects that supports it. The prior probability of
the Statement is calculated based on the number of Evidences it has
and their sources.
Parameters
----------
st : indra.statements.Statement
An INDRA Statements whose belief scores are to
be calculated.
extra_evidence : list[indra.statements.Evidence]
A list of Evidences that support the Statement (that aren't
already included in the Statement's own evidence list).
Returns
-------
belief_score : float
The computed prior probability for the statement | [
"Computes",
"the",
"prior",
"belief",
"probability",
"for",
"an",
"INDRA",
"Statement",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L156-L182 | train |
sorgerlab/indra | indra/belief/__init__.py | SimpleScorer.check_prior_probs | def check_prior_probs(self, statements):
"""Throw Exception if BeliefEngine parameter is missing.
Make sure the scorer has all the information needed to compute
belief scores of each statement in the provided list, and raise an
exception otherwise.
Parameters
----------
statements : list[indra.statements.Statement]
List of statements to check
"""
sources = set()
for stmt in statements:
sources |= set([ev.source_api for ev in stmt.evidence])
for err_type in ('rand', 'syst'):
for source in sources:
if source not in self.prior_probs[err_type]:
msg = 'BeliefEngine missing probability parameter' + \
' for source: %s' % source
raise Exception(msg) | python | def check_prior_probs(self, statements):
"""Throw Exception if BeliefEngine parameter is missing.
Make sure the scorer has all the information needed to compute
belief scores of each statement in the provided list, and raise an
exception otherwise.
Parameters
----------
statements : list[indra.statements.Statement]
List of statements to check
"""
sources = set()
for stmt in statements:
sources |= set([ev.source_api for ev in stmt.evidence])
for err_type in ('rand', 'syst'):
for source in sources:
if source not in self.prior_probs[err_type]:
msg = 'BeliefEngine missing probability parameter' + \
' for source: %s' % source
raise Exception(msg) | [
"def",
"check_prior_probs",
"(",
"self",
",",
"statements",
")",
":",
"sources",
"=",
"set",
"(",
")",
"for",
"stmt",
"in",
"statements",
":",
"sources",
"|=",
"set",
"(",
"[",
"ev",
".",
"source_api",
"for",
"ev",
"in",
"stmt",
".",
"evidence",
"]",
")",
"for",
"err_type",
"in",
"(",
"'rand'",
",",
"'syst'",
")",
":",
"for",
"source",
"in",
"sources",
":",
"if",
"source",
"not",
"in",
"self",
".",
"prior_probs",
"[",
"err_type",
"]",
":",
"msg",
"=",
"'BeliefEngine missing probability parameter'",
"+",
"' for source: %s'",
"%",
"source",
"raise",
"Exception",
"(",
"msg",
")"
]
| Throw Exception if BeliefEngine parameter is missing.
Make sure the scorer has all the information needed to compute
belief scores of each statement in the provided list, and raise an
exception otherwise.
Parameters
----------
statements : list[indra.statements.Statement]
List of statements to check | [
"Throw",
"Exception",
"if",
"BeliefEngine",
"parameter",
"is",
"missing",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L184-L204 | train |
sorgerlab/indra | indra/belief/__init__.py | BayesianScorer.update_probs | def update_probs(self):
"""Update the internal probability values given the counts."""
        # We deal with the prior probs first
# This is a fixed assumed value for systematic error
syst_error = 0.05
prior_probs = {'syst': {}, 'rand': {}}
for source, (p, n) in self.prior_counts.items():
# Skip if there are no actual counts
if n + p == 0:
continue
prior_probs['syst'][source] = syst_error
prior_probs['rand'][source] = \
1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
# Next we deal with subtype probs based on counts
subtype_probs = {}
for source, entry in self.subtype_counts.items():
for rule, (p, n) in entry.items():
# Skip if there are no actual counts
if n + p == 0:
continue
if source not in subtype_probs:
subtype_probs[source] = {}
subtype_probs[source][rule] = \
1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
# Finally we propagate this into the full probability
# data structures of the parent class
super(BayesianScorer, self).update_probs(prior_probs, subtype_probs) | python | def update_probs(self):
"""Update the internal probability values given the counts."""
        # We deal with the prior probs first
# This is a fixed assumed value for systematic error
syst_error = 0.05
prior_probs = {'syst': {}, 'rand': {}}
for source, (p, n) in self.prior_counts.items():
# Skip if there are no actual counts
if n + p == 0:
continue
prior_probs['syst'][source] = syst_error
prior_probs['rand'][source] = \
1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
# Next we deal with subtype probs based on counts
subtype_probs = {}
for source, entry in self.subtype_counts.items():
for rule, (p, n) in entry.items():
# Skip if there are no actual counts
if n + p == 0:
continue
if source not in subtype_probs:
subtype_probs[source] = {}
subtype_probs[source][rule] = \
1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
# Finally we propagate this into the full probability
# data structures of the parent class
super(BayesianScorer, self).update_probs(prior_probs, subtype_probs) | [
"def",
"update_probs",
"(",
"self",
")",
":",
"# We deal with the prior probsfirst",
"# This is a fixed assumed value for systematic error",
"syst_error",
"=",
"0.05",
"prior_probs",
"=",
"{",
"'syst'",
":",
"{",
"}",
",",
"'rand'",
":",
"{",
"}",
"}",
"for",
"source",
",",
"(",
"p",
",",
"n",
")",
"in",
"self",
".",
"prior_counts",
".",
"items",
"(",
")",
":",
"# Skip if there are no actual counts",
"if",
"n",
"+",
"p",
"==",
"0",
":",
"continue",
"prior_probs",
"[",
"'syst'",
"]",
"[",
"source",
"]",
"=",
"syst_error",
"prior_probs",
"[",
"'rand'",
"]",
"[",
"source",
"]",
"=",
"1",
"-",
"min",
"(",
"(",
"float",
"(",
"p",
")",
"/",
"(",
"n",
"+",
"p",
")",
",",
"1",
"-",
"syst_error",
")",
")",
"-",
"syst_error",
"# Next we deal with subtype probs based on counts",
"subtype_probs",
"=",
"{",
"}",
"for",
"source",
",",
"entry",
"in",
"self",
".",
"subtype_counts",
".",
"items",
"(",
")",
":",
"for",
"rule",
",",
"(",
"p",
",",
"n",
")",
"in",
"entry",
".",
"items",
"(",
")",
":",
"# Skip if there are no actual counts",
"if",
"n",
"+",
"p",
"==",
"0",
":",
"continue",
"if",
"source",
"not",
"in",
"subtype_probs",
":",
"subtype_probs",
"[",
"source",
"]",
"=",
"{",
"}",
"subtype_probs",
"[",
"source",
"]",
"[",
"rule",
"]",
"=",
"1",
"-",
"min",
"(",
"(",
"float",
"(",
"p",
")",
"/",
"(",
"n",
"+",
"p",
")",
",",
"1",
"-",
"syst_error",
")",
")",
"-",
"syst_error",
"# Finally we propagate this into the full probability",
"# data structures of the parent class",
"super",
"(",
"BayesianScorer",
",",
"self",
")",
".",
"update_probs",
"(",
"prior_probs",
",",
"subtype_probs",
")"
]
| Update the internal probability values given the counts. | [
"Update",
"the",
"internal",
"probability",
"values",
"given",
"the",
"counts",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L232-L258 | train |
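A small arithmetic sketch of the count-to-probability conversion above, using made-up counts of 8 correct and 2 incorrect curations for one source:

    p, n = 8, 2
    syst_error = 0.05
    rand = 1 - min(float(p) / (n + p), 1 - syst_error) - syst_error
    # 1 - min(0.8, 0.95) - 0.05 = 0.15, i.e. a 15% random-error prior for that source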
sorgerlab/indra | indra/belief/__init__.py | BayesianScorer.update_counts | def update_counts(self, prior_counts, subtype_counts):
"""Update the internal counts based on given new counts.
Parameters
----------
prior_counts : dict
A dictionary of counts of the form [pos, neg] for
each source.
subtype_counts : dict
A dictionary of counts of the form [pos, neg] for
each subtype within a source.
"""
for source, (pos, neg) in prior_counts.items():
if source not in self.prior_counts:
self.prior_counts[source] = [0, 0]
self.prior_counts[source][0] += pos
self.prior_counts[source][1] += neg
for source, subtype_dict in subtype_counts.items():
if source not in self.subtype_counts:
self.subtype_counts[source] = {}
for subtype, (pos, neg) in subtype_dict.items():
if subtype not in self.subtype_counts[source]:
self.subtype_counts[source][subtype] = [0, 0]
self.subtype_counts[source][subtype][0] += pos
self.subtype_counts[source][subtype][1] += neg
self.update_probs() | python | def update_counts(self, prior_counts, subtype_counts):
"""Update the internal counts based on given new counts.
Parameters
----------
prior_counts : dict
A dictionary of counts of the form [pos, neg] for
each source.
subtype_counts : dict
A dictionary of counts of the form [pos, neg] for
each subtype within a source.
"""
for source, (pos, neg) in prior_counts.items():
if source not in self.prior_counts:
self.prior_counts[source] = [0, 0]
self.prior_counts[source][0] += pos
self.prior_counts[source][1] += neg
for source, subtype_dict in subtype_counts.items():
if source not in self.subtype_counts:
self.subtype_counts[source] = {}
for subtype, (pos, neg) in subtype_dict.items():
if subtype not in self.subtype_counts[source]:
self.subtype_counts[source][subtype] = [0, 0]
self.subtype_counts[source][subtype][0] += pos
self.subtype_counts[source][subtype][1] += neg
self.update_probs() | [
"def",
"update_counts",
"(",
"self",
",",
"prior_counts",
",",
"subtype_counts",
")",
":",
"for",
"source",
",",
"(",
"pos",
",",
"neg",
")",
"in",
"prior_counts",
".",
"items",
"(",
")",
":",
"if",
"source",
"not",
"in",
"self",
".",
"prior_counts",
":",
"self",
".",
"prior_counts",
"[",
"source",
"]",
"=",
"[",
"0",
",",
"0",
"]",
"self",
".",
"prior_counts",
"[",
"source",
"]",
"[",
"0",
"]",
"+=",
"pos",
"self",
".",
"prior_counts",
"[",
"source",
"]",
"[",
"1",
"]",
"+=",
"neg",
"for",
"source",
",",
"subtype_dict",
"in",
"subtype_counts",
".",
"items",
"(",
")",
":",
"if",
"source",
"not",
"in",
"self",
".",
"subtype_counts",
":",
"self",
".",
"subtype_counts",
"[",
"source",
"]",
"=",
"{",
"}",
"for",
"subtype",
",",
"(",
"pos",
",",
"neg",
")",
"in",
"subtype_dict",
".",
"items",
"(",
")",
":",
"if",
"subtype",
"not",
"in",
"self",
".",
"subtype_counts",
"[",
"source",
"]",
":",
"self",
".",
"subtype_counts",
"[",
"source",
"]",
"[",
"subtype",
"]",
"=",
"[",
"0",
",",
"0",
"]",
"self",
".",
"subtype_counts",
"[",
"source",
"]",
"[",
"subtype",
"]",
"[",
"0",
"]",
"+=",
"pos",
"self",
".",
"subtype_counts",
"[",
"source",
"]",
"[",
"subtype",
"]",
"[",
"1",
"]",
"+=",
"neg",
"self",
".",
"update_probs",
"(",
")"
]
| Update the internal counts based on given new counts.
Parameters
----------
prior_counts : dict
A dictionary of counts of the form [pos, neg] for
each source.
subtype_counts : dict
A dictionary of counts of the form [pos, neg] for
each subtype within a source. | [
"Update",
"the",
"internal",
"counts",
"based",
"on",
"given",
"new",
"counts",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L260-L285 | train |
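A hedged sketch of feeding new curation counts to the Bayesian scorer; the source and rule names are placeholders, and scorer is assumed to be an existing BayesianScorer instance:

    prior_counts = {'eidos': [3, 1]}                        # 3 correct, 1 incorrect
    subtype_counts = {'eidos': {'some_eidos_rule': [2, 0]}}
    scorer.update_counts(prior_counts, subtype_counts)      # also refreshes the probabilities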
sorgerlab/indra | indra/belief/__init__.py | BeliefEngine.set_prior_probs | def set_prior_probs(self, statements):
"""Sets the prior belief probabilities for a list of INDRA Statements.
The Statements are assumed to be de-duplicated. In other words,
each Statement in the list passed to this function is assumed to have
a list of Evidence objects that support it. The prior probability of
each Statement is calculated based on the number of Evidences it has
and their sources.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements whose belief scores are to
be calculated. Each Statement object's belief attribute is updated
by this function.
"""
self.scorer.check_prior_probs(statements)
for st in statements:
st.belief = self.scorer.score_statement(st) | python | def set_prior_probs(self, statements):
"""Sets the prior belief probabilities for a list of INDRA Statements.
The Statements are assumed to be de-duplicated. In other words,
each Statement in the list passed to this function is assumed to have
a list of Evidence objects that support it. The prior probability of
each Statement is calculated based on the number of Evidences it has
and their sources.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements whose belief scores are to
be calculated. Each Statement object's belief attribute is updated
by this function.
"""
self.scorer.check_prior_probs(statements)
for st in statements:
st.belief = self.scorer.score_statement(st) | [
"def",
"set_prior_probs",
"(",
"self",
",",
"statements",
")",
":",
"self",
".",
"scorer",
".",
"check_prior_probs",
"(",
"statements",
")",
"for",
"st",
"in",
"statements",
":",
"st",
".",
"belief",
"=",
"self",
".",
"scorer",
".",
"score_statement",
"(",
"st",
")"
]
| Sets the prior belief probabilities for a list of INDRA Statements.
The Statements are assumed to be de-duplicated. In other words,
each Statement in the list passed to this function is assumed to have
a list of Evidence objects that support it. The prior probability of
each Statement is calculated based on the number of Evidences it has
and their sources.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements whose belief scores are to
be calculated. Each Statement object's belief attribute is updated
by this function. | [
"Sets",
"the",
"prior",
"belief",
"probabilities",
"for",
"a",
"list",
"of",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L311-L329 | train |
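A hedged end-to-end sketch of scoring a list of de-duplicated statements; stmts is assumed to come from an earlier assembly step, and the default scorer is assumed when no argument is given:

    from indra.belief import BeliefEngine

    be = BeliefEngine()          # assumes the default scorer when none is passed
    be.set_prior_probs(stmts)    # each stmt.belief is updated in place
    print(stmts[0].belief)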
sorgerlab/indra | indra/belief/__init__.py | BeliefEngine.set_hierarchy_probs | def set_hierarchy_probs(self, statements):
"""Sets hierarchical belief probabilities for INDRA Statements.
The Statements are assumed to be in a hierarchical relation graph with
the supports and supported_by attribute of each Statement object having
been set.
The hierarchical belief probability of each Statement is calculated
based on its prior probability and the probabilities propagated from
Statements supporting it in the hierarchy graph.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements whose belief scores are to
be calculated. Each Statement object's belief attribute is updated
by this function.
"""
def build_hierarchy_graph(stmts):
"""Return a DiGraph based on matches keys and Statement supports"""
g = networkx.DiGraph()
for st1 in stmts:
g.add_node(st1.matches_key(), stmt=st1)
for st2 in st1.supported_by:
g.add_node(st2.matches_key(), stmt=st2)
g.add_edge(st2.matches_key(), st1.matches_key())
return g
def get_ranked_stmts(g):
"""Return a topological sort of statement matches keys from a graph.
"""
node_ranks = networkx.algorithms.dag.topological_sort(g)
node_ranks = reversed(list(node_ranks))
stmts = [g.node[n]['stmt'] for n in node_ranks]
return stmts
def assert_no_cycle(g):
"""If the graph has cycles, throws AssertionError."""
try:
cyc = networkx.algorithms.cycles.find_cycle(g)
except networkx.exception.NetworkXNoCycle:
return
msg = 'Cycle found in hierarchy graph: %s' % cyc
assert False, msg
g = build_hierarchy_graph(statements)
assert_no_cycle(g)
ranked_stmts = get_ranked_stmts(g)
for st in ranked_stmts:
bps = _get_belief_package(st)
supporting_evidences = []
# NOTE: the last belief package in the list is this statement's own
for bp in bps[:-1]:
# Iterate over all the parent evidences and add only
# non-negated ones
for ev in bp.evidences:
if not ev.epistemics.get('negated'):
supporting_evidences.append(ev)
# Now add the Statement's own evidence
# Now score all the evidences
belief = self.scorer.score_statement(st, supporting_evidences)
st.belief = belief | python | def set_hierarchy_probs(self, statements):
"""Sets hierarchical belief probabilities for INDRA Statements.
The Statements are assumed to be in a hierarchical relation graph with
the supports and supported_by attribute of each Statement object having
been set.
The hierarchical belief probability of each Statement is calculated
based on its prior probability and the probabilities propagated from
Statements supporting it in the hierarchy graph.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements whose belief scores are to
be calculated. Each Statement object's belief attribute is updated
by this function.
"""
def build_hierarchy_graph(stmts):
"""Return a DiGraph based on matches keys and Statement supports"""
g = networkx.DiGraph()
for st1 in stmts:
g.add_node(st1.matches_key(), stmt=st1)
for st2 in st1.supported_by:
g.add_node(st2.matches_key(), stmt=st2)
g.add_edge(st2.matches_key(), st1.matches_key())
return g
def get_ranked_stmts(g):
"""Return a topological sort of statement matches keys from a graph.
"""
node_ranks = networkx.algorithms.dag.topological_sort(g)
node_ranks = reversed(list(node_ranks))
stmts = [g.node[n]['stmt'] for n in node_ranks]
return stmts
def assert_no_cycle(g):
"""If the graph has cycles, throws AssertionError."""
try:
cyc = networkx.algorithms.cycles.find_cycle(g)
except networkx.exception.NetworkXNoCycle:
return
msg = 'Cycle found in hierarchy graph: %s' % cyc
assert False, msg
g = build_hierarchy_graph(statements)
assert_no_cycle(g)
ranked_stmts = get_ranked_stmts(g)
for st in ranked_stmts:
bps = _get_belief_package(st)
supporting_evidences = []
# NOTE: the last belief package in the list is this statement's own
for bp in bps[:-1]:
# Iterate over all the parent evidences and add only
# non-negated ones
for ev in bp.evidences:
if not ev.epistemics.get('negated'):
supporting_evidences.append(ev)
# Now add the Statement's own evidence
# Now score all the evidences
belief = self.scorer.score_statement(st, supporting_evidences)
st.belief = belief | [
"def",
"set_hierarchy_probs",
"(",
"self",
",",
"statements",
")",
":",
"def",
"build_hierarchy_graph",
"(",
"stmts",
")",
":",
"\"\"\"Return a DiGraph based on matches keys and Statement supports\"\"\"",
"g",
"=",
"networkx",
".",
"DiGraph",
"(",
")",
"for",
"st1",
"in",
"stmts",
":",
"g",
".",
"add_node",
"(",
"st1",
".",
"matches_key",
"(",
")",
",",
"stmt",
"=",
"st1",
")",
"for",
"st2",
"in",
"st1",
".",
"supported_by",
":",
"g",
".",
"add_node",
"(",
"st2",
".",
"matches_key",
"(",
")",
",",
"stmt",
"=",
"st2",
")",
"g",
".",
"add_edge",
"(",
"st2",
".",
"matches_key",
"(",
")",
",",
"st1",
".",
"matches_key",
"(",
")",
")",
"return",
"g",
"def",
"get_ranked_stmts",
"(",
"g",
")",
":",
"\"\"\"Return a topological sort of statement matches keys from a graph.\n \"\"\"",
"node_ranks",
"=",
"networkx",
".",
"algorithms",
".",
"dag",
".",
"topological_sort",
"(",
"g",
")",
"node_ranks",
"=",
"reversed",
"(",
"list",
"(",
"node_ranks",
")",
")",
"stmts",
"=",
"[",
"g",
".",
"node",
"[",
"n",
"]",
"[",
"'stmt'",
"]",
"for",
"n",
"in",
"node_ranks",
"]",
"return",
"stmts",
"def",
"assert_no_cycle",
"(",
"g",
")",
":",
"\"\"\"If the graph has cycles, throws AssertionError.\"\"\"",
"try",
":",
"cyc",
"=",
"networkx",
".",
"algorithms",
".",
"cycles",
".",
"find_cycle",
"(",
"g",
")",
"except",
"networkx",
".",
"exception",
".",
"NetworkXNoCycle",
":",
"return",
"msg",
"=",
"'Cycle found in hierarchy graph: %s'",
"%",
"cyc",
"assert",
"False",
",",
"msg",
"g",
"=",
"build_hierarchy_graph",
"(",
"statements",
")",
"assert_no_cycle",
"(",
"g",
")",
"ranked_stmts",
"=",
"get_ranked_stmts",
"(",
"g",
")",
"for",
"st",
"in",
"ranked_stmts",
":",
"bps",
"=",
"_get_belief_package",
"(",
"st",
")",
"supporting_evidences",
"=",
"[",
"]",
"# NOTE: the last belief package in the list is this statement's own",
"for",
"bp",
"in",
"bps",
"[",
":",
"-",
"1",
"]",
":",
"# Iterate over all the parent evidences and add only",
"# non-negated ones",
"for",
"ev",
"in",
"bp",
".",
"evidences",
":",
"if",
"not",
"ev",
".",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
":",
"supporting_evidences",
".",
"append",
"(",
"ev",
")",
"# Now add the Statement's own evidence",
"# Now score all the evidences",
"belief",
"=",
"self",
".",
"scorer",
".",
"score_statement",
"(",
"st",
",",
"supporting_evidences",
")",
"st",
".",
"belief",
"=",
"belief"
]
| Sets hierarchical belief probabilities for INDRA Statements.
The Statements are assumed to be in a hierarchical relation graph with
the supports and supported_by attribute of each Statement object having
been set.
The hierarchical belief probability of each Statement is calculated
based on its prior probability and the probabilities propagated from
Statements supporting it in the hierarchy graph.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements whose belief scores are to
be calculated. Each Statement object's belief attribute is updated
by this function. | [
"Sets",
"hierarchical",
"belief",
"probabilities",
"for",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L331-L391 | train |
sorgerlab/indra | indra/belief/__init__.py | BeliefEngine.set_linked_probs | def set_linked_probs(self, linked_statements):
"""Sets the belief probabilities for a list of linked INDRA Statements.
The list of LinkedStatement objects is assumed to come from the
MechanismLinker. The belief probability of the inferred Statement is
assigned the joint probability of its source Statements.
Parameters
----------
linked_statements : list[indra.mechlinker.LinkedStatement]
A list of INDRA LinkedStatements whose belief scores are to
be calculated. The belief attribute of the inferred Statement in
the LinkedStatement object is updated by this function.
"""
for st in linked_statements:
source_probs = [s.belief for s in st.source_stmts]
st.inferred_stmt.belief = numpy.prod(source_probs) | python | def set_linked_probs(self, linked_statements):
"""Sets the belief probabilities for a list of linked INDRA Statements.
The list of LinkedStatement objects is assumed to come from the
MechanismLinker. The belief probability of the inferred Statement is
assigned the joint probability of its source Statements.
Parameters
----------
linked_statements : list[indra.mechlinker.LinkedStatement]
A list of INDRA LinkedStatements whose belief scores are to
be calculated. The belief attribute of the inferred Statement in
the LinkedStatement object is updated by this function.
"""
for st in linked_statements:
source_probs = [s.belief for s in st.source_stmts]
st.inferred_stmt.belief = numpy.prod(source_probs) | [
"def",
"set_linked_probs",
"(",
"self",
",",
"linked_statements",
")",
":",
"for",
"st",
"in",
"linked_statements",
":",
"source_probs",
"=",
"[",
"s",
".",
"belief",
"for",
"s",
"in",
"st",
".",
"source_stmts",
"]",
"st",
".",
"inferred_stmt",
".",
"belief",
"=",
"numpy",
".",
"prod",
"(",
"source_probs",
")"
]
| Sets the belief probabilities for a list of linked INDRA Statements.
The list of LinkedStatement objects is assumed to come from the
MechanismLinker. The belief probability of the inferred Statement is
assigned the joint probability of its source Statements.
Parameters
----------
linked_statements : list[indra.mechlinker.LinkedStatement]
A list of INDRA LinkedStatements whose belief scores are to
be calculated. The belief attribute of the inferred Statement in
the LinkedStatement object is updated by this function. | [
"Sets",
"the",
"belief",
"probabilities",
"for",
"a",
"list",
"of",
"linked",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/__init__.py#L393-L409 | train |
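A minimal usage sketch for the BeliefEngine methods shown above; the toy statement, its single evidence, and the default scorer are illustrative assumptions rather than anything taken from these rows:
from indra.statements import Agent, Phosphorylation, Evidence
from indra.belief import BeliefEngine
# A toy de-duplicated Statement with one reading-derived Evidence (assumed example)
stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'),
                       evidence=[Evidence(source_api='reach')])
be = BeliefEngine()             # default scorer assumed
be.set_prior_probs([stmt])      # prior belief from the Statement's own evidence
be.set_hierarchy_probs([stmt])  # also folds in supports/supported_by evidence
print(stmt.belief)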
sorgerlab/indra | indra/sources/rlimsp/processor.py | RlimspProcessor.extract_statements | def extract_statements(self):
"""Extract the statements from the json."""
for p_info in self._json:
para = RlimspParagraph(p_info, self.doc_id_type)
self.statements.extend(para.get_statements())
return | python | def extract_statements(self):
"""Extract the statements from the json."""
for p_info in self._json:
para = RlimspParagraph(p_info, self.doc_id_type)
self.statements.extend(para.get_statements())
return | [
"def",
"extract_statements",
"(",
"self",
")",
":",
"for",
"p_info",
"in",
"self",
".",
"_json",
":",
"para",
"=",
"RlimspParagraph",
"(",
"p_info",
",",
"self",
".",
"doc_id_type",
")",
"self",
".",
"statements",
".",
"extend",
"(",
"para",
".",
"get_statements",
"(",
")",
")",
"return"
]
| Extract the statements from the json. | [
"Extract",
"the",
"statements",
"from",
"the",
"json",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/processor.py#L19-L24 | train |
sorgerlab/indra | indra/sources/rlimsp/processor.py | RlimspParagraph._get_agent | def _get_agent(self, entity_id):
"""Convert the entity dictionary into an INDRA Agent."""
if entity_id is None:
return None
entity_info = self._entity_dict.get(entity_id)
if entity_info is None:
logger.warning("Entity key did not resolve to entity.")
return None
return get_agent_from_entity_info(entity_info) | python | def _get_agent(self, entity_id):
"""Convert the entity dictionary into an INDRA Agent."""
if entity_id is None:
return None
entity_info = self._entity_dict.get(entity_id)
if entity_info is None:
logger.warning("Entity key did not resolve to entity.")
return None
return get_agent_from_entity_info(entity_info) | [
"def",
"_get_agent",
"(",
"self",
",",
"entity_id",
")",
":",
"if",
"entity_id",
"is",
"None",
":",
"return",
"None",
"entity_info",
"=",
"self",
".",
"_entity_dict",
".",
"get",
"(",
"entity_id",
")",
"if",
"entity_info",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Entity key did not resolve to entity.\"",
")",
"return",
"None",
"return",
"get_agent_from_entity_info",
"(",
"entity_info",
")"
]
| Convert the entity dictionary into an INDRA Agent. | [
"Convert",
"the",
"entity",
"dictionary",
"into",
"an",
"INDRA",
"Agent",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/processor.py#L49-L58 | train |
sorgerlab/indra | indra/sources/rlimsp/processor.py | RlimspParagraph._get_evidence | def _get_evidence(self, trigger_id, args, agent_coords, site_coords):
"""Get the evidence using the info in the trigger entity."""
trigger_info = self._entity_dict[trigger_id]
# Get the sentence index from the trigger word.
s_idx_set = {self._entity_dict[eid]['sentenceIndex']
for eid in args.values()
if 'sentenceIndex' in self._entity_dict[eid]}
if s_idx_set:
i_min = min(s_idx_set)
i_max = max(s_idx_set)
text = '. '.join(self._sentences[i_min:(i_max+1)]) + '.'
s_start = self._sentence_starts[i_min]
annotations = {
'agents': {'coords': [_fix_coords(coords, s_start)
for coords in agent_coords]},
'trigger': {'coords': _fix_coords([trigger_info['charStart'],
trigger_info['charEnd']],
s_start)}
}
else:
logger.info('Unable to get sentence index')
annotations = {}
text = None
if site_coords:
annotations['site'] = {'coords': _fix_coords(site_coords, s_start)}
return Evidence(text_refs=self._text_refs.copy(), text=text,
source_api='rlimsp', pmid=self._text_refs.get('PMID'),
annotations=annotations) | python | def _get_evidence(self, trigger_id, args, agent_coords, site_coords):
"""Get the evidence using the info in the trigger entity."""
trigger_info = self._entity_dict[trigger_id]
# Get the sentence index from the trigger word.
s_idx_set = {self._entity_dict[eid]['sentenceIndex']
for eid in args.values()
if 'sentenceIndex' in self._entity_dict[eid]}
if s_idx_set:
i_min = min(s_idx_set)
i_max = max(s_idx_set)
text = '. '.join(self._sentences[i_min:(i_max+1)]) + '.'
s_start = self._sentence_starts[i_min]
annotations = {
'agents': {'coords': [_fix_coords(coords, s_start)
for coords in agent_coords]},
'trigger': {'coords': _fix_coords([trigger_info['charStart'],
trigger_info['charEnd']],
s_start)}
}
else:
logger.info('Unable to get sentence index')
annotations = {}
text = None
if site_coords:
annotations['site'] = {'coords': _fix_coords(site_coords, s_start)}
return Evidence(text_refs=self._text_refs.copy(), text=text,
source_api='rlimsp', pmid=self._text_refs.get('PMID'),
annotations=annotations) | [
"def",
"_get_evidence",
"(",
"self",
",",
"trigger_id",
",",
"args",
",",
"agent_coords",
",",
"site_coords",
")",
":",
"trigger_info",
"=",
"self",
".",
"_entity_dict",
"[",
"trigger_id",
"]",
"# Get the sentence index from the trigger word.",
"s_idx_set",
"=",
"{",
"self",
".",
"_entity_dict",
"[",
"eid",
"]",
"[",
"'sentenceIndex'",
"]",
"for",
"eid",
"in",
"args",
".",
"values",
"(",
")",
"if",
"'sentenceIndex'",
"in",
"self",
".",
"_entity_dict",
"[",
"eid",
"]",
"}",
"if",
"s_idx_set",
":",
"i_min",
"=",
"min",
"(",
"s_idx_set",
")",
"i_max",
"=",
"max",
"(",
"s_idx_set",
")",
"text",
"=",
"'. '",
".",
"join",
"(",
"self",
".",
"_sentences",
"[",
"i_min",
":",
"(",
"i_max",
"+",
"1",
")",
"]",
")",
"+",
"'.'",
"s_start",
"=",
"self",
".",
"_sentence_starts",
"[",
"i_min",
"]",
"annotations",
"=",
"{",
"'agents'",
":",
"{",
"'coords'",
":",
"[",
"_fix_coords",
"(",
"coords",
",",
"s_start",
")",
"for",
"coords",
"in",
"agent_coords",
"]",
"}",
",",
"'trigger'",
":",
"{",
"'coords'",
":",
"_fix_coords",
"(",
"[",
"trigger_info",
"[",
"'charStart'",
"]",
",",
"trigger_info",
"[",
"'charEnd'",
"]",
"]",
",",
"s_start",
")",
"}",
"}",
"else",
":",
"logger",
".",
"info",
"(",
"'Unable to get sentence index'",
")",
"annotations",
"=",
"{",
"}",
"text",
"=",
"None",
"if",
"site_coords",
":",
"annotations",
"[",
"'site'",
"]",
"=",
"{",
"'coords'",
":",
"_fix_coords",
"(",
"site_coords",
",",
"s_start",
")",
"}",
"return",
"Evidence",
"(",
"text_refs",
"=",
"self",
".",
"_text_refs",
".",
"copy",
"(",
")",
",",
"text",
"=",
"text",
",",
"source_api",
"=",
"'rlimsp'",
",",
"pmid",
"=",
"self",
".",
"_text_refs",
".",
"get",
"(",
"'PMID'",
")",
",",
"annotations",
"=",
"annotations",
")"
]
| Get the evidence using the info in the trigger entity. | [
"Get",
"the",
"evidence",
"using",
"the",
"info",
"in",
"the",
"trigger",
"entity",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/processor.py#L84-L115 | train |
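The two rows above are internals of the RLIMS-P processor; a typical way to reach them is sketched below, assuming the public process_from_webservice entry point in indra.sources.rlimsp and using a placeholder PMC ID:
from indra.sources import rlimsp
# Run RLIMS-P on a full-text article via its web service (the ID is a placeholder)
rp = rlimsp.process_from_webservice('PMC3717945')
phosphorylation_stmts = rp.statements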
sorgerlab/indra | indra/tools/reading/readers.py | get_reader_classes | def get_reader_classes(parent=Reader):
"""Get all childless the descendants of a parent class, recursively."""
children = parent.__subclasses__()
descendants = children[:]
for child in children:
grandchildren = get_reader_classes(child)
if grandchildren:
descendants.remove(child)
descendants.extend(grandchildren)
return descendants | python | def get_reader_classes(parent=Reader):
"""Get all childless the descendants of a parent class, recursively."""
children = parent.__subclasses__()
descendants = children[:]
for child in children:
grandchildren = get_reader_classes(child)
if grandchildren:
descendants.remove(child)
descendants.extend(grandchildren)
return descendants | [
"def",
"get_reader_classes",
"(",
"parent",
"=",
"Reader",
")",
":",
"children",
"=",
"parent",
".",
"__subclasses__",
"(",
")",
"descendants",
"=",
"children",
"[",
":",
"]",
"for",
"child",
"in",
"children",
":",
"grandchildren",
"=",
"get_reader_classes",
"(",
"child",
")",
"if",
"grandchildren",
":",
"descendants",
".",
"remove",
"(",
"child",
")",
"descendants",
".",
"extend",
"(",
"grandchildren",
")",
"return",
"descendants"
]
| Get all the childless descendants of a parent class, recursively. | [
"Get",
"all",
"childless",
"the",
"descendants",
"of",
"a",
"parent",
"class",
"recursively",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L761-L770 | train |
sorgerlab/indra | indra/tools/reading/readers.py | get_reader_class | def get_reader_class(reader_name):
"""Get a particular reader class by name."""
for reader_class in get_reader_classes():
if reader_class.name.lower() == reader_name.lower():
return reader_class
else:
logger.error("No such reader: %s" % reader_name)
return None | python | def get_reader_class(reader_name):
"""Get a particular reader class by name."""
for reader_class in get_reader_classes():
if reader_class.name.lower() == reader_name.lower():
return reader_class
else:
logger.error("No such reader: %s" % reader_name)
return None | [
"def",
"get_reader_class",
"(",
"reader_name",
")",
":",
"for",
"reader_class",
"in",
"get_reader_classes",
"(",
")",
":",
"if",
"reader_class",
".",
"name",
".",
"lower",
"(",
")",
"==",
"reader_name",
".",
"lower",
"(",
")",
":",
"return",
"reader_class",
"else",
":",
"logger",
".",
"error",
"(",
"\"No such reader: %s\"",
"%",
"reader_name",
")",
"return",
"None"
]
| Get a particular reader class by name. | [
"Get",
"a",
"particular",
"reader",
"class",
"by",
"name",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L773-L780 | train |
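A short sketch of how the two lookup helpers above are intended to be used; only names that appear in the code above are relied on:
from indra.tools.reading.readers import get_reader_classes, get_reader_class
# Enumerate every childless Reader subclass that is currently importable
for reader_class in get_reader_classes():
    print(reader_class.name)
# Case-insensitive lookup of one reader class by name
ReachReaderClass = get_reader_class('reach')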
sorgerlab/indra | indra/tools/reading/readers.py | Content.from_file | def from_file(cls, file_path, compressed=False, encoded=False):
"""Create a content object from a file path."""
file_id = '.'.join(path.basename(file_path).split('.')[:-1])
file_format = file_path.split('.')[-1]
content = cls(file_id, file_format, compressed, encoded)
content.file_exists = True
content._location = path.dirname(file_path)
return content | python | def from_file(cls, file_path, compressed=False, encoded=False):
"""Create a content object from a file path."""
file_id = '.'.join(path.basename(file_path).split('.')[:-1])
file_format = file_path.split('.')[-1]
content = cls(file_id, file_format, compressed, encoded)
content.file_exists = True
content._location = path.dirname(file_path)
return content | [
"def",
"from_file",
"(",
"cls",
",",
"file_path",
",",
"compressed",
"=",
"False",
",",
"encoded",
"=",
"False",
")",
":",
"file_id",
"=",
"'.'",
".",
"join",
"(",
"path",
".",
"basename",
"(",
"file_path",
")",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
")",
"file_format",
"=",
"file_path",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"content",
"=",
"cls",
"(",
"file_id",
",",
"file_format",
",",
"compressed",
",",
"encoded",
")",
"content",
".",
"file_exists",
"=",
"True",
"content",
".",
"_location",
"=",
"path",
".",
"dirname",
"(",
"file_path",
")",
"return",
"content"
]
| Create a content object from a file path. | [
"Create",
"a",
"content",
"object",
"from",
"a",
"file",
"path",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L100-L107 | train |
sorgerlab/indra | indra/tools/reading/readers.py | Content.change_id | def change_id(self, new_id):
"""Change the id of this content."""
self._load_raw_content()
self._id = new_id
self.get_filename(renew=True)
self.get_filepath(renew=True)
return | python | def change_id(self, new_id):
"""Change the id of this content."""
self._load_raw_content()
self._id = new_id
self.get_filename(renew=True)
self.get_filepath(renew=True)
return | [
"def",
"change_id",
"(",
"self",
",",
"new_id",
")",
":",
"self",
".",
"_load_raw_content",
"(",
")",
"self",
".",
"_id",
"=",
"new_id",
"self",
".",
"get_filename",
"(",
"renew",
"=",
"True",
")",
"self",
".",
"get_filepath",
"(",
"renew",
"=",
"True",
")",
"return"
]
| Change the id of this content. | [
"Change",
"the",
"id",
"of",
"this",
"content",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L123-L129 | train |
sorgerlab/indra | indra/tools/reading/readers.py | Content.change_format | def change_format(self, new_format):
"""Change the format label of this content.
Note that this does NOT actually alter the format of the content, only
the label.
"""
self._load_raw_content()
self._format = new_format
self.get_filename(renew=True)
self.get_filepath(renew=True)
return | python | def change_format(self, new_format):
"""Change the format label of this content.
Note that this does NOT actually alter the format of the content, only
the label.
"""
self._load_raw_content()
self._format = new_format
self.get_filename(renew=True)
self.get_filepath(renew=True)
return | [
"def",
"change_format",
"(",
"self",
",",
"new_format",
")",
":",
"self",
".",
"_load_raw_content",
"(",
")",
"self",
".",
"_format",
"=",
"new_format",
"self",
".",
"get_filename",
"(",
"renew",
"=",
"True",
")",
"self",
".",
"get_filepath",
"(",
"renew",
"=",
"True",
")",
"return"
]
| Change the format label of this content.
Note that this does NOT actually alter the format of the content, only
the label. | [
"Change",
"the",
"format",
"label",
"of",
"this",
"content",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L131-L141 | train |
sorgerlab/indra | indra/tools/reading/readers.py | Content.get_text | def get_text(self):
"""Get the loaded, decompressed, and decoded text of this content."""
self._load_raw_content()
if self._text is None:
assert self._raw_content is not None
ret_cont = self._raw_content
if self.compressed:
ret_cont = zlib.decompress(ret_cont, zlib.MAX_WBITS+16)
if self.encoded:
ret_cont = ret_cont.decode('utf-8')
self._text = ret_cont
assert self._text is not None
return self._text | python | def get_text(self):
"""Get the loaded, decompressed, and decoded text of this content."""
self._load_raw_content()
if self._text is None:
assert self._raw_content is not None
ret_cont = self._raw_content
if self.compressed:
ret_cont = zlib.decompress(ret_cont, zlib.MAX_WBITS+16)
if self.encoded:
ret_cont = ret_cont.decode('utf-8')
self._text = ret_cont
assert self._text is not None
return self._text | [
"def",
"get_text",
"(",
"self",
")",
":",
"self",
".",
"_load_raw_content",
"(",
")",
"if",
"self",
".",
"_text",
"is",
"None",
":",
"assert",
"self",
".",
"_raw_content",
"is",
"not",
"None",
"ret_cont",
"=",
"self",
".",
"_raw_content",
"if",
"self",
".",
"compressed",
":",
"ret_cont",
"=",
"zlib",
".",
"decompress",
"(",
"ret_cont",
",",
"zlib",
".",
"MAX_WBITS",
"+",
"16",
")",
"if",
"self",
".",
"encoded",
":",
"ret_cont",
"=",
"ret_cont",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"_text",
"=",
"ret_cont",
"assert",
"self",
".",
"_text",
"is",
"not",
"None",
"return",
"self",
".",
"_text"
]
| Get the loaded, decompressed, and decoded text of this content. | [
"Get",
"the",
"loaded",
"decompressed",
"and",
"decoded",
"text",
"of",
"this",
"content",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L164-L176 | train |
sorgerlab/indra | indra/tools/reading/readers.py | Content.get_filename | def get_filename(self, renew=False):
"""Get the filename of this content.
If the file name doesn't already exist, we create it as {id}.{format}.
"""
if self._fname is None or renew:
self._fname = '%s.%s' % (self._id, self._format)
return self._fname | python | def get_filename(self, renew=False):
"""Get the filename of this content.
If the file name doesn't already exist, we create it as {id}.{format}.
"""
if self._fname is None or renew:
self._fname = '%s.%s' % (self._id, self._format)
return self._fname | [
"def",
"get_filename",
"(",
"self",
",",
"renew",
"=",
"False",
")",
":",
"if",
"self",
".",
"_fname",
"is",
"None",
"or",
"renew",
":",
"self",
".",
"_fname",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"_id",
",",
"self",
".",
"_format",
")",
"return",
"self",
".",
"_fname"
]
| Get the filename of this content.
If the file name doesn't already exist, we create it as {id}.{format}. | [
"Get",
"the",
"filename",
"of",
"this",
"content",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L178-L185 | train |
sorgerlab/indra | indra/tools/reading/readers.py | Content.get_filepath | def get_filepath(self, renew=False):
"""Get the file path, joining the name and location for this file.
If no location is given, it is assumed to be "here", e.g. ".".
"""
if self._location is None or renew:
self._location = '.'
return path.join(self._location, self.get_filename()) | python | def get_filepath(self, renew=False):
"""Get the file path, joining the name and location for this file.
If no location is given, it is assumed to be "here", e.g. ".".
"""
if self._location is None or renew:
self._location = '.'
return path.join(self._location, self.get_filename()) | [
"def",
"get_filepath",
"(",
"self",
",",
"renew",
"=",
"False",
")",
":",
"if",
"self",
".",
"_location",
"is",
"None",
"or",
"renew",
":",
"self",
".",
"_location",
"=",
"'.'",
"return",
"path",
".",
"join",
"(",
"self",
".",
"_location",
",",
"self",
".",
"get_filename",
"(",
")",
")"
]
| Get the file path, joining the name and location for this file.
If no location is given, it is assumed to be "here", e.g. ".". | [
"Get",
"the",
"file",
"path",
"joining",
"the",
"name",
"and",
"location",
"for",
"this",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L187-L194 | train |
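A brief sketch of the Content workflow covered by the rows above, assuming an NXML file exists at the placeholder path:
from indra.tools.reading.readers import Content
content = Content.from_file('/papers/PMC1234567.nxml')  # placeholder path
print(content.get_filename())   # 'PMC1234567.nxml'
print(content.get_filepath())   # original directory joined with the file name
text = content.get_text()       # lazily loaded, decompressed, decoded text
content.change_id('FILE000001') # updates the id and renews the name/path labels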
sorgerlab/indra | indra/tools/reading/readers.py | ReadingData.get_statements | def get_statements(self, reprocess=False):
"""General method to create statements."""
if self._statements is None or reprocess:
# Handle the case that there is no content.
if self.content is None:
self._statements = []
return []
# Map to the different processors.
if self.reader == ReachReader.name:
if self.format == formats.JSON:
# Process the reach json into statements.
json_str = json.dumps(self.content)
processor = reach.process_json_str(json_str)
else:
raise ReadingError("Incorrect format for Reach output: %s."
% self.format)
elif self.reader == SparserReader.name:
if self.format == formats.JSON:
# Process the sparser content into statements
processor = sparser.process_json_dict(self.content)
if processor is not None:
processor.set_statements_pmid(None)
else:
raise ReadingError("Sparser should only ever be JSON, not "
"%s." % self.format)
elif self.reader == TripsReader.name:
processor = trips.process_xml(self.content)
else:
raise ReadingError("Unknown reader: %s." % self.reader)
# Get the statements from the processor, if it was resolved.
if processor is None:
logger.error("Production of statements from %s failed for %s."
% (self.reader, self.content_id))
stmts = []
else:
stmts = processor.statements
self._statements = stmts[:]
else:
stmts = self._statements[:]
return stmts | python | def get_statements(self, reprocess=False):
"""General method to create statements."""
if self._statements is None or reprocess:
# Handle the case that there is no content.
if self.content is None:
self._statements = []
return []
# Map to the different processors.
if self.reader == ReachReader.name:
if self.format == formats.JSON:
# Process the reach json into statements.
json_str = json.dumps(self.content)
processor = reach.process_json_str(json_str)
else:
raise ReadingError("Incorrect format for Reach output: %s."
% self.format)
elif self.reader == SparserReader.name:
if self.format == formats.JSON:
# Process the sparser content into statements
processor = sparser.process_json_dict(self.content)
if processor is not None:
processor.set_statements_pmid(None)
else:
raise ReadingError("Sparser should only ever be JSON, not "
"%s." % self.format)
elif self.reader == TripsReader.name:
processor = trips.process_xml(self.content)
else:
raise ReadingError("Unknown reader: %s." % self.reader)
# Get the statements from the processor, if it was resolved.
if processor is None:
logger.error("Production of statements from %s failed for %s."
% (self.reader, self.content_id))
stmts = []
else:
stmts = processor.statements
self._statements = stmts[:]
else:
stmts = self._statements[:]
return stmts | [
"def",
"get_statements",
"(",
"self",
",",
"reprocess",
"=",
"False",
")",
":",
"if",
"self",
".",
"_statements",
"is",
"None",
"or",
"reprocess",
":",
"# Handle the case that there is no content.",
"if",
"self",
".",
"content",
"is",
"None",
":",
"self",
".",
"_statements",
"=",
"[",
"]",
"return",
"[",
"]",
"# Map to the different processors.",
"if",
"self",
".",
"reader",
"==",
"ReachReader",
".",
"name",
":",
"if",
"self",
".",
"format",
"==",
"formats",
".",
"JSON",
":",
"# Process the reach json into statements.",
"json_str",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"content",
")",
"processor",
"=",
"reach",
".",
"process_json_str",
"(",
"json_str",
")",
"else",
":",
"raise",
"ReadingError",
"(",
"\"Incorrect format for Reach output: %s.\"",
"%",
"self",
".",
"format",
")",
"elif",
"self",
".",
"reader",
"==",
"SparserReader",
".",
"name",
":",
"if",
"self",
".",
"format",
"==",
"formats",
".",
"JSON",
":",
"# Process the sparser content into statements",
"processor",
"=",
"sparser",
".",
"process_json_dict",
"(",
"self",
".",
"content",
")",
"if",
"processor",
"is",
"not",
"None",
":",
"processor",
".",
"set_statements_pmid",
"(",
"None",
")",
"else",
":",
"raise",
"ReadingError",
"(",
"\"Sparser should only ever be JSON, not \"",
"\"%s.\"",
"%",
"self",
".",
"format",
")",
"elif",
"self",
".",
"reader",
"==",
"TripsReader",
".",
"name",
":",
"processor",
"=",
"trips",
".",
"process_xml",
"(",
"self",
".",
"content",
")",
"else",
":",
"raise",
"ReadingError",
"(",
"\"Unknown reader: %s.\"",
"%",
"self",
".",
"reader",
")",
"# Get the statements from the processor, if it was resolved.",
"if",
"processor",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"Production of statements from %s failed for %s.\"",
"%",
"(",
"self",
".",
"reader",
",",
"self",
".",
"content_id",
")",
")",
"stmts",
"=",
"[",
"]",
"else",
":",
"stmts",
"=",
"processor",
".",
"statements",
"self",
".",
"_statements",
"=",
"stmts",
"[",
":",
"]",
"else",
":",
"stmts",
"=",
"self",
".",
"_statements",
"[",
":",
"]",
"return",
"stmts"
]
| General method to create statements. | [
"General",
"method",
"to",
"create",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L241-L282 | train |
sorgerlab/indra | indra/tools/reading/readers.py | Reader.add_result | def add_result(self, content_id, content, **kwargs):
""""Add a result to the list of results."""
result_object = self.ResultClass(content_id, self.name, self.version,
formats.JSON, content, **kwargs)
self.results.append(result_object)
return | python | def add_result(self, content_id, content, **kwargs):
""""Add a result to the list of results."""
result_object = self.ResultClass(content_id, self.name, self.version,
formats.JSON, content, **kwargs)
self.results.append(result_object)
return | [
"def",
"add_result",
"(",
"self",
",",
"content_id",
",",
"content",
",",
"*",
"*",
"kwargs",
")",
":",
"result_object",
"=",
"self",
".",
"ResultClass",
"(",
"content_id",
",",
"self",
".",
"name",
",",
"self",
".",
"version",
",",
"formats",
".",
"JSON",
",",
"content",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"results",
".",
"append",
"(",
"result_object",
")",
"return"
]
| Add a result to the list of results. | [
"Add",
"a",
"result",
"to",
"the",
"list",
"of",
"results",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L316-L321 | train |
sorgerlab/indra | indra/tools/reading/readers.py | Reader._check_content | def _check_content(self, content_str):
"""Check if the content is likely to be successfully read."""
if self.do_content_check:
space_ratio = float(content_str.count(' '))/len(content_str)
if space_ratio > self.max_space_ratio:
return "space-ratio: %f > %f" % (space_ratio,
self.max_space_ratio)
if len(content_str) > self.input_character_limit:
return "too long: %d > %d" % (len(content_str),
self.input_character_limit)
return None | python | def _check_content(self, content_str):
"""Check if the content is likely to be successfully read."""
if self.do_content_check:
space_ratio = float(content_str.count(' '))/len(content_str)
if space_ratio > self.max_space_ratio:
return "space-ratio: %f > %f" % (space_ratio,
self.max_space_ratio)
if len(content_str) > self.input_character_limit:
return "too long: %d > %d" % (len(content_str),
self.input_character_limit)
return None | [
"def",
"_check_content",
"(",
"self",
",",
"content_str",
")",
":",
"if",
"self",
".",
"do_content_check",
":",
"space_ratio",
"=",
"float",
"(",
"content_str",
".",
"count",
"(",
"' '",
")",
")",
"/",
"len",
"(",
"content_str",
")",
"if",
"space_ratio",
">",
"self",
".",
"max_space_ratio",
":",
"return",
"\"space-ratio: %f > %f\"",
"%",
"(",
"space_ratio",
",",
"self",
".",
"max_space_ratio",
")",
"if",
"len",
"(",
"content_str",
")",
">",
"self",
".",
"input_character_limit",
":",
"return",
"\"too long: %d > %d\"",
"%",
"(",
"len",
"(",
"content_str",
")",
",",
"self",
".",
"input_character_limit",
")",
"return",
"None"
]
| Check if the content is likely to be successfully read. | [
"Check",
"if",
"the",
"content",
"is",
"likely",
"to",
"be",
"successfully",
"read",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L323-L333 | train |
sorgerlab/indra | indra/tools/reading/readers.py | ReachReader._check_reach_env | def _check_reach_env():
"""Check that the environment supports runnig reach."""
# Get the path to the REACH JAR
path_to_reach = get_config('REACHPATH')
if path_to_reach is None:
path_to_reach = environ.get('REACHPATH', None)
if path_to_reach is None or not path.exists(path_to_reach):
raise ReachError(
'Reach path unset or invalid. Check REACHPATH environment var '
'and/or config file.'
)
logger.debug('Using REACH jar at: %s' % path_to_reach)
# Get the reach version.
reach_version = get_config('REACH_VERSION')
if reach_version is None:
reach_version = environ.get('REACH_VERSION', None)
if reach_version is None:
logger.debug('REACH version not set in REACH_VERSION')
m = re.match('reach-(.*?)\.jar', path.basename(path_to_reach))
reach_version = re.sub('-SNAP.*?$', '', m.groups()[0])
logger.debug('Using REACH version: %s' % reach_version)
return path_to_reach, reach_version | python | def _check_reach_env():
"""Check that the environment supports runnig reach."""
# Get the path to the REACH JAR
path_to_reach = get_config('REACHPATH')
if path_to_reach is None:
path_to_reach = environ.get('REACHPATH', None)
if path_to_reach is None or not path.exists(path_to_reach):
raise ReachError(
'Reach path unset or invalid. Check REACHPATH environment var '
'and/or config file.'
)
logger.debug('Using REACH jar at: %s' % path_to_reach)
# Get the reach version.
reach_version = get_config('REACH_VERSION')
if reach_version is None:
reach_version = environ.get('REACH_VERSION', None)
if reach_version is None:
logger.debug('REACH version not set in REACH_VERSION')
m = re.match('reach-(.*?)\.jar', path.basename(path_to_reach))
reach_version = re.sub('-SNAP.*?$', '', m.groups()[0])
logger.debug('Using REACH version: %s' % reach_version)
return path_to_reach, reach_version | [
"def",
"_check_reach_env",
"(",
")",
":",
"# Get the path to the REACH JAR",
"path_to_reach",
"=",
"get_config",
"(",
"'REACHPATH'",
")",
"if",
"path_to_reach",
"is",
"None",
":",
"path_to_reach",
"=",
"environ",
".",
"get",
"(",
"'REACHPATH'",
",",
"None",
")",
"if",
"path_to_reach",
"is",
"None",
"or",
"not",
"path",
".",
"exists",
"(",
"path_to_reach",
")",
":",
"raise",
"ReachError",
"(",
"'Reach path unset or invalid. Check REACHPATH environment var '",
"'and/or config file.'",
")",
"logger",
".",
"debug",
"(",
"'Using REACH jar at: %s'",
"%",
"path_to_reach",
")",
"# Get the reach version.",
"reach_version",
"=",
"get_config",
"(",
"'REACH_VERSION'",
")",
"if",
"reach_version",
"is",
"None",
":",
"reach_version",
"=",
"environ",
".",
"get",
"(",
"'REACH_VERSION'",
",",
"None",
")",
"if",
"reach_version",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'REACH version not set in REACH_VERSION'",
")",
"m",
"=",
"re",
".",
"match",
"(",
"'reach-(.*?)\\.jar'",
",",
"path",
".",
"basename",
"(",
"path_to_reach",
")",
")",
"reach_version",
"=",
"re",
".",
"sub",
"(",
"'-SNAP.*?$'",
",",
"''",
",",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"logger",
".",
"debug",
"(",
"'Using REACH version: %s'",
"%",
"reach_version",
")",
"return",
"path_to_reach",
",",
"reach_version"
]
| Check that the environment supports running reach. | [
"Check",
"that",
"the",
"environment",
"supports",
"runnig",
"reach",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L409-L433 | train |
sorgerlab/indra | indra/tools/reading/readers.py | ReachReader.prep_input | def prep_input(self, read_list):
"""Apply the readers to the content."""
logger.info("Prepping input.")
i = 0
for content in read_list:
# Check the quality of the text, and skip if there are any issues.
quality_issue = self._check_content(content.get_text())
if quality_issue is not None:
logger.warning("Skipping %d due to: %s"
% (content.get_id(), quality_issue))
continue
# Look for things that are more like file names, rather than ids.
cid = content.get_id()
if isinstance(cid, str) and re.match('^\w*?\d+$', cid) is None:
new_id = 'FILE%06d' % i
i += 1
self.id_maps[new_id] = cid
content.change_id(new_id)
new_fpath = content.copy_to(self.input_dir)
else:
# Put the content in the appropriate directory.
new_fpath = content.copy_to(self.input_dir)
self.num_input += 1
logger.debug('%s saved for reading by reach.'
% new_fpath)
return | python | def prep_input(self, read_list):
"""Apply the readers to the content."""
logger.info("Prepping input.")
i = 0
for content in read_list:
# Check the quality of the text, and skip if there are any issues.
quality_issue = self._check_content(content.get_text())
if quality_issue is not None:
logger.warning("Skipping %d due to: %s"
% (content.get_id(), quality_issue))
continue
# Look for things that are more like file names, rather than ids.
cid = content.get_id()
if isinstance(cid, str) and re.match('^\w*?\d+$', cid) is None:
new_id = 'FILE%06d' % i
i += 1
self.id_maps[new_id] = cid
content.change_id(new_id)
new_fpath = content.copy_to(self.input_dir)
else:
# Put the content in the appropriate directory.
new_fpath = content.copy_to(self.input_dir)
self.num_input += 1
logger.debug('%s saved for reading by reach.'
% new_fpath)
return | [
"def",
"prep_input",
"(",
"self",
",",
"read_list",
")",
":",
"logger",
".",
"info",
"(",
"\"Prepping input.\"",
")",
"i",
"=",
"0",
"for",
"content",
"in",
"read_list",
":",
"# Check the quality of the text, and skip if there are any issues.",
"quality_issue",
"=",
"self",
".",
"_check_content",
"(",
"content",
".",
"get_text",
"(",
")",
")",
"if",
"quality_issue",
"is",
"not",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Skipping %d due to: %s\"",
"%",
"(",
"content",
".",
"get_id",
"(",
")",
",",
"quality_issue",
")",
")",
"continue",
"# Look for things that are more like file names, rather than ids.",
"cid",
"=",
"content",
".",
"get_id",
"(",
")",
"if",
"isinstance",
"(",
"cid",
",",
"str",
")",
"and",
"re",
".",
"match",
"(",
"'^\\w*?\\d+$'",
",",
"cid",
")",
"is",
"None",
":",
"new_id",
"=",
"'FILE%06d'",
"%",
"i",
"i",
"+=",
"1",
"self",
".",
"id_maps",
"[",
"new_id",
"]",
"=",
"cid",
"content",
".",
"change_id",
"(",
"new_id",
")",
"new_fpath",
"=",
"content",
".",
"copy_to",
"(",
"self",
".",
"input_dir",
")",
"else",
":",
"# Put the content in the appropriate directory.",
"new_fpath",
"=",
"content",
".",
"copy_to",
"(",
"self",
".",
"input_dir",
")",
"self",
".",
"num_input",
"+=",
"1",
"logger",
".",
"debug",
"(",
"'%s saved for reading by reach.'",
"%",
"new_fpath",
")",
"return"
]
| Apply the readers to the content. | [
"Apply",
"the",
"readers",
"to",
"the",
"content",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L440-L466 | train |
sorgerlab/indra | indra/tools/reading/readers.py | ReachReader.get_output | def get_output(self):
"""Get the output of a reading job as a list of filenames."""
logger.info("Getting outputs.")
# Get the set of prefixes (each will correspond to three json files.)
json_files = glob.glob(path.join(self.output_dir, '*.json'))
json_prefixes = set()
for json_file in json_files:
# Remove .uaz.<subfile type>.json
prefix = '.'.join(path.basename(json_file).split('.')[:-3])
json_prefixes.add(path.join(self.output_dir, prefix))
# Join each set of json files and store the json dict.
for prefix in json_prefixes:
base_prefix = path.basename(prefix)
if base_prefix.isdecimal():
base_prefix = int(base_prefix)
elif base_prefix in self.id_maps.keys():
base_prefix = self.id_maps[base_prefix]
try:
content = self._join_json_files(prefix, clear=True)
except Exception as e:
logger.exception(e)
logger.error("Could not load result for prefix %s." % prefix)
content = None
self.add_result(base_prefix, content)
logger.debug('Joined files for prefix %s.' % base_prefix)
return self.results | python | def get_output(self):
"""Get the output of a reading job as a list of filenames."""
logger.info("Getting outputs.")
# Get the set of prefixes (each will correspond to three json files.)
json_files = glob.glob(path.join(self.output_dir, '*.json'))
json_prefixes = set()
for json_file in json_files:
# Remove .uaz.<subfile type>.json
prefix = '.'.join(path.basename(json_file).split('.')[:-3])
json_prefixes.add(path.join(self.output_dir, prefix))
# Join each set of json files and store the json dict.
for prefix in json_prefixes:
base_prefix = path.basename(prefix)
if base_prefix.isdecimal():
base_prefix = int(base_prefix)
elif base_prefix in self.id_maps.keys():
base_prefix = self.id_maps[base_prefix]
try:
content = self._join_json_files(prefix, clear=True)
except Exception as e:
logger.exception(e)
logger.error("Could not load result for prefix %s." % prefix)
content = None
self.add_result(base_prefix, content)
logger.debug('Joined files for prefix %s.' % base_prefix)
return self.results | [
"def",
"get_output",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Getting outputs.\"",
")",
"# Get the set of prefixes (each will correspond to three json files.)",
"json_files",
"=",
"glob",
".",
"glob",
"(",
"path",
".",
"join",
"(",
"self",
".",
"output_dir",
",",
"'*.json'",
")",
")",
"json_prefixes",
"=",
"set",
"(",
")",
"for",
"json_file",
"in",
"json_files",
":",
"# Remove .uaz.<subfile type>.json",
"prefix",
"=",
"'.'",
".",
"join",
"(",
"path",
".",
"basename",
"(",
"json_file",
")",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"3",
"]",
")",
"json_prefixes",
".",
"add",
"(",
"path",
".",
"join",
"(",
"self",
".",
"output_dir",
",",
"prefix",
")",
")",
"# Join each set of json files and store the json dict.",
"for",
"prefix",
"in",
"json_prefixes",
":",
"base_prefix",
"=",
"path",
".",
"basename",
"(",
"prefix",
")",
"if",
"base_prefix",
".",
"isdecimal",
"(",
")",
":",
"base_prefix",
"=",
"int",
"(",
"base_prefix",
")",
"elif",
"base_prefix",
"in",
"self",
".",
"id_maps",
".",
"keys",
"(",
")",
":",
"base_prefix",
"=",
"self",
".",
"id_maps",
"[",
"base_prefix",
"]",
"try",
":",
"content",
"=",
"self",
".",
"_join_json_files",
"(",
"prefix",
",",
"clear",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"logger",
".",
"error",
"(",
"\"Could not load result for prefix %s.\"",
"%",
"prefix",
")",
"content",
"=",
"None",
"self",
".",
"add_result",
"(",
"base_prefix",
",",
"content",
")",
"logger",
".",
"debug",
"(",
"'Joined files for prefix %s.'",
"%",
"base_prefix",
")",
"return",
"self",
".",
"results"
]
| Get the output of a reading job as a list of filenames. | [
"Get",
"the",
"output",
"of",
"a",
"reading",
"job",
"as",
"a",
"list",
"of",
"filenames",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L468-L494 | train |
sorgerlab/indra | indra/tools/reading/readers.py | ReachReader.read | def read(self, read_list, verbose=False, log=False):
"""Read the content, returning a list of ReadingData objects."""
ret = []
mem_tot = _get_mem_total()
if mem_tot is not None and mem_tot <= self.REACH_MEM + self.MEM_BUFFER:
logger.error(
"Too little memory to run reach. At least %s required." %
(self.REACH_MEM + self.MEM_BUFFER)
)
logger.info("REACH not run.")
return ret
# Prep the content
self.prep_input(read_list)
if self.num_input > 0:
# Run REACH!
logger.info("Beginning reach.")
args = [
'java',
'-Dconfig.file=%s' % self.conf_file_path,
'-jar', self.exec_path
]
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
log_file_str = ''
for line in iter(p.stdout.readline, b''):
log_line = 'REACH: ' + line.strip().decode('utf8')
if verbose:
logger.info(log_line)
if log:
log_file_str += log_line + '\n'
if log:
with open('reach_run.log', 'ab') as f:
f.write(log_file_str.encode('utf8'))
p_out, p_err = p.communicate()
if p.returncode:
logger.error('Problem running REACH:')
logger.error('Stdout: %s' % p_out.decode('utf-8'))
logger.error('Stderr: %s' % p_err.decode('utf-8'))
raise ReachError("Problem running REACH")
logger.info("Reach finished.")
ret = self.get_output()
self.clear_input()
return ret | python | def read(self, read_list, verbose=False, log=False):
"""Read the content, returning a list of ReadingData objects."""
ret = []
mem_tot = _get_mem_total()
if mem_tot is not None and mem_tot <= self.REACH_MEM + self.MEM_BUFFER:
logger.error(
"Too little memory to run reach. At least %s required." %
(self.REACH_MEM + self.MEM_BUFFER)
)
logger.info("REACH not run.")
return ret
# Prep the content
self.prep_input(read_list)
if self.num_input > 0:
# Run REACH!
logger.info("Beginning reach.")
args = [
'java',
'-Dconfig.file=%s' % self.conf_file_path,
'-jar', self.exec_path
]
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
log_file_str = ''
for line in iter(p.stdout.readline, b''):
log_line = 'REACH: ' + line.strip().decode('utf8')
if verbose:
logger.info(log_line)
if log:
log_file_str += log_line + '\n'
if log:
with open('reach_run.log', 'ab') as f:
f.write(log_file_str.encode('utf8'))
p_out, p_err = p.communicate()
if p.returncode:
logger.error('Problem running REACH:')
logger.error('Stdout: %s' % p_out.decode('utf-8'))
logger.error('Stderr: %s' % p_err.decode('utf-8'))
raise ReachError("Problem running REACH")
logger.info("Reach finished.")
ret = self.get_output()
self.clear_input()
return ret | [
"def",
"read",
"(",
"self",
",",
"read_list",
",",
"verbose",
"=",
"False",
",",
"log",
"=",
"False",
")",
":",
"ret",
"=",
"[",
"]",
"mem_tot",
"=",
"_get_mem_total",
"(",
")",
"if",
"mem_tot",
"is",
"not",
"None",
"and",
"mem_tot",
"<=",
"self",
".",
"REACH_MEM",
"+",
"self",
".",
"MEM_BUFFER",
":",
"logger",
".",
"error",
"(",
"\"Too little memory to run reach. At least %s required.\"",
"%",
"(",
"self",
".",
"REACH_MEM",
"+",
"self",
".",
"MEM_BUFFER",
")",
")",
"logger",
".",
"info",
"(",
"\"REACH not run.\"",
")",
"return",
"ret",
"# Prep the content",
"self",
".",
"prep_input",
"(",
"read_list",
")",
"if",
"self",
".",
"num_input",
">",
"0",
":",
"# Run REACH!",
"logger",
".",
"info",
"(",
"\"Beginning reach.\"",
")",
"args",
"=",
"[",
"'java'",
",",
"'-Dconfig.file=%s'",
"%",
"self",
".",
"conf_file_path",
",",
"'-jar'",
",",
"self",
".",
"exec_path",
"]",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"log_file_str",
"=",
"''",
"for",
"line",
"in",
"iter",
"(",
"p",
".",
"stdout",
".",
"readline",
",",
"b''",
")",
":",
"log_line",
"=",
"'REACH: '",
"+",
"line",
".",
"strip",
"(",
")",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"log_line",
")",
"if",
"log",
":",
"log_file_str",
"+=",
"log_line",
"+",
"'\\n'",
"if",
"log",
":",
"with",
"open",
"(",
"'reach_run.log'",
",",
"'ab'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"log_file_str",
".",
"encode",
"(",
"'utf8'",
")",
")",
"p_out",
",",
"p_err",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"p",
".",
"returncode",
":",
"logger",
".",
"error",
"(",
"'Problem running REACH:'",
")",
"logger",
".",
"error",
"(",
"'Stdout: %s'",
"%",
"p_out",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"logger",
".",
"error",
"(",
"'Stderr: %s'",
"%",
"p_err",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"raise",
"ReachError",
"(",
"\"Problem running REACH\"",
")",
"logger",
".",
"info",
"(",
"\"Reach finished.\"",
")",
"ret",
"=",
"self",
".",
"get_output",
"(",
")",
"self",
".",
"clear_input",
"(",
")",
"return",
"ret"
]
| Read the content, returning a list of ReadingData objects. | [
"Read",
"the",
"content",
"returning",
"a",
"list",
"of",
"ReadingData",
"objects",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L505-L549 | train |
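An end-to-end sketch for the REACH reader rows above; the constructor keyword arguments are assumptions (the Reader base-class __init__ is not among these rows) and the input path is a placeholder:
from indra.tools.reading.readers import Content, get_reader_class
ReachReaderClass = get_reader_class('reach')
reader = ReachReaderClass(base_dir='reach_work', n_proc=1)  # assumed kwargs
content = Content.from_file('/papers/PMC1234567.nxml')      # placeholder path
results = reader.read([content], verbose=True)              # list of ReadingData
stmts = [s for rd in results for s in rd.get_statements()]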
sorgerlab/indra | indra/tools/reading/readers.py | SparserReader.prep_input | def prep_input(self, read_list):
"Prepare the list of files or text content objects to be read."
logger.info('Prepping input for sparser.')
self.file_list = []
for content in read_list:
quality_issue = self._check_content(content.get_text())
if quality_issue is not None:
logger.warning("Skipping %d due to: %s"
% (content.get_id(), quality_issue))
continue
if content.is_format('nxml'):
# If it is already an nxml, we just need to adjust the
# name a bit, if anything.
if not content.get_filename().startswith('PMC'):
content.change_id('PMC' + str(content.get_id()))
fpath = content.copy_to(self.tmp_dir)
self.file_list.append(fpath)
elif content.is_format('txt', 'text'):
# Otherwise we need to frame the content in xml and put it
# in a new file with the appropriate name.
nxml_str = sparser.make_nxml_from_text(content.get_text())
new_content = Content.from_string('PMC' + str(content.get_id()),
'nxml', nxml_str)
fpath = new_content.copy_to(self.tmp_dir)
self.file_list.append(fpath)
else:
raise SparserError("Unrecognized format %s."
% content.format)
return | python | def prep_input(self, read_list):
"Prepare the list of files or text content objects to be read."
logger.info('Prepping input for sparser.')
self.file_list = []
for content in read_list:
quality_issue = self._check_content(content.get_text())
if quality_issue is not None:
logger.warning("Skipping %d due to: %s"
% (content.get_id(), quality_issue))
continue
if content.is_format('nxml'):
# If it is already an nxml, we just need to adjust the
# name a bit, if anything.
if not content.get_filename().startswith('PMC'):
content.change_id('PMC' + str(content.get_id()))
fpath = content.copy_to(self.tmp_dir)
self.file_list.append(fpath)
elif content.is_format('txt', 'text'):
# Otherwise we need to frame the content in xml and put it
# in a new file with the appropriate name.
nxml_str = sparser.make_nxml_from_text(content.get_text())
new_content = Content.from_string('PMC' + str(content.get_id()),
'nxml', nxml_str)
fpath = new_content.copy_to(self.tmp_dir)
self.file_list.append(fpath)
else:
raise SparserError("Unrecognized format %s."
% content.format)
return | [
"def",
"prep_input",
"(",
"self",
",",
"read_list",
")",
":",
"logger",
".",
"info",
"(",
"'Prepping input for sparser.'",
")",
"self",
".",
"file_list",
"=",
"[",
"]",
"for",
"content",
"in",
"read_list",
":",
"quality_issue",
"=",
"self",
".",
"_check_content",
"(",
"content",
".",
"get_text",
"(",
")",
")",
"if",
"quality_issue",
"is",
"not",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Skipping %d due to: %s\"",
"%",
"(",
"content",
".",
"get_id",
"(",
")",
",",
"quality_issue",
")",
")",
"continue",
"if",
"content",
".",
"is_format",
"(",
"'nxml'",
")",
":",
"# If it is already an nxml, we just need to adjust the",
"# name a bit, if anything.",
"if",
"not",
"content",
".",
"get_filename",
"(",
")",
".",
"startswith",
"(",
"'PMC'",
")",
":",
"content",
".",
"change_id",
"(",
"'PMC'",
"+",
"str",
"(",
"content",
".",
"get_id",
"(",
")",
")",
")",
"fpath",
"=",
"content",
".",
"copy_to",
"(",
"self",
".",
"tmp_dir",
")",
"self",
".",
"file_list",
".",
"append",
"(",
"fpath",
")",
"elif",
"content",
".",
"is_format",
"(",
"'txt'",
",",
"'text'",
")",
":",
"# Otherwise we need to frame the content in xml and put it",
"# in a new file with the appropriate name.",
"nxml_str",
"=",
"sparser",
".",
"make_nxml_from_text",
"(",
"content",
".",
"get_text",
"(",
")",
")",
"new_content",
"=",
"Content",
".",
"from_string",
"(",
"'PMC'",
"+",
"str",
"(",
"content",
".",
"get_id",
"(",
")",
")",
",",
"'nxml'",
",",
"nxml_str",
")",
"fpath",
"=",
"new_content",
".",
"copy_to",
"(",
"self",
".",
"tmp_dir",
")",
"self",
".",
"file_list",
".",
"append",
"(",
"fpath",
")",
"else",
":",
"raise",
"SparserError",
"(",
"\"Unrecognized format %s.\"",
"%",
"content",
".",
"format",
")",
"return"
]
| Prepare the list of files or text content objects to be read. | [
"Prepare",
"the",
"list",
"of",
"files",
"or",
"text",
"content",
"objects",
"to",
"be",
"read",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L567-L598 | train |
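A small sketch of the text-to-NXML framing step that SparserReader.prep_input performs, assuming make_nxml_from_text is importable from indra.sources.sparser as the code above implies:
from indra.sources import sparser
from indra.tools.reading.readers import Content
# Wrap plain text in minimal NXML so Sparser can read it (placeholder id and text)
nxml_str = sparser.make_nxml_from_text('MEK phosphorylates ERK.')
content = Content.from_string('PMC000001', 'nxml', nxml_str)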
sorgerlab/indra | indra/tools/reading/readers.py | SparserReader.get_output | def get_output(self, output_files, clear=True):
"Get the output files as an id indexed dict."
patt = re.compile(r'(.*?)-semantics.*?')
for outpath in output_files:
if outpath is None:
logger.warning("Found outpath with value None. Skipping.")
continue
re_out = patt.match(path.basename(outpath))
if re_out is None:
raise SparserError("Could not get prefix from output path %s."
% outpath)
prefix = re_out.groups()[0]
if prefix.startswith('PMC'):
prefix = prefix[3:]
if prefix.isdecimal():
# In this case we assume the prefix is a tcid.
prefix = int(prefix)
try:
with open(outpath, 'rt') as f:
content = json.load(f)
except Exception as e:
logger.exception(e)
logger.error("Could not load reading content from %s."
% outpath)
content = None
self.add_result(prefix, content)
if clear:
input_path = outpath.replace('-semantics.json', '.nxml')
try:
remove(outpath)
remove(input_path)
except Exception as e:
logger.exception(e)
logger.error("Could not remove sparser files %s and %s."
% (outpath, input_path))
return self.results | python | def get_output(self, output_files, clear=True):
"Get the output files as an id indexed dict."
patt = re.compile(r'(.*?)-semantics.*?')
for outpath in output_files:
if outpath is None:
logger.warning("Found outpath with value None. Skipping.")
continue
re_out = patt.match(path.basename(outpath))
if re_out is None:
raise SparserError("Could not get prefix from output path %s."
% outpath)
prefix = re_out.groups()[0]
if prefix.startswith('PMC'):
prefix = prefix[3:]
if prefix.isdecimal():
# In this case we assume the prefix is a tcid.
prefix = int(prefix)
try:
with open(outpath, 'rt') as f:
content = json.load(f)
except Exception as e:
logger.exception(e)
logger.error("Could not load reading content from %s."
% outpath)
content = None
self.add_result(prefix, content)
if clear:
input_path = outpath.replace('-semantics.json', '.nxml')
try:
remove(outpath)
remove(input_path)
except Exception as e:
logger.exception(e)
logger.error("Could not remove sparser files %s and %s."
% (outpath, input_path))
return self.results | [
"def",
"get_output",
"(",
"self",
",",
"output_files",
",",
"clear",
"=",
"True",
")",
":",
"patt",
"=",
"re",
".",
"compile",
"(",
"r'(.*?)-semantics.*?'",
")",
"for",
"outpath",
"in",
"output_files",
":",
"if",
"outpath",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Found outpath with value None. Skipping.\"",
")",
"continue",
"re_out",
"=",
"patt",
".",
"match",
"(",
"path",
".",
"basename",
"(",
"outpath",
")",
")",
"if",
"re_out",
"is",
"None",
":",
"raise",
"SparserError",
"(",
"\"Could not get prefix from output path %s.\"",
"%",
"outpath",
")",
"prefix",
"=",
"re_out",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"if",
"prefix",
".",
"startswith",
"(",
"'PMC'",
")",
":",
"prefix",
"=",
"prefix",
"[",
"3",
":",
"]",
"if",
"prefix",
".",
"isdecimal",
"(",
")",
":",
"# In this case we assume the prefix is a tcid.",
"prefix",
"=",
"int",
"(",
"prefix",
")",
"try",
":",
"with",
"open",
"(",
"outpath",
",",
"'rt'",
")",
"as",
"f",
":",
"content",
"=",
"json",
".",
"load",
"(",
"f",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"logger",
".",
"error",
"(",
"\"Could not load reading content from %s.\"",
"%",
"outpath",
")",
"content",
"=",
"None",
"self",
".",
"add_result",
"(",
"prefix",
",",
"content",
")",
"if",
"clear",
":",
"input_path",
"=",
"outpath",
".",
"replace",
"(",
"'-semantics.json'",
",",
"'.nxml'",
")",
"try",
":",
"remove",
"(",
"outpath",
")",
"remove",
"(",
"input_path",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"logger",
".",
"error",
"(",
"\"Could not remove sparser files %s and %s.\"",
"%",
"(",
"outpath",
",",
"input_path",
")",
")",
"return",
"self",
".",
"results"
]
| Get the output files as an id indexed dict. | [
"Get",
"the",
"output",
"files",
"as",
"an",
"id",
"indexed",
"dict",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L600-L639 | train |
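To illustrate how get_output recovers the content identifier from a Sparser output filename (the regex and prefix handling above), here is a small standalone sketch; the filename is made up.

    import re
    from os import path

    patt = re.compile(r'(.*?)-semantics.*?')
    outpath = '/tmp/PMC1234567-semantics.json'  # hypothetical output path
    prefix = patt.match(path.basename(outpath)).groups()[0]  # 'PMC1234567'
    if prefix.startswith('PMC'):
        prefix = prefix[3:]
    if prefix.isdecimal():
        prefix = int(prefix)  # treated as a text content id (tcid)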
sorgerlab/indra | indra/tools/reading/readers.py | SparserReader.read_some | def read_some(self, fpath_list, outbuf=None, verbose=False):
"Perform a few readings."
outpath_list = []
for fpath in fpath_list:
output, outbuf = self.read_one(fpath, outbuf, verbose)
if output is not None:
outpath_list.append(output)
return outpath_list, outbuf | python | def read_some(self, fpath_list, outbuf=None, verbose=False):
"Perform a few readings."
outpath_list = []
for fpath in fpath_list:
output, outbuf = self.read_one(fpath, outbuf, verbose)
if output is not None:
outpath_list.append(output)
return outpath_list, outbuf | [
"def",
"read_some",
"(",
"self",
",",
"fpath_list",
",",
"outbuf",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"outpath_list",
"=",
"[",
"]",
"for",
"fpath",
"in",
"fpath_list",
":",
"output",
",",
"outbuf",
"=",
"self",
".",
"read_one",
"(",
"fpath",
",",
"outbuf",
",",
"verbose",
")",
"if",
"output",
"is",
"not",
"None",
":",
"outpath_list",
".",
"append",
"(",
"output",
")",
"return",
"outpath_list",
",",
"outbuf"
]
| Perform a few readings. | [
"Perform",
"a",
"few",
"readings",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L662-L669 | train |
sorgerlab/indra | indra/tools/reading/readers.py | SparserReader.read | def read(self, read_list, verbose=False, log=False, n_per_proc=None):
"Perform the actual reading."
ret = []
self.prep_input(read_list)
L = len(self.file_list)
if L == 0:
return ret
logger.info("Beginning to run sparser.")
output_file_list = []
if log:
log_name = 'sparser_run_%s.log' % _time_stamp()
outbuf = open(log_name, 'wb')
else:
outbuf = None
try:
if self.n_proc == 1:
for fpath in self.file_list:
outpath, _ = self.read_one(fpath, outbuf, verbose)
if outpath is not None:
output_file_list.append(outpath)
else:
if n_per_proc is None:
n_per_proc = max(1, min(1000, L//self.n_proc//2))
pool = None
try:
pool = Pool(self.n_proc)
if n_per_proc != 1:
batches = [self.file_list[n*n_per_proc:(n+1)*n_per_proc]
for n in range(L//n_per_proc + 1)]
out_lists_and_buffs = pool.map(self.read_some,
batches)
else:
out_files_and_buffs = pool.map(self.read_one,
self.file_list)
out_lists_and_buffs = [([out_files], buffs)
for out_files, buffs
in out_files_and_buffs]
finally:
if pool is not None:
pool.close()
pool.join()
for i, (out_list, buff) in enumerate(out_lists_and_buffs):
if out_list is not None:
output_file_list += out_list
if log:
outbuf.write(b'Log for producing output %d/%d.\n'
% (i, len(out_lists_and_buffs)))
if buff is not None:
buff.seek(0)
outbuf.write(buff.read() + b'\n')
else:
outbuf.write(b'ERROR: buffer was None. '
b'No logs available.\n')
outbuf.flush()
finally:
if log:
outbuf.close()
if verbose:
logger.info("Sparser logs may be found at %s." %
log_name)
ret = self.get_output(output_file_list)
return ret | python | def read(self, read_list, verbose=False, log=False, n_per_proc=None):
"Perform the actual reading."
ret = []
self.prep_input(read_list)
L = len(self.file_list)
if L == 0:
return ret
logger.info("Beginning to run sparser.")
output_file_list = []
if log:
log_name = 'sparser_run_%s.log' % _time_stamp()
outbuf = open(log_name, 'wb')
else:
outbuf = None
try:
if self.n_proc == 1:
for fpath in self.file_list:
outpath, _ = self.read_one(fpath, outbuf, verbose)
if outpath is not None:
output_file_list.append(outpath)
else:
if n_per_proc is None:
n_per_proc = max(1, min(1000, L//self.n_proc//2))
pool = None
try:
pool = Pool(self.n_proc)
if n_per_proc != 1:
batches = [self.file_list[n*n_per_proc:(n+1)*n_per_proc]
for n in range(L//n_per_proc + 1)]
out_lists_and_buffs = pool.map(self.read_some,
batches)
else:
out_files_and_buffs = pool.map(self.read_one,
self.file_list)
out_lists_and_buffs = [([out_files], buffs)
for out_files, buffs
in out_files_and_buffs]
finally:
if pool is not None:
pool.close()
pool.join()
for i, (out_list, buff) in enumerate(out_lists_and_buffs):
if out_list is not None:
output_file_list += out_list
if log:
outbuf.write(b'Log for producing output %d/%d.\n'
% (i, len(out_lists_and_buffs)))
if buff is not None:
buff.seek(0)
outbuf.write(buff.read() + b'\n')
else:
outbuf.write(b'ERROR: buffer was None. '
b'No logs available.\n')
outbuf.flush()
finally:
if log:
outbuf.close()
if verbose:
logger.info("Sparser logs may be found at %s." %
log_name)
ret = self.get_output(output_file_list)
return ret | [
"def",
"read",
"(",
"self",
",",
"read_list",
",",
"verbose",
"=",
"False",
",",
"log",
"=",
"False",
",",
"n_per_proc",
"=",
"None",
")",
":",
"ret",
"=",
"[",
"]",
"self",
".",
"prep_input",
"(",
"read_list",
")",
"L",
"=",
"len",
"(",
"self",
".",
"file_list",
")",
"if",
"L",
"==",
"0",
":",
"return",
"ret",
"logger",
".",
"info",
"(",
"\"Beginning to run sparser.\"",
")",
"output_file_list",
"=",
"[",
"]",
"if",
"log",
":",
"log_name",
"=",
"'sparser_run_%s.log'",
"%",
"_time_stamp",
"(",
")",
"outbuf",
"=",
"open",
"(",
"log_name",
",",
"'wb'",
")",
"else",
":",
"outbuf",
"=",
"None",
"try",
":",
"if",
"self",
".",
"n_proc",
"==",
"1",
":",
"for",
"fpath",
"in",
"self",
".",
"file_list",
":",
"outpath",
",",
"_",
"=",
"self",
".",
"read_one",
"(",
"fpath",
",",
"outbuf",
",",
"verbose",
")",
"if",
"outpath",
"is",
"not",
"None",
":",
"output_file_list",
".",
"append",
"(",
"outpath",
")",
"else",
":",
"if",
"n_per_proc",
"is",
"None",
":",
"n_per_proc",
"=",
"max",
"(",
"1",
",",
"min",
"(",
"1000",
",",
"L",
"//",
"self",
".",
"n_proc",
"//",
"2",
")",
")",
"pool",
"=",
"None",
"try",
":",
"pool",
"=",
"Pool",
"(",
"self",
".",
"n_proc",
")",
"if",
"n_per_proc",
"is",
"not",
"1",
":",
"batches",
"=",
"[",
"self",
".",
"file_list",
"[",
"n",
"*",
"n_per_proc",
":",
"(",
"n",
"+",
"1",
")",
"*",
"n_per_proc",
"]",
"for",
"n",
"in",
"range",
"(",
"L",
"//",
"n_per_proc",
"+",
"1",
")",
"]",
"out_lists_and_buffs",
"=",
"pool",
".",
"map",
"(",
"self",
".",
"read_some",
",",
"batches",
")",
"else",
":",
"out_files_and_buffs",
"=",
"pool",
".",
"map",
"(",
"self",
".",
"read_one",
",",
"self",
".",
"file_list",
")",
"out_lists_and_buffs",
"=",
"[",
"(",
"[",
"out_files",
"]",
",",
"buffs",
")",
"for",
"out_files",
",",
"buffs",
"in",
"out_files_and_buffs",
"]",
"finally",
":",
"if",
"pool",
"is",
"not",
"None",
":",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"for",
"i",
",",
"(",
"out_list",
",",
"buff",
")",
"in",
"enumerate",
"(",
"out_lists_and_buffs",
")",
":",
"if",
"out_list",
"is",
"not",
"None",
":",
"output_file_list",
"+=",
"out_list",
"if",
"log",
":",
"outbuf",
".",
"write",
"(",
"b'Log for producing output %d/%d.\\n'",
"%",
"(",
"i",
",",
"len",
"(",
"out_lists_and_buffs",
")",
")",
")",
"if",
"buff",
"is",
"not",
"None",
":",
"buff",
".",
"seek",
"(",
"0",
")",
"outbuf",
".",
"write",
"(",
"buff",
".",
"read",
"(",
")",
"+",
"b'\\n'",
")",
"else",
":",
"outbuf",
".",
"write",
"(",
"b'ERROR: no buffer was None. '",
"b'No logs available.\\n'",
")",
"outbuf",
".",
"flush",
"(",
")",
"finally",
":",
"if",
"log",
":",
"outbuf",
".",
"close",
"(",
")",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Sparser logs may be found at %s.\"",
"%",
"log_name",
")",
"ret",
"=",
"self",
".",
"get_output",
"(",
"output_file_list",
")",
"return",
"ret"
]
| Perform the actual reading. | [
"Perform",
"the",
"actual",
"reading",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L671-L733 | train |
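The batching logic in read above can be hard to follow in isolation; this small sketch shows how the per-process batch size and the batches themselves are computed for a hypothetical workload.

    # Hypothetical numbers: 2500 input files read with 4 worker processes.
    file_list = ['file_%d.nxml' % i for i in range(2500)]
    L = len(file_list)
    n_proc = 4
    n_per_proc = max(1, min(1000, L // n_proc // 2))  # 312 files per batch here
    batches = [file_list[n * n_per_proc:(n + 1) * n_per_proc]
               for n in range(L // n_per_proc + 1)]
    # Each batch is handed to read_some in a separate pool worker.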
sorgerlab/indra | indra/sources/isi/api.py | process_text | def process_text(text, pmid=None, cleanup=True, add_grounding=True):
"""Process a string using the ISI reader and extract INDRA statements.
Parameters
----------
text : str
A text string to process
pmid : Optional[str]
The PMID associated with this text (or None if not specified)
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing statements
"""
# Create a temporary directory to store the preprocessed input
pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
pp = IsiPreprocessor(pp_dir)
extra_annotations = {}
pp.preprocess_plain_text_string(text, pmid, extra_annotations)
# Run the ISI reader and extract statements
ip = process_preprocessed(pp)
if add_grounding:
ip.add_grounding()
if cleanup:
# Remove temporary directory with processed input
shutil.rmtree(pp_dir)
else:
logger.info('Not cleaning up %s' % pp_dir)
return ip | python | def process_text(text, pmid=None, cleanup=True, add_grounding=True):
"""Process a string using the ISI reader and extract INDRA statements.
Parameters
----------
text : str
A text string to process
pmid : Optional[str]
The PMID associated with this text (or None if not specified)
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing statements
"""
# Create a temporary directory to store the preprocessed input
pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
pp = IsiPreprocessor(pp_dir)
extra_annotations = {}
pp.preprocess_plain_text_string(text, pmid, extra_annotations)
# Run the ISI reader and extract statements
ip = process_preprocessed(pp)
if add_grounding:
ip.add_grounding()
if cleanup:
# Remove temporary directory with processed input
shutil.rmtree(pp_dir)
else:
logger.info('Not cleaning up %s' % pp_dir)
return ip | [
"def",
"process_text",
"(",
"text",
",",
"pmid",
"=",
"None",
",",
"cleanup",
"=",
"True",
",",
"add_grounding",
"=",
"True",
")",
":",
"# Create a temporary directory to store the proprocessed input",
"pp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"'indra_isi_pp_output'",
")",
"pp",
"=",
"IsiPreprocessor",
"(",
"pp_dir",
")",
"extra_annotations",
"=",
"{",
"}",
"pp",
".",
"preprocess_plain_text_string",
"(",
"text",
",",
"pmid",
",",
"extra_annotations",
")",
"# Run the ISI reader and extract statements",
"ip",
"=",
"process_preprocessed",
"(",
"pp",
")",
"if",
"add_grounding",
":",
"ip",
".",
"add_grounding",
"(",
")",
"if",
"cleanup",
":",
"# Remove temporary directory with processed input",
"shutil",
".",
"rmtree",
"(",
"pp_dir",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Not cleaning up %s'",
"%",
"pp_dir",
")",
"return",
"ip"
]
| Process a string using the ISI reader and extract INDRA statements.
Parameters
----------
text : str
A text string to process
pmid : Optional[str]
The PMID associated with this text (or None if not specified)
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing statements | [
"Process",
"a",
"string",
"using",
"the",
"ISI",
"reader",
"and",
"extract",
"INDRA",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/api.py#L17-L55 | train |
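A minimal usage sketch for the function above; it assumes the ISI reader itself is installed and configured for use with INDRA, and the sentence and PMID are made up.

    from indra.sources.isi.api import process_text

    ip = process_text('MEK1 phosphorylates ERK2.', pmid='12345')
    for stmt in ip.statements:
        print(stmt)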
sorgerlab/indra | indra/sources/isi/api.py | process_nxml | def process_nxml(nxml_filename, pmid=None, extra_annotations=None,
cleanup=True, add_grounding=True):
"""Process an NXML file using the ISI reader
First converts NXML to plain text and preprocesses it, then runs the ISI
reader, and processes the output to extract INDRA Statements.
Parameters
----------
nxml_filename : str
nxml file to process
pmid : Optional[str]
pmid of this nxml file, to be added to the Evidence object of the
extracted INDRA statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted Statements
"""
if extra_annotations is None:
extra_annotations = {}
# Create a temporary directory to store the preprocessed input
pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
pp = IsiPreprocessor(pp_dir)
pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)
# Run the ISI reader and extract statements
ip = process_preprocessed(pp)
if add_grounding:
ip.add_grounding()
if cleanup:
# Remove temporary directory with processed input
shutil.rmtree(pp_dir)
else:
logger.info('Not cleaning up %s' % pp_dir)
return ip | python | def process_nxml(nxml_filename, pmid=None, extra_annotations=None,
cleanup=True, add_grounding=True):
"""Process an NXML file using the ISI reader
First converts NXML to plain text and preprocesses it, then runs the ISI
reader, and processes the output to extract INDRA Statements.
Parameters
----------
nxml_filename : str
nxml file to process
pmid : Optional[str]
pmid of this nxml file, to be added to the Evidence object of the
extracted INDRA statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted Statements
"""
if extra_annotations is None:
extra_annotations = {}
# Create a temporary directory to store the preprocessed input
pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
pp = IsiPreprocessor(pp_dir)
pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)
# Run the ISI reader and extract statements
ip = process_preprocessed(pp)
if add_grounding:
ip.add_grounding()
if cleanup:
# Remove temporary directory with processed input
shutil.rmtree(pp_dir)
else:
logger.info('Not cleaning up %s' % pp_dir)
return ip | [
"def",
"process_nxml",
"(",
"nxml_filename",
",",
"pmid",
"=",
"None",
",",
"extra_annotations",
"=",
"None",
",",
"cleanup",
"=",
"True",
",",
"add_grounding",
"=",
"True",
")",
":",
"if",
"extra_annotations",
"is",
"None",
":",
"extra_annotations",
"=",
"{",
"}",
"# Create a temporary directory to store the proprocessed input",
"pp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"'indra_isi_pp_output'",
")",
"pp",
"=",
"IsiPreprocessor",
"(",
"pp_dir",
")",
"extra_annotations",
"=",
"{",
"}",
"pp",
".",
"preprocess_nxml_file",
"(",
"nxml_filename",
",",
"pmid",
",",
"extra_annotations",
")",
"# Run the ISI reader and extract statements",
"ip",
"=",
"process_preprocessed",
"(",
"pp",
")",
"if",
"add_grounding",
":",
"ip",
".",
"add_grounding",
"(",
")",
"if",
"cleanup",
":",
"# Remove temporary directory with processed input",
"shutil",
".",
"rmtree",
"(",
"pp_dir",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Not cleaning up %s'",
"%",
"pp_dir",
")",
"return",
"ip"
]
| Process an NXML file using the ISI reader
First converts NXML to plain text and preprocesses it, then runs the ISI
reader, and processes the output to extract INDRA Statements.
Parameters
----------
nxml_filename : str
nxml file to process
pmid : Optional[str]
pmid of this nxml file, to be added to the Evidence object of the
extracted INDRA statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted Statements | [
"Process",
"an",
"NXML",
"file",
"using",
"the",
"ISI",
"reader"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/api.py#L58-L109 | train |
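A usage sketch for process_nxml, again assuming a working ISI setup; the file name and PMID are placeholders.

    from indra.sources.isi.api import process_nxml

    ip = process_nxml('paper.nxml', pmid='12345')
    print(len(ip.statements))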
sorgerlab/indra | indra/sources/isi/api.py | process_output_folder | def process_output_folder(folder_path, pmids=None, extra_annotations=None,
add_grounding=True):
"""Recursively extracts statements from all ISI output files in the
given directory and subdirectories.
Parameters
----------
folder_path : str
The directory to traverse
pmids : Optional[dict]
PMID mapping to be added to the Evidence of the extracted INDRA
Statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
"""
pmids = pmids if pmids is not None else {}
extra_annotations = extra_annotations if \
extra_annotations is not None else {}
ips = []
for entry in glob.glob(os.path.join(folder_path, '*.json')):
entry_key = os.path.splitext(os.path.basename(entry))[0]
# Extract the corresponding file id
pmid = pmids.get(entry_key)
extra_annotation = extra_annotations.get(entry_key)
ip = process_json_file(entry, pmid, extra_annotation, False)
ips.append(ip)
if len(ips) > 1:
for ip in ips[1:]:
ips[0].statements += ip.statements
if ips:
if add_grounding:
ips[0].add_grounding()
return ips[0]
else:
return None | python | def process_output_folder(folder_path, pmids=None, extra_annotations=None,
add_grounding=True):
"""Recursively extracts statements from all ISI output files in the
given directory and subdirectories.
Parameters
----------
folder_path : str
The directory to traverse
pmids : Optional[dict]
PMID mapping to be added to the Evidence of the extracted INDRA
Statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
"""
pmids = pmids if pmids is not None else {}
extra_annotations = extra_annotations if \
extra_annotations is not None else {}
ips = []
for entry in glob.glob(os.path.join(folder_path, '*.json')):
entry_key = os.path.splitext(os.path.basename(entry))[0]
# Extract the corresponding file id
pmid = pmids.get(entry_key)
extra_annotation = extra_annotations.get(entry_key)
ip = process_json_file(entry, pmid, extra_annotation, False)
ips.append(ip)
if len(ips) > 1:
for ip in ips[1:]:
ips[0].statements += ip.statements
if ips:
if add_grounding:
ips[0].add_grounding()
return ips[0]
else:
return None | [
"def",
"process_output_folder",
"(",
"folder_path",
",",
"pmids",
"=",
"None",
",",
"extra_annotations",
"=",
"None",
",",
"add_grounding",
"=",
"True",
")",
":",
"pmids",
"=",
"pmids",
"if",
"pmids",
"is",
"not",
"None",
"else",
"{",
"}",
"extra_annotations",
"=",
"extra_annotations",
"if",
"extra_annotations",
"is",
"not",
"None",
"else",
"{",
"}",
"ips",
"=",
"[",
"]",
"for",
"entry",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder_path",
",",
"'*.json'",
")",
")",
":",
"entry_key",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"entry",
")",
")",
"[",
"0",
"]",
"# Extract the corresponding file id",
"pmid",
"=",
"pmids",
".",
"get",
"(",
"entry_key",
")",
"extra_annotation",
"=",
"extra_annotations",
".",
"get",
"(",
"entry_key",
")",
"ip",
"=",
"process_json_file",
"(",
"entry",
",",
"pmid",
",",
"extra_annotation",
",",
"False",
")",
"ips",
".",
"append",
"(",
"ip",
")",
"if",
"len",
"(",
"ips",
")",
">",
"1",
":",
"for",
"ip",
"in",
"ips",
"[",
"1",
":",
"]",
":",
"ips",
"[",
"0",
"]",
".",
"statements",
"+=",
"ip",
".",
"statements",
"if",
"ips",
":",
"if",
"add_grounding",
":",
"ips",
"[",
"0",
"]",
".",
"add_grounding",
"(",
")",
"return",
"ips",
"[",
"0",
"]",
"else",
":",
"return",
"None"
]
| Recursively extracts statements from all ISI output files in the
given directory and subdirectories.
Parameters
----------
folder_path : str
The directory to traverse
pmids : Optional[dict]
PMID mapping to be added to the Evidence of the extracted INDRA
Statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped | [
"Recursively",
"extracts",
"statements",
"from",
"all",
"ISI",
"output",
"files",
"in",
"the",
"given",
"directory",
"and",
"subdirectories",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/api.py#L196-L237 | train |
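A sketch of calling process_output_folder on a directory of ISI JSON output. Note that the pmids mapping is keyed by output file basename without extension, matching the lookup in the code above; the paths and IDs here are hypothetical.

    from indra.sources.isi.api import process_output_folder

    pmids = {'doc1': '12345', 'doc2': '67890'}  # keys match doc1.json, doc2.json
    ip = process_output_folder('/path/to/isi_output', pmids=pmids)
    if ip is not None:
        print(len(ip.statements))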
sorgerlab/indra | indra/sources/isi/api.py | process_json_file | def process_json_file(file_path, pmid=None, extra_annotations=None,
add_grounding=True):
"""Extracts statements from the given ISI output file.
Parameters
----------
file_path : str
The ISI output file from which to extract statements
pmid : int
The PMID of the document being preprocessed, or None if not
specified
extra_annotations : dict
Extra annotations to be added to each statement from this document
(can be the empty dictionary)
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
"""
logger.info('Extracting from %s' % file_path)
with open(file_path, 'rb') as fh:
jd = json.load(fh)
ip = IsiProcessor(jd, pmid, extra_annotations)
ip.get_statements()
if add_grounding:
ip.add_grounding()
return ip | python | def process_json_file(file_path, pmid=None, extra_annotations=None,
add_grounding=True):
"""Extracts statements from the given ISI output file.
Parameters
----------
file_path : str
The ISI output file from which to extract statements
pmid : int
The PMID of the document being preprocessed, or None if not
specified
extra_annotations : dict
Extra annotations to be added to each statement from this document
(can be the empty dictionary)
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
"""
logger.info('Extracting from %s' % file_path)
with open(file_path, 'rb') as fh:
jd = json.load(fh)
ip = IsiProcessor(jd, pmid, extra_annotations)
ip.get_statements()
if add_grounding:
ip.add_grounding()
return ip | [
"def",
"process_json_file",
"(",
"file_path",
",",
"pmid",
"=",
"None",
",",
"extra_annotations",
"=",
"None",
",",
"add_grounding",
"=",
"True",
")",
":",
"logger",
".",
"info",
"(",
"'Extracting from %s'",
"%",
"file_path",
")",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"fh",
":",
"jd",
"=",
"json",
".",
"load",
"(",
"fh",
")",
"ip",
"=",
"IsiProcessor",
"(",
"jd",
",",
"pmid",
",",
"extra_annotations",
")",
"ip",
".",
"get_statements",
"(",
")",
"if",
"add_grounding",
":",
"ip",
".",
"add_grounding",
"(",
")",
"return",
"ip"
]
| Extracts statements from the given ISI output file.
Parameters
----------
file_path : str
The ISI output file from which to extract statements
pmid : int
The PMID of the document being preprocessed, or None if not
specified
extra_annotations : dict
Extra annotations to be added to each statement from this document
(can be the empty dictionary)
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped | [
"Extracts",
"statements",
"from",
"the",
"given",
"ISI",
"output",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/api.py#L240-L264 | train |
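A minimal sketch for reading a single ISI output file; the path and PMID are placeholders.

    from indra.sources.isi.api import process_json_file

    ip = process_json_file('doc1.json', pmid='12345')
    print(ip.statements)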
sorgerlab/indra | indra/sources/cwms/api.py | process_text | def process_text(text, save_xml='cwms_output.xml'):
"""Processes text using the CWMS web service.
Parameters
----------
text : str
Text to process
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
xml = client.send_query(text, 'cwmsreader')
# There are actually two EKBs in the xml document. Extract the second.
first_end = xml.find('</ekb>') # End of first EKB
second_start = xml.find('<ekb', first_end) # Start of second EKB
second_end = xml.find('</ekb>', second_start) # End of second EKB
second_ekb = xml[second_start:second_end+len('</ekb>')] # second EKB
if save_xml:
with open(save_xml, 'wb') as fh:
fh.write(second_ekb.encode('utf-8'))
return process_ekb(second_ekb) | python | def process_text(text, save_xml='cwms_output.xml'):
"""Processes text using the CWMS web service.
Parameters
----------
text : str
Text to process
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
xml = client.send_query(text, 'cwmsreader')
# There are actually two EKBs in the xml document. Extract the second.
first_end = xml.find('</ekb>') # End of first EKB
second_start = xml.find('<ekb', first_end) # Start of second EKB
second_end = xml.find('</ekb>', second_start) # End of second EKB
second_ekb = xml[second_start:second_end+len('</ekb>')] # second EKB
if save_xml:
with open(save_xml, 'wb') as fh:
fh.write(second_ekb.encode('utf-8'))
return process_ekb(second_ekb) | [
"def",
"process_text",
"(",
"text",
",",
"save_xml",
"=",
"'cwms_output.xml'",
")",
":",
"xml",
"=",
"client",
".",
"send_query",
"(",
"text",
",",
"'cwmsreader'",
")",
"# There are actually two EKBs in the xml document. Extract the second.",
"first_end",
"=",
"xml",
".",
"find",
"(",
"'</ekb>'",
")",
"# End of first EKB",
"second_start",
"=",
"xml",
".",
"find",
"(",
"'<ekb'",
",",
"first_end",
")",
"# Start of second EKB",
"second_end",
"=",
"xml",
".",
"find",
"(",
"'</ekb>'",
",",
"second_start",
")",
"# End of second EKB",
"second_ekb",
"=",
"xml",
"[",
"second_start",
":",
"second_end",
"+",
"len",
"(",
"'</ekb>'",
")",
"]",
"# second EKB",
"if",
"save_xml",
":",
"with",
"open",
"(",
"save_xml",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"second_ekb",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"process_ekb",
"(",
"second_ekb",
")"
]
| Processes text using the CWMS web service.
Parameters
----------
text : str
Text to process
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute. | [
"Processes",
"text",
"using",
"the",
"CWMS",
"web",
"service",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/cwms/api.py#L11-L35 | train |
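A usage sketch for the CWMS reader entry point above; it assumes the CWMS/TRIPS web service is reachable, and the input sentence is illustrative.

    from indra.sources.cwms.api import process_text

    cp = process_text('Heavy rainfall increases crop yields.',
                      save_xml='cwms_output.xml')
    for stmt in cp.statements:
        print(stmt)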
sorgerlab/indra | indra/sources/cwms/api.py | process_ekb_file | def process_ekb_file(fname):
"""Processes an EKB file produced by CWMS.
Parameters
----------
fname : str
Path to the EKB file to process.
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
# Process EKB XML file into statements
with open(fname, 'rb') as fh:
ekb_str = fh.read().decode('utf-8')
return process_ekb(ekb_str) | python | def process_ekb_file(fname):
"""Processes an EKB file produced by CWMS.
Parameters
----------
fname : str
Path to the EKB file to process.
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
# Process EKB XML file into statements
with open(fname, 'rb') as fh:
ekb_str = fh.read().decode('utf-8')
return process_ekb(ekb_str) | [
"def",
"process_ekb_file",
"(",
"fname",
")",
":",
"# Process EKB XML file into statements",
"with",
"open",
"(",
"fname",
",",
"'rb'",
")",
"as",
"fh",
":",
"ekb_str",
"=",
"fh",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"process_ekb",
"(",
"ekb_str",
")"
]
| Processes an EKB file produced by CWMS.
Parameters
----------
fname : str
Path to the EKB file to process.
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute. | [
"Processes",
"an",
"EKB",
"file",
"produced",
"by",
"CWMS",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/cwms/api.py#L38-L55 | train |
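If an EKB file has already been saved (for instance by the process_text call sketched above), it can be re-processed offline without calling the web service again; the file name is a placeholder.

    from indra.sources.cwms.api import process_ekb_file

    cp = process_ekb_file('cwms_output.xml')
    print(cp.statements)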
sorgerlab/indra | indra/assemblers/pysb/kappa_util.py | im_json_to_graph | def im_json_to_graph(im_json):
"""Return networkx graph from Kappy's influence map JSON.
Parameters
----------
im_json : dict
A JSON dict which contains an influence map generated by Kappy.
Returns
-------
graph : networkx.MultiDiGraph
A graph representing the influence map.
"""
imap_data = im_json['influence map']['map']
# Initialize the graph
graph = MultiDiGraph()
id_node_dict = {}
# Add each node to the graph
for node_dict in imap_data['nodes']:
# There is always just one entry here with the node type e.g. "rule"
# as key, and all the node data as the value
node_type, node = list(node_dict.items())[0]
# Add the node to the graph with its label and type
attrs = {'fillcolor': '#b7d2ff' if node_type == 'rule' else '#cdffc9',
'shape': 'box' if node_type == 'rule' else 'oval',
'style': 'filled'}
graph.add_node(node['label'], node_type=node_type, **attrs)
# Save the key of the node to refer to it later
new_key = '%s%s' % (node_type, node['id'])
id_node_dict[new_key] = node['label']
def add_edges(link_list, edge_sign):
attrs = {'sign': edge_sign,
'color': 'green' if edge_sign == 1 else 'red',
'arrowhead': 'normal' if edge_sign == 1 else 'tee'}
for link_dict in link_list:
source = link_dict['source']
for target_dict in link_dict['target map']:
target = target_dict['target']
src_id = '%s%s' % list(source.items())[0]
tgt_id = '%s%s' % list(target.items())[0]
graph.add_edge(id_node_dict[src_id], id_node_dict[tgt_id],
**attrs)
# Add all the edges from the positive and negative influences
add_edges(imap_data['wake-up map'], 1)
add_edges(imap_data['inhibition map'], -1)
return graph | python | def im_json_to_graph(im_json):
"""Return networkx graph from Kappy's influence map JSON.
Parameters
----------
im_json : dict
A JSON dict which contains an influence map generated by Kappy.
Returns
-------
graph : networkx.MultiDiGraph
A graph representing the influence map.
"""
imap_data = im_json['influence map']['map']
# Initialize the graph
graph = MultiDiGraph()
id_node_dict = {}
# Add each node to the graph
for node_dict in imap_data['nodes']:
# There is always just one entry here with the node type e.g. "rule"
# as key, and all the node data as the value
node_type, node = list(node_dict.items())[0]
# Add the node to the graph with its label and type
attrs = {'fillcolor': '#b7d2ff' if node_type == 'rule' else '#cdffc9',
'shape': 'box' if node_type == 'rule' else 'oval',
'style': 'filled'}
graph.add_node(node['label'], node_type=node_type, **attrs)
# Save the key of the node to refer to it later
new_key = '%s%s' % (node_type, node['id'])
id_node_dict[new_key] = node['label']
def add_edges(link_list, edge_sign):
attrs = {'sign': edge_sign,
'color': 'green' if edge_sign == 1 else 'red',
'arrowhead': 'normal' if edge_sign == 1 else 'tee'}
for link_dict in link_list:
source = link_dict['source']
for target_dict in link_dict['target map']:
target = target_dict['target']
src_id = '%s%s' % list(source.items())[0]
tgt_id = '%s%s' % list(target.items())[0]
graph.add_edge(id_node_dict[src_id], id_node_dict[tgt_id],
**attrs)
# Add all the edges from the positive and negative influences
add_edges(imap_data['wake-up map'], 1)
add_edges(imap_data['inhibition map'], -1)
return graph | [
"def",
"im_json_to_graph",
"(",
"im_json",
")",
":",
"imap_data",
"=",
"im_json",
"[",
"'influence map'",
"]",
"[",
"'map'",
"]",
"# Initialize the graph",
"graph",
"=",
"MultiDiGraph",
"(",
")",
"id_node_dict",
"=",
"{",
"}",
"# Add each node to the graph",
"for",
"node_dict",
"in",
"imap_data",
"[",
"'nodes'",
"]",
":",
"# There is always just one entry here with the node type e.g. \"rule\"",
"# as key, and all the node data as the value",
"node_type",
",",
"node",
"=",
"list",
"(",
"node_dict",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"# Add the node to the graph with its label and type",
"attrs",
"=",
"{",
"'fillcolor'",
":",
"'#b7d2ff'",
"if",
"node_type",
"==",
"'rule'",
"else",
"'#cdffc9'",
",",
"'shape'",
":",
"'box'",
"if",
"node_type",
"==",
"'rule'",
"else",
"'oval'",
",",
"'style'",
":",
"'filled'",
"}",
"graph",
".",
"add_node",
"(",
"node",
"[",
"'label'",
"]",
",",
"node_type",
"=",
"node_type",
",",
"*",
"*",
"attrs",
")",
"# Save the key of the node to refer to it later",
"new_key",
"=",
"'%s%s'",
"%",
"(",
"node_type",
",",
"node",
"[",
"'id'",
"]",
")",
"id_node_dict",
"[",
"new_key",
"]",
"=",
"node",
"[",
"'label'",
"]",
"def",
"add_edges",
"(",
"link_list",
",",
"edge_sign",
")",
":",
"attrs",
"=",
"{",
"'sign'",
":",
"edge_sign",
",",
"'color'",
":",
"'green'",
"if",
"edge_sign",
"==",
"1",
"else",
"'red'",
",",
"'arrowhead'",
":",
"'normal'",
"if",
"edge_sign",
"==",
"1",
"else",
"'tee'",
"}",
"for",
"link_dict",
"in",
"link_list",
":",
"source",
"=",
"link_dict",
"[",
"'source'",
"]",
"for",
"target_dict",
"in",
"link_dict",
"[",
"'target map'",
"]",
":",
"target",
"=",
"target_dict",
"[",
"'target'",
"]",
"src_id",
"=",
"'%s%s'",
"%",
"list",
"(",
"source",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"tgt_id",
"=",
"'%s%s'",
"%",
"list",
"(",
"target",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"graph",
".",
"add_edge",
"(",
"id_node_dict",
"[",
"src_id",
"]",
",",
"id_node_dict",
"[",
"tgt_id",
"]",
",",
"*",
"*",
"attrs",
")",
"# Add all the edges from the positive and negative influences",
"add_edges",
"(",
"imap_data",
"[",
"'wake-up map'",
"]",
",",
"1",
")",
"add_edges",
"(",
"imap_data",
"[",
"'inhibition map'",
"]",
",",
"-",
"1",
")",
"return",
"graph"
]
| Return networkx graph from Kappy's influence map JSON.
Parameters
----------
im_json : dict
A JSON dict which contains an influence map generated by Kappy.
Returns
-------
graph : networkx.MultiDiGraph
A graph representing the influence map. | [
"Return",
"networkx",
"graph",
"from",
"Kappy",
"s",
"influence",
"map",
"JSON",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/kappa_util.py#L7-L57 | train |
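A minimal, hand-built influence map JSON in the shape the function above expects (a list of nodes plus wake-up and inhibition maps). In practice this dict comes from Kappy; the non-rule node type name 'variable' and the labels below are illustrative assumptions.

    im_json = {'influence map': {'map': {
        'nodes': [{'rule': {'id': 1, 'label': 'A_binds_B'}},
                  {'variable': {'id': 2, 'label': 'AB_complex'}}],
        'wake-up map': [{'source': {'rule': 1},
                         'target map': [{'target': {'variable': 2}}]}],
        'inhibition map': []}}}
    graph = im_json_to_graph(im_json)
    print(graph.nodes(data=True), graph.edges())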
sorgerlab/indra | indra/assemblers/pysb/kappa_util.py | cm_json_to_graph | def cm_json_to_graph(im_json):
"""Return pygraphviz Agraph from Kappy's contact map JSON.
Parameters
----------
im_json : dict
A JSON dict which contains a contact map generated by Kappy.
Returns
-------
graph : pygraphviz.Agraph
A graph representing the contact map.
"""
cmap_data = im_json['contact map']['map']
# Initialize the graph
graph = AGraph()
# In this loop we add sites as nodes and clusters around sites to the
# graph. We also collect edges to be added between sites later.
edges = []
for node_idx, node in enumerate(cmap_data):
sites_in_node = []
for site_idx, site in enumerate(node['node_sites']):
# We map the unique ID of the site to its name
site_key = (node_idx, site_idx)
sites_in_node.append(site_key)
graph.add_node(site_key, label=site['site_name'], style='filled',
shape='ellipse')
# Each port link is an edge from the current site to the
# specified site
if not site['site_type'] or not site['site_type'][0] == 'port':
continue
for port_link in site['site_type'][1]['port_links']:
edge = (site_key, tuple(port_link))
edges.append(edge)
graph.add_subgraph(sites_in_node,
name='cluster_%s' % node['node_type'],
label=node['node_type'])
# Finally we add the edges between the sites
for source, target in edges:
graph.add_edge(source, target)
return graph | python | def cm_json_to_graph(im_json):
"""Return pygraphviz Agraph from Kappy's contact map JSON.
Parameters
----------
im_json : dict
A JSON dict which contains a contact map generated by Kappy.
Returns
-------
graph : pygraphviz.Agraph
A graph representing the contact map.
"""
cmap_data = im_json['contact map']['map']
# Initialize the graph
graph = AGraph()
# In this loop we add sites as nodes and clusters around sites to the
# graph. We also collect edges to be added between sites later.
edges = []
for node_idx, node in enumerate(cmap_data):
sites_in_node = []
for site_idx, site in enumerate(node['node_sites']):
# We map the unique ID of the site to its name
site_key = (node_idx, site_idx)
sites_in_node.append(site_key)
graph.add_node(site_key, label=site['site_name'], style='filled',
shape='ellipse')
# Each port link is an edge from the current site to the
# specified site
if not site['site_type'] or not site['site_type'][0] == 'port':
continue
for port_link in site['site_type'][1]['port_links']:
edge = (site_key, tuple(port_link))
edges.append(edge)
graph.add_subgraph(sites_in_node,
name='cluster_%s' % node['node_type'],
label=node['node_type'])
# Finally we add the edges between the sites
for source, target in edges:
graph.add_edge(source, target)
return graph | [
"def",
"cm_json_to_graph",
"(",
"im_json",
")",
":",
"cmap_data",
"=",
"im_json",
"[",
"'contact map'",
"]",
"[",
"'map'",
"]",
"# Initialize the graph",
"graph",
"=",
"AGraph",
"(",
")",
"# In this loop we add sites as nodes and clusters around sites to the",
"# graph. We also collect edges to be added between sites later.",
"edges",
"=",
"[",
"]",
"for",
"node_idx",
",",
"node",
"in",
"enumerate",
"(",
"cmap_data",
")",
":",
"sites_in_node",
"=",
"[",
"]",
"for",
"site_idx",
",",
"site",
"in",
"enumerate",
"(",
"node",
"[",
"'node_sites'",
"]",
")",
":",
"# We map the unique ID of the site to its name",
"site_key",
"=",
"(",
"node_idx",
",",
"site_idx",
")",
"sites_in_node",
".",
"append",
"(",
"site_key",
")",
"graph",
".",
"add_node",
"(",
"site_key",
",",
"label",
"=",
"site",
"[",
"'site_name'",
"]",
",",
"style",
"=",
"'filled'",
",",
"shape",
"=",
"'ellipse'",
")",
"# Each port link is an edge from the current site to the",
"# specified site",
"if",
"not",
"site",
"[",
"'site_type'",
"]",
"or",
"not",
"site",
"[",
"'site_type'",
"]",
"[",
"0",
"]",
"==",
"'port'",
":",
"continue",
"for",
"port_link",
"in",
"site",
"[",
"'site_type'",
"]",
"[",
"1",
"]",
"[",
"'port_links'",
"]",
":",
"edge",
"=",
"(",
"site_key",
",",
"tuple",
"(",
"port_link",
")",
")",
"edges",
".",
"append",
"(",
"edge",
")",
"graph",
".",
"add_subgraph",
"(",
"sites_in_node",
",",
"name",
"=",
"'cluster_%s'",
"%",
"node",
"[",
"'node_type'",
"]",
",",
"label",
"=",
"node",
"[",
"'node_type'",
"]",
")",
"# Finally we add the edges between the sites",
"for",
"source",
",",
"target",
"in",
"edges",
":",
"graph",
".",
"add_edge",
"(",
"source",
",",
"target",
")",
"return",
"graph"
]
| Return pygraphviz Agraph from Kappy's contact map JSON.
Parameters
----------
im_json : dict
A JSON dict which contains a contact map generated by Kappy.
Returns
-------
graph : pygraphviz.Agraph
A graph representing the contact map. | [
"Return",
"pygraphviz",
"Agraph",
"from",
"Kappy",
"s",
"contact",
"map",
"JSON",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/kappa_util.py#L60-L104 | train |
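Similarly, a hand-built contact map dict in the shape cm_json_to_graph reads: a list of agent nodes, each with sites whose port links point at (node index, site index) pairs. The agent and site names are illustrative, and pygraphviz must be installed for the final draw call.

    cm_json = {'contact map': {'map': [
        {'node_type': 'A', 'node_sites': [
            {'site_name': 'b', 'site_type': ['port', {'port_links': [[1, 0]]}]}]},
        {'node_type': 'B', 'node_sites': [
            {'site_name': 'a', 'site_type': ['port', {'port_links': []}]}]}]}}
    graph = cm_json_to_graph(cm_json)
    graph.draw('contact_map.png', prog='dot')  # hypothetical output file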
sorgerlab/indra | indra/tools/machine/gmail_client.py | fetch_email | def fetch_email(M, msg_id):
"""Returns the given email message as a unicode string."""
res, data = M.fetch(msg_id, '(RFC822)')
if res == 'OK':
# Data here is a list with 1 element containing a tuple
# whose 2nd element is a long string containing the email
# The content is a bytes that must be decoded
raw_msg_txt = data[0][1]
# In Python3, we call message_from_bytes, but this function doesn't
# exist in Python 2.
try:
msg = email.message_from_bytes(raw_msg_txt)
except AttributeError:
msg = email.message_from_string(raw_msg_txt)
# At this point, we have a message containing bytes (not unicode)
# fields that will still need to be decoded, ideally according to the
# character set specified in the message.
return msg
else:
return None | python | def fetch_email(M, msg_id):
"""Returns the given email message as a unicode string."""
res, data = M.fetch(msg_id, '(RFC822)')
if res == 'OK':
# Data here is a list with 1 element containing a tuple
# whose 2nd element is a long string containing the email
# The content is a bytes that must be decoded
raw_msg_txt = data[0][1]
# In Python3, we call message_from_bytes, but this function doesn't
# exist in Python 2.
try:
msg = email.message_from_bytes(raw_msg_txt)
except AttributeError:
msg = email.message_from_string(raw_msg_txt)
# At this point, we have a message containing bytes (not unicode)
# fields that will still need to be decoded, ideally according to the
# character set specified in the message.
return msg
else:
return None | [
"def",
"fetch_email",
"(",
"M",
",",
"msg_id",
")",
":",
"res",
",",
"data",
"=",
"M",
".",
"fetch",
"(",
"msg_id",
",",
"'(RFC822)'",
")",
"if",
"res",
"==",
"'OK'",
":",
"# Data here is a list with 1 element containing a tuple",
"# whose 2nd element is a long string containing the email",
"# The content is a bytes that must be decoded",
"raw_msg_txt",
"=",
"data",
"[",
"0",
"]",
"[",
"1",
"]",
"# In Python3, we call message_from_bytes, but this function doesn't",
"# exist in Python 2.",
"try",
":",
"msg",
"=",
"email",
".",
"message_from_bytes",
"(",
"raw_msg_txt",
")",
"except",
"AttributeError",
":",
"msg",
"=",
"email",
".",
"message_from_string",
"(",
"raw_msg_txt",
")",
"# At this point, we have a message containing bytes (not unicode)",
"# fields that will still need to be decoded, ideally according to the",
"# character set specified in the message.",
"return",
"msg",
"else",
":",
"return",
"None"
]
| Returns the given email message as a unicode string. | [
"Returns",
"the",
"given",
"email",
"message",
"as",
"a",
"unicode",
"string",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/machine/gmail_client.py#L28-L47 | train |
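A sketch of fetching a message with the standard-library imaplib and handing it to fetch_email; the server, credentials and mailbox are placeholders.

    import imaplib

    M = imaplib.IMAP4_SSL('imap.example.com')      # hypothetical server
    M.login('user@example.com', 'app-password')    # hypothetical credentials
    M.select('INBOX')
    res, data = M.search(None, 'ALL')
    first_id = data[0].split()[0]
    msg = fetch_email(M, first_id)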
sorgerlab/indra | indra/tools/machine/gmail_client.py | get_headers | def get_headers(msg):
"""Takes email.message.Message object initialized from unicode string,
returns dict with header fields."""
headers = {}
for k in msg.keys():
# decode_header decodes header but does not convert charset, so these
# may still be bytes, even in Python 3. However, if it's ASCII
# only (hence unambiguous encoding), the header fields come back
# as str (unicode) in Python 3.
(header_txt, charset) = email.header.decode_header(msg[k])[0]
if charset is not None:
header_txt = header_txt.decode(charset)
headers[k] = header_txt
return headers | python | def get_headers(msg):
"""Takes email.message.Message object initialized from unicode string,
returns dict with header fields."""
headers = {}
for k in msg.keys():
# decode_header decodes header but does not convert charset, so these
# may still be bytes, even in Python 3. However, if it's ASCII
# only (hence unambiguous encoding), the header fields come back
# as str (unicode) in Python 3.
(header_txt, charset) = email.header.decode_header(msg[k])[0]
if charset is not None:
header_txt = header_txt.decode(charset)
headers[k] = header_txt
return headers | [
"def",
"get_headers",
"(",
"msg",
")",
":",
"headers",
"=",
"{",
"}",
"for",
"k",
"in",
"msg",
".",
"keys",
"(",
")",
":",
"# decode_header decodes header but does not convert charset, so these",
"# may still be bytes, even in Python 3. However, if it's ASCII",
"# only (hence unambiguous encoding), the header fields come back",
"# as str (unicode) in Python 3.",
"(",
"header_txt",
",",
"charset",
")",
"=",
"email",
".",
"header",
".",
"decode_header",
"(",
"msg",
"[",
"k",
"]",
")",
"[",
"0",
"]",
"if",
"charset",
"is",
"not",
"None",
":",
"header_txt",
"=",
"header_txt",
".",
"decode",
"(",
"charset",
")",
"headers",
"[",
"k",
"]",
"=",
"header_txt",
"return",
"headers"
]
| Takes email.message.Message object initialized from unicode string,
returns dict with header fields. | [
"Takes",
"email",
".",
"message",
".",
"Message",
"object",
"initialized",
"from",
"unicode",
"string",
"returns",
"dict",
"with",
"header",
"fields",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/machine/gmail_client.py#L49-L62 | train |
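Continuing the previous sketch, the decoded header fields can then be pulled out of the Message object returned by fetch_email:

    headers = get_headers(msg)
    print(headers.get('Subject'))
    print(headers.get('From'))
    print(headers.get('Date'))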
sorgerlab/indra | indra/config.py | populate_config_dict | def populate_config_dict(config_path):
"""Load the configuration file into the config_file dictionary
A ConfigParser-style configuration file can have multiple sections, but
we ignore the section distinction and load the key/value pairs from all
sections into a single key/value list.
"""
try:
config_dict = {}
parser = RawConfigParser()
parser.optionxform = lambda x: x
parser.read(config_path)
sections = parser.sections()
for section in sections:
options = parser.options(section)
for option in options:
config_dict[option] = str(parser.get(section, option))
except Exception as e:
logger.warning("Could not load configuration file due to exception. "
"Only environment variable equivalents will be used.")
return None
for key in config_dict.keys():
if config_dict[key] == '':
config_dict[key] = None
elif isinstance(config_dict[key], str):
config_dict[key] = os.path.expanduser(config_dict[key])
return config_dict | python | def populate_config_dict(config_path):
"""Load the configuration file into the config_file dictionary
A ConfigParser-style configuration file can have multiple sections, but
we ignore the section distinction and load the key/value pairs from all
sections into a single key/value list.
"""
try:
config_dict = {}
parser = RawConfigParser()
parser.optionxform = lambda x: x
parser.read(config_path)
sections = parser.sections()
for section in sections:
options = parser.options(section)
for option in options:
config_dict[option] = str(parser.get(section, option))
except Exception as e:
logger.warning("Could not load configuration file due to exception. "
"Only environment variable equivalents will be used.")
return None
for key in config_dict.keys():
if config_dict[key] == '':
config_dict[key] = None
elif isinstance(config_dict[key], str):
config_dict[key] = os.path.expanduser(config_dict[key])
return config_dict | [
"def",
"populate_config_dict",
"(",
"config_path",
")",
":",
"try",
":",
"config_dict",
"=",
"{",
"}",
"parser",
"=",
"RawConfigParser",
"(",
")",
"parser",
".",
"optionxform",
"=",
"lambda",
"x",
":",
"x",
"parser",
".",
"read",
"(",
"config_path",
")",
"sections",
"=",
"parser",
".",
"sections",
"(",
")",
"for",
"section",
"in",
"sections",
":",
"options",
"=",
"parser",
".",
"options",
"(",
"section",
")",
"for",
"option",
"in",
"options",
":",
"config_dict",
"[",
"option",
"]",
"=",
"str",
"(",
"parser",
".",
"get",
"(",
"section",
",",
"option",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"\"Could not load configuration file due to exception. \"",
"\"Only environment variable equivalents will be used.\"",
")",
"return",
"None",
"for",
"key",
"in",
"config_dict",
".",
"keys",
"(",
")",
":",
"if",
"config_dict",
"[",
"key",
"]",
"==",
"''",
":",
"config_dict",
"[",
"key",
"]",
"=",
"None",
"elif",
"isinstance",
"(",
"config_dict",
"[",
"key",
"]",
",",
"str",
")",
":",
"config_dict",
"[",
"key",
"]",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"config_dict",
"[",
"key",
"]",
")",
"return",
"config_dict"
]
| Load the configuration file into the config_file dictionary
A ConfigParser-style configuration file can have multiple sections, but
we ignore the section distinction and load the key/value pairs from all
sections into a single key/value list. | [
"Load",
"the",
"configuration",
"file",
"into",
"the",
"config_file",
"dictionary"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/config.py#L31-L58 | train |
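A sketch of the kind of ConfigParser-style file populate_config_dict reads; the section name is ignored and only the key/value pairs are kept, with empty values mapped to None and paths expanded. The file contents, keys and path below are made up.

    # Hypothetical contents of a config file at /home/user/.config/indra/config.ini:
    #
    #   [indra]
    #   MY_SERVICE_KEY = abc123
    #   CACHE_DIR = ~/indra_cache
    #
    config = populate_config_dict('/home/user/.config/indra/config.ini')
    # -> {'MY_SERVICE_KEY': 'abc123', 'CACHE_DIR': '/home/user/indra_cache'}
    #    (for this hypothetical user)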
sorgerlab/indra | indra/config.py | get_config | def get_config(key, failure_ok=True):
"""Get value by key from config file or environment.
Returns the configuration value, first checking the environment
variables and then, if it's not present there, checking the configuration
file.
Parameters
----------
key : str
The key for the configuration value to fetch
failure_ok : Optional[bool]
If False and the configuration is missing, an IndraConfigError is
raised. If True, None is returned and no error is raised in case
of a missing configuration. Default: True
Returns
-------
value : str or None
The configuration value or None if the configuration value doesn't
exist and failure_ok is set to True.
"""
err_msg = "Key %s not in environment or config file." % key
if key in os.environ:
return os.environ[key]
elif key in CONFIG_DICT:
val = CONFIG_DICT[key]
# We interpret an empty value in the config file as a failure
if val is None and not failure_ok:
msg = 'Key %s is set to an empty value in config file.' % key
raise IndraConfigError(msg)
else:
return val
elif not failure_ok:
raise IndraConfigError(err_msg)
else:
logger.warning(err_msg)
return None | python | def get_config(key, failure_ok=True):
"""Get value by key from config file or environment.
Returns the configuration value, first checking the environment
variables and then, if it's not present there, checking the configuration
file.
Parameters
----------
key : str
The key for the configuration value to fetch
failure_ok : Optional[bool]
If False and the configuration is missing, an IndraConfigError is
raised. If True, None is returned and no error is raised in case
of a missing configuration. Default: True
Returns
-------
value : str or None
The configuration value or None if the configuration value doesn't
exist and failure_ok is set to True.
"""
err_msg = "Key %s not in environment or config file." % key
if key in os.environ:
return os.environ[key]
elif key in CONFIG_DICT:
val = CONFIG_DICT[key]
# We interpret an empty value in the config file as a failure
if val is None and not failure_ok:
msg = 'Key %s is set to an empty value in config file.' % key
raise IndraConfigError(msg)
else:
return val
elif not failure_ok:
raise IndraConfigError(err_msg)
else:
logger.warning(err_msg)
return None | [
"def",
"get_config",
"(",
"key",
",",
"failure_ok",
"=",
"True",
")",
":",
"err_msg",
"=",
"\"Key %s not in environment or config file.\"",
"%",
"key",
"if",
"key",
"in",
"os",
".",
"environ",
":",
"return",
"os",
".",
"environ",
"[",
"key",
"]",
"elif",
"key",
"in",
"CONFIG_DICT",
":",
"val",
"=",
"CONFIG_DICT",
"[",
"key",
"]",
"# We interpret an empty value in the config file as a failure",
"if",
"val",
"is",
"None",
"and",
"not",
"failure_ok",
":",
"msg",
"=",
"'Key %s is set to an empty value in config file.'",
"%",
"key",
"raise",
"IndraConfigError",
"(",
"msg",
")",
"else",
":",
"return",
"val",
"elif",
"not",
"failure_ok",
":",
"raise",
"IndraConfigError",
"(",
"err_msg",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"err_msg",
")",
"return",
"None"
]
| Get value by key from config file or environment.
Returns the configuration value, first checking the environment
variables and then, if it's not present there, checking the configuration
file.
Parameters
----------
key : str
The key for the configuration value to fetch
failure_ok : Optional[bool]
If False and the configuration is missing, an IndraConfigError is
raised. If True, None is returned and no error is raised in case
of a missing configuration. Default: True
Returns
-------
value : str or None
The configuration value or None if the configuration value doesn't
exist and failure_ok is set to True. | [
"Get",
"value",
"by",
"key",
"from",
"config",
"file",
"or",
"environment",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/config.py#L85-L122 | train |
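A sketch of how the two failure modes of get_config differ; the key name is hypothetical.

    # Missing key, failure_ok=True (default): logs a warning and returns None.
    value = get_config('MY_SERVICE_KEY')

    # Missing key, failure_ok=False: raises IndraConfigError instead.
    try:
        value = get_config('MY_SERVICE_KEY', failure_ok=False)
    except IndraConfigError:
        value = 'fallback-value'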
sorgerlab/indra | indra/util/__init__.py | read_unicode_csv_fileobj | def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8', skiprows=0):
"""fileobj can be a StringIO in Py3, but should be a BytesIO in Py2."""
# Python 3 version
if sys.version_info[0] >= 3:
# Next, get the csv reader, with unicode delimiter and quotechar
csv_reader = csv.reader(fileobj, delimiter=delimiter,
quotechar=quotechar, quoting=quoting,
lineterminator=lineterminator)
# Now, return the (already decoded) unicode csv_reader generator
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield row
# Python 2 version
else:
# Next, get the csv reader, passing delimiter and quotechar as
# bytestrings rather than unicode
csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding),
quotechar=quotechar.encode(encoding),
quoting=quoting, lineterminator=lineterminator)
# Iterate over the file and decode each string into unicode
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield [cell.decode(encoding) for cell in row] | python | def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8', skiprows=0):
"""fileobj can be a StringIO in Py3, but should be a BytesIO in Py2."""
# Python 3 version
if sys.version_info[0] >= 3:
# Next, get the csv reader, with unicode delimiter and quotechar
csv_reader = csv.reader(fileobj, delimiter=delimiter,
quotechar=quotechar, quoting=quoting,
lineterminator=lineterminator)
# Now, return the (already decoded) unicode csv_reader generator
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield row
# Python 2 version
else:
# Next, get the csv reader, passing delimiter and quotechar as
# bytestrings rather than unicode
csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding),
quotechar=quotechar.encode(encoding),
quoting=quoting, lineterminator=lineterminator)
# Iterate over the file and decode each string into unicode
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield [cell.decode(encoding) for cell in row] | [
"def",
"read_unicode_csv_fileobj",
"(",
"fileobj",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
",",
"lineterminator",
"=",
"'\\n'",
",",
"encoding",
"=",
"'utf-8'",
",",
"skiprows",
"=",
"0",
")",
":",
"# Python 3 version",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"# Next, get the csv reader, with unicode delimiter and quotechar",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"fileobj",
",",
"delimiter",
"=",
"delimiter",
",",
"quotechar",
"=",
"quotechar",
",",
"quoting",
"=",
"quoting",
",",
"lineterminator",
"=",
"lineterminator",
")",
"# Now, return the (already decoded) unicode csv_reader generator",
"# Skip rows if necessary",
"for",
"skip_ix",
"in",
"range",
"(",
"skiprows",
")",
":",
"next",
"(",
"csv_reader",
")",
"for",
"row",
"in",
"csv_reader",
":",
"yield",
"row",
"# Python 2 version",
"else",
":",
"# Next, get the csv reader, passing delimiter and quotechar as",
"# bytestrings rather than unicode",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"fileobj",
",",
"delimiter",
"=",
"delimiter",
".",
"encode",
"(",
"encoding",
")",
",",
"quotechar",
"=",
"quotechar",
".",
"encode",
"(",
"encoding",
")",
",",
"quoting",
"=",
"quoting",
",",
"lineterminator",
"=",
"lineterminator",
")",
"# Iterate over the file and decode each string into unicode",
"# Skip rows if necessary",
"for",
"skip_ix",
"in",
"range",
"(",
"skiprows",
")",
":",
"next",
"(",
"csv_reader",
")",
"for",
"row",
"in",
"csv_reader",
":",
"yield",
"[",
"cell",
".",
"decode",
"(",
"encoding",
")",
"for",
"cell",
"in",
"row",
"]"
]
| fileobj can be a StringIO in Py3, but should be a BytesIO in Py2. | [
"fileobj",
"can",
"be",
"a",
"StringIO",
"in",
"Py3",
"but",
"should",
"be",
"a",
"BytesIO",
"in",
"Py2",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/__init__.py#L113-L141 | train |
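A short, hedged example of calling read_unicode_csv_fileobj on Python 3 with an in-memory StringIO, as the docstring above suggests; the import from indra.util is assumed from the record's path.

from io import StringIO
from indra.util import read_unicode_csv_fileobj

buf = StringIO('gene,score\nEGFR,1.5\nKRAS,0.7\n')
# skiprows=1 drops the header row; each yielded row is a list of strings
for row in read_unicode_csv_fileobj(buf, skiprows=1):
    print(row)  # ['EGFR', '1.5'] then ['KRAS', '0.7']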
sorgerlab/indra | indra/util/__init__.py | fast_deepcopy | def fast_deepcopy(obj):
"""This is a faster implementation of deepcopy via pickle.
It is meant primarily for sets of Statements with complex hierarchies
but can be used for any object.
"""
with BytesIO() as buf:
pickle.dump(obj, buf)
buf.seek(0)
obj_new = pickle.load(buf)
return obj_new | python | def fast_deepcopy(obj):
"""This is a faster implementation of deepcopy via pickle.
It is meant primarily for sets of Statements with complex hierarchies
but can be used for any object.
"""
with BytesIO() as buf:
pickle.dump(obj, buf)
buf.seek(0)
obj_new = pickle.load(buf)
return obj_new | [
"def",
"fast_deepcopy",
"(",
"obj",
")",
":",
"with",
"BytesIO",
"(",
")",
"as",
"buf",
":",
"pickle",
".",
"dump",
"(",
"obj",
",",
"buf",
")",
"buf",
".",
"seek",
"(",
"0",
")",
"obj_new",
"=",
"pickle",
".",
"load",
"(",
"buf",
")",
"return",
"obj_new"
]
| This is a faster implementation of deepcopy via pickle.
It is meant primarily for sets of Statements with complex hierarchies
but can be used for any object. | [
"This",
"is",
"a",
"faster",
"implementation",
"of",
"deepcopy",
"via",
"pickle",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/__init__.py#L198-L208 | train |
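A quick illustration of fast_deepcopy from the record above, assuming it is importable from indra.util; the example data is arbitrary and only shows that the copy is independent of the original.

from indra.util import fast_deepcopy

original = {'kinases': ['MAPK1', 'MAPK3'], 'meta': {'count': 2}}
copy = fast_deepcopy(original)
copy['meta']['count'] = 99
assert original['meta']['count'] == 2  # mutating the copy leaves the original intact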
sorgerlab/indra | indra/util/__init__.py | batch_iter | def batch_iter(iterator, batch_size, return_func=None, padding=None):
"""Break an iterable into batches of size batch_size
Note that `padding` should be set to something (anything) which is NOT a
valid member of the iterator. For example, None works for [0,1,2,...10], but
not for ['a', None, 'c', 'd'].
Parameters
----------
iterator : iterable
A python object which is iterable.
batch_size : int
The size of batches you wish to produce from the iterator.
return_func : executable or None
Pass a function that takes a generator and returns an iterable (e.g.
`list` or `set`). If None, a generator will be returned.
padding : anything
This is used internally to ensure that the remainder of the list is
included. This MUST NOT be a valid element of the iterator.
Returns
-------
    An iterator over lists or generators, depending on `return_func`.
"""
for batch in zip_longest(*[iter(iterator)]*batch_size, fillvalue=padding):
gen = (thing for thing in batch if thing is not padding)
if return_func is None:
yield gen
else:
yield return_func(gen) | python | def batch_iter(iterator, batch_size, return_func=None, padding=None):
"""Break an iterable into batches of size batch_size
Note that `padding` should be set to something (anything) which is NOT a
valid member of the iterator. For example, None works for [0,1,2,...10], but
not for ['a', None, 'c', 'd'].
Parameters
----------
iterator : iterable
A python object which is iterable.
batch_size : int
The size of batches you wish to produce from the iterator.
return_func : executable or None
Pass a function that takes a generator and returns an iterable (e.g.
`list` or `set`). If None, a generator will be returned.
padding : anything
This is used internally to ensure that the remainder of the list is
included. This MUST NOT be a valid element of the iterator.
Returns
-------
    An iterator over lists or generators, depending on `return_func`.
"""
for batch in zip_longest(*[iter(iterator)]*batch_size, fillvalue=padding):
gen = (thing for thing in batch if thing is not padding)
if return_func is None:
yield gen
else:
yield return_func(gen) | [
"def",
"batch_iter",
"(",
"iterator",
",",
"batch_size",
",",
"return_func",
"=",
"None",
",",
"padding",
"=",
"None",
")",
":",
"for",
"batch",
"in",
"zip_longest",
"(",
"*",
"[",
"iter",
"(",
"iterator",
")",
"]",
"*",
"batch_size",
",",
"fillvalue",
"=",
"padding",
")",
":",
"gen",
"=",
"(",
"thing",
"for",
"thing",
"in",
"batch",
"if",
"thing",
"is",
"not",
"padding",
")",
"if",
"return_func",
"is",
"None",
":",
"yield",
"gen",
"else",
":",
"yield",
"return_func",
"(",
"gen",
")"
]
| Break an iterable into batches of size batch_size
Note that `padding` should be set to something (anything) which is NOT a
valid member of the iterator. For example, None works for [0,1,2,...10], but
not for ['a', None, 'c', 'd'].
Parameters
----------
iterator : iterable
A python object which is iterable.
batch_size : int
The size of batches you wish to produce from the iterator.
return_func : executable or None
Pass a function that takes a generator and returns an iterable (e.g.
`list` or `set`). If None, a generator will be returned.
padding : anything
This is used internally to ensure that the remainder of the list is
included. This MUST NOT be a valid element of the iterator.
Returns
-------
    An iterator over lists or generators, depending on `return_func`. | [
"Break",
"an",
"iterable",
"into",
"batches",
"of",
"size",
"batch_size"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/__init__.py#L227-L256 | train |
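A small sketch of batch_iter as documented above (import path assumed from indra/util/__init__.py); return_func=list materializes each batch, otherwise a generator is yielded per batch.

from indra.util import batch_iter

items = list(range(10))
for batch in batch_iter(items, 4, return_func=list):
    print(batch)
# prints [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]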
sorgerlab/indra | indra/tools/reading/run_drum_reading.py | read_pmid_sentences | def read_pmid_sentences(pmid_sentences, **drum_args):
"""Read sentences from a PMID-keyed dictonary and return all Statements
Parameters
----------
pmid_sentences : dict[str, list[str]]
        A dictionary where each key is a PMID pointing to a list of sentences
to be read.
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are `host` and `port`. If `run_drum` is specified
as True, this process will internally run the DRUM reading system
as a subprocess. Otherwise, DRUM is expected to be running
independently.
Returns
-------
    all_statements : dict[str, list[indra.statements.Statement]]
        A dict of INDRA Statements keyed by PMID, resulting from the reading
"""
def _set_pmid(statements, pmid):
for stmt in statements:
for evidence in stmt.evidence:
evidence.pmid = pmid
# See if we need to start DRUM as a subprocess
run_drum = drum_args.get('run_drum', False)
drum_process = None
all_statements = {}
# Iterate over all the keys and sentences to read
for pmid, sentences in pmid_sentences.items():
logger.info('================================')
logger.info('Processing %d sentences for %s' % (len(sentences), pmid))
ts = time.time()
# Make a DrumReader instance
drum_args['name'] = 'DrumReader%s' % pmid
dr = DrumReader(**drum_args)
time.sleep(3)
# If there is no DRUM process set yet, we get the one that was
# just started by the DrumReader
if run_drum and drum_process is None:
drum_args.pop('run_drum', None)
drum_process = dr.drum_system
            # By setting this, we ensure that the reference to the
# process is passed in to all future DrumReaders
drum_args['drum_system'] = drum_process
# Now read each sentence for this key
for sentence in sentences:
dr.read_text(sentence)
# Start receiving results and exit when done
try:
dr.start()
except SystemExit:
pass
statements = []
# Process all the extractions into INDRA Statements
for extraction in dr.extractions:
# Sometimes we get nothing back
if not extraction:
continue
tp = process_xml(extraction)
statements += tp.statements
# Set the PMIDs for the evidences of the Statements
_set_pmid(statements, pmid)
te = time.time()
logger.info('Reading took %d seconds and produced %d Statements.' %
(te-ts, len(statements)))
all_statements[pmid] = statements
# If we were running a DRUM process, we should kill it
if drum_process and dr.drum_system:
dr._kill_drum()
return all_statements | python | def read_pmid_sentences(pmid_sentences, **drum_args):
"""Read sentences from a PMID-keyed dictonary and return all Statements
Parameters
----------
pmid_sentences : dict[str, list[str]]
        A dictionary where each key is a PMID pointing to a list of sentences
to be read.
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are `host` and `port`. If `run_drum` is specified
as True, this process will internally run the DRUM reading system
as a subprocess. Otherwise, DRUM is expected to be running
independently.
Returns
-------
    all_statements : dict[str, list[indra.statements.Statement]]
        A dict of INDRA Statements keyed by PMID, resulting from the reading
"""
def _set_pmid(statements, pmid):
for stmt in statements:
for evidence in stmt.evidence:
evidence.pmid = pmid
# See if we need to start DRUM as a subprocess
run_drum = drum_args.get('run_drum', False)
drum_process = None
all_statements = {}
# Iterate over all the keys and sentences to read
for pmid, sentences in pmid_sentences.items():
logger.info('================================')
logger.info('Processing %d sentences for %s' % (len(sentences), pmid))
ts = time.time()
# Make a DrumReader instance
drum_args['name'] = 'DrumReader%s' % pmid
dr = DrumReader(**drum_args)
time.sleep(3)
# If there is no DRUM process set yet, we get the one that was
# just started by the DrumReader
if run_drum and drum_process is None:
drum_args.pop('run_drum', None)
drum_process = dr.drum_system
            # By setting this, we ensure that the reference to the
# process is passed in to all future DrumReaders
drum_args['drum_system'] = drum_process
# Now read each sentence for this key
for sentence in sentences:
dr.read_text(sentence)
# Start receiving results and exit when done
try:
dr.start()
except SystemExit:
pass
statements = []
# Process all the extractions into INDRA Statements
for extraction in dr.extractions:
# Sometimes we get nothing back
if not extraction:
continue
tp = process_xml(extraction)
statements += tp.statements
# Set the PMIDs for the evidences of the Statements
_set_pmid(statements, pmid)
te = time.time()
logger.info('Reading took %d seconds and produced %d Statements.' %
(te-ts, len(statements)))
all_statements[pmid] = statements
# If we were running a DRUM process, we should kill it
if drum_process and dr.drum_system:
dr._kill_drum()
return all_statements | [
"def",
"read_pmid_sentences",
"(",
"pmid_sentences",
",",
"*",
"*",
"drum_args",
")",
":",
"def",
"_set_pmid",
"(",
"statements",
",",
"pmid",
")",
":",
"for",
"stmt",
"in",
"statements",
":",
"for",
"evidence",
"in",
"stmt",
".",
"evidence",
":",
"evidence",
".",
"pmid",
"=",
"pmid",
"# See if we need to start DRUM as a subprocess",
"run_drum",
"=",
"drum_args",
".",
"get",
"(",
"'run_drum'",
",",
"False",
")",
"drum_process",
"=",
"None",
"all_statements",
"=",
"{",
"}",
"# Iterate over all the keys and sentences to read",
"for",
"pmid",
",",
"sentences",
"in",
"pmid_sentences",
".",
"items",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'================================'",
")",
"logger",
".",
"info",
"(",
"'Processing %d sentences for %s'",
"%",
"(",
"len",
"(",
"sentences",
")",
",",
"pmid",
")",
")",
"ts",
"=",
"time",
".",
"time",
"(",
")",
"# Make a DrumReader instance",
"drum_args",
"[",
"'name'",
"]",
"=",
"'DrumReader%s'",
"%",
"pmid",
"dr",
"=",
"DrumReader",
"(",
"*",
"*",
"drum_args",
")",
"time",
".",
"sleep",
"(",
"3",
")",
"# If there is no DRUM process set yet, we get the one that was",
"# just started by the DrumReader",
"if",
"run_drum",
"and",
"drum_process",
"is",
"None",
":",
"drum_args",
".",
"pop",
"(",
"'run_drum'",
",",
"None",
")",
"drum_process",
"=",
"dr",
".",
"drum_system",
"# By setting this, we ensuer that the reference to the",
"# process is passed in to all future DrumReaders",
"drum_args",
"[",
"'drum_system'",
"]",
"=",
"drum_process",
"# Now read each sentence for this key",
"for",
"sentence",
"in",
"sentences",
":",
"dr",
".",
"read_text",
"(",
"sentence",
")",
"# Start receiving results and exit when done",
"try",
":",
"dr",
".",
"start",
"(",
")",
"except",
"SystemExit",
":",
"pass",
"statements",
"=",
"[",
"]",
"# Process all the extractions into INDRA Statements",
"for",
"extraction",
"in",
"dr",
".",
"extractions",
":",
"# Sometimes we get nothing back",
"if",
"not",
"extraction",
":",
"continue",
"tp",
"=",
"process_xml",
"(",
"extraction",
")",
"statements",
"+=",
"tp",
".",
"statements",
"# Set the PMIDs for the evidences of the Statements",
"_set_pmid",
"(",
"statements",
",",
"pmid",
")",
"te",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"info",
"(",
"'Reading took %d seconds and produced %d Statements.'",
"%",
"(",
"te",
"-",
"ts",
",",
"len",
"(",
"statements",
")",
")",
")",
"all_statements",
"[",
"pmid",
"]",
"=",
"statements",
"# If we were running a DRUM process, we should kill it",
"if",
"drum_process",
"and",
"dr",
".",
"drum_system",
":",
"dr",
".",
"_kill_drum",
"(",
")",
"return",
"all_statements"
]
 | Read sentences from a PMID-keyed dictionary and return all Statements
Parameters
----------
pmid_sentences : dict[str, list[str]]
        A dictionary where each key is a PMID pointing to a list of sentences
to be read.
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are `host` and `port`. If `run_drum` is specified
as True, this process will internally run the DRUM reading system
as a subprocess. Otherwise, DRUM is expected to be running
independently.
Returns
-------
    all_statements : dict[str, list[indra.statements.Statement]]
        A dict of INDRA Statements keyed by PMID, resulting from the reading | [
"Read",
"sentences",
"from",
"a",
"PMID",
"-",
"keyed",
"dictonary",
"and",
"return",
"all",
"Statements"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/run_drum_reading.py#L14-L86 | train |
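A hedged sketch of calling read_pmid_sentences from the record above; the PMID, sentence, host and port values are placeholders, and a TRIPS/DRUM instance must be reachable (or run_drum=True passed) for the call to succeed.

from indra.tools.reading.run_drum_reading import read_pmid_sentences

pmid_sentences = {'12345678': ['MEK1 phosphorylates ERK2.']}
# Returns a dict keyed by PMID with the Statements extracted for each entry
stmts_by_pmid = read_pmid_sentences(pmid_sentences, host='localhost', port=6200)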
sorgerlab/indra | indra/sources/biopax/pathway_commons_client.py | graph_query | def graph_query(kind, source, target=None, neighbor_limit=1,
database_filter=None):
"""Perform a graph query on PathwayCommons.
For more information on these queries, see
http://www.pathwaycommons.org/pc2/#graph
Parameters
----------
kind : str
The kind of graph query to perform. Currently 3 options are
implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'.
source : list[str]
A list of gene names which are the source set for the graph query.
target : Optional[list[str]]
A list of gene names which are the target set for the graph query.
Only needed for 'pathsfromto' queries.
neighbor_limit : Optional[int]
This limits the length of the longest path considered in
the graph query. Default: 1
Returns
-------
model : org.biopax.paxtools.model.Model
A BioPAX model (java object).
"""
default_databases = ['wp', 'smpdb', 'reconx', 'reactome', 'psp', 'pid',
'panther', 'netpath', 'msigdb', 'mirtarbase', 'kegg',
'intact', 'inoh', 'humancyc', 'hprd',
'drugbank', 'dip', 'corum']
if not database_filter:
query_databases = default_databases
else:
query_databases = database_filter
# excluded: ctd
params = {}
params['format'] = 'BIOPAX'
params['organism'] = '9606'
params['datasource'] = query_databases
# Get the "kind" string
kind_str = kind.lower()
if kind not in ['neighborhood', 'pathsbetween', 'pathsfromto']:
logger.warn('Invalid query type %s' % kind_str)
return None
params['kind'] = kind_str
# Get the source string
if isinstance(source, basestring):
source_str = source
else:
source_str = ','.join(source)
params['source'] = source_str
try:
neighbor_limit = int(neighbor_limit)
params['limit'] = neighbor_limit
except (TypeError, ValueError):
logger.warn('Invalid neighborhood limit %s' % neighbor_limit)
return None
if target is not None:
if isinstance(target, basestring):
target_str = target
else:
target_str = ','.join(target)
params['target'] = target_str
logger.info('Sending Pathway Commons query with parameters: ')
for k, v in params.items():
logger.info(' %s: %s' % (k, v))
logger.info('Sending Pathway Commons query...')
res = requests.get(pc2_url + 'graph', params=params)
if not res.status_code == 200:
logger.error('Response is HTTP code %d.' % res.status_code)
if res.status_code == 500:
logger.error('Note: HTTP code 500 can mean empty '
'results for a valid query.')
return None
# We don't decode to Unicode here because owl_str_to_model expects
# a byte stream
model = owl_str_to_model(res.content)
if model is not None:
logger.info('Pathway Commons query returned a model...')
return model | python | def graph_query(kind, source, target=None, neighbor_limit=1,
database_filter=None):
"""Perform a graph query on PathwayCommons.
For more information on these queries, see
http://www.pathwaycommons.org/pc2/#graph
Parameters
----------
kind : str
The kind of graph query to perform. Currently 3 options are
implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'.
source : list[str]
A list of gene names which are the source set for the graph query.
target : Optional[list[str]]
A list of gene names which are the target set for the graph query.
Only needed for 'pathsfromto' queries.
neighbor_limit : Optional[int]
This limits the length of the longest path considered in
the graph query. Default: 1
Returns
-------
model : org.biopax.paxtools.model.Model
A BioPAX model (java object).
"""
default_databases = ['wp', 'smpdb', 'reconx', 'reactome', 'psp', 'pid',
'panther', 'netpath', 'msigdb', 'mirtarbase', 'kegg',
'intact', 'inoh', 'humancyc', 'hprd',
'drugbank', 'dip', 'corum']
if not database_filter:
query_databases = default_databases
else:
query_databases = database_filter
# excluded: ctd
params = {}
params['format'] = 'BIOPAX'
params['organism'] = '9606'
params['datasource'] = query_databases
# Get the "kind" string
kind_str = kind.lower()
if kind not in ['neighborhood', 'pathsbetween', 'pathsfromto']:
logger.warn('Invalid query type %s' % kind_str)
return None
params['kind'] = kind_str
# Get the source string
if isinstance(source, basestring):
source_str = source
else:
source_str = ','.join(source)
params['source'] = source_str
try:
neighbor_limit = int(neighbor_limit)
params['limit'] = neighbor_limit
except (TypeError, ValueError):
logger.warn('Invalid neighborhood limit %s' % neighbor_limit)
return None
if target is not None:
if isinstance(target, basestring):
target_str = target
else:
target_str = ','.join(target)
params['target'] = target_str
logger.info('Sending Pathway Commons query with parameters: ')
for k, v in params.items():
logger.info(' %s: %s' % (k, v))
logger.info('Sending Pathway Commons query...')
res = requests.get(pc2_url + 'graph', params=params)
if not res.status_code == 200:
logger.error('Response is HTTP code %d.' % res.status_code)
if res.status_code == 500:
logger.error('Note: HTTP code 500 can mean empty '
'results for a valid query.')
return None
# We don't decode to Unicode here because owl_str_to_model expects
# a byte stream
model = owl_str_to_model(res.content)
if model is not None:
logger.info('Pathway Commons query returned a model...')
return model | [
"def",
"graph_query",
"(",
"kind",
",",
"source",
",",
"target",
"=",
"None",
",",
"neighbor_limit",
"=",
"1",
",",
"database_filter",
"=",
"None",
")",
":",
"default_databases",
"=",
"[",
"'wp'",
",",
"'smpdb'",
",",
"'reconx'",
",",
"'reactome'",
",",
"'psp'",
",",
"'pid'",
",",
"'panther'",
",",
"'netpath'",
",",
"'msigdb'",
",",
"'mirtarbase'",
",",
"'kegg'",
",",
"'intact'",
",",
"'inoh'",
",",
"'humancyc'",
",",
"'hprd'",
",",
"'drugbank'",
",",
"'dip'",
",",
"'corum'",
"]",
"if",
"not",
"database_filter",
":",
"query_databases",
"=",
"default_databases",
"else",
":",
"query_databases",
"=",
"database_filter",
"# excluded: ctd",
"params",
"=",
"{",
"}",
"params",
"[",
"'format'",
"]",
"=",
"'BIOPAX'",
"params",
"[",
"'organism'",
"]",
"=",
"'9606'",
"params",
"[",
"'datasource'",
"]",
"=",
"query_databases",
"# Get the \"kind\" string",
"kind_str",
"=",
"kind",
".",
"lower",
"(",
")",
"if",
"kind",
"not",
"in",
"[",
"'neighborhood'",
",",
"'pathsbetween'",
",",
"'pathsfromto'",
"]",
":",
"logger",
".",
"warn",
"(",
"'Invalid query type %s'",
"%",
"kind_str",
")",
"return",
"None",
"params",
"[",
"'kind'",
"]",
"=",
"kind_str",
"# Get the source string",
"if",
"isinstance",
"(",
"source",
",",
"basestring",
")",
":",
"source_str",
"=",
"source",
"else",
":",
"source_str",
"=",
"','",
".",
"join",
"(",
"source",
")",
"params",
"[",
"'source'",
"]",
"=",
"source_str",
"try",
":",
"neighbor_limit",
"=",
"int",
"(",
"neighbor_limit",
")",
"params",
"[",
"'limit'",
"]",
"=",
"neighbor_limit",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"logger",
".",
"warn",
"(",
"'Invalid neighborhood limit %s'",
"%",
"neighbor_limit",
")",
"return",
"None",
"if",
"target",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"target",
",",
"basestring",
")",
":",
"target_str",
"=",
"target",
"else",
":",
"target_str",
"=",
"','",
".",
"join",
"(",
"target",
")",
"params",
"[",
"'target'",
"]",
"=",
"target_str",
"logger",
".",
"info",
"(",
"'Sending Pathway Commons query with parameters: '",
")",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
":",
"logger",
".",
"info",
"(",
"' %s: %s'",
"%",
"(",
"k",
",",
"v",
")",
")",
"logger",
".",
"info",
"(",
"'Sending Pathway Commons query...'",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"pc2_url",
"+",
"'graph'",
",",
"params",
"=",
"params",
")",
"if",
"not",
"res",
".",
"status_code",
"==",
"200",
":",
"logger",
".",
"error",
"(",
"'Response is HTTP code %d.'",
"%",
"res",
".",
"status_code",
")",
"if",
"res",
".",
"status_code",
"==",
"500",
":",
"logger",
".",
"error",
"(",
"'Note: HTTP code 500 can mean empty '",
"'results for a valid query.'",
")",
"return",
"None",
"# We don't decode to Unicode here because owl_str_to_model expects",
"# a byte stream",
"model",
"=",
"owl_str_to_model",
"(",
"res",
".",
"content",
")",
"if",
"model",
"is",
"not",
"None",
":",
"logger",
".",
"info",
"(",
"'Pathway Commons query returned a model...'",
")",
"return",
"model"
]
| Perform a graph query on PathwayCommons.
For more information on these queries, see
http://www.pathwaycommons.org/pc2/#graph
Parameters
----------
kind : str
The kind of graph query to perform. Currently 3 options are
implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'.
source : list[str]
A list of gene names which are the source set for the graph query.
target : Optional[list[str]]
A list of gene names which are the target set for the graph query.
Only needed for 'pathsfromto' queries.
neighbor_limit : Optional[int]
This limits the length of the longest path considered in
the graph query. Default: 1
Returns
-------
model : org.biopax.paxtools.model.Model
A BioPAX model (java object). | [
"Perform",
"a",
"graph",
"query",
"on",
"PathwayCommons",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/pathway_commons_client.py#L17-L99 | train |
sorgerlab/indra | indra/sources/biopax/pathway_commons_client.py | owl_str_to_model | def owl_str_to_model(owl_str):
"""Return a BioPAX model object from an OWL string.
Parameters
----------
owl_str : str
The model as an OWL string.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
"""
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
bais = autoclass('java.io.ByteArrayInputStream')
scs = autoclass('java.nio.charset.StandardCharsets')
jstr = autoclass('java.lang.String')
istream = bais(owl_str)
biopax_model = io.convertFromOWL(istream)
return biopax_model | python | def owl_str_to_model(owl_str):
"""Return a BioPAX model object from an OWL string.
Parameters
----------
owl_str : str
The model as an OWL string.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
"""
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
bais = autoclass('java.io.ByteArrayInputStream')
scs = autoclass('java.nio.charset.StandardCharsets')
jstr = autoclass('java.lang.String')
istream = bais(owl_str)
biopax_model = io.convertFromOWL(istream)
return biopax_model | [
"def",
"owl_str_to_model",
"(",
"owl_str",
")",
":",
"io_class",
"=",
"autoclass",
"(",
"'org.biopax.paxtools.io.SimpleIOHandler'",
")",
"io",
"=",
"io_class",
"(",
"autoclass",
"(",
"'org.biopax.paxtools.model.BioPAXLevel'",
")",
".",
"L3",
")",
"bais",
"=",
"autoclass",
"(",
"'java.io.ByteArrayInputStream'",
")",
"scs",
"=",
"autoclass",
"(",
"'java.nio.charset.StandardCharsets'",
")",
"jstr",
"=",
"autoclass",
"(",
"'java.lang.String'",
")",
"istream",
"=",
"bais",
"(",
"owl_str",
")",
"biopax_model",
"=",
"io",
".",
"convertFromOWL",
"(",
"istream",
")",
"return",
"biopax_model"
]
| Return a BioPAX model object from an OWL string.
Parameters
----------
owl_str : str
The model as an OWL string.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object). | [
"Return",
"a",
"BioPAX",
"model",
"object",
"from",
"an",
"OWL",
"string",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/pathway_commons_client.py#L101-L121 | train |
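A minimal sketch of graph_query from the record above; the module import is assumed from the record's path, the gene names are arbitrary, and the call needs network access to Pathway Commons plus a working Java/paxtools setup, returning None on failure.

from indra.sources.biopax import pathway_commons_client as pcc

model = pcc.graph_query('pathsfromto', ['BRAF'], target=['MAP2K1'],
                        neighbor_limit=1)
if model is None:
    print('Query failed or returned no results')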
sorgerlab/indra | indra/sources/biopax/pathway_commons_client.py | owl_to_model | def owl_to_model(fname):
"""Return a BioPAX model object from an OWL file.
Parameters
----------
fname : str
The name of the OWL file containing the model.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
"""
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
file_is = autoclass('java.io.FileInputStream')(fname)
except JavaException:
logger.error('Could not open data file %s' % fname)
return
try:
biopax_model = io.convertFromOWL(file_is)
except JavaException as e:
        logger.error('Could not convert data file %s to BioPAX model' % fname)
logger.error(e)
return
file_is.close()
return biopax_model | python | def owl_to_model(fname):
"""Return a BioPAX model object from an OWL file.
Parameters
----------
fname : str
The name of the OWL file containing the model.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
"""
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
file_is = autoclass('java.io.FileInputStream')(fname)
except JavaException:
logger.error('Could not open data file %s' % fname)
return
try:
biopax_model = io.convertFromOWL(file_is)
except JavaException as e:
        logger.error('Could not convert data file %s to BioPAX model' % fname)
logger.error(e)
return
file_is.close()
return biopax_model | [
"def",
"owl_to_model",
"(",
"fname",
")",
":",
"io_class",
"=",
"autoclass",
"(",
"'org.biopax.paxtools.io.SimpleIOHandler'",
")",
"io",
"=",
"io_class",
"(",
"autoclass",
"(",
"'org.biopax.paxtools.model.BioPAXLevel'",
")",
".",
"L3",
")",
"try",
":",
"file_is",
"=",
"autoclass",
"(",
"'java.io.FileInputStream'",
")",
"(",
"fname",
")",
"except",
"JavaException",
":",
"logger",
".",
"error",
"(",
"'Could not open data file %s'",
"%",
"fname",
")",
"return",
"try",
":",
"biopax_model",
"=",
"io",
".",
"convertFromOWL",
"(",
"file_is",
")",
"except",
"JavaException",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not convert data file %s to BioPax model'",
"%",
"fname",
")",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"file_is",
".",
"close",
"(",
")",
"return",
"biopax_model"
]
| Return a BioPAX model object from an OWL file.
Parameters
----------
fname : str
The name of the OWL file containing the model.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object). | [
"Return",
"a",
"BioPAX",
"model",
"object",
"from",
"an",
"OWL",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/pathway_commons_client.py#L123-L153 | train |
sorgerlab/indra | indra/sources/biopax/pathway_commons_client.py | model_to_owl | def model_to_owl(model, fname):
"""Save a BioPAX model object as an OWL file.
Parameters
----------
model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
fname : str
The name of the OWL file to save the model in.
"""
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
fileOS = autoclass('java.io.FileOutputStream')(fname)
except JavaException:
logger.error('Could not open data file %s' % fname)
return
l3_factory = autoclass('org.biopax.paxtools.model.BioPAXLevel').L3.getDefaultFactory()
model_out = l3_factory.createModel()
for r in model.getObjects().toArray():
model_out.add(r)
io.convertToOWL(model_out, fileOS)
fileOS.close() | python | def model_to_owl(model, fname):
"""Save a BioPAX model object as an OWL file.
Parameters
----------
model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
fname : str
The name of the OWL file to save the model in.
"""
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
fileOS = autoclass('java.io.FileOutputStream')(fname)
except JavaException:
logger.error('Could not open data file %s' % fname)
return
l3_factory = autoclass('org.biopax.paxtools.model.BioPAXLevel').L3.getDefaultFactory()
model_out = l3_factory.createModel()
for r in model.getObjects().toArray():
model_out.add(r)
io.convertToOWL(model_out, fileOS)
fileOS.close() | [
"def",
"model_to_owl",
"(",
"model",
",",
"fname",
")",
":",
"io_class",
"=",
"autoclass",
"(",
"'org.biopax.paxtools.io.SimpleIOHandler'",
")",
"io",
"=",
"io_class",
"(",
"autoclass",
"(",
"'org.biopax.paxtools.model.BioPAXLevel'",
")",
".",
"L3",
")",
"try",
":",
"fileOS",
"=",
"autoclass",
"(",
"'java.io.FileOutputStream'",
")",
"(",
"fname",
")",
"except",
"JavaException",
":",
"logger",
".",
"error",
"(",
"'Could not open data file %s'",
"%",
"fname",
")",
"return",
"l3_factory",
"=",
"autoclass",
"(",
"'org.biopax.paxtools.model.BioPAXLevel'",
")",
".",
"L3",
".",
"getDefaultFactory",
"(",
")",
"model_out",
"=",
"l3_factory",
".",
"createModel",
"(",
")",
"for",
"r",
"in",
"model",
".",
"getObjects",
"(",
")",
".",
"toArray",
"(",
")",
":",
"model_out",
".",
"add",
"(",
"r",
")",
"io",
".",
"convertToOWL",
"(",
"model_out",
",",
"fileOS",
")",
"fileOS",
".",
"close",
"(",
")"
]
| Save a BioPAX model object as an OWL file.
Parameters
----------
model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
fname : str
The name of the OWL file to save the model in. | [
"Save",
"a",
"BioPAX",
"model",
"object",
"as",
"an",
"OWL",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/pathway_commons_client.py#L155-L179 | train |
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler.make_model | def make_model(self, *args, **kwargs):
"""Assemble a Cytoscape JS network from INDRA Statements.
This method assembles a Cytoscape JS network from the set of INDRA
Statements added to the assembler.
Parameters
----------
grouping : bool
If True, the nodes with identical incoming and outgoing edges
are grouped and the corresponding edges are merged.
Returns
-------
cyjs_str : str
The json serialized Cytoscape JS model.
"""
for stmt in self.statements:
if isinstance(stmt, RegulateActivity):
self._add_regulate_activity(stmt)
elif isinstance(stmt, RegulateAmount):
self._add_regulate_amount(stmt)
elif isinstance(stmt, Modification):
self._add_modification(stmt)
elif isinstance(stmt, SelfModification):
self._add_selfmodification(stmt)
elif isinstance(stmt, Gef):
self._add_gef(stmt)
elif isinstance(stmt, Gap):
self._add_gap(stmt)
elif isinstance(stmt, Complex):
self._add_complex(stmt)
else:
logger.warning('Unhandled statement type: %s' %
stmt.__class__.__name__)
if kwargs.get('grouping'):
self._group_nodes()
self._group_edges()
return self.print_cyjs_graph() | python | def make_model(self, *args, **kwargs):
"""Assemble a Cytoscape JS network from INDRA Statements.
This method assembles a Cytoscape JS network from the set of INDRA
Statements added to the assembler.
Parameters
----------
grouping : bool
If True, the nodes with identical incoming and outgoing edges
are grouped and the corresponding edges are merged.
Returns
-------
cyjs_str : str
The json serialized Cytoscape JS model.
"""
for stmt in self.statements:
if isinstance(stmt, RegulateActivity):
self._add_regulate_activity(stmt)
elif isinstance(stmt, RegulateAmount):
self._add_regulate_amount(stmt)
elif isinstance(stmt, Modification):
self._add_modification(stmt)
elif isinstance(stmt, SelfModification):
self._add_selfmodification(stmt)
elif isinstance(stmt, Gef):
self._add_gef(stmt)
elif isinstance(stmt, Gap):
self._add_gap(stmt)
elif isinstance(stmt, Complex):
self._add_complex(stmt)
else:
logger.warning('Unhandled statement type: %s' %
stmt.__class__.__name__)
if kwargs.get('grouping'):
self._group_nodes()
self._group_edges()
return self.print_cyjs_graph() | [
"def",
"make_model",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"RegulateActivity",
")",
":",
"self",
".",
"_add_regulate_activity",
"(",
"stmt",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"RegulateAmount",
")",
":",
"self",
".",
"_add_regulate_amount",
"(",
"stmt",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Modification",
")",
":",
"self",
".",
"_add_modification",
"(",
"stmt",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"SelfModification",
")",
":",
"self",
".",
"_add_selfmodification",
"(",
"stmt",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Gef",
")",
":",
"self",
".",
"_add_gef",
"(",
"stmt",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Gap",
")",
":",
"self",
".",
"_add_gap",
"(",
"stmt",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"Complex",
")",
":",
"self",
".",
"_add_complex",
"(",
"stmt",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Unhandled statement type: %s'",
"%",
"stmt",
".",
"__class__",
".",
"__name__",
")",
"if",
"kwargs",
".",
"get",
"(",
"'grouping'",
")",
":",
"self",
".",
"_group_nodes",
"(",
")",
"self",
".",
"_group_edges",
"(",
")",
"return",
"self",
".",
"print_cyjs_graph",
"(",
")"
]
| Assemble a Cytoscape JS network from INDRA Statements.
This method assembles a Cytoscape JS network from the set of INDRA
Statements added to the assembler.
Parameters
----------
grouping : bool
If True, the nodes with identical incoming and outgoing edges
are grouped and the corresponding edges are merged.
Returns
-------
cyjs_str : str
The json serialized Cytoscape JS model. | [
"Assemble",
"a",
"Cytoscape",
"JS",
"network",
"from",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L69-L107 | train |
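A brief round-trip sketch combining owl_to_model and model_to_owl from the records above; the file names are hypothetical and the paxtools Java bridge must be available.

from indra.sources.biopax import pathway_commons_client as pcc

model = pcc.owl_to_model('input_model.owl')      # read a BioPAX OWL file
if model is not None:
    pcc.model_to_owl(model, 'output_model.owl')  # write the model back out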
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler.get_gene_names | def get_gene_names(self):
"""Gather gene names of all nodes and node members"""
# Collect all gene names in network
gene_names = []
for node in self._nodes:
members = node['data'].get('members')
if members:
gene_names += list(members.keys())
else:
if node['data']['name'].startswith('Group'):
continue
gene_names.append(node['data']['name'])
self._gene_names = gene_names | python | def get_gene_names(self):
"""Gather gene names of all nodes and node members"""
# Collect all gene names in network
gene_names = []
for node in self._nodes:
members = node['data'].get('members')
if members:
gene_names += list(members.keys())
else:
if node['data']['name'].startswith('Group'):
continue
gene_names.append(node['data']['name'])
self._gene_names = gene_names | [
"def",
"get_gene_names",
"(",
"self",
")",
":",
"# Collect all gene names in network",
"gene_names",
"=",
"[",
"]",
"for",
"node",
"in",
"self",
".",
"_nodes",
":",
"members",
"=",
"node",
"[",
"'data'",
"]",
".",
"get",
"(",
"'members'",
")",
"if",
"members",
":",
"gene_names",
"+=",
"list",
"(",
"members",
".",
"keys",
"(",
")",
")",
"else",
":",
"if",
"node",
"[",
"'data'",
"]",
"[",
"'name'",
"]",
".",
"startswith",
"(",
"'Group'",
")",
":",
"continue",
"gene_names",
".",
"append",
"(",
"node",
"[",
"'data'",
"]",
"[",
"'name'",
"]",
")",
"self",
".",
"_gene_names",
"=",
"gene_names"
]
| Gather gene names of all nodes and node members | [
"Gather",
"gene",
"names",
"of",
"all",
"nodes",
"and",
"node",
"members"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L109-L121 | train |
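A sketch of assembling a Cytoscape JS network with CyJSAssembler.make_model as documented above; the import path and the constructor taking a list of Statements are assumptions based on the record's path and on how other INDRA assemblers are typically used.

from indra.statements import Agent, Phosphorylation
from indra.assemblers.cyjs import CyJSAssembler

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
cja = CyJSAssembler(stmts)
cyjs_str = cja.make_model(grouping=True)  # grouping merges topologically identical nodes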
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler.set_CCLE_context | def set_CCLE_context(self, cell_types):
"""Set context of all nodes and node members from CCLE."""
self.get_gene_names()
# Get expression and mutations from context client
exp_values = \
context_client.get_protein_expression(self._gene_names, cell_types)
mut_values = \
context_client.get_mutations(self._gene_names, cell_types)
# Make a dict of presence/absence of mutations
muts = {cell_line: {} for cell_line in cell_types}
for cell_line, entries in mut_values.items():
if entries is not None:
for gene, mutations in entries.items():
if mutations:
muts[cell_line][gene] = 1
else:
muts[cell_line][gene] = 0
# Create bins for the exp values
# because colorbrewer only does 3-9 bins and I don't feel like
# reinventing color scheme theory, this will only bin 3-9 bins
def bin_exp(expression_dict):
d = expression_dict
exp_values = []
for line in d:
for gene in d[line]:
val = d[line][gene]
if val is not None:
exp_values.append(val)
thr_dict = {}
for n_bins in range(3, 10):
bin_thr = np.histogram(np.log10(exp_values), n_bins)[1][1:]
thr_dict[n_bins] = bin_thr
# this dict isn't yet binned, that happens in the loop
binned_dict = {x: deepcopy(expression_dict) for x in range(3, 10)}
for n_bins in binned_dict:
for line in binned_dict[n_bins]:
for gene in binned_dict[n_bins][line]:
# last bin is reserved for None
if binned_dict[n_bins][line][gene] is None:
binned_dict[n_bins][line][gene] = n_bins
else:
val = np.log10(binned_dict[n_bins][line][gene])
for thr_idx, thr in enumerate(thr_dict[n_bins]):
if val <= thr:
binned_dict[n_bins][line][gene] = thr_idx
break
return binned_dict
binned_exp = bin_exp(exp_values)
context = {'bin_expression': binned_exp,
'mutation': muts}
self._context['CCLE'] = context | python | def set_CCLE_context(self, cell_types):
"""Set context of all nodes and node members from CCLE."""
self.get_gene_names()
# Get expression and mutations from context client
exp_values = \
context_client.get_protein_expression(self._gene_names, cell_types)
mut_values = \
context_client.get_mutations(self._gene_names, cell_types)
# Make a dict of presence/absence of mutations
muts = {cell_line: {} for cell_line in cell_types}
for cell_line, entries in mut_values.items():
if entries is not None:
for gene, mutations in entries.items():
if mutations:
muts[cell_line][gene] = 1
else:
muts[cell_line][gene] = 0
# Create bins for the exp values
# because colorbrewer only does 3-9 bins and I don't feel like
# reinventing color scheme theory, this will only bin 3-9 bins
def bin_exp(expression_dict):
d = expression_dict
exp_values = []
for line in d:
for gene in d[line]:
val = d[line][gene]
if val is not None:
exp_values.append(val)
thr_dict = {}
for n_bins in range(3, 10):
bin_thr = np.histogram(np.log10(exp_values), n_bins)[1][1:]
thr_dict[n_bins] = bin_thr
# this dict isn't yet binned, that happens in the loop
binned_dict = {x: deepcopy(expression_dict) for x in range(3, 10)}
for n_bins in binned_dict:
for line in binned_dict[n_bins]:
for gene in binned_dict[n_bins][line]:
# last bin is reserved for None
if binned_dict[n_bins][line][gene] is None:
binned_dict[n_bins][line][gene] = n_bins
else:
val = np.log10(binned_dict[n_bins][line][gene])
for thr_idx, thr in enumerate(thr_dict[n_bins]):
if val <= thr:
binned_dict[n_bins][line][gene] = thr_idx
break
return binned_dict
binned_exp = bin_exp(exp_values)
context = {'bin_expression': binned_exp,
'mutation': muts}
self._context['CCLE'] = context | [
"def",
"set_CCLE_context",
"(",
"self",
",",
"cell_types",
")",
":",
"self",
".",
"get_gene_names",
"(",
")",
"# Get expression and mutations from context client",
"exp_values",
"=",
"context_client",
".",
"get_protein_expression",
"(",
"self",
".",
"_gene_names",
",",
"cell_types",
")",
"mut_values",
"=",
"context_client",
".",
"get_mutations",
"(",
"self",
".",
"_gene_names",
",",
"cell_types",
")",
"# Make a dict of presence/absence of mutations",
"muts",
"=",
"{",
"cell_line",
":",
"{",
"}",
"for",
"cell_line",
"in",
"cell_types",
"}",
"for",
"cell_line",
",",
"entries",
"in",
"mut_values",
".",
"items",
"(",
")",
":",
"if",
"entries",
"is",
"not",
"None",
":",
"for",
"gene",
",",
"mutations",
"in",
"entries",
".",
"items",
"(",
")",
":",
"if",
"mutations",
":",
"muts",
"[",
"cell_line",
"]",
"[",
"gene",
"]",
"=",
"1",
"else",
":",
"muts",
"[",
"cell_line",
"]",
"[",
"gene",
"]",
"=",
"0",
"# Create bins for the exp values",
"# because colorbrewer only does 3-9 bins and I don't feel like",
"# reinventing color scheme theory, this will only bin 3-9 bins",
"def",
"bin_exp",
"(",
"expression_dict",
")",
":",
"d",
"=",
"expression_dict",
"exp_values",
"=",
"[",
"]",
"for",
"line",
"in",
"d",
":",
"for",
"gene",
"in",
"d",
"[",
"line",
"]",
":",
"val",
"=",
"d",
"[",
"line",
"]",
"[",
"gene",
"]",
"if",
"val",
"is",
"not",
"None",
":",
"exp_values",
".",
"append",
"(",
"val",
")",
"thr_dict",
"=",
"{",
"}",
"for",
"n_bins",
"in",
"range",
"(",
"3",
",",
"10",
")",
":",
"bin_thr",
"=",
"np",
".",
"histogram",
"(",
"np",
".",
"log10",
"(",
"exp_values",
")",
",",
"n_bins",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"thr_dict",
"[",
"n_bins",
"]",
"=",
"bin_thr",
"# this dict isn't yet binned, that happens in the loop",
"binned_dict",
"=",
"{",
"x",
":",
"deepcopy",
"(",
"expression_dict",
")",
"for",
"x",
"in",
"range",
"(",
"3",
",",
"10",
")",
"}",
"for",
"n_bins",
"in",
"binned_dict",
":",
"for",
"line",
"in",
"binned_dict",
"[",
"n_bins",
"]",
":",
"for",
"gene",
"in",
"binned_dict",
"[",
"n_bins",
"]",
"[",
"line",
"]",
":",
"# last bin is reserved for None",
"if",
"binned_dict",
"[",
"n_bins",
"]",
"[",
"line",
"]",
"[",
"gene",
"]",
"is",
"None",
":",
"binned_dict",
"[",
"n_bins",
"]",
"[",
"line",
"]",
"[",
"gene",
"]",
"=",
"n_bins",
"else",
":",
"val",
"=",
"np",
".",
"log10",
"(",
"binned_dict",
"[",
"n_bins",
"]",
"[",
"line",
"]",
"[",
"gene",
"]",
")",
"for",
"thr_idx",
",",
"thr",
"in",
"enumerate",
"(",
"thr_dict",
"[",
"n_bins",
"]",
")",
":",
"if",
"val",
"<=",
"thr",
":",
"binned_dict",
"[",
"n_bins",
"]",
"[",
"line",
"]",
"[",
"gene",
"]",
"=",
"thr_idx",
"break",
"return",
"binned_dict",
"binned_exp",
"=",
"bin_exp",
"(",
"exp_values",
")",
"context",
"=",
"{",
"'bin_expression'",
":",
"binned_exp",
",",
"'mutation'",
":",
"muts",
"}",
"self",
".",
"_context",
"[",
"'CCLE'",
"]",
"=",
"context"
]
| Set context of all nodes and node members from CCLE. | [
"Set",
"context",
"of",
"all",
"nodes",
"and",
"node",
"members",
"from",
"CCLE",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L123-L177 | train |
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler.print_cyjs_graph | def print_cyjs_graph(self):
"""Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network.
"""
cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}
cyjs_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)
return cyjs_str | python | def print_cyjs_graph(self):
"""Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network.
"""
cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}
cyjs_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)
return cyjs_str | [
"def",
"print_cyjs_graph",
"(",
"self",
")",
":",
"cyjs_dict",
"=",
"{",
"'edges'",
":",
"self",
".",
"_edges",
",",
"'nodes'",
":",
"self",
".",
"_nodes",
"}",
"cyjs_str",
"=",
"json",
".",
"dumps",
"(",
"cyjs_dict",
",",
"indent",
"=",
"1",
",",
"sort_keys",
"=",
"True",
")",
"return",
"cyjs_str"
]
| Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network. | [
"Return",
"the",
"assembled",
"Cytoscape",
"JS",
"network",
"as",
"a",
"json",
"string",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L179-L189 | train |
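Continuing the assembler sketch above, a hedged example of set_CCLE_context from this record; the cell line identifiers are placeholders in CCLE naming style and the call requires access to the INDRA context service.

cja.set_CCLE_context(['A375_SKIN', 'SKMEL28_SKIN'])
print(cja.print_cyjs_context())  # JSON string with binned expression and mutation status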
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler.print_cyjs_context | def print_cyjs_context(self):
"""Return a list of node names and their respective context.
Returns
-------
cyjs_str_context : str
A json string of the context dictionary. e.g. -
{'CCLE' : {'bin_expression' : {'cell_line1' : {'gene1':'val1'} },
'bin_expression' : {'cell_line' : {'gene1':'val1'} }
}}
"""
context = self._context
context_str = json.dumps(context, indent=1, sort_keys=True)
return context_str | python | def print_cyjs_context(self):
"""Return a list of node names and their respective context.
Returns
-------
cyjs_str_context : str
A json string of the context dictionary. e.g. -
{'CCLE' : {'bin_expression' : {'cell_line1' : {'gene1':'val1'} },
'bin_expression' : {'cell_line' : {'gene1':'val1'} }
}}
"""
context = self._context
context_str = json.dumps(context, indent=1, sort_keys=True)
return context_str | [
"def",
"print_cyjs_context",
"(",
"self",
")",
":",
"context",
"=",
"self",
".",
"_context",
"context_str",
"=",
"json",
".",
"dumps",
"(",
"context",
",",
"indent",
"=",
"1",
",",
"sort_keys",
"=",
"True",
")",
"return",
"context_str"
]
| Return a list of node names and their respective context.
Returns
-------
cyjs_str_context : str
A json string of the context dictionary. e.g. -
{'CCLE' : {'bin_expression' : {'cell_line1' : {'gene1':'val1'} },
'bin_expression' : {'cell_line' : {'gene1':'val1'} }
}} | [
"Return",
"a",
"list",
"of",
"node",
"names",
"and",
"their",
"respective",
"context",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L191-L204 | train |
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler.save_json | def save_json(self, fname_prefix='model'):
"""Save the assembled Cytoscape JS network in a json file.
This method saves two files based on the file name prefix given.
It saves one json file with the graph itself, and another json
file with the context.
Parameters
----------
fname_prefix : Optional[str]
The prefix of the files to save the Cytoscape JS network and
context to.
Default: model
"""
cyjs_str = self.print_cyjs_graph()
# outputs the graph
with open(fname_prefix + '.json', 'wb') as fh:
fh.write(cyjs_str.encode('utf-8'))
# outputs the context of graph nodes
context_str = self.print_cyjs_context()
with open(fname_prefix + '_context.json', 'wb') as fh:
fh.write(context_str.encode('utf-8')) | python | def save_json(self, fname_prefix='model'):
"""Save the assembled Cytoscape JS network in a json file.
This method saves two files based on the file name prefix given.
It saves one json file with the graph itself, and another json
file with the context.
Parameters
----------
fname_prefix : Optional[str]
The prefix of the files to save the Cytoscape JS network and
context to.
Default: model
"""
cyjs_str = self.print_cyjs_graph()
# outputs the graph
with open(fname_prefix + '.json', 'wb') as fh:
fh.write(cyjs_str.encode('utf-8'))
# outputs the context of graph nodes
context_str = self.print_cyjs_context()
with open(fname_prefix + '_context.json', 'wb') as fh:
fh.write(context_str.encode('utf-8')) | [
"def",
"save_json",
"(",
"self",
",",
"fname_prefix",
"=",
"'model'",
")",
":",
"cyjs_str",
"=",
"self",
".",
"print_cyjs_graph",
"(",
")",
"# outputs the graph",
"with",
"open",
"(",
"fname_prefix",
"+",
"'.json'",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"cyjs_str",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"# outputs the context of graph nodes",
"context_str",
"=",
"self",
".",
"print_cyjs_context",
"(",
")",
"with",
"open",
"(",
"fname_prefix",
"+",
"'_context.json'",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"context_str",
".",
"encode",
"(",
"'utf-8'",
")",
")"
]
| Save the assembled Cytoscape JS network in a json file.
This method saves two files based on the file name prefix given.
It saves one json file with the graph itself, and another json
file with the context.
Parameters
----------
fname_prefix : Optional[str]
The prefix of the files to save the Cytoscape JS network and
context to.
Default: model | [
"Save",
"the",
"assembled",
"Cytoscape",
"JS",
"network",
"in",
"a",
"json",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L206-L227 | train |
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler.save_model | def save_model(self, fname='model.js'):
"""Save the assembled Cytoscape JS network in a js file.
Parameters
----------
    fname : Optional[str]
The name of the file to save the Cytoscape JS network to.
Default: model.js
"""
exp_colorscale_str = json.dumps(self._exp_colorscale)
mut_colorscale_str = json.dumps(self._mut_colorscale)
cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}
model_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)
model_dict = {'exp_colorscale_str': exp_colorscale_str,
'mut_colorscale_str': mut_colorscale_str,
'model_elements_str': model_str}
s = ''
s += 'var exp_colorscale = %s;\n' % model_dict['exp_colorscale_str']
s += 'var mut_colorscale = %s;\n' % model_dict['mut_colorscale_str']
s += 'var model_elements = %s;\n' % model_dict['model_elements_str']
with open(fname, 'wb') as fh:
fh.write(s.encode('utf-8')) | python | def save_model(self, fname='model.js'):
"""Save the assembled Cytoscape JS network in a js file.
Parameters
----------
    fname : Optional[str]
The name of the file to save the Cytoscape JS network to.
Default: model.js
"""
exp_colorscale_str = json.dumps(self._exp_colorscale)
mut_colorscale_str = json.dumps(self._mut_colorscale)
cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}
model_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)
model_dict = {'exp_colorscale_str': exp_colorscale_str,
'mut_colorscale_str': mut_colorscale_str,
'model_elements_str': model_str}
s = ''
s += 'var exp_colorscale = %s;\n' % model_dict['exp_colorscale_str']
s += 'var mut_colorscale = %s;\n' % model_dict['mut_colorscale_str']
s += 'var model_elements = %s;\n' % model_dict['model_elements_str']
with open(fname, 'wb') as fh:
fh.write(s.encode('utf-8')) | [
"def",
"save_model",
"(",
"self",
",",
"fname",
"=",
"'model.js'",
")",
":",
"exp_colorscale_str",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"_exp_colorscale",
")",
"mut_colorscale_str",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"_mut_colorscale",
")",
"cyjs_dict",
"=",
"{",
"'edges'",
":",
"self",
".",
"_edges",
",",
"'nodes'",
":",
"self",
".",
"_nodes",
"}",
"model_str",
"=",
"json",
".",
"dumps",
"(",
"cyjs_dict",
",",
"indent",
"=",
"1",
",",
"sort_keys",
"=",
"True",
")",
"model_dict",
"=",
"{",
"'exp_colorscale_str'",
":",
"exp_colorscale_str",
",",
"'mut_colorscale_str'",
":",
"mut_colorscale_str",
",",
"'model_elements_str'",
":",
"model_str",
"}",
"s",
"=",
"''",
"s",
"+=",
"'var exp_colorscale = %s;\\n'",
"%",
"model_dict",
"[",
"'exp_colorscale_str'",
"]",
"s",
"+=",
"'var mut_colorscale = %s;\\n'",
"%",
"model_dict",
"[",
"'mut_colorscale_str'",
"]",
"s",
"+=",
"'var model_elements = %s;\\n'",
"%",
"model_dict",
"[",
"'model_elements_str'",
"]",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"s",
".",
"encode",
"(",
"'utf-8'",
")",
")"
]
| Save the assembled Cytoscape JS network in a js file.
Parameters
----------
    fname : Optional[str]
The name of the file to save the Cytoscape JS network to.
Default: model.js | [
"Save",
"the",
"assembled",
"Cytoscape",
"JS",
"network",
"in",
"a",
"js",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L229-L250 | train |
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler._get_edge_dict | def _get_edge_dict(self):
"""Return a dict of edges.
Keyed tuples of (i, source, target, polarity)
with lists of edge ids [id1, id2, ...]
"""
edge_dict = collections.defaultdict(lambda: [])
if len(self._edges) > 0:
for e in self._edges:
data = e['data']
key = tuple([data['i'], data['source'],
data['target'], data['polarity']])
edge_dict[key] = data['id']
return edge_dict | python | def _get_edge_dict(self):
"""Return a dict of edges.
Keyed tuples of (i, source, target, polarity)
with lists of edge ids [id1, id2, ...]
"""
edge_dict = collections.defaultdict(lambda: [])
if len(self._edges) > 0:
for e in self._edges:
data = e['data']
key = tuple([data['i'], data['source'],
data['target'], data['polarity']])
edge_dict[key] = data['id']
return edge_dict | [
"def",
"_get_edge_dict",
"(",
"self",
")",
":",
"edge_dict",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"[",
"]",
")",
"if",
"len",
"(",
"self",
".",
"_edges",
")",
">",
"0",
":",
"for",
"e",
"in",
"self",
".",
"_edges",
":",
"data",
"=",
"e",
"[",
"'data'",
"]",
"key",
"=",
"tuple",
"(",
"[",
"data",
"[",
"'i'",
"]",
",",
"data",
"[",
"'source'",
"]",
",",
"data",
"[",
"'target'",
"]",
",",
"data",
"[",
"'polarity'",
"]",
"]",
")",
"edge_dict",
"[",
"key",
"]",
"=",
"data",
"[",
"'id'",
"]",
"return",
"edge_dict"
]
| Return a dict of edges.
Keyed tuples of (i, source, target, polarity)
with lists of edge ids [id1, id2, ...] | [
"Return",
"a",
"dict",
"of",
"edges",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L282-L295 | train |
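A standalone sketch of the keying scheme described by _get_edge_dict; the edge record below is hypothetical, not taken from this row.

import collections

# Hypothetical Cytoscape JS style edge, shaped like the entries in self._edges
edges = [{'data': {'id': 0, 'i': 'Phosphorylation', 'source': 1,
                   'target': 2, 'polarity': 'positive'}}]
edge_dict = collections.defaultdict(list)
for e in edges:
    d = e['data']
    key = (d['i'], d['source'], d['target'], d['polarity'])
    edge_dict[key] = d['id']  # as in the method, the edge id is stored under the key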
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler._get_node_key | def _get_node_key(self, node_dict_item):
"""Return a tuple of sorted sources and targets given a node dict."""
s = tuple(sorted(node_dict_item['sources']))
t = tuple(sorted(node_dict_item['targets']))
return (s, t) | python | def _get_node_key(self, node_dict_item):
"""Return a tuple of sorted sources and targets given a node dict."""
s = tuple(sorted(node_dict_item['sources']))
t = tuple(sorted(node_dict_item['targets']))
return (s, t) | [
"def",
"_get_node_key",
"(",
"self",
",",
"node_dict_item",
")",
":",
"s",
"=",
"tuple",
"(",
"sorted",
"(",
"node_dict_item",
"[",
"'sources'",
"]",
")",
")",
"t",
"=",
"tuple",
"(",
"sorted",
"(",
"node_dict_item",
"[",
"'targets'",
"]",
")",
")",
"return",
"(",
"s",
",",
"t",
")"
]
| Return a tuple of sorted sources and targets given a node dict. | [
"Return",
"a",
"tuple",
"of",
"sorted",
"sources",
"and",
"targets",
"given",
"a",
"node",
"dict",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L360-L364 | train |
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler._get_node_groups | def _get_node_groups(self):
"""Return a list of node id lists that are topologically identical.
First construct a node_dict which is keyed to the node id and
has a value which is a dict with keys 'sources' and 'targets'.
The 'sources' and 'targets' each contain a list of tuples
(i, polarity, source) edge of the node. node_dict is then processed
by _get_node_key() which returns a tuple of (s,t) where s,t are
sorted tuples of the ids for the source and target nodes. (s,t) is
then used as a key in node_key_dict where the values are the node
ids. node_groups is restricted to groups greater than 1 node.
"""
node_dict = {node['data']['id']: {'sources': [], 'targets': []}
for node in self._nodes}
for edge in self._edges:
# Add edge as a source for its target node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['source'])
node_dict[edge['data']['target']]['sources'].append(edge_data)
# Add edge as target for its source node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['target'])
node_dict[edge['data']['source']]['targets'].append(edge_data)
# Make a dictionary of nodes based on source/target as a key
node_key_dict = collections.defaultdict(lambda: [])
for node_id, node_d in node_dict.items():
key = self._get_node_key(node_d)
node_key_dict[key].append(node_id)
# Constrain the groups to ones that have more than 1 member
node_groups = [g for g in node_key_dict.values() if (len(g) > 1)]
return node_groups | python | def _get_node_groups(self):
"""Return a list of node id lists that are topologically identical.
First construct a node_dict which is keyed to the node id and
has a value which is a dict with keys 'sources' and 'targets'.
The 'sources' and 'targets' each contain a list of tuples
(i, polarity, source) edge of the node. node_dict is then processed
by _get_node_key() which returns a tuple of (s,t) where s,t are
sorted tuples of the ids for the source and target nodes. (s,t) is
then used as a key in node_key_dict where the values are the node
ids. node_groups is restricted to groups greater than 1 node.
"""
node_dict = {node['data']['id']: {'sources': [], 'targets': []}
for node in self._nodes}
for edge in self._edges:
# Add edge as a source for its target node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['source'])
node_dict[edge['data']['target']]['sources'].append(edge_data)
# Add edge as target for its source node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['target'])
node_dict[edge['data']['source']]['targets'].append(edge_data)
# Make a dictionary of nodes based on source/target as a key
node_key_dict = collections.defaultdict(lambda: [])
for node_id, node_d in node_dict.items():
key = self._get_node_key(node_d)
node_key_dict[key].append(node_id)
# Constrain the groups to ones that have more than 1 member
node_groups = [g for g in node_key_dict.values() if (len(g) > 1)]
return node_groups | [
"def",
"_get_node_groups",
"(",
"self",
")",
":",
"node_dict",
"=",
"{",
"node",
"[",
"'data'",
"]",
"[",
"'id'",
"]",
":",
"{",
"'sources'",
":",
"[",
"]",
",",
"'targets'",
":",
"[",
"]",
"}",
"for",
"node",
"in",
"self",
".",
"_nodes",
"}",
"for",
"edge",
"in",
"self",
".",
"_edges",
":",
"# Add edge as a source for its target node",
"edge_data",
"=",
"(",
"edge",
"[",
"'data'",
"]",
"[",
"'i'",
"]",
",",
"edge",
"[",
"'data'",
"]",
"[",
"'polarity'",
"]",
",",
"edge",
"[",
"'data'",
"]",
"[",
"'source'",
"]",
")",
"node_dict",
"[",
"edge",
"[",
"'data'",
"]",
"[",
"'target'",
"]",
"]",
"[",
"'sources'",
"]",
".",
"append",
"(",
"edge_data",
")",
"# Add edge as target for its source node",
"edge_data",
"=",
"(",
"edge",
"[",
"'data'",
"]",
"[",
"'i'",
"]",
",",
"edge",
"[",
"'data'",
"]",
"[",
"'polarity'",
"]",
",",
"edge",
"[",
"'data'",
"]",
"[",
"'target'",
"]",
")",
"node_dict",
"[",
"edge",
"[",
"'data'",
"]",
"[",
"'source'",
"]",
"]",
"[",
"'targets'",
"]",
".",
"append",
"(",
"edge_data",
")",
"# Make a dictionary of nodes based on source/target as a key",
"node_key_dict",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"[",
"]",
")",
"for",
"node_id",
",",
"node_d",
"in",
"node_dict",
".",
"items",
"(",
")",
":",
"key",
"=",
"self",
".",
"_get_node_key",
"(",
"node_d",
")",
"node_key_dict",
"[",
"key",
"]",
".",
"append",
"(",
"node_id",
")",
"# Constrain the groups to ones that have more than 1 member",
"node_groups",
"=",
"[",
"g",
"for",
"g",
"in",
"node_key_dict",
".",
"values",
"(",
")",
"if",
"(",
"len",
"(",
"g",
")",
">",
"1",
")",
"]",
"return",
"node_groups"
]
| Return a list of node id lists that are topologically identical.
First construct a node_dict which is keyed to the node id and
has a value which is a dict with keys 'sources' and 'targets'.
The 'sources' and 'targets' each contain a list of tuples
(i, polarity, source) edge of the node. node_dict is then processed
by _get_node_key() which returns a tuple of (s,t) where s,t are
sorted tuples of the ids for the source and target nodes. (s,t) is
then used as a key in node_key_dict where the values are the node
ids. node_groups is restricted to groups greater than 1 node. | [
"Return",
"a",
"list",
"of",
"node",
"id",
"lists",
"that",
"are",
"topologically",
"identical",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L366-L396 | train |
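A self-contained sketch of the grouping idea _get_node_groups describes, using made-up node signatures in place of the real (sorted sources, sorted targets) keys.

import collections

node_signatures = {
    'A': (('kin1',), ()),   # hypothetical (sources, targets) signature
    'B': (('kin1',), ()),   # identical signature to 'A'
    'C': ((), ('A', 'B')),
}
groups = collections.defaultdict(list)
for node_id, sig in node_signatures.items():
    groups[sig].append(node_id)
node_groups = [g for g in groups.values() if len(g) > 1]
# node_groups == [['A', 'B']]: topologically identical nodes end up grouped together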
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | CyJSAssembler._group_edges | def _group_edges(self):
"""Group all edges that are topologically identical.
This means that (i, source, target, polarity) are the same, then sets
edges on parent (i.e. - group) nodes to 'Virtual' and creates a new
edge to represent all of them.
"""
# edit edges on parent nodes and make new edges for them
edges_to_add = [[], []] # [group_edges, uuid_lists]
for e in self._edges:
new_edge = deepcopy(e)
new_edge['data'].pop('id', None)
uuid_list = new_edge['data'].pop('uuid_list', [])
# Check if edge source or target are contained in a parent
# If source or target in parent edit edge
# Nodes may only point within their container
source = e['data']['source']
target = e['data']['target']
source_node = [x for x in self._nodes if
x['data']['id'] == source][0]
target_node = [x for x in self._nodes if
x['data']['id'] == target][0]
# If the source node is in a group, we change the source of this
# edge to the group
if source_node['data']['parent'] != '':
new_edge['data']['source'] = source_node['data']['parent']
e['data']['i'] = 'Virtual'
# If the target node is in a group, we change the target of this
# edge to the group
if target_node['data']['parent'] != '':
new_edge['data']['target'] = target_node['data']['parent']
e['data']['i'] = 'Virtual'
if e['data']['i'] == 'Virtual':
if new_edge not in edges_to_add[0]:
edges_to_add[0].append(new_edge)
edges_to_add[1].append(uuid_list)
else:
idx = edges_to_add[0].index(new_edge)
edges_to_add[1][idx] += uuid_list
edges_to_add[1][idx] = list(set(edges_to_add[1][idx]))
for ze in zip(*edges_to_add):
edge = ze[0]
edge['data']['id'] = self._get_new_id()
edge['data']['uuid_list'] = ze[1]
self._edges.append(edge) | python | def _group_edges(self):
"""Group all edges that are topologically identical.
This means that (i, source, target, polarity) are the same, then sets
edges on parent (i.e. - group) nodes to 'Virtual' and creates a new
edge to represent all of them.
"""
# edit edges on parent nodes and make new edges for them
edges_to_add = [[], []] # [group_edges, uuid_lists]
for e in self._edges:
new_edge = deepcopy(e)
new_edge['data'].pop('id', None)
uuid_list = new_edge['data'].pop('uuid_list', [])
# Check if edge source or target are contained in a parent
# If source or target in parent edit edge
# Nodes may only point within their container
source = e['data']['source']
target = e['data']['target']
source_node = [x for x in self._nodes if
x['data']['id'] == source][0]
target_node = [x for x in self._nodes if
x['data']['id'] == target][0]
# If the source node is in a group, we change the source of this
# edge to the group
if source_node['data']['parent'] != '':
new_edge['data']['source'] = source_node['data']['parent']
e['data']['i'] = 'Virtual'
# If the target node is in a group, we change the target of this
# edge to the group
if target_node['data']['parent'] != '':
new_edge['data']['target'] = target_node['data']['parent']
e['data']['i'] = 'Virtual'
if e['data']['i'] == 'Virtual':
if new_edge not in edges_to_add[0]:
edges_to_add[0].append(new_edge)
edges_to_add[1].append(uuid_list)
else:
idx = edges_to_add[0].index(new_edge)
edges_to_add[1][idx] += uuid_list
edges_to_add[1][idx] = list(set(edges_to_add[1][idx]))
for ze in zip(*edges_to_add):
edge = ze[0]
edge['data']['id'] = self._get_new_id()
edge['data']['uuid_list'] = ze[1]
self._edges.append(edge) | [
"def",
"_group_edges",
"(",
"self",
")",
":",
"# edit edges on parent nodes and make new edges for them",
"edges_to_add",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"# [group_edges, uuid_lists]",
"for",
"e",
"in",
"self",
".",
"_edges",
":",
"new_edge",
"=",
"deepcopy",
"(",
"e",
")",
"new_edge",
"[",
"'data'",
"]",
".",
"pop",
"(",
"'id'",
",",
"None",
")",
"uuid_list",
"=",
"new_edge",
"[",
"'data'",
"]",
".",
"pop",
"(",
"'uuid_list'",
",",
"[",
"]",
")",
"# Check if edge source or target are contained in a parent",
"# If source or target in parent edit edge",
"# Nodes may only point within their container",
"source",
"=",
"e",
"[",
"'data'",
"]",
"[",
"'source'",
"]",
"target",
"=",
"e",
"[",
"'data'",
"]",
"[",
"'target'",
"]",
"source_node",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"_nodes",
"if",
"x",
"[",
"'data'",
"]",
"[",
"'id'",
"]",
"==",
"source",
"]",
"[",
"0",
"]",
"target_node",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"_nodes",
"if",
"x",
"[",
"'data'",
"]",
"[",
"'id'",
"]",
"==",
"target",
"]",
"[",
"0",
"]",
"# If the source node is in a group, we change the source of this",
"# edge to the group",
"if",
"source_node",
"[",
"'data'",
"]",
"[",
"'parent'",
"]",
"!=",
"''",
":",
"new_edge",
"[",
"'data'",
"]",
"[",
"'source'",
"]",
"=",
"source_node",
"[",
"'data'",
"]",
"[",
"'parent'",
"]",
"e",
"[",
"'data'",
"]",
"[",
"'i'",
"]",
"=",
"'Virtual'",
"# If the targete node is in a group, we change the target of this",
"# edge to the group",
"if",
"target_node",
"[",
"'data'",
"]",
"[",
"'parent'",
"]",
"!=",
"''",
":",
"new_edge",
"[",
"'data'",
"]",
"[",
"'target'",
"]",
"=",
"target_node",
"[",
"'data'",
"]",
"[",
"'parent'",
"]",
"e",
"[",
"'data'",
"]",
"[",
"'i'",
"]",
"=",
"'Virtual'",
"if",
"e",
"[",
"'data'",
"]",
"[",
"'i'",
"]",
"==",
"'Virtual'",
":",
"if",
"new_edge",
"not",
"in",
"edges_to_add",
"[",
"0",
"]",
":",
"edges_to_add",
"[",
"0",
"]",
".",
"append",
"(",
"new_edge",
")",
"edges_to_add",
"[",
"1",
"]",
".",
"append",
"(",
"uuid_list",
")",
"else",
":",
"idx",
"=",
"edges_to_add",
"[",
"0",
"]",
".",
"index",
"(",
"new_edge",
")",
"edges_to_add",
"[",
"1",
"]",
"[",
"idx",
"]",
"+=",
"uuid_list",
"edges_to_add",
"[",
"1",
"]",
"[",
"idx",
"]",
"=",
"list",
"(",
"set",
"(",
"edges_to_add",
"[",
"1",
"]",
"[",
"idx",
"]",
")",
")",
"for",
"ze",
"in",
"zip",
"(",
"*",
"edges_to_add",
")",
":",
"edge",
"=",
"ze",
"[",
"0",
"]",
"edge",
"[",
"'data'",
"]",
"[",
"'id'",
"]",
"=",
"self",
".",
"_get_new_id",
"(",
")",
"edge",
"[",
"'data'",
"]",
"[",
"'uuid_list'",
"]",
"=",
"ze",
"[",
"1",
"]",
"self",
".",
"_edges",
".",
"append",
"(",
"edge",
")"
]
| Group all edges that are topologically identical.
This means that (i, source, target, polarity) are the same, then sets
edges on parent (i.e. - group) nodes to 'Virtual' and creates a new
edge to represent all of them. | [
"Group",
"all",
"edges",
"that",
"are",
"topologically",
"identical",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L398-L442 | train |
sorgerlab/indra | indra/sources/trrust/processor.py | make_stmt | def make_stmt(stmt_cls, tf_agent, target_agent, pmid):
"""Return a Statement based on its type, agents, and PMID."""
ev = Evidence(source_api='trrust', pmid=pmid)
return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent),
evidence=[ev]) | python | def make_stmt(stmt_cls, tf_agent, target_agent, pmid):
"""Return a Statement based on its type, agents, and PMID."""
ev = Evidence(source_api='trrust', pmid=pmid)
return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent),
evidence=[ev]) | [
"def",
"make_stmt",
"(",
"stmt_cls",
",",
"tf_agent",
",",
"target_agent",
",",
"pmid",
")",
":",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'trrust'",
",",
"pmid",
"=",
"pmid",
")",
"return",
"stmt_cls",
"(",
"deepcopy",
"(",
"tf_agent",
")",
",",
"deepcopy",
"(",
"target_agent",
")",
",",
"evidence",
"=",
"[",
"ev",
"]",
")"
]
| Return a Statement based on its type, agents, and PMID. | [
"Return",
"a",
"Statement",
"based",
"on",
"its",
"type",
"agents",
"and",
"PMID",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trrust/processor.py#L37-L41 | train |
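A short usage sketch for make_stmt; the agents and PMID below are illustrative placeholders, and only classes defined in indra.statements are assumed.

from indra.statements import Agent, IncreaseAmount

tf = Agent('ATF3')          # hypothetical transcription factor
target = Agent('CDKN1A')    # hypothetical target gene
stmt = make_stmt(IncreaseAmount, tf, target, pmid='12345678')  # placeholder PMID
# stmt.evidence[0].source_api == 'trrust'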
sorgerlab/indra | indra/sources/trrust/processor.py | get_grounded_agent | def get_grounded_agent(gene_name):
"""Return a grounded Agent based on an HGNC symbol."""
db_refs = {'TEXT': gene_name}
if gene_name in hgnc_map:
gene_name = hgnc_map[gene_name]
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
db_refs['HGNC'] = hgnc_id
up_id = hgnc_client.get_uniprot_id(hgnc_id)
if up_id:
db_refs['UP'] = up_id
agent = Agent(gene_name, db_refs=db_refs)
return agent | python | def get_grounded_agent(gene_name):
"""Return a grounded Agent based on an HGNC symbol."""
db_refs = {'TEXT': gene_name}
if gene_name in hgnc_map:
gene_name = hgnc_map[gene_name]
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
db_refs['HGNC'] = hgnc_id
up_id = hgnc_client.get_uniprot_id(hgnc_id)
if up_id:
db_refs['UP'] = up_id
agent = Agent(gene_name, db_refs=db_refs)
return agent | [
"def",
"get_grounded_agent",
"(",
"gene_name",
")",
":",
"db_refs",
"=",
"{",
"'TEXT'",
":",
"gene_name",
"}",
"if",
"gene_name",
"in",
"hgnc_map",
":",
"gene_name",
"=",
"hgnc_map",
"[",
"gene_name",
"]",
"hgnc_id",
"=",
"hgnc_client",
".",
"get_hgnc_id",
"(",
"gene_name",
")",
"if",
"hgnc_id",
":",
"db_refs",
"[",
"'HGNC'",
"]",
"=",
"hgnc_id",
"up_id",
"=",
"hgnc_client",
".",
"get_uniprot_id",
"(",
"hgnc_id",
")",
"if",
"up_id",
":",
"db_refs",
"[",
"'UP'",
"]",
"=",
"up_id",
"agent",
"=",
"Agent",
"(",
"gene_name",
",",
"db_refs",
"=",
"db_refs",
")",
"return",
"agent"
]
| Return a grounded Agent based on an HGNC symbol. | [
"Return",
"a",
"grounded",
"Agent",
"based",
"on",
"an",
"HGNC",
"symbol",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trrust/processor.py#L44-L56 | train |
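A usage sketch for get_grounded_agent; it assumes INDRA's HGNC/UniProt resource files are available so the lookups succeed.

agent = get_grounded_agent('TP53')
# Expected grounding shape (identifier values are illustrative):
# agent.db_refs -> {'TEXT': 'TP53', 'HGNC': '...', 'UP': '...'}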
sorgerlab/indra | indra/sources/trrust/processor.py | TrrustProcessor.extract_statements | def extract_statements(self):
"""Process the table to extract Statements."""
for _, (tf, target, effect, refs) in self.df.iterrows():
tf_agent = get_grounded_agent(tf)
target_agent = get_grounded_agent(target)
if effect == 'Activation':
stmt_cls = IncreaseAmount
elif effect == 'Repression':
stmt_cls = DecreaseAmount
else:
continue
pmids = refs.split(';')
for pmid in pmids:
stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid)
self.statements.append(stmt) | python | def extract_statements(self):
"""Process the table to extract Statements."""
for _, (tf, target, effect, refs) in self.df.iterrows():
tf_agent = get_grounded_agent(tf)
target_agent = get_grounded_agent(target)
if effect == 'Activation':
stmt_cls = IncreaseAmount
elif effect == 'Repression':
stmt_cls = DecreaseAmount
else:
continue
pmids = refs.split(';')
for pmid in pmids:
stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid)
self.statements.append(stmt) | [
"def",
"extract_statements",
"(",
"self",
")",
":",
"for",
"_",
",",
"(",
"tf",
",",
"target",
",",
"effect",
",",
"refs",
")",
"in",
"self",
".",
"df",
".",
"iterrows",
"(",
")",
":",
"tf_agent",
"=",
"get_grounded_agent",
"(",
"tf",
")",
"target_agent",
"=",
"get_grounded_agent",
"(",
"target",
")",
"if",
"effect",
"==",
"'Activation'",
":",
"stmt_cls",
"=",
"IncreaseAmount",
"elif",
"effect",
"==",
"'Repression'",
":",
"stmt_cls",
"=",
"DecreaseAmount",
"else",
":",
"continue",
"pmids",
"=",
"refs",
".",
"split",
"(",
"';'",
")",
"for",
"pmid",
"in",
"pmids",
":",
"stmt",
"=",
"make_stmt",
"(",
"stmt_cls",
",",
"tf_agent",
",",
"target_agent",
",",
"pmid",
")",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")"
]
| Process the table to extract Statements. | [
"Process",
"the",
"table",
"to",
"extract",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trrust/processor.py#L20-L34 | train |
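To illustrate the table shape extract_statements iterates over, a hypothetical frame with the four expected columns (TF, target, effect, semicolon-separated PMIDs); all values are made up.

import pandas as pd

df = pd.DataFrame([
    ['ATF3', 'CDKN1A', 'Activation', '12345678;23456789'],
    ['ATF3', 'TP53', 'Repression', '34567890'],
])
# A processor holding this frame as self.df would emit one IncreaseAmount or
# DecreaseAmount Statement per PMID when extract_statements() is called.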
sorgerlab/indra | indra/tools/machine/machine.py | process_paper | def process_paper(model_name, pmid):
"""Process a paper with the given pubmed identifier
Parameters
----------
model_name : str
The directory for the INDRA machine
pmid : str
The PMID to process.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
txt_format : str
A string representing the format of the text
"""
json_directory = os.path.join(model_name, 'jsons')
json_path = os.path.join(json_directory, 'PMID%s.json' % pmid)
if pmid.startswith('api') or pmid.startswith('PMID'):
logger.warning('Invalid PMID: %s' % pmid)
# If the paper has been read, use the json output file
if os.path.exists(json_path):
rp = reach.process_json_file(json_path, citation=pmid)
txt_format = 'existing_json'
# If the paper has not been read, download the text and read
else:
try:
txt, txt_format = get_full_text(pmid, 'pmid')
except Exception:
return None, None
if txt_format == 'pmc_oa_xml':
rp = reach.process_nxml_str(txt, citation=pmid, offline=True,
output_fname=json_path)
elif txt_format == 'elsevier_xml':
# Extract the raw text from the Elsevier XML
txt = elsevier_client.extract_text(txt)
rp = reach.process_text(txt, citation=pmid, offline=True,
output_fname=json_path)
elif txt_format == 'abstract':
rp = reach.process_text(txt, citation=pmid, offline=True,
output_fname=json_path)
else:
rp = None
if rp is not None:
check_pmids(rp.statements)
return rp, txt_format | python | def process_paper(model_name, pmid):
"""Process a paper with the given pubmed identifier
Parameters
----------
model_name : str
The directory for the INDRA machine
pmid : str
The PMID to process.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
txt_format : str
A string representing the format of the text
"""
json_directory = os.path.join(model_name, 'jsons')
json_path = os.path.join(json_directory, 'PMID%s.json' % pmid)
if pmid.startswith('api') or pmid.startswith('PMID'):
logger.warning('Invalid PMID: %s' % pmid)
# If the paper has been read, use the json output file
if os.path.exists(json_path):
rp = reach.process_json_file(json_path, citation=pmid)
txt_format = 'existing_json'
# If the paper has not been read, download the text and read
else:
try:
txt, txt_format = get_full_text(pmid, 'pmid')
except Exception:
return None, None
if txt_format == 'pmc_oa_xml':
rp = reach.process_nxml_str(txt, citation=pmid, offline=True,
output_fname=json_path)
elif txt_format == 'elsevier_xml':
# Extract the raw text from the Elsevier XML
txt = elsevier_client.extract_text(txt)
rp = reach.process_text(txt, citation=pmid, offline=True,
output_fname=json_path)
elif txt_format == 'abstract':
rp = reach.process_text(txt, citation=pmid, offline=True,
output_fname=json_path)
else:
rp = None
if rp is not None:
check_pmids(rp.statements)
return rp, txt_format | [
"def",
"process_paper",
"(",
"model_name",
",",
"pmid",
")",
":",
"json_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"model_name",
",",
"'jsons'",
")",
"json_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"json_directory",
",",
"'PMID%s.json'",
"%",
"pmid",
")",
"if",
"pmid",
".",
"startswith",
"(",
"'api'",
")",
"or",
"pmid",
".",
"startswith",
"(",
"'PMID'",
")",
":",
"logger",
".",
"warning",
"(",
"'Invalid PMID: %s'",
"%",
"pmid",
")",
"# If the paper has been read, use the json output file",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"json_path",
")",
":",
"rp",
"=",
"reach",
".",
"process_json_file",
"(",
"json_path",
",",
"citation",
"=",
"pmid",
")",
"txt_format",
"=",
"'existing_json'",
"# If the paper has not been read, download the text and read",
"else",
":",
"try",
":",
"txt",
",",
"txt_format",
"=",
"get_full_text",
"(",
"pmid",
",",
"'pmid'",
")",
"except",
"Exception",
":",
"return",
"None",
",",
"None",
"if",
"txt_format",
"==",
"'pmc_oa_xml'",
":",
"rp",
"=",
"reach",
".",
"process_nxml_str",
"(",
"txt",
",",
"citation",
"=",
"pmid",
",",
"offline",
"=",
"True",
",",
"output_fname",
"=",
"json_path",
")",
"elif",
"txt_format",
"==",
"'elsevier_xml'",
":",
"# Extract the raw text from the Elsevier XML",
"txt",
"=",
"elsevier_client",
".",
"extract_text",
"(",
"txt",
")",
"rp",
"=",
"reach",
".",
"process_text",
"(",
"txt",
",",
"citation",
"=",
"pmid",
",",
"offline",
"=",
"True",
",",
"output_fname",
"=",
"json_path",
")",
"elif",
"txt_format",
"==",
"'abstract'",
":",
"rp",
"=",
"reach",
".",
"process_text",
"(",
"txt",
",",
"citation",
"=",
"pmid",
",",
"offline",
"=",
"True",
",",
"output_fname",
"=",
"json_path",
")",
"else",
":",
"rp",
"=",
"None",
"if",
"rp",
"is",
"not",
"None",
":",
"check_pmids",
"(",
"rp",
".",
"statements",
")",
"return",
"rp",
",",
"txt_format"
]
| Process a paper with the given pubmed identifier
Parameters
----------
model_name : str
The directory for the INDRA machine
pmid : str
The PMID to process.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
txt_format : str
A string representing the format of the text | [
"Process",
"a",
"paper",
"with",
"the",
"given",
"pubmed",
"identifier"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/machine/machine.py#L91-L140 | train |
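A hypothetical call to process_paper; it assumes a machine directory with a jsons/ subfolder and an offline REACH setup, and the PMID is a placeholder.

rp, txt_format = process_paper('my_machine', '27214725')  # placeholder inputs
if rp is not None:
    print(len(rp.statements), txt_format)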
sorgerlab/indra | indra/tools/machine/machine.py | process_paper_helper | def process_paper_helper(model_name, pmid, start_time_local):
"""Wraps processing a paper by either a local or remote service
and catches any uncaught exceptions"""
try:
if not aws_available:
rp, txt_format = process_paper(model_name, pmid)
else:
rp, txt_format = process_paper_aws(pmid, start_time_local)
except:
logger.exception('uncaught exception while processing %s', pmid)
return None, None
return rp, txt_format | python | def process_paper_helper(model_name, pmid, start_time_local):
"""Wraps processing a paper by either a local or remote service
and catches any uncaught exceptions"""
try:
if not aws_available:
rp, txt_format = process_paper(model_name, pmid)
else:
rp, txt_format = process_paper_aws(pmid, start_time_local)
except:
logger.exception('uncaught exception while processing %s', pmid)
return None, None
return rp, txt_format | [
"def",
"process_paper_helper",
"(",
"model_name",
",",
"pmid",
",",
"start_time_local",
")",
":",
"try",
":",
"if",
"not",
"aws_available",
":",
"rp",
",",
"txt_format",
"=",
"process_paper",
"(",
"model_name",
",",
"pmid",
")",
"else",
":",
"rp",
",",
"txt_format",
"=",
"process_paper_aws",
"(",
"pmid",
",",
"start_time_local",
")",
"except",
":",
"logger",
".",
"exception",
"(",
"'uncaught exception while processing %s'",
",",
"pmid",
")",
"return",
"None",
",",
"None",
"return",
"rp",
",",
"txt_format"
]
| Wraps processing a paper by either a local or remote service
and catches any uncaught exceptions | [
"Wraps",
"processing",
"a",
"paper",
"by",
"either",
"a",
"local",
"or",
"remote",
"service",
"and",
"caches",
"any",
"uncaught",
"exceptions"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/machine/machine.py#L196-L208 | train |
sorgerlab/indra | indra/sources/tas/api.py | _load_data | def _load_data():
"""Load the data from the csv in data.
The "gene_id" is the Entrez gene id, and the "approved_symbol" is the
standard gene symbol. The "hms_id" is the LINCS ID for the drug.
Returns
-------
data : list[dict]
A list of dicts of row values keyed by the column headers extracted from
the csv file, described above.
"""
# Get the csv reader object.
csv_path = path.join(HERE, path.pardir, path.pardir, 'resources',
DATAFILE_NAME)
data_iter = list(read_unicode_csv(csv_path))
# Get the headers.
headers = data_iter[0]
# For some reason this heading is oddly formatted and inconsistent with the
# rest, or with the usual key-style for dicts.
headers[headers.index('Approved.Symbol')] = 'approved_symbol'
return [{header: val for header, val in zip(headers, line)}
for line in data_iter[1:]] | python | def _load_data():
"""Load the data from the csv in data.
The "gene_id" is the Entrez gene id, and the "approved_symbol" is the
standard gene symbol. The "hms_id" is the LINCS ID for the drug.
Returns
-------
data : list[dict]
A list of dicts of row values keyed by the column headers extracted from
the csv file, described above.
"""
# Get the csv reader object.
csv_path = path.join(HERE, path.pardir, path.pardir, 'resources',
DATAFILE_NAME)
data_iter = list(read_unicode_csv(csv_path))
# Get the headers.
headers = data_iter[0]
# For some reason this heading is oddly formatted and inconsistent with the
# rest, or with the usual key-style for dicts.
headers[headers.index('Approved.Symbol')] = 'approved_symbol'
return [{header: val for header, val in zip(headers, line)}
for line in data_iter[1:]] | [
"def",
"_load_data",
"(",
")",
":",
"# Get the cwv reader object.",
"csv_path",
"=",
"path",
".",
"join",
"(",
"HERE",
",",
"path",
".",
"pardir",
",",
"path",
".",
"pardir",
",",
"'resources'",
",",
"DATAFILE_NAME",
")",
"data_iter",
"=",
"list",
"(",
"read_unicode_csv",
"(",
"csv_path",
")",
")",
"# Get the headers.",
"headers",
"=",
"data_iter",
"[",
"0",
"]",
"# For some reason this heading is oddly formatted and inconsistent with the",
"# rest, or with the usual key-style for dicts.",
"headers",
"[",
"headers",
".",
"index",
"(",
"'Approved.Symbol'",
")",
"]",
"=",
"'approved_symbol'",
"return",
"[",
"{",
"header",
":",
"val",
"for",
"header",
",",
"val",
"in",
"zip",
"(",
"headers",
",",
"line",
")",
"}",
"for",
"line",
"in",
"data_iter",
"[",
"1",
":",
"]",
"]"
]
| Load the data from the csv in data.
The "gene_id" is the Entrez gene id, and the "approved_symbol" is the
standard gene symbol. The "hms_id" is the LINCS ID for the drug.
Returns
-------
data : list[dict]
A list of dicts of row values keyed by the column headers extracted from
the csv file, described above. | [
"Load",
"the",
"data",
"from",
"the",
"csv",
"in",
"data",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tas/api.py#L15-L39 | train |
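A small usage sketch for _load_data; the keys referenced come from the docstring's description of the csv header.

rows = _load_data()
first = rows[0]
# e.g. first['approved_symbol'], first['gene_id'], first['hms_id']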
sorgerlab/indra | indra/sources/eidos/cli.py | run_eidos | def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
# Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
subprocess.call(cmd) | python | def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
# Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
subprocess.call(cmd) | [
"def",
"run_eidos",
"(",
"endpoint",
",",
"*",
"args",
")",
":",
"# Make the full path to the class that should be used",
"call_class",
"=",
"'%s.%s'",
"%",
"(",
"eidos_package",
",",
"endpoint",
")",
"# Assemble the command line command and append optonal args",
"cmd",
"=",
"[",
"'java'",
",",
"'-Xmx12G'",
",",
"'-cp'",
",",
"eip",
",",
"call_class",
"]",
"+",
"list",
"(",
"args",
")",
"logger",
".",
"info",
"(",
"'Running Eidos with command \"%s\"'",
"%",
"(",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
")",
"subprocess",
".",
"call",
"(",
"cmd",
")"
]
| Run a given endpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run. | [
"Run",
"a",
"given",
"enpoint",
"of",
"Eidos",
"through",
"the",
"command",
"line",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/cli.py#L20-L38 | train |
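A hypothetical invocation of run_eidos mirroring the endpoint named in its docstring; it assumes Eidos and the eip classpath are configured, and the folders are placeholders.

run_eidos('apps.ExtractFromDirectory', '/tmp/eidos_input', '/tmp/eidos_output')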
sorgerlab/indra | indra/sources/eidos/cli.py | extract_from_directory | def extract_from_directory(path_in, path_out):
"""Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
"""
path_in = os.path.realpath(os.path.expanduser(path_in))
path_out = os.path.realpath(os.path.expanduser(path_out))
logger.info('Running Eidos on input folder %s' % path_in)
run_eidos('apps.ExtractFromDirectory', path_in, path_out) | python | def extract_from_directory(path_in, path_out):
"""Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
"""
path_in = os.path.realpath(os.path.expanduser(path_in))
path_out = os.path.realpath(os.path.expanduser(path_out))
logger.info('Running Eidos on input folder %s' % path_in)
run_eidos('apps.ExtractFromDirectory', path_in, path_out) | [
"def",
"extract_from_directory",
"(",
"path_in",
",",
"path_out",
")",
":",
"path_in",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path_in",
")",
")",
"path_out",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path_out",
")",
")",
"logger",
".",
"info",
"(",
"'Running Eidos on input folder %s'",
"%",
"path_in",
")",
"run_eidos",
"(",
"'apps.ExtractFromDirectory'",
",",
"path_in",
",",
"path_out",
")"
]
| Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files | [
"Run",
"Eidos",
"on",
"a",
"set",
"of",
"text",
"files",
"in",
"a",
"folder",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/cli.py#L41-L58 | train |
sorgerlab/indra | indra/sources/eidos/cli.py | extract_and_process | def extract_and_process(path_in, path_out):
"""Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements
"""
path_in = os.path.realpath(os.path.expanduser(path_in))
path_out = os.path.realpath(os.path.expanduser(path_out))
extract_from_directory(path_in, path_out)
jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
logger.info('Found %d JSON-LD files to process in %s' %
(len(jsons), path_out))
stmts = []
for json in jsons:
ep = process_json_file(json)
if ep:
stmts += ep.statements
return stmts | python | def extract_and_process(path_in, path_out):
"""Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements
"""
path_in = os.path.realpath(os.path.expanduser(path_in))
path_out = os.path.realpath(os.path.expanduser(path_out))
extract_from_directory(path_in, path_out)
jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
logger.info('Found %d JSON-LD files to process in %s' %
(len(jsons), path_out))
stmts = []
for json in jsons:
ep = process_json_file(json)
if ep:
stmts += ep.statements
return stmts | [
"def",
"extract_and_process",
"(",
"path_in",
",",
"path_out",
")",
":",
"path_in",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path_in",
")",
")",
"path_out",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path_out",
")",
")",
"extract_from_directory",
"(",
"path_in",
",",
"path_out",
")",
"jsons",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path_out",
",",
"'*.jsonld'",
")",
")",
"logger",
".",
"info",
"(",
"'Found %d JSON-LD files to process in %s'",
"%",
"(",
"len",
"(",
"jsons",
")",
",",
"path_out",
")",
")",
"stmts",
"=",
"[",
"]",
"for",
"json",
"in",
"jsons",
":",
"ep",
"=",
"process_json_file",
"(",
"json",
")",
"if",
"ep",
":",
"stmts",
"+=",
"ep",
".",
"statements",
"return",
"stmts"
]
| Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements | [
"Run",
"Eidos",
"on",
"a",
"set",
"of",
"text",
"files",
"and",
"process",
"output",
"with",
"INDRA",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/cli.py#L61-L91 | train |
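A hypothetical end-to-end call to extract_and_process; the folder paths are placeholders and Eidos must be set up as above.

stmts = extract_and_process('~/eidos_input', '~/eidos_output')
print('Extracted %d INDRA Statements' % len(stmts))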
sorgerlab/indra | indra/sources/indra_db_rest/api.py | get_statements | def get_statements(subject=None, object=None, agents=None, stmt_type=None,
use_exact_type=False, persist=True, timeout=None,
simple_response=False, ev_limit=10, best_first=True, tries=2,
max_stmts=None):
"""Get a processor for the INDRA DB web API matching given agents and type.
There are two types of responses available. You can just get a list of
INDRA Statements, or you can get an IndraDBRestProcessor object, which allows
Statements to be loaded in a background thread, providing a sample of the
best* content available promptly in the sample_statements attribute, and
populates the statements attribute when the paged load is complete.
The latter should be used in all new code, and where convenient the prior
should be converted to use the processor, as this option may be removed in
the future.
* In the sense of having the most supporting evidence.
Parameters
----------
subject/object : str
Optionally specify the subject and/or object of the statements that
you wish to get from the database. By default, the namespace is assumed
to be HGNC gene names, however you may specify another namespace by
including `@<namespace>` at the end of the name string. For example, if
you want to specify an agent by chebi, you could use `CHEBI:6801@CHEBI`,
or if you wanted to use the HGNC id, you could use `6871@HGNC`.
agents : list[str]
A list of agents, specified in the same manner as subject and object,
but without specifying their grammatical position.
stmt_type : str
Specify the types of interactions you are interested in, as indicated
by the sub-classes of INDRA's Statements. This argument is *not* case
sensitive. If the statement class given has sub-classes
(e.g. RegulateAmount has IncreaseAmount and DecreaseAmount), then both
the class itself, and its subclasses, will be queried, by default. If
you do not want this behavior, set use_exact_type=True. Note that if
max_stmts is set, it is possible only the exact statement type will
be returned, as this is the first searched. The processor then cycles
through the types, getting a page of results for each type and adding it
to the quota, until the max number of statements is reached.
use_exact_type : bool
If stmt_type is given, and you only want to search for that specific
statement type, set this to True. Default is False.
persist : bool
Default is True. When False, if a query comes back limited (not all
results returned), just give up and pass along what was returned.
Otherwise, make further queries to get the rest of the data (which may
take some time).
timeout : positive int or None
If an int, block until the work is done and statements are retrieved, or
until the timeout has expired, in which case the results so far will be
returned in the response object, and further results will be added in
a separate thread as they become available. If simple_response is True,
all statements available will be returned. Otherwise (if None), block
indefinitely until all statements are retrieved. Default is None.
simple_response : bool
If True, a simple list of statements is returned (thus block should also
be True). If block is False, only the original sample will be returned
(as though persist was False), until the statements are done loading, in
which case the rest should appear in the list. This behavior is not
encouraged. Default is False (which breaks backwards compatibility with
usage of INDRA versions from before 1/22/2019). WE ENCOURAGE ALL NEW
USE-CASES TO USE THE PROCESSOR, AS THIS FEATURE MAY BE REMOVED AT A
LATER DATE.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 10.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 2.
max_stmts : int or None
Select the maximum number of statements to return. When set less than
1000 the effect is much the same as setting persist to false, and will
guarantee a faster response. Default is None.
Returns
-------
processor : :py:class:`IndraDBRestProcessor`
An instance of the IndraDBRestProcessor, which has an attribute
`statements` which will be populated when the query/queries are done.
This is the default behavior, and is encouraged in all future cases,
however a simple list of statements may be returned using the
`simple_response` option described above.
"""
processor = IndraDBRestProcessor(subject, object, agents, stmt_type,
use_exact_type, persist, timeout,
ev_limit, best_first, tries, max_stmts)
# Format the result appropriately.
if simple_response:
ret = processor.statements
else:
ret = processor
return ret | python | def get_statements(subject=None, object=None, agents=None, stmt_type=None,
use_exact_type=False, persist=True, timeout=None,
simple_response=False, ev_limit=10, best_first=True, tries=2,
max_stmts=None):
"""Get a processor for the INDRA DB web API matching given agents and type.
There are two types of responses available. You can just get a list of
INDRA Statements, or you can get an IndraDBRestProcessor object, which allows
Statements to be loaded in a background thread, providing a sample of the
best* content available promptly in the sample_statements attribute, and
populates the statements attribute when the paged load is complete.
The latter should be used in all new code, and where convenient the prior
should be converted to use the processor, as this option may be removed in
the future.
* In the sense of having the most supporting evidence.
Parameters
----------
subject/object : str
Optionally specify the subject and/or object of the statements that
you wish to get from the database. By default, the namespace is assumed
to be HGNC gene names, however you may specify another namespace by
including `@<namespace>` at the end of the name string. For example, if
you want to specify an agent by chebi, you could use `CHEBI:6801@CHEBI`,
or if you wanted to use the HGNC id, you could use `6871@HGNC`.
agents : list[str]
A list of agents, specified in the same manner as subject and object,
but without specifying their grammatical position.
stmt_type : str
Specify the types of interactions you are interested in, as indicated
by the sub-classes of INDRA's Statements. This argument is *not* case
sensitive. If the statement class given has sub-classes
(e.g. RegulateAmount has IncreaseAmount and DecreaseAmount), then both
the class itself, and its subclasses, will be queried, by default. If
you do not want this behavior, set use_exact_type=True. Note that if
max_stmts is set, it is possible only the exact statement type will
be returned, as this is the first searched. The processor then cycles
through the types, getting a page of results for each type and adding it
to the quota, until the max number of statements is reached.
use_exact_type : bool
If stmt_type is given, and you only want to search for that specific
statement type, set this to True. Default is False.
persist : bool
Default is True. When False, if a query comes back limited (not all
results returned), just give up and pass along what was returned.
Otherwise, make further queries to get the rest of the data (which may
take some time).
timeout : positive int or None
If an int, block until the work is done and statements are retrieved, or
until the timeout has expired, in which case the results so far will be
returned in the response object, and further results will be added in
a separate thread as they become available. If simple_response is True,
all statements available will be returned. Otherwise (if None), block
indefinitely until all statements are retrieved. Default is None.
simple_response : bool
If True, a simple list of statements is returned (thus block should also
be True). If block is False, only the original sample will be returned
(as though persist was False), until the statements are done loading, in
which case the rest should appear in the list. This behavior is not
encouraged. Default is False (which breaks backwards compatibility with
usage of INDRA versions from before 1/22/2019). WE ENCOURAGE ALL NEW
USE-CASES TO USE THE PROCESSOR, AS THIS FEATURE MAY BE REMOVED AT A
LATER DATE.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 10.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 2.
max_stmts : int or None
Select the maximum number of statements to return. When set less than
1000 the effect is much the same as setting persist to false, and will
guarantee a faster response. Default is None.
Returns
-------
processor : :py:class:`IndraDBRestProcessor`
An instance of the IndraDBRestProcessor, which has an attribute
`statements` which will be populated when the query/queries are done.
This is the default behavior, and is encouraged in all future cases,
however a simple list of statements may be returned using the
`simple_response` option described above.
"""
processor = IndraDBRestProcessor(subject, object, agents, stmt_type,
use_exact_type, persist, timeout,
ev_limit, best_first, tries, max_stmts)
# Format the result appropriately.
if simple_response:
ret = processor.statements
else:
ret = processor
return ret | [
"def",
"get_statements",
"(",
"subject",
"=",
"None",
",",
"object",
"=",
"None",
",",
"agents",
"=",
"None",
",",
"stmt_type",
"=",
"None",
",",
"use_exact_type",
"=",
"False",
",",
"persist",
"=",
"True",
",",
"timeout",
"=",
"None",
",",
"simple_response",
"=",
"False",
",",
"ev_limit",
"=",
"10",
",",
"best_first",
"=",
"True",
",",
"tries",
"=",
"2",
",",
"max_stmts",
"=",
"None",
")",
":",
"processor",
"=",
"IndraDBRestProcessor",
"(",
"subject",
",",
"object",
",",
"agents",
",",
"stmt_type",
",",
"use_exact_type",
",",
"persist",
",",
"timeout",
",",
"ev_limit",
",",
"best_first",
",",
"tries",
",",
"max_stmts",
")",
"# Format the result appropriately.",
"if",
"simple_response",
":",
"ret",
"=",
"processor",
".",
"statements",
"else",
":",
"ret",
"=",
"processor",
"return",
"ret"
]
| Get a processor for the INDRA DB web API matching given agents and type.
There are two types of responses available. You can just get a list of
INDRA Statements, or you can get an IndraDBRestProcessor object, which allows
Statements to be loaded in a background thread, providing a sample of the
best* content available promptly in the sample_statements attribute, and
populates the statements attribute when the paged load is complete.
The latter should be used in all new code, and where convenient the prior
should be converted to use the processor, as this option may be removed in
the future.
* In the sense of having the most supporting evidence.
Parameters
----------
subject/object : str
Optionally specify the subject and/or object of the statements that
you wish to get from the database. By default, the namespace is assumed
to be HGNC gene names, however you may specify another namespace by
including `@<namespace>` at the end of the name string. For example, if
you want to specify an agent by chebi, you could use `CHEBI:6801@CHEBI`,
or if you wanted to use the HGNC id, you could use `6871@HGNC`.
agents : list[str]
A list of agents, specified in the same manner as subject and object,
but without specifying their grammatical position.
stmt_type : str
Specify the types of interactions you are interested in, as indicated
by the sub-classes of INDRA's Statements. This argument is *not* case
sensitive. If the statement class given has sub-classes
(e.g. RegulateAmount has IncreaseAmount and DecreaseAmount), then both
the class itself, and its subclasses, will be queried, by default. If
you do not want this behavior, set use_exact_type=True. Note that if
max_stmts is set, it is possible only the exact statement type will
be returned, as this is the first searched. The processor then cycles
through the types, getting a page of results for each type and adding it
to the quota, until the max number of statements is reached.
use_exact_type : bool
If stmt_type is given, and you only want to search for that specific
statement type, set this to True. Default is False.
persist : bool
Default is True. When False, if a query comes back limited (not all
results returned), just give up and pass along what was returned.
Otherwise, make further queries to get the rest of the data (which may
take some time).
timeout : positive int or None
If an int, block until the work is done and statements are retrieved, or
until the timeout has expired, in which case the results so far will be
returned in the response object, and further results will be added in
a separate thread as they become available. If simple_response is True,
all statements available will be returned. Otherwise (if None), block
indefinitely until all statements are retrieved. Default is None.
simple_response : bool
If True, a simple list of statements is returned (thus block should also
be True). If block is False, only the original sample will be returned
(as though persist was False), until the statements are done loading, in
which case the rest should appear in the list. This behavior is not
encouraged. Default is False (which breaks backwards compatibility with
usage of INDRA versions from before 1/22/2019). WE ENCOURAGE ALL NEW
USE-CASES TO USE THE PROCESSOR, AS THIS FEATURE MAY BE REMOVED AT A
LATER DATE.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 10.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 2.
max_stmts : int or None
Select the maximum number of statements to return. When set less than
1000 the effect is much the same as setting persist to false, and will
guarantee a faster response. Default is None.
Returns
-------
processor : :py:class:`IndraDBRestProcessor`
An instance of the IndraDBRestProcessor, which has an attribute
`statements` which will be populated when the query/queries are done.
This is the default behavior, and is encouraged in all future cases,
however a simple list of statements may be returned using the
`simple_response` option described above. | [
"Get",
"a",
"processor",
"for",
"the",
"INDRA",
"DB",
"web",
"API",
"matching",
"given",
"agents",
"and",
"type",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/api.py#L15-L116 | train |
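A usage sketch for get_statements following its own docstring; the import path is assumed from this module's location and the agent names are illustrative.

from indra.sources.indra_db_rest import get_statements

p = get_statements(subject='MAP2K1', object='MAPK1',
                   stmt_type='Phosphorylation', ev_limit=5)
stmts = p.statements  # populated once the paged load completes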
sorgerlab/indra | indra/sources/indra_db_rest/api.py | get_statements_by_hash | def get_statements_by_hash(hash_list, ev_limit=100, best_first=True, tries=2):
"""Get fully formed statements from a list of hashes.
Parameters
----------
hash_list : list[int or str]
A list of statement hashes.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 100.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can
also help gracefully handle an unreliable connection, if you're
willing to wait. Default is 2.
"""
if not isinstance(hash_list, list):
raise ValueError("The `hash_list` input is a list, not %s."
% type(hash_list))
if not hash_list:
return []
if isinstance(hash_list[0], str):
hash_list = [int(h) for h in hash_list]
if not all([isinstance(h, int) for h in hash_list]):
raise ValueError("Hashes must be ints or strings that can be "
"converted into ints.")
resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit,
data={'hashes': hash_list},
best_first=best_first, tries=tries)
return stmts_from_json(resp.json()['statements'].values()) | python | def get_statements_by_hash(hash_list, ev_limit=100, best_first=True, tries=2):
"""Get fully formed statements from a list of hashes.
Parameters
----------
hash_list : list[int or str]
A list of statement hashes.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 100.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can
also help gracefully handle an unreliable connection, if you're
willing to wait. Default is 2.
"""
if not isinstance(hash_list, list):
raise ValueError("The `hash_list` input is a list, not %s."
% type(hash_list))
if not hash_list:
return []
if isinstance(hash_list[0], str):
hash_list = [int(h) for h in hash_list]
if not all([isinstance(h, int) for h in hash_list]):
raise ValueError("Hashes must be ints or strings that can be "
"converted into ints.")
resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit,
data={'hashes': hash_list},
best_first=best_first, tries=tries)
return stmts_from_json(resp.json()['statements'].values()) | [
"def",
"get_statements_by_hash",
"(",
"hash_list",
",",
"ev_limit",
"=",
"100",
",",
"best_first",
"=",
"True",
",",
"tries",
"=",
"2",
")",
":",
"if",
"not",
"isinstance",
"(",
"hash_list",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"The `hash_list` input is a list, not %s.\"",
"%",
"type",
"(",
"hash_list",
")",
")",
"if",
"not",
"hash_list",
":",
"return",
"[",
"]",
"if",
"isinstance",
"(",
"hash_list",
"[",
"0",
"]",
",",
"str",
")",
":",
"hash_list",
"=",
"[",
"int",
"(",
"h",
")",
"for",
"h",
"in",
"hash_list",
"]",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"h",
",",
"int",
")",
"for",
"h",
"in",
"hash_list",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Hashes must be ints or strings that can be \"",
"\"converted into ints.\"",
")",
"resp",
"=",
"submit_statement_request",
"(",
"'post'",
",",
"'from_hashes'",
",",
"ev_limit",
"=",
"ev_limit",
",",
"data",
"=",
"{",
"'hashes'",
":",
"hash_list",
"}",
",",
"best_first",
"=",
"best_first",
",",
"tries",
"=",
"tries",
")",
"return",
"stmts_from_json",
"(",
"resp",
".",
"json",
"(",
")",
"[",
"'statements'",
"]",
".",
"values",
"(",
")",
")"
]
| Get fully formed statements from a list of hashes.
Parameters
----------
hash_list : list[int or str]
A list of statement hashes.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 100.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can
also help gracefully handle an unreliable connection, if you're
willing to wait. Default is 2. | [
"Get",
"fully",
"formed",
"statements",
"from",
"a",
"list",
"of",
"hashes",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/api.py#L120-L154 | train |
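A minimal usage sketch for get_statements_by_hash as documented in the record above; the import path is inferred from the record's file path, the hash values are placeholders, and network access to the INDRA DB REST service is assumed.
from indra.sources.indra_db_rest.api import get_statements_by_hash

# Placeholder statement hashes; real hashes come from a prior statement query.
hashes = [-30170002639834472, 17766473745919111]
# Retrieve fully assembled Statements, capping evidence per Statement at 5.
stmts = get_statements_by_hash(hashes, ev_limit=5)
for stmt in stmts:
    print(stmt)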
sorgerlab/indra | indra/sources/indra_db_rest/api.py | get_statements_for_paper | def get_statements_for_paper(ids, ev_limit=10, best_first=True, tries=2,
max_stmts=None):
"""Get the set of raw Statements extracted from a paper given by the id.
Parameters
----------
ids : list[(<id type>, <id value>)]
A list of tuples with ids and their type. The type can be any one of
'pmid', 'pmcid', 'doi', 'pii', 'manuscript id', or 'trid', which is the
primary key id of the text references in the database.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 10.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 2.
max_stmts : int or None
Select a maximum number of statements to be returned. Default is None.
Returns
-------
stmts : list[:py:class:`indra.statements.Statement`]
A list of INDRA Statement instances.
"""
id_l = [{'id': id_val, 'type': id_type} for id_type, id_val in ids]
resp = submit_statement_request('post', 'from_papers', data={'ids': id_l},
ev_limit=ev_limit, best_first=best_first,
tries=tries, max_stmts=max_stmts)
stmts_json = resp.json()['statements']
return stmts_from_json(stmts_json.values()) | python | def get_statements_for_paper(ids, ev_limit=10, best_first=True, tries=2,
max_stmts=None):
"""Get the set of raw Statements extracted from a paper given by the id.
Parameters
----------
ids : list[(<id type>, <id value>)]
A list of tuples with ids and their type. The type can be any one of
'pmid', 'pmcid', 'doi', 'pii', 'manuscript id', or 'trid', which is the
primary key id of the text references in the database.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 10.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 2.
max_stmts : int or None
Select a maximum number of statements to be returned. Default is None.
Returns
-------
stmts : list[:py:class:`indra.statements.Statement`]
A list of INDRA Statement instances.
"""
id_l = [{'id': id_val, 'type': id_type} for id_type, id_val in ids]
resp = submit_statement_request('post', 'from_papers', data={'ids': id_l},
ev_limit=ev_limit, best_first=best_first,
tries=tries, max_stmts=max_stmts)
stmts_json = resp.json()['statements']
return stmts_from_json(stmts_json.values()) | [
"def",
"get_statements_for_paper",
"(",
"ids",
",",
"ev_limit",
"=",
"10",
",",
"best_first",
"=",
"True",
",",
"tries",
"=",
"2",
",",
"max_stmts",
"=",
"None",
")",
":",
"id_l",
"=",
"[",
"{",
"'id'",
":",
"id_val",
",",
"'type'",
":",
"id_type",
"}",
"for",
"id_type",
",",
"id_val",
"in",
"ids",
"]",
"resp",
"=",
"submit_statement_request",
"(",
"'post'",
",",
"'from_papers'",
",",
"data",
"=",
"{",
"'ids'",
":",
"id_l",
"}",
",",
"ev_limit",
"=",
"ev_limit",
",",
"best_first",
"=",
"best_first",
",",
"tries",
"=",
"tries",
",",
"max_stmts",
"=",
"max_stmts",
")",
"stmts_json",
"=",
"resp",
".",
"json",
"(",
")",
"[",
"'statements'",
"]",
"return",
"stmts_from_json",
"(",
"stmts_json",
".",
"values",
"(",
")",
")"
]
| Get the set of raw Statements extracted from a paper given by the id.
Parameters
----------
ids : list[(<id type>, <id value>)]
A list of tuples with ids and their type. The type can be any one of
'pmid', 'pmcid', 'doi', 'pii', 'manuscript id', or 'trid', which is the
primary key id of the text references in the database.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 10.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 2.
max_stmts : int or None
Select a maximum number of statements to be returned. Default is None.
Returns
-------
stmts : list[:py:class:`indra.statements.Statement`]
A list of INDRA Statement instances. | [
"Get",
"the",
"set",
"of",
"raw",
"Statements",
"extracted",
"from",
"a",
"paper",
"given",
"by",
"the",
"id",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/api.py#L158-L194 | train |
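A brief usage sketch for get_statements_for_paper as documented above, using the same inferred import path; the PMID below is a placeholder.
from indra.sources.indra_db_rest.api import get_statements_for_paper

# (id type, id value) pairs; 'pmid' is one of the id types listed above.
paper_ids = [('pmid', '12345678')]  # placeholder PMID
stmts = get_statements_for_paper(paper_ids, ev_limit=5, max_stmts=100)
print('%d raw statements extracted' % len(stmts))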
sorgerlab/indra | indra/sources/indra_db_rest/api.py | submit_curation | def submit_curation(hash_val, tag, curator, text=None,
source='indra_rest_client', ev_hash=None, is_test=False):
"""Submit a curation for the given statement at the relevant level.
Parameters
----------
hash_val : int
The hash corresponding to the statement.
tag : str
A very short phrase categorizing the error or type of curation,
e.g. "grounding" for a grounding error, or "correct" if you are
marking a statement as correct.
curator : str
The name or identifier for the curator.
text : str
A brief description of the problem.
source : str
The name of the access point through which the curation was performed.
        The default is 'indra_rest_client', meaning this function was used
directly. Any higher-level application should identify itself here.
ev_hash : int
A hash of the sentence and other evidence information. Elsewhere
referred to as `source_hash`.
is_test : bool
Used in testing. If True, no curation will actually be added to the
database.
"""
data = {'tag': tag, 'text': text, 'curator': curator, 'source': source,
'ev_hash': ev_hash}
url = 'curation/submit/%s' % hash_val
if is_test:
qstr = '?test'
else:
qstr = ''
return make_db_rest_request('post', url, qstr, data=data) | python | def submit_curation(hash_val, tag, curator, text=None,
source='indra_rest_client', ev_hash=None, is_test=False):
"""Submit a curation for the given statement at the relevant level.
Parameters
----------
hash_val : int
The hash corresponding to the statement.
tag : str
A very short phrase categorizing the error or type of curation,
e.g. "grounding" for a grounding error, or "correct" if you are
marking a statement as correct.
curator : str
The name or identifier for the curator.
text : str
A brief description of the problem.
source : str
The name of the access point through which the curation was performed.
        The default is 'indra_rest_client', meaning this function was used
directly. Any higher-level application should identify itself here.
ev_hash : int
A hash of the sentence and other evidence information. Elsewhere
referred to as `source_hash`.
is_test : bool
Used in testing. If True, no curation will actually be added to the
database.
"""
data = {'tag': tag, 'text': text, 'curator': curator, 'source': source,
'ev_hash': ev_hash}
url = 'curation/submit/%s' % hash_val
if is_test:
qstr = '?test'
else:
qstr = ''
return make_db_rest_request('post', url, qstr, data=data) | [
"def",
"submit_curation",
"(",
"hash_val",
",",
"tag",
",",
"curator",
",",
"text",
"=",
"None",
",",
"source",
"=",
"'indra_rest_client'",
",",
"ev_hash",
"=",
"None",
",",
"is_test",
"=",
"False",
")",
":",
"data",
"=",
"{",
"'tag'",
":",
"tag",
",",
"'text'",
":",
"text",
",",
"'curator'",
":",
"curator",
",",
"'source'",
":",
"source",
",",
"'ev_hash'",
":",
"ev_hash",
"}",
"url",
"=",
"'curation/submit/%s'",
"%",
"hash_val",
"if",
"is_test",
":",
"qstr",
"=",
"'?test'",
"else",
":",
"qstr",
"=",
"''",
"return",
"make_db_rest_request",
"(",
"'post'",
",",
"url",
",",
"qstr",
",",
"data",
"=",
"data",
")"
]
| Submit a curation for the given statement at the relevant level.
Parameters
----------
hash_val : int
The hash corresponding to the statement.
tag : str
A very short phrase categorizing the error or type of curation,
e.g. "grounding" for a grounding error, or "correct" if you are
marking a statement as correct.
curator : str
The name or identifier for the curator.
text : str
A brief description of the problem.
source : str
The name of the access point through which the curation was performed.
        The default is 'indra_rest_client', meaning this function was used
directly. Any higher-level application should identify itself here.
ev_hash : int
A hash of the sentence and other evidence information. Elsewhere
referred to as `source_hash`.
is_test : bool
Used in testing. If True, no curation will actually be added to the
database. | [
"Submit",
"a",
"curation",
"for",
"the",
"given",
"statement",
"at",
"the",
"relevant",
"level",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/api.py#L197-L231 | train |
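A hedged usage sketch for submit_curation as documented above, with placeholder hash values and is_test=True so nothing is actually written to the curation database; the import path is inferred from the record's file path.
from indra.sources.indra_db_rest.api import submit_curation

# Placeholder hashes; in practice they come from a retrieved Statement and
# one of its Evidence objects (the evidence source_hash).
stmt_hash = -30170002639834472
evidence_hash = 17766473745919111
resp = submit_curation(stmt_hash, tag='grounding', curator='example_user',
                       text='Agent appears to be mis-grounded.',
                       ev_hash=evidence_hash, is_test=True)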
sorgerlab/indra | indra/sources/indra_db_rest/api.py | get_statement_queries | def get_statement_queries(stmts, **params):
"""Get queries used to search based on a statement.
In addition to the stmts, you can enter any parameters standard to the
query. See https://github.com/indralab/indra_db/rest_api for a full list.
Parameters
----------
stmts : list[Statement]
A list of INDRA statements.
"""
def pick_ns(ag):
for ns in ['HGNC', 'FPLX', 'CHEMBL', 'CHEBI', 'GO', 'MESH']:
if ns in ag.db_refs.keys():
dbid = ag.db_refs[ns]
break
else:
ns = 'TEXT'
dbid = ag.name
return '%s@%s' % (dbid, ns)
queries = []
url_base = get_url_base('statements/from_agents')
non_binary_statements = [Complex, SelfModification, ActiveForm]
for stmt in stmts:
kwargs = {}
if type(stmt) not in non_binary_statements:
for pos, ag in zip(['subject', 'object'], stmt.agent_list()):
if ag is not None:
kwargs[pos] = pick_ns(ag)
else:
for i, ag in enumerate(stmt.agent_list()):
if ag is not None:
kwargs['agent%d' % i] = pick_ns(ag)
kwargs['type'] = stmt.__class__.__name__
kwargs.update(params)
query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items()
if v is not None])
queries.append(url_base + query_str)
return queries | python | def get_statement_queries(stmts, **params):
"""Get queries used to search based on a statement.
In addition to the stmts, you can enter any parameters standard to the
query. See https://github.com/indralab/indra_db/rest_api for a full list.
Parameters
----------
stmts : list[Statement]
A list of INDRA statements.
"""
def pick_ns(ag):
for ns in ['HGNC', 'FPLX', 'CHEMBL', 'CHEBI', 'GO', 'MESH']:
if ns in ag.db_refs.keys():
dbid = ag.db_refs[ns]
break
else:
ns = 'TEXT'
dbid = ag.name
return '%s@%s' % (dbid, ns)
queries = []
url_base = get_url_base('statements/from_agents')
non_binary_statements = [Complex, SelfModification, ActiveForm]
for stmt in stmts:
kwargs = {}
if type(stmt) not in non_binary_statements:
for pos, ag in zip(['subject', 'object'], stmt.agent_list()):
if ag is not None:
kwargs[pos] = pick_ns(ag)
else:
for i, ag in enumerate(stmt.agent_list()):
if ag is not None:
kwargs['agent%d' % i] = pick_ns(ag)
kwargs['type'] = stmt.__class__.__name__
kwargs.update(params)
query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items()
if v is not None])
queries.append(url_base + query_str)
return queries | [
"def",
"get_statement_queries",
"(",
"stmts",
",",
"*",
"*",
"params",
")",
":",
"def",
"pick_ns",
"(",
"ag",
")",
":",
"for",
"ns",
"in",
"[",
"'HGNC'",
",",
"'FPLX'",
",",
"'CHEMBL'",
",",
"'CHEBI'",
",",
"'GO'",
",",
"'MESH'",
"]",
":",
"if",
"ns",
"in",
"ag",
".",
"db_refs",
".",
"keys",
"(",
")",
":",
"dbid",
"=",
"ag",
".",
"db_refs",
"[",
"ns",
"]",
"break",
"else",
":",
"ns",
"=",
"'TEXT'",
"dbid",
"=",
"ag",
".",
"name",
"return",
"'%s@%s'",
"%",
"(",
"dbid",
",",
"ns",
")",
"queries",
"=",
"[",
"]",
"url_base",
"=",
"get_url_base",
"(",
"'statements/from_agents'",
")",
"non_binary_statements",
"=",
"[",
"Complex",
",",
"SelfModification",
",",
"ActiveForm",
"]",
"for",
"stmt",
"in",
"stmts",
":",
"kwargs",
"=",
"{",
"}",
"if",
"type",
"(",
"stmt",
")",
"not",
"in",
"non_binary_statements",
":",
"for",
"pos",
",",
"ag",
"in",
"zip",
"(",
"[",
"'subject'",
",",
"'object'",
"]",
",",
"stmt",
".",
"agent_list",
"(",
")",
")",
":",
"if",
"ag",
"is",
"not",
"None",
":",
"kwargs",
"[",
"pos",
"]",
"=",
"pick_ns",
"(",
"ag",
")",
"else",
":",
"for",
"i",
",",
"ag",
"in",
"enumerate",
"(",
"stmt",
".",
"agent_list",
"(",
")",
")",
":",
"if",
"ag",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'agent%d'",
"%",
"i",
"]",
"=",
"pick_ns",
"(",
"ag",
")",
"kwargs",
"[",
"'type'",
"]",
"=",
"stmt",
".",
"__class__",
".",
"__name__",
"kwargs",
".",
"update",
"(",
"params",
")",
"query_str",
"=",
"'?'",
"+",
"'&'",
".",
"join",
"(",
"[",
"'%s=%s'",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"]",
")",
"queries",
".",
"append",
"(",
"url_base",
"+",
"query_str",
")",
"return",
"queries"
]
| Get queries used to search based on a statement.
In addition to the stmts, you can enter any parameters standard to the
query. See https://github.com/indralab/indra_db/rest_api for a full list.
Parameters
----------
stmts : list[Statement]
A list of INDRA statements. | [
"Get",
"queries",
"used",
"to",
"search",
"based",
"on",
"a",
"statement",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/api.py#L234-L274 | train |
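A usage sketch for get_statement_queries as documented above, building a simple Statement from grounded Agents; the HGNC identifiers are given only for illustration, and the import paths follow the record's file path and the indra.statements module.
from indra.statements import Agent, Phosphorylation
from indra.sources.indra_db_rest.api import get_statement_queries

# MAP2K1 (MEK1) phosphorylates MAPK1 (ERK2); HGNC ids shown for illustration.
mek = Agent('MAP2K1', db_refs={'HGNC': '6840'})
erk = Agent('MAPK1', db_refs={'HGNC': '6871'})
stmt = Phosphorylation(mek, erk)
# Each returned string is a from_agents query URL whose subject, object and
# type parameters are filled in from the Statement's Agents.
urls = get_statement_queries([stmt], ev_limit=5)
print(urls[0])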