repo | path | func_name | language | code | sha | url | partition
---|---|---|---|---|---|---|---
sorgerlab/indra | indra/assemblers/pysb/assembler.py | grounded_monomer_patterns | python

def grounded_monomer_patterns(model, agent, ignore_activities=False):
    """Get monomer patterns for the agent accounting for grounding information.

    Parameters
    ----------
    model : pysb.core.Model
        The model to search for MonomerPatterns matching the given Agent.
    agent : indra.statements.Agent
        The Agent to find matching MonomerPatterns for.
    ignore_activities : bool
        Whether to ignore any ActivityConditions on the agent when determining
        the required site conditions for the MonomerPattern. For example, if
        set to True, will find a match for the agent `MAPK1(activity=kinase)`
        even if the corresponding MAPK1 Monomer in the model has no site
        named `kinase`. Default is False (more stringent matching).

    Returns
    -------
    generator of MonomerPatterns
    """
    # If it's not a molecular agent
    if not isinstance(agent, ist.Agent):
        monomer = model.monomers.get(agent.name)
        if not monomer:
            return
        yield monomer()
    # Iterate over all model annotations to identify the monomer associated
    # with this agent
    monomer = None
    for ann in model.annotations:
        if monomer:
            break
        if not ann.predicate == 'is':
            continue
        if not isinstance(ann.subject, Monomer):
            continue
        (ns, id) = parse_identifiers_url(ann.object)
        if ns is None and id is None:
            continue
        # We now have an identifiers.org namespace/ID for a given monomer;
        # we check to see if there is a matching identifier in the db_refs
        # for this agent
        for db_ns, db_id in agent.db_refs.items():
            # We've found a match! Take the first match.
            # FIXME Could also update this to check for alternative
            # FIXME matches, or make sure that all grounding IDs match,
            # FIXME etc.
            if db_ns == ns and db_id == id:
                monomer = ann.subject
                break
    # We looked at all the annotations in the model and didn't find a
    # match
    if monomer is None:
        logger.info('No monomer found corresponding to agent %s' % agent)
        return
    # Now that we have a monomer for the agent, look for site/state
    # combinations corresponding to the state of the agent. For every one of
    # the modifications specified in the agent signature, check to see if it
    # can be satisfied based on the agent's annotations. For every one we find
    # that is consistent, we yield it--there may be more than one.
    # Create a list of lists, each one representing the site conditions
    # that can satisfy a particular agent condition. Each entry in the list
    # will contain a list of dicts associated with a particular mod/activity
    # condition. Each dict will represent a site/state combination satisfying
    # the constraints imposed by that mod/activity condition.
    sc_list = []
    for mod in agent.mods:
        # Find all site/state combinations that have the appropriate
        # modification type. As we iterate, build up a dict identifying the
        # annotations of particular sites.
        mod_sites = {}
        res_sites = set([])
        pos_sites = set([])
        for ann in monomer.site_annotations:
            # Don't forget to handle Nones!
            if ann.predicate == 'is_modification' and \
                    ann.object == mod.mod_type:
                site_state = ann.subject
                assert isinstance(site_state, tuple)
                assert len(site_state) == 2
                mod_sites[site_state[0]] = site_state[1]
            elif ann.predicate == 'is_residue' and \
                    ann.object == mod.residue:
                res_sites.add(ann.subject)
            elif ann.predicate == 'is_position' and \
                    ann.object == mod.position:
                pos_sites.add(ann.subject)
        # If the residue or position field of the agent is specified,
        # restrict to sites carrying a matching annotation
        viable_sites = set(mod_sites.keys())
        if mod.residue is not None:
            viable_sites = viable_sites.intersection(res_sites)
        if mod.position is not None:
            viable_sites = viable_sites.intersection(pos_sites)
        # If there are no viable sites annotated in the model matching the
        # available info in the mod condition, then we won't be able to
        # satisfy the conditions on this agent
        if not viable_sites:
            return
        # If there are any sites left after we subject them to residue
        # and position constraints, then collect the relevant site patterns
        pattern_list = []
        for site_name in viable_sites:
            pattern_list.append({site_name: (mod_sites[site_name], WILD)})
        sc_list.append(pattern_list)
    # Now check for monomer patterns satisfying the agent's activity condition
    if agent.activity and not ignore_activities:
        # Iterate through annotations with this monomer as the subject
        # and a has_active_pattern or has_inactive_pattern relationship
        # FIXME: Currently activity type is not annotated/checked
        # FIXME act_type = agent.activity.activity_type
        rel_type = 'has_active_pattern' if agent.activity.is_active \
                                        else 'has_inactive_pattern'
        active_form_list = []
        for ann in model.annotations:
            if ann.subject == monomer and ann.predicate == rel_type:
                # The annotation object contains the active/inactive pattern
                active_form_list.append(ann.object)
        sc_list.append(active_form_list)
    # Now that we've got a list of conditions, take the product of all
    # condition combinations and yield the resulting monomer patterns
    for pattern_combo in itertools.product(*sc_list):
        mp_sc = {}
        for pattern in pattern_combo:
            mp_sc.update(pattern)
        if mp_sc:
            yield monomer(**mp_sc)
    if not sc_list:
        yield monomer()

"def",
"grounded_monomer_patterns",
"(",
"model",
",",
"agent",
",",
"ignore_activities",
"=",
"False",
")",
":",
"# If it's not a molecular agent",
"if",
"not",
"isinstance",
"(",
"agent",
",",
"ist",
".",
"Agent",
")",
":",
"monomer",
"=",
"model",
".",
"monomers",
".",
"get",
"(",
"agent",
".",
"name",
")",
"if",
"not",
"monomer",
":",
"return",
"yield",
"monomer",
"(",
")",
"# Iterate over all model annotations to identify the monomer associated",
"# with this agent",
"monomer",
"=",
"None",
"for",
"ann",
"in",
"model",
".",
"annotations",
":",
"if",
"monomer",
":",
"break",
"if",
"not",
"ann",
".",
"predicate",
"==",
"'is'",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"ann",
".",
"subject",
",",
"Monomer",
")",
":",
"continue",
"(",
"ns",
",",
"id",
")",
"=",
"parse_identifiers_url",
"(",
"ann",
".",
"object",
")",
"if",
"ns",
"is",
"None",
"and",
"id",
"is",
"None",
":",
"continue",
"# We now have an identifiers.org namespace/ID for a given monomer;",
"# we check to see if there is a matching identifier in the db_refs",
"# for this agent",
"for",
"db_ns",
",",
"db_id",
"in",
"agent",
".",
"db_refs",
".",
"items",
"(",
")",
":",
"# We've found a match! Return first match",
"# FIXME Could also update this to check for alternative",
"# FIXME matches, or make sure that all grounding IDs match,",
"# FIXME etc.",
"if",
"db_ns",
"==",
"ns",
"and",
"db_id",
"==",
"id",
":",
"monomer",
"=",
"ann",
".",
"subject",
"break",
"# We looked at all the annotations in the model and didn't find a",
"# match",
"if",
"monomer",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'No monomer found corresponding to agent %s'",
"%",
"agent",
")",
"return",
"# Now that we have a monomer for the agent, look for site/state",
"# combinations corresponding to the state of the agent. For every one of",
"# the modifications specified in the agent signature, check to see if it",
"# can be satisfied based on the agent's annotations. For every one we find",
"# that is consistent, we yield it--there may be more than one.",
"# FIXME",
"# Create a list of tuples, each one representing the site conditions",
"# that can satisfy a particular agent condition. Each entry in the list",
"# will contain a list of dicts associated with a particular mod/activity",
"# condition. Each dict will represent a site/state combination satisfying",
"# the constraints imposed by that mod/activity condition.",
"sc_list",
"=",
"[",
"]",
"for",
"mod",
"in",
"agent",
".",
"mods",
":",
"# Find all site/state combinations that have the appropriate",
"# modification type",
"# As we iterate, build up a dict identifying the annotations of",
"# particular sites",
"mod_sites",
"=",
"{",
"}",
"res_sites",
"=",
"set",
"(",
"[",
"]",
")",
"pos_sites",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"ann",
"in",
"monomer",
".",
"site_annotations",
":",
"# Don't forget to handle Nones!",
"if",
"ann",
".",
"predicate",
"==",
"'is_modification'",
"and",
"ann",
".",
"object",
"==",
"mod",
".",
"mod_type",
":",
"site_state",
"=",
"ann",
".",
"subject",
"assert",
"isinstance",
"(",
"site_state",
",",
"tuple",
")",
"assert",
"len",
"(",
"site_state",
")",
"==",
"2",
"mod_sites",
"[",
"site_state",
"[",
"0",
"]",
"]",
"=",
"site_state",
"[",
"1",
"]",
"elif",
"ann",
".",
"predicate",
"==",
"'is_residue'",
"and",
"ann",
".",
"object",
"==",
"mod",
".",
"residue",
":",
"res_sites",
".",
"add",
"(",
"ann",
".",
"subject",
")",
"elif",
"ann",
".",
"predicate",
"==",
"'is_position'",
"and",
"ann",
".",
"object",
"==",
"mod",
".",
"position",
":",
"pos_sites",
".",
"add",
"(",
"ann",
".",
"subject",
")",
"# If the residue field of the agent is specified,",
"viable_sites",
"=",
"set",
"(",
"mod_sites",
".",
"keys",
"(",
")",
")",
"if",
"mod",
".",
"residue",
"is",
"not",
"None",
":",
"viable_sites",
"=",
"viable_sites",
".",
"intersection",
"(",
"res_sites",
")",
"if",
"mod",
".",
"position",
"is",
"not",
"None",
":",
"viable_sites",
"=",
"viable_sites",
".",
"intersection",
"(",
"pos_sites",
")",
"# If there are no viable sites annotated in the model matching the",
"# available info in the mod condition, then we won't be able to",
"# satisfy the conditions on this agent",
"if",
"not",
"viable_sites",
":",
"return",
"# Otherwise, update the ",
"# If there are any sites left after we subject them to residue",
"# and position constraints, then return the relevant monomer patterns!",
"pattern_list",
"=",
"[",
"]",
"for",
"site_name",
"in",
"viable_sites",
":",
"pattern_list",
".",
"append",
"(",
"{",
"site_name",
":",
"(",
"mod_sites",
"[",
"site_name",
"]",
",",
"WILD",
")",
"}",
")",
"sc_list",
".",
"append",
"(",
"pattern_list",
")",
"# Now check for monomer patterns satisfying the agent's activity condition",
"if",
"agent",
".",
"activity",
"and",
"not",
"ignore_activities",
":",
"# Iterate through annotations with this monomer as the subject",
"# and a has_active_pattern or has_inactive_pattern relationship",
"# FIXME: Currently activity type is not annotated/checked",
"# FIXME act_type = agent.activity.activity_type",
"rel_type",
"=",
"'has_active_pattern'",
"if",
"agent",
".",
"activity",
".",
"is_active",
"else",
"'has_inactive_pattern'",
"active_form_list",
"=",
"[",
"]",
"for",
"ann",
"in",
"model",
".",
"annotations",
":",
"if",
"ann",
".",
"subject",
"==",
"monomer",
"and",
"ann",
".",
"predicate",
"==",
"rel_type",
":",
"# The annotation object contains the active/inactive pattern",
"active_form_list",
".",
"append",
"(",
"ann",
".",
"object",
")",
"sc_list",
".",
"append",
"(",
"active_form_list",
")",
"# Now that we've got a list of conditions",
"for",
"pattern_combo",
"in",
"itertools",
".",
"product",
"(",
"*",
"sc_list",
")",
":",
"mp_sc",
"=",
"{",
"}",
"for",
"pattern",
"in",
"pattern_combo",
":",
"mp_sc",
".",
"update",
"(",
"pattern",
")",
"if",
"mp_sc",
":",
"yield",
"monomer",
"(",
"*",
"*",
"mp_sc",
")",
"if",
"not",
"sc_list",
":",
"yield",
"monomer",
"(",
")"
]
| Get monomer patterns for the agent accounting for grounding information.
Parameters
----------
model : pysb.core.Model
The model to search for MonomerPatterns matching the given Agent.
agent : indra.statements.Agent
The Agent to find matching MonomerPatterns for.
ignore_activites : bool
Whether to ignore any ActivityConditions on the agent when determining
the required site conditions for the MonomerPattern. For example, if
set to True, will find a match for the agent `MAPK1(activity=kinase)`
even if the corresponding MAPK1 Monomer in the model has no site
named `kinase`. Default is False (more stringent matching).
Returns
-------
generator of MonomerPatterns | [
"Get",
"monomer",
"patterns",
"for",
"the",
"agent",
"accounting",
"for",
"grounding",
"information",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L152-L281 | partition: train
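
A minimal usage sketch (not part of the source record), assuming INDRA's public Agent/Statement classes and PysbAssembler API at this revision; the gene names, HGNC groundings, and phosphosite are illustrative only.

from indra import statements as ist
from indra.assemblers.pysb import PysbAssembler

# Assemble a small model in which MAP2K1 phosphorylates MAPK1 at T185
map2k1 = ist.Agent('MAP2K1', db_refs={'HGNC': '6840'})
mapk1 = ist.Agent('MAPK1', db_refs={'HGNC': '6871'})
pa = PysbAssembler()
pa.add_statements([ist.Phosphorylation(map2k1, mapk1, 'T', '185')])
model = pa.make_model()

# Yield MonomerPatterns matching MAPK1 phosphorylated at T185, using the
# grounding annotations attached to the model by the assembler
query = ist.Agent('MAPK1', db_refs={'HGNC': '6871'},
                  mods=[ist.ModCondition('phosphorylation', 'T', '185')])
for mp in grounded_monomer_patterns(model, query):
    print(mp)
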
sorgerlab/indra | indra/assemblers/pysb/assembler.py | get_monomer_pattern | python

def get_monomer_pattern(model, agent, extra_fields=None):
    """Construct a PySB MonomerPattern from an Agent."""
    try:
        monomer = model.monomers[_n(agent.name)]
    except KeyError as e:
        logger.warning('Monomer with name %s not found in model' %
                       _n(agent.name))
        return None
    # Get the agent site pattern
    pattern = get_site_pattern(agent)
    if extra_fields is not None:
        for k, v in extra_fields.items():
            # This is an important assumption: it only sets the given pattern
            # on the monomer if that site/key is not already specified at the
            # Agent level. For instance, if the Agent is specified to have
            # 'activity', that site will not be updated here.
            if k not in pattern:
                pattern[k] = v
    # If a model is given, return the Monomer with the generated pattern,
    # otherwise just return the pattern
    try:
        monomer_pattern = monomer(**pattern)
    except Exception as e:
        logger.info("Invalid site pattern %s for monomer %s" %
                    (pattern, monomer))
        return None
    return monomer_pattern

"def",
"get_monomer_pattern",
"(",
"model",
",",
"agent",
",",
"extra_fields",
"=",
"None",
")",
":",
"try",
":",
"monomer",
"=",
"model",
".",
"monomers",
"[",
"_n",
"(",
"agent",
".",
"name",
")",
"]",
"except",
"KeyError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"'Monomer with name %s not found in model'",
"%",
"_n",
"(",
"agent",
".",
"name",
")",
")",
"return",
"None",
"# Get the agent site pattern",
"pattern",
"=",
"get_site_pattern",
"(",
"agent",
")",
"if",
"extra_fields",
"is",
"not",
"None",
":",
"for",
"k",
",",
"v",
"in",
"extra_fields",
".",
"items",
"(",
")",
":",
"# This is an important assumption, it only sets the given pattern",
"# on the monomer if that site/key is not already specified at the",
"# Agent level. For instance, if the Agent is specified to have",
"# 'activity', that site will not be updated here.",
"if",
"k",
"not",
"in",
"pattern",
":",
"pattern",
"[",
"k",
"]",
"=",
"v",
"# If a model is given, return the Monomer with the generated pattern,",
"# otherwise just return the pattern",
"try",
":",
"monomer_pattern",
"=",
"monomer",
"(",
"*",
"*",
"pattern",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"info",
"(",
"\"Invalid site pattern %s for monomer %s\"",
"%",
"(",
"pattern",
",",
"monomer",
")",
")",
"return",
"None",
"return",
"monomer_pattern"
]
| Construct a PySB MonomerPattern from an Agent. | [
"Construct",
"a",
"PySB",
"MonomerPattern",
"from",
"an",
"Agent",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L293-L319 | partition: train
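
A hedged sketch of a direct call, reusing the `model` assembled in the example above; the function returns None rather than raising when the monomer or site pattern cannot be resolved.

from indra import statements as ist

agent = ist.Agent('MAPK1',
                  mods=[ist.ModCondition('phosphorylation', 'T', '185')])
mp = get_monomer_pattern(model, agent)
if mp is None:
    print('No MAPK1 monomer in the model, or invalid site pattern')
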
sorgerlab/indra | indra/assemblers/pysb/assembler.py | get_site_pattern | python

def get_site_pattern(agent):
    """Construct a dictionary of Monomer site states from an Agent.

    This creates the mapping to the associated PySB monomer from an
    INDRA Agent object."""
    if not isinstance(agent, ist.Agent):
        return {}
    pattern = {}
    # Handle bound conditions
    for bc in agent.bound_conditions:
        # Here we make the assumption that the binding site
        # is simply named after the binding partner
        if bc.is_bound:
            pattern[get_binding_site_name(bc.agent)] = ANY
        else:
            pattern[get_binding_site_name(bc.agent)] = None
    # Handle modifications
    for mod in agent.mods:
        mod_site_str = abbrevs[mod.mod_type]
        if mod.residue is not None:
            mod_site_str = mod.residue
        mod_pos_str = mod.position if mod.position is not None else ''
        mod_site = ('%s%s' % (mod_site_str, mod_pos_str))
        site_states = states[mod.mod_type]
        if mod.is_modified:
            pattern[mod_site] = (site_states[1], WILD)
        else:
            pattern[mod_site] = (site_states[0], WILD)
    # Handle mutations
    for mc in agent.mutations:
        res_from = mc.residue_from if mc.residue_from else 'mut'
        res_to = mc.residue_to if mc.residue_to else 'X'
        if mc.position is None:
            mut_site_name = res_from
        else:
            mut_site_name = res_from + mc.position
        pattern[mut_site_name] = res_to
    # Handle location
    if agent.location is not None:
        pattern['loc'] = _n(agent.location)
    # Handle activity
    if agent.activity is not None:
        active_site_name = agent.activity.activity_type
        if agent.activity.is_active:
            active_site_state = 'active'
        else:
            active_site_state = 'inactive'
        pattern[active_site_name] = active_site_state
    return pattern

"def",
"get_site_pattern",
"(",
"agent",
")",
":",
"if",
"not",
"isinstance",
"(",
"agent",
",",
"ist",
".",
"Agent",
")",
":",
"return",
"{",
"}",
"pattern",
"=",
"{",
"}",
"# Handle bound conditions",
"for",
"bc",
"in",
"agent",
".",
"bound_conditions",
":",
"# Here we make the assumption that the binding site",
"# is simply named after the binding partner",
"if",
"bc",
".",
"is_bound",
":",
"pattern",
"[",
"get_binding_site_name",
"(",
"bc",
".",
"agent",
")",
"]",
"=",
"ANY",
"else",
":",
"pattern",
"[",
"get_binding_site_name",
"(",
"bc",
".",
"agent",
")",
"]",
"=",
"None",
"# Handle modifications",
"for",
"mod",
"in",
"agent",
".",
"mods",
":",
"mod_site_str",
"=",
"abbrevs",
"[",
"mod",
".",
"mod_type",
"]",
"if",
"mod",
".",
"residue",
"is",
"not",
"None",
":",
"mod_site_str",
"=",
"mod",
".",
"residue",
"mod_pos_str",
"=",
"mod",
".",
"position",
"if",
"mod",
".",
"position",
"is",
"not",
"None",
"else",
"''",
"mod_site",
"=",
"(",
"'%s%s'",
"%",
"(",
"mod_site_str",
",",
"mod_pos_str",
")",
")",
"site_states",
"=",
"states",
"[",
"mod",
".",
"mod_type",
"]",
"if",
"mod",
".",
"is_modified",
":",
"pattern",
"[",
"mod_site",
"]",
"=",
"(",
"site_states",
"[",
"1",
"]",
",",
"WILD",
")",
"else",
":",
"pattern",
"[",
"mod_site",
"]",
"=",
"(",
"site_states",
"[",
"0",
"]",
",",
"WILD",
")",
"# Handle mutations",
"for",
"mc",
"in",
"agent",
".",
"mutations",
":",
"res_from",
"=",
"mc",
".",
"residue_from",
"if",
"mc",
".",
"residue_from",
"else",
"'mut'",
"res_to",
"=",
"mc",
".",
"residue_to",
"if",
"mc",
".",
"residue_to",
"else",
"'X'",
"if",
"mc",
".",
"position",
"is",
"None",
":",
"mut_site_name",
"=",
"res_from",
"else",
":",
"mut_site_name",
"=",
"res_from",
"+",
"mc",
".",
"position",
"pattern",
"[",
"mut_site_name",
"]",
"=",
"res_to",
"# Handle location",
"if",
"agent",
".",
"location",
"is",
"not",
"None",
":",
"pattern",
"[",
"'loc'",
"]",
"=",
"_n",
"(",
"agent",
".",
"location",
")",
"# Handle activity",
"if",
"agent",
".",
"activity",
"is",
"not",
"None",
":",
"active_site_name",
"=",
"agent",
".",
"activity",
".",
"activity_type",
"if",
"agent",
".",
"activity",
".",
"is_active",
":",
"active_site_state",
"=",
"'active'",
"else",
":",
"active_site_state",
"=",
"'inactive'",
"pattern",
"[",
"active_site_name",
"]",
"=",
"active_site_state",
"return",
"pattern"
]
| Construct a dictionary of Monomer site states from an Agent.
This crates the mapping to the associated PySB monomer from an
INDRA Agent object. | [
"Construct",
"a",
"dictionary",
"of",
"Monomer",
"site",
"states",
"from",
"an",
"Agent",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L322-L375 | partition: train
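
An illustrative call (assumed API; the exact site and state names depend on the assembler's abbrevs/states tables):

from indra import statements as ist

agent = ist.Agent('MAPK1',
                  mods=[ist.ModCondition('phosphorylation', 'T', '185')],
                  location='cytoplasm')
# With the default state names this is expected to yield roughly
# {'T185': ('p', WILD), 'loc': 'cytoplasm'}
print(get_site_pattern(agent))
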
sorgerlab/indra | indra/assemblers/pysb/assembler.py | set_base_initial_condition | python

def set_base_initial_condition(model, monomer, value):
    """Set an initial condition for a monomer in its 'default' state."""
    # Build up monomer pattern dict
    sites_dict = {}
    for site in monomer.sites:
        if site in monomer.site_states:
            if site == 'loc' and 'cytoplasm' in monomer.site_states['loc']:
                sites_dict['loc'] = 'cytoplasm'
            else:
                sites_dict[site] = monomer.site_states[site][0]
        else:
            sites_dict[site] = None
    mp = monomer(**sites_dict)
    pname = monomer.name + '_0'
    try:
        p = model.parameters[pname]
        p.value = value
    except KeyError:
        p = Parameter(pname, value)
        model.add_component(p)
        model.initial(mp, p)

"def",
"set_base_initial_condition",
"(",
"model",
",",
"monomer",
",",
"value",
")",
":",
"# Build up monomer pattern dict",
"sites_dict",
"=",
"{",
"}",
"for",
"site",
"in",
"monomer",
".",
"sites",
":",
"if",
"site",
"in",
"monomer",
".",
"site_states",
":",
"if",
"site",
"==",
"'loc'",
"and",
"'cytoplasm'",
"in",
"monomer",
".",
"site_states",
"[",
"'loc'",
"]",
":",
"sites_dict",
"[",
"'loc'",
"]",
"=",
"'cytoplasm'",
"else",
":",
"sites_dict",
"[",
"site",
"]",
"=",
"monomer",
".",
"site_states",
"[",
"site",
"]",
"[",
"0",
"]",
"else",
":",
"sites_dict",
"[",
"site",
"]",
"=",
"None",
"mp",
"=",
"monomer",
"(",
"*",
"*",
"sites_dict",
")",
"pname",
"=",
"monomer",
".",
"name",
"+",
"'_0'",
"try",
":",
"p",
"=",
"model",
".",
"parameters",
"[",
"pname",
"]",
"p",
".",
"value",
"=",
"value",
"except",
"KeyError",
":",
"p",
"=",
"Parameter",
"(",
"pname",
",",
"value",
")",
"model",
".",
"add_component",
"(",
"p",
")",
"model",
".",
"initial",
"(",
"mp",
",",
"p",
")"
]
| Set an initial condition for a monomer in its 'default' state. | [
"Set",
"an",
"initial",
"condition",
"for",
"a",
"monomer",
"in",
"its",
"default",
"state",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L378-L398 | partition: train
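
A small sketch against a hand-built PySB model (the monomer and site names are hypothetical); _export=False avoids PySB's self-export magic so components can be added explicitly.

from pysb import Model, Monomer

model = Model(_export=False)
m = Monomer('MAPK1', ['T185'], {'T185': ['u', 'p']}, _export=False)
model.add_component(m)
# Creates (or updates) a MAPK1_0 parameter and an initial condition with
# 10000 copies of MAPK1 in its first-listed ('u') site state
set_base_initial_condition(model, m, 10000)
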
sorgerlab/indra | indra/assemblers/pysb/assembler.py | get_annotation | python

def get_annotation(component, db_name, db_ref):
    """Construct model Annotations for each component.

    Annotation formats follow guidelines at http://identifiers.org/.
    """
    url = get_identifiers_url(db_name, db_ref)
    if not url:
        return None
    subj = component
    ann = Annotation(subj, url, 'is')
    return ann

"def",
"get_annotation",
"(",
"component",
",",
"db_name",
",",
"db_ref",
")",
":",
"url",
"=",
"get_identifiers_url",
"(",
"db_name",
",",
"db_ref",
")",
"if",
"not",
"url",
":",
"return",
"None",
"subj",
"=",
"component",
"ann",
"=",
"Annotation",
"(",
"subj",
",",
"url",
",",
"'is'",
")",
"return",
"ann"
]
| Construct model Annotations for each component.
Annotation formats follow guidelines at http://identifiers.org/. | [
"Construct",
"model",
"Annotations",
"for",
"each",
"component",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L432-L442 | partition: train
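
Continuing the sketch above, and assuming get_identifiers_url resolves HGNC IDs to identifiers.org URLs:

# Annotate the hand-built monomer with an HGNC grounding; the Annotation
# links the Monomer to an identifiers.org URL via the 'is' predicate
ann = get_annotation(m, 'HGNC', '6871')
if ann is not None:
    model.add_annotation(ann)
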
sorgerlab/indra | indra/assemblers/pysb/assembler.py | PysbAssembler.make_model | python

def make_model(self, policies=None, initial_conditions=True,
               reverse_effects=False, model_name='indra_model'):
    """Assemble the PySB model from the collected INDRA Statements.

    This method assembles a PySB model from the set of INDRA Statements.
    The assembled model is both returned and set as the assembler's
    model argument.

    Parameters
    ----------
    policies : Optional[Union[str, dict]]
        A string or dictionary that defines one or more assembly policies.
        If policies is a string, it defines a global assembly policy
        that applies to all Statement types.
        Example: one_step, interactions_only
        A dictionary of policies has keys corresponding to Statement types
        and values to the policy to be applied to that type of Statement.
        For Statement types whose policy is undefined, the 'default'
        policy is applied.
        Example: {'Phosphorylation': 'two_step'}
    initial_conditions : Optional[bool]
        If True, default initial conditions are generated for the
        Monomers in the model. Default: True
    reverse_effects : Optional[bool]
        If True, reverse rules are added to the model for activity,
        modification and amount regulations that have no corresponding
        reverse effects. Default: False
    model_name : Optional[str]
        The name attribute assigned to the PySB Model object.
        Default: "indra_model"

    Returns
    -------
    model : pysb.Model
        The assembled PySB model object.
    """
    ppa = PysbPreassembler(self.statements)
    self.processed_policies = self.process_policies(policies)
    ppa.replace_activities()
    if reverse_effects:
        ppa.add_reverse_effects()
    self.statements = ppa.statements
    self.model = Model()
    self.model.name = model_name
    self.agent_set = BaseAgentSet()
    # Collect information about the monomers/self.agent_set from the
    # statements
    self._monomers()
    # Add the monomers to the model based on our BaseAgentSet
    for agent_name, agent in self.agent_set.items():
        m = Monomer(_n(agent_name), agent.sites, agent.site_states)
        m.site_annotations = agent.site_annotations
        self.model.add_component(m)
        for db_name, db_ref in agent.db_refs.items():
            a = get_annotation(m, db_name, db_ref)
            if a is not None:
                self.model.add_annotation(a)
        # Iterate over the active_forms
        for af in agent.active_forms:
            self.model.add_annotation(Annotation(m, af,
                                                 'has_active_pattern'))
        for iaf in agent.inactive_forms:
            self.model.add_annotation(Annotation(m, iaf,
                                                 'has_inactive_pattern'))
        for at in agent.activity_types:
            act_site_cond = {at: 'active'}
            self.model.add_annotation(Annotation(m, act_site_cond,
                                                 'has_active_pattern'))
            inact_site_cond = {at: 'inactive'}
            self.model.add_annotation(Annotation(m, inact_site_cond,
                                                 'has_inactive_pattern'))
    # Iterate over the statements to generate rules
    self._assemble()
    # Add initial conditions
    if initial_conditions:
        self.add_default_initial_conditions()
    return self.model

"def",
"make_model",
"(",
"self",
",",
"policies",
"=",
"None",
",",
"initial_conditions",
"=",
"True",
",",
"reverse_effects",
"=",
"False",
",",
"model_name",
"=",
"'indra_model'",
")",
":",
"ppa",
"=",
"PysbPreassembler",
"(",
"self",
".",
"statements",
")",
"self",
".",
"processed_policies",
"=",
"self",
".",
"process_policies",
"(",
"policies",
")",
"ppa",
".",
"replace_activities",
"(",
")",
"if",
"reverse_effects",
":",
"ppa",
".",
"add_reverse_effects",
"(",
")",
"self",
".",
"statements",
"=",
"ppa",
".",
"statements",
"self",
".",
"model",
"=",
"Model",
"(",
")",
"self",
".",
"model",
".",
"name",
"=",
"model_name",
"self",
".",
"agent_set",
"=",
"BaseAgentSet",
"(",
")",
"# Collect information about the monomers/self.agent_set from the",
"# statements",
"self",
".",
"_monomers",
"(",
")",
"# Add the monomers to the model based on our BaseAgentSet",
"for",
"agent_name",
",",
"agent",
"in",
"self",
".",
"agent_set",
".",
"items",
"(",
")",
":",
"m",
"=",
"Monomer",
"(",
"_n",
"(",
"agent_name",
")",
",",
"agent",
".",
"sites",
",",
"agent",
".",
"site_states",
")",
"m",
".",
"site_annotations",
"=",
"agent",
".",
"site_annotations",
"self",
".",
"model",
".",
"add_component",
"(",
"m",
")",
"for",
"db_name",
",",
"db_ref",
"in",
"agent",
".",
"db_refs",
".",
"items",
"(",
")",
":",
"a",
"=",
"get_annotation",
"(",
"m",
",",
"db_name",
",",
"db_ref",
")",
"if",
"a",
"is",
"not",
"None",
":",
"self",
".",
"model",
".",
"add_annotation",
"(",
"a",
")",
"# Iterate over the active_forms",
"for",
"af",
"in",
"agent",
".",
"active_forms",
":",
"self",
".",
"model",
".",
"add_annotation",
"(",
"Annotation",
"(",
"m",
",",
"af",
",",
"'has_active_pattern'",
")",
")",
"for",
"iaf",
"in",
"agent",
".",
"inactive_forms",
":",
"self",
".",
"model",
".",
"add_annotation",
"(",
"Annotation",
"(",
"m",
",",
"iaf",
",",
"'has_inactive_pattern'",
")",
")",
"for",
"at",
"in",
"agent",
".",
"activity_types",
":",
"act_site_cond",
"=",
"{",
"at",
":",
"'active'",
"}",
"self",
".",
"model",
".",
"add_annotation",
"(",
"Annotation",
"(",
"m",
",",
"act_site_cond",
",",
"'has_active_pattern'",
")",
")",
"inact_site_cond",
"=",
"{",
"at",
":",
"'inactive'",
"}",
"self",
".",
"model",
".",
"add_annotation",
"(",
"Annotation",
"(",
"m",
",",
"inact_site_cond",
",",
"'has_inactive_pattern'",
")",
")",
"# Iterate over the statements to generate rules",
"self",
".",
"_assemble",
"(",
")",
"# Add initial conditions",
"if",
"initial_conditions",
":",
"self",
".",
"add_default_initial_conditions",
"(",
")",
"return",
"self",
".",
"model"
]
| Assemble the PySB model from the collected INDRA Statements.
This method assembles a PySB model from the set of INDRA Statements.
The assembled model is both returned and set as the assembler's
model argument.
Parameters
----------
policies : Optional[Union[str, dict]]
A string or dictionary that defines one or more assembly policies.
If policies is a string, it defines a global assembly policy
that applies to all Statement types.
Example: one_step, interactions_only
A dictionary of policies has keys corresponding to Statement types
and values to the policy to be applied to that type of Statement.
For Statement types whose policy is undefined, the 'default'
policy is applied.
Example: {'Phosphorylation': 'two_step'}
initial_conditions : Optional[bool]
If True, default initial conditions are generated for the
Monomers in the model. Default: True
reverse_effects : Optional[bool]
If True, reverse rules are added to the model for activity,
modification and amount regulations that have no corresponding
reverse effects. Default: False
model_name : Optional[str]
The name attribute assigned to the PySB Model object.
Default: "indra_model"
Returns
-------
model : pysb.Model
The assembled PySB model object. | [
"Assemble",
"the",
"PySB",
"model",
"from",
"the",
"collected",
"INDRA",
"Statements",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L546-L626 | partition: train
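
A usage sketch based on the docstring; add_statements is assumed to be the assembler's statement-loading method at this revision.

from indra import statements as ist
from indra.assemblers.pysb import PysbAssembler

stmts = [ist.Phosphorylation(ist.Agent('MAP2K1'), ist.Agent('MAPK1'))]
pa = PysbAssembler()
pa.add_statements(stmts)
# A global policy passed as a string; a dict such as
# {'Phosphorylation': 'two_step'} would apply per Statement type instead
model = pa.make_model(policies='two_step')
print(model.monomers)
print(model.rules)
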
sorgerlab/indra | indra/assemblers/pysb/assembler.py | PysbAssembler.add_default_initial_conditions | python

def add_default_initial_conditions(self, value=None):
    """Set default initial conditions in the PySB model.

    Parameters
    ----------
    value : Optional[float]
        Optionally a value can be supplied which will be the initial
        amount applied. Otherwise a built-in default is used.
    """
    if value is not None:
        try:
            value_num = float(value)
        except ValueError:
            logger.error('Invalid initial condition value.')
            return
    else:
        value_num = self.default_initial_amount
    if self.model is None:
        return
    for m in self.model.monomers:
        set_base_initial_condition(self.model, m, value_num)

"def",
"add_default_initial_conditions",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value_num",
"=",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"logger",
".",
"error",
"(",
"'Invalid initial condition value.'",
")",
"return",
"else",
":",
"value_num",
"=",
"self",
".",
"default_initial_amount",
"if",
"self",
".",
"model",
"is",
"None",
":",
"return",
"for",
"m",
"in",
"self",
".",
"model",
".",
"monomers",
":",
"set_base_initial_condition",
"(",
"self",
".",
"model",
",",
"m",
",",
"value_num",
")"
]
| Set default initial conditions in the PySB model.
Parameters
----------
value : Optional[float]
Optionally a value can be supplied which will be the initial
amount applied. Otherwise a built-in default is used. | [
"Set",
"default",
"initial",
"conditions",
"in",
"the",
"PySB",
"model",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L628-L648 | partition: train
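
For example (continuing the assembler sketch above):

# Build the model without initial conditions, then add them at a chosen amount
model = pa.make_model(initial_conditions=False)
pa.add_default_initial_conditions(value=50000)
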
sorgerlab/indra | indra/assemblers/pysb/assembler.py | PysbAssembler.set_expression | python

def set_expression(self, expression_dict):
    """Set protein expression amounts as initial conditions.

    Parameters
    ----------
    expression_dict : dict
        A dictionary in which the keys are gene names and the
        values are numbers representing the absolute amount
        (count per cell) of proteins expressed. Proteins that
        are not expressed can be represented as nan. Entries
        that are not in the dict or are in there but resolve
        to None, are set to the default initial amount.
        Example: {'EGFR': 12345, 'BRAF': 4567, 'ESR1': nan}
    """
    if self.model is None:
        return
    monomers_found = []
    monomers_notfound = []
    # Iterate over all the monomers
    for m in self.model.monomers:
        if (m.name in expression_dict and
                expression_dict[m.name] is not None):
            # Try to get the expression amount from the dict
            init = expression_dict[m.name]
            # We interpret nan and None as not expressed
            if math.isnan(init):
                init = 0
            init_round = round(init)
            set_base_initial_condition(self.model, m, init_round)
            monomers_found.append(m.name)
        else:
            set_base_initial_condition(self.model, m,
                                       self.default_initial_amount)
            monomers_notfound.append(m.name)
    logger.info('Monomers set to given context')
    logger.info('-----------------------------')
    for m in monomers_found:
        logger.info('%s' % m)
    if monomers_notfound:
        logger.info('')
        logger.info('Monomers not found in given context')
        logger.info('-----------------------------------')
        for m in monomers_notfound:
            logger.info('%s' % m)

"def",
"set_expression",
"(",
"self",
",",
"expression_dict",
")",
":",
"if",
"self",
".",
"model",
"is",
"None",
":",
"return",
"monomers_found",
"=",
"[",
"]",
"monomers_notfound",
"=",
"[",
"]",
"# Iterate over all the monomers",
"for",
"m",
"in",
"self",
".",
"model",
".",
"monomers",
":",
"if",
"(",
"m",
".",
"name",
"in",
"expression_dict",
"and",
"expression_dict",
"[",
"m",
".",
"name",
"]",
"is",
"not",
"None",
")",
":",
"# Try to get the expression amount from the dict",
"init",
"=",
"expression_dict",
"[",
"m",
".",
"name",
"]",
"# We interpret nan and None as not expressed",
"if",
"math",
".",
"isnan",
"(",
"init",
")",
":",
"init",
"=",
"0",
"init_round",
"=",
"round",
"(",
"init",
")",
"set_base_initial_condition",
"(",
"self",
".",
"model",
",",
"m",
",",
"init_round",
")",
"monomers_found",
".",
"append",
"(",
"m",
".",
"name",
")",
"else",
":",
"set_base_initial_condition",
"(",
"self",
".",
"model",
",",
"m",
",",
"self",
".",
"default_initial_amount",
")",
"monomers_notfound",
".",
"append",
"(",
"m",
".",
"name",
")",
"logger",
".",
"info",
"(",
"'Monomers set to given context'",
")",
"logger",
".",
"info",
"(",
"'-----------------------------'",
")",
"for",
"m",
"in",
"monomers_found",
":",
"logger",
".",
"info",
"(",
"'%s'",
"%",
"m",
")",
"if",
"monomers_notfound",
":",
"logger",
".",
"info",
"(",
"''",
")",
"logger",
".",
"info",
"(",
"'Monomers not found in given context'",
")",
"logger",
".",
"info",
"(",
"'-----------------------------------'",
")",
"for",
"m",
"in",
"monomers_notfound",
":",
"logger",
".",
"info",
"(",
"'%s'",
"%",
"m",
")"
]
| Set protein expression amounts as initial conditions
Parameters
----------
expression_dict : dict
A dictionary in which the keys are gene names and the
values are numbers representing the absolute amount
(count per cell) of proteins expressed. Proteins that
are not expressed can be represented as nan. Entries
that are not in the dict or are in there but resolve
to None, are set to the default initial amount.
Example: {'EGFR': 12345, 'BRAF': 4567, 'ESR1': nan} | [
"Set",
"protein",
"expression",
"amounts",
"as",
"initial",
"conditions"
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L650-L694 | partition: train
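
For example (hypothetical per-cell protein counts):

import numpy as np

# nan marks a protein as not expressed (initialized to 0); monomers missing
# from the dict fall back to the default initial amount
pa.set_expression({'MAPK1': 250000.0, 'MAP2K1': 120000.0, 'EGFR': np.nan})
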
sorgerlab/indra | indra/assemblers/pysb/assembler.py | PysbAssembler.set_context | python

def set_context(self, cell_type):
    """Set protein expression amounts from CCLE as initial conditions.

    This method uses :py:mod:`indra.databases.context_client` to get
    protein expression levels for a given cell type and set initial
    conditions for Monomers in the model accordingly.

    Parameters
    ----------
    cell_type : str
        Cell type name for which expression levels are queried.
        The cell type name follows the CCLE database conventions.
        Example: LOXIMVI_SKIN, BT20_BREAST
    """
    if self.model is None:
        return
    monomer_names = [m.name for m in self.model.monomers]
    res = context_client.get_protein_expression(monomer_names, [cell_type])
    amounts = res.get(cell_type)
    if not amounts:
        logger.warning('Could not get context for %s cell type.' %
                       cell_type)
        self.add_default_initial_conditions()
        return
    self.set_expression(amounts)

"def",
"set_context",
"(",
"self",
",",
"cell_type",
")",
":",
"if",
"self",
".",
"model",
"is",
"None",
":",
"return",
"monomer_names",
"=",
"[",
"m",
".",
"name",
"for",
"m",
"in",
"self",
".",
"model",
".",
"monomers",
"]",
"res",
"=",
"context_client",
".",
"get_protein_expression",
"(",
"monomer_names",
",",
"[",
"cell_type",
"]",
")",
"amounts",
"=",
"res",
".",
"get",
"(",
"cell_type",
")",
"if",
"not",
"amounts",
":",
"logger",
".",
"warning",
"(",
"'Could not get context for %s cell type.'",
"%",
"cell_type",
")",
"self",
".",
"add_default_initial_conditions",
"(",
")",
"return",
"self",
".",
"set_expression",
"(",
"amounts",
")"
]
| Set protein expression amounts from CCLE as initial conditions.
This method uses :py:mod:`indra.databases.context_client` to get
protein expression levels for a given cell type and set initial
conditions for Monomers in the model accordingly.
Parameters
----------
cell_type : str
Cell type name for which expression levels are queried.
The cell type name follows the CCLE database conventions.
Example: LOXIMVI_SKIN, BT20_BREAST | [
"Set",
"protein",
"expression",
"amounts",
"from",
"CCLE",
"as",
"initial",
"conditions",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L696-L721 | partition: train
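
For example (requires network access to the context service; falls back to default initial conditions if no expression data is returned):

pa.set_context('BT20_BREAST')
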
sorgerlab/indra | indra/assemblers/pysb/assembler.py | PysbAssembler.export_model | python

def export_model(self, format, file_name=None):
    """Save the assembled model in a modeling formalism other than PySB.

    For more details on exporting PySB models, see
    http://pysb.readthedocs.io/en/latest/modules/export/index.html

    Parameters
    ----------
    format : str
        The format to export into, for instance "kappa", "bngl",
        "sbml", "matlab", "mathematica", "potterswheel". See
        http://pysb.readthedocs.io/en/latest/modules/export/index.html
        for a list of supported formats. In addition to the formats
        supported by PySB itself, this method also provides "sbgn"
        output.
    file_name : Optional[str]
        An optional file name to save the exported model into.

    Returns
    -------
    exp_str : str or object
        The exported model string or object
    """
    # Handle SBGN as a special case
    if format == 'sbgn':
        exp_str = export_sbgn(self.model)
    elif format == 'kappa_im':
        # NOTE: this export is not a str, rather a graph object
        return export_kappa_im(self.model, file_name)
    elif format == 'kappa_cm':
        # NOTE: this export is not a str, rather a graph object
        return export_kappa_cm(self.model, file_name)
    else:
        try:
            exp_str = pysb.export.export(self.model, format)
        except KeyError:
            logging.error('Unknown export format: %s' % format)
            return None
    if file_name:
        with open(file_name, 'wb') as fh:
            fh.write(exp_str.encode('utf-8'))
    return exp_str

"def",
"export_model",
"(",
"self",
",",
"format",
",",
"file_name",
"=",
"None",
")",
":",
"# Handle SBGN as special case",
"if",
"format",
"==",
"'sbgn'",
":",
"exp_str",
"=",
"export_sbgn",
"(",
"self",
".",
"model",
")",
"elif",
"format",
"==",
"'kappa_im'",
":",
"# NOTE: this export is not a str, rather a graph object",
"return",
"export_kappa_im",
"(",
"self",
".",
"model",
",",
"file_name",
")",
"elif",
"format",
"==",
"'kappa_cm'",
":",
"# NOTE: this export is not a str, rather a graph object",
"return",
"export_kappa_cm",
"(",
"self",
".",
"model",
",",
"file_name",
")",
"else",
":",
"try",
":",
"exp_str",
"=",
"pysb",
".",
"export",
".",
"export",
"(",
"self",
".",
"model",
",",
"format",
")",
"except",
"KeyError",
":",
"logging",
".",
"error",
"(",
"'Unknown export format: %s'",
"%",
"format",
")",
"return",
"None",
"if",
"file_name",
":",
"with",
"open",
"(",
"file_name",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"exp_str",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"exp_str"
]
| Save the assembled model in a modeling formalism other than PySB.
For more details on exporting PySB models, see
http://pysb.readthedocs.io/en/latest/modules/export/index.html
Parameters
----------
format : str
The format to export into, for instance "kappa", "bngl",
"sbml", "matlab", "mathematica", "potterswheel". See
http://pysb.readthedocs.io/en/latest/modules/export/index.html
for a list of supported formats. In addition to the formats
supported by PySB itself, this method also provides "sbgn"
output.
file_name : Optional[str]
An optional file name to save the exported model into.
Returns
-------
exp_str : str or object
The exported model string or object | [
"Save",
"the",
"assembled",
"model",
"in",
"a",
"modeling",
"formalism",
"other",
"than",
"PySB",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L746-L789 | partition: train
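
Usage per the docstring:

sbml_str = pa.export_model('sbml')
pa.export_model('bngl', file_name='indra_model.bngl')
# The Kappa influence/contact map formats return graph objects, not strings
im_graph = pa.export_model('kappa_im')
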
sorgerlab/indra | indra/assemblers/pysb/assembler.py | PysbAssembler.save_rst | python

def save_rst(self, file_name='pysb_model.rst', module_name='pysb_module'):
    """Save the assembled model as an RST file for literate modeling.

    Parameters
    ----------
    file_name : Optional[str]
        The name of the file to save the RST in.
        Default: pysb_model.rst
    module_name : Optional[str]
        The name of the python function defining the module.
        Default: pysb_module
    """
    if self.model is not None:
        with open(file_name, 'wt') as fh:
            fh.write('.. _%s:\n\n' % module_name)
            fh.write('Module\n======\n\n')
            fh.write('INDRA-assembled model\n---------------------\n\n')
            fh.write('::\n\n')
            model_str = pysb.export.export(self.model, 'pysb_flat')
            model_str = '\t' + model_str.replace('\n', '\n\t')
            fh.write(model_str)

"def",
"save_rst",
"(",
"self",
",",
"file_name",
"=",
"'pysb_model.rst'",
",",
"module_name",
"=",
"'pysb_module'",
")",
":",
"if",
"self",
".",
"model",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"file_name",
",",
"'wt'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"'.. _%s:\\n\\n'",
"%",
"module_name",
")",
"fh",
".",
"write",
"(",
"'Module\\n======\\n\\n'",
")",
"fh",
".",
"write",
"(",
"'INDRA-assembled model\\n---------------------\\n\\n'",
")",
"fh",
".",
"write",
"(",
"'::\\n\\n'",
")",
"model_str",
"=",
"pysb",
".",
"export",
".",
"export",
"(",
"self",
".",
"model",
",",
"'pysb_flat'",
")",
"model_str",
"=",
"'\\t'",
"+",
"model_str",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n\\t'",
")",
"fh",
".",
"write",
"(",
"model_str",
")"
]
| Save the assembled model as an RST file for literate modeling.
Parameters
----------
file_name : Optional[str]
The name of the file to save the RST in.
Default: pysb_model.rst
module_name : Optional[str]
The name of the python function defining the module.
Default: pysb_module | [
"Save",
"the",
"assembled",
"model",
"as",
"an",
"RST",
"file",
"for",
"literate",
"modeling",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L792-L812 | train |
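Continuing the usage sketch from the export_model record above (same assumed pa with an assembled model), save_rst writes the model as literate RST; the file and module names are arbitrary.

pa.save_rst(file_name='pysb_model.rst', module_name='pysb_module')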
sorgerlab/indra | indra/assemblers/pysb/assembler.py | PysbAssembler._monomers | def _monomers(self):
"""Calls the appropriate monomers method based on policies."""
for stmt in self.statements:
if _is_whitelisted(stmt):
self._dispatch(stmt, 'monomers', self.agent_set) | python | def _monomers(self):
"""Calls the appropriate monomers method based on policies."""
for stmt in self.statements:
if _is_whitelisted(stmt):
self._dispatch(stmt, 'monomers', self.agent_set) | [
"def",
"_monomers",
"(",
"self",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"_is_whitelisted",
"(",
"stmt",
")",
":",
"self",
".",
"_dispatch",
"(",
"stmt",
",",
"'monomers'",
",",
"self",
".",
"agent_set",
")"
]
| Calls the appropriate monomers method based on policies. | [
"Calls",
"the",
"appropriate",
"monomers",
"method",
"based",
"on",
"policies",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L844-L848 | train |
sorgerlab/indra | indra/sources/trips/client.py | send_query | def send_query(text, service_endpoint='drum', query_args=None):
"""Send a query to the TRIPS web service.
Parameters
----------
text : str
The text to be processed.
service_endpoint : Optional[str]
Selects the TRIPS/DRUM web service endpoint to use. One of "drum"
(default), "drum-dev" (a nightly build), or "cwms"/"cwmsreader" for
more general knowledge extraction.
query_args : Optional[dict]
A dictionary of arguments to be passed with the query.
Returns
-------
html : str
The HTML result returned by the web service.
"""
if service_endpoint in ['drum', 'drum-dev', 'cwms', 'cwmsreader']:
url = base_url + service_endpoint
else:
logger.error('Invalid service endpoint: %s' % service_endpoint)
return ''
if query_args is None:
query_args = {}
query_args.update({'input': text})
res = requests.get(url, query_args, timeout=3600)
if res.status_code != 200:
logger.error('Problem with TRIPS query: status code %s' %
res.status_code)
return ''
# Gets unicode content
return res.text | python | def send_query(text, service_endpoint='drum', query_args=None):
"""Send a query to the TRIPS web service.
Parameters
----------
text : str
The text to be processed.
service_endpoint : Optional[str]
Selects the TRIPS/DRUM web service endpoint to use. One of "drum"
(default), "drum-dev" (a nightly build), or "cwms"/"cwmsreader" for
more general knowledge extraction.
query_args : Optional[dict]
A dictionary of arguments to be passed with the query.
Returns
-------
html : str
The HTML result returned by the web service.
"""
if service_endpoint in ['drum', 'drum-dev', 'cwms', 'cwmsreader']:
url = base_url + service_endpoint
else:
logger.error('Invalid service endpoint: %s' % service_endpoint)
return ''
if query_args is None:
query_args = {}
query_args.update({'input': text})
res = requests.get(url, query_args, timeout=3600)
if res.status_code != 200:
logger.error('Problem with TRIPS query: status code %s' %
res.status_code)
return ''
# Gets unicode content
return res.text | [
"def",
"send_query",
"(",
"text",
",",
"service_endpoint",
"=",
"'drum'",
",",
"query_args",
"=",
"None",
")",
":",
"if",
"service_endpoint",
"in",
"[",
"'drum'",
",",
"'drum-dev'",
",",
"'cwms'",
",",
"'cwmsreader'",
"]",
":",
"url",
"=",
"base_url",
"+",
"service_endpoint",
"else",
":",
"logger",
".",
"error",
"(",
"'Invalid service endpoint: %s'",
"%",
"service_endpoint",
")",
"return",
"''",
"if",
"query_args",
"is",
"None",
":",
"query_args",
"=",
"{",
"}",
"query_args",
".",
"update",
"(",
"{",
"'input'",
":",
"text",
"}",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"query_args",
",",
"timeout",
"=",
"3600",
")",
"if",
"not",
"res",
".",
"status_code",
"==",
"200",
":",
"logger",
".",
"error",
"(",
"'Problem with TRIPS query: status code %s'",
"%",
"res",
".",
"status_code",
")",
"return",
"''",
"# Gets unicode content",
"return",
"res",
".",
"text"
]
| Send a query to the TRIPS web service.
Parameters
----------
text : str
The text to be processed.
service_endpoint : Optional[str]
Selects the TRIPS/DRUM web service endpoint to use. One of "drum"
(default), "drum-dev" (a nightly build), or "cwms"/"cwmsreader" for
more general knowledge extraction.
query_args : Optional[dict]
A dictionary of arguments to be passed with the query.
Returns
-------
html : str
The HTML result returned by the web service. | [
"Send",
"a",
"query",
"to",
"the",
"TRIPS",
"web",
"service",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/client.py#L15-L48 | train |
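A hedged end-to-end sketch of this client module (send_query together with the get_xml and save_xml helpers that follow); it needs network access to the TRIPS web service, and the input sentence and output file name are arbitrary.

from indra.sources.trips.client import send_query, get_xml, save_xml

html = send_query('BRAF phosphorylates MAP2K1.', service_endpoint='drum')
if html:
    ekb_xml = get_xml(html, content_tag='ekb')
    save_xml(ekb_xml, 'braf_ekb.xml', pretty=True)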
sorgerlab/indra | indra/sources/trips/client.py | get_xml | def get_xml(html, content_tag='ekb', fail_if_empty=False):
"""Extract the content XML from the HTML output of the TRIPS web service.
Parameters
----------
html : str
The HTML output from the TRIPS web service.
content_tag : str
The xml tag used to label the content. Default is 'ekb'.
fail_if_empty : bool
If True, and if the xml content found is an empty string, raise an
exception. Default is False.
Returns
-------
The extraction knowledge base (e.g. EKB) XML that contains the event and
term extractions.
"""
cont = re.findall(r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % {'tag': content_tag},
html, re.MULTILINE | re.DOTALL)
if cont:
events_terms = ''.join([l.strip() for l in cont[0][1].splitlines()])
if 'xmlns' in cont[0][0]:
meta = ' '.join([l.strip() for l in cont[0][0].splitlines()])
else:
meta = ''
else:
events_terms = ''
meta = ''
if fail_if_empty:
assert events_terms != '',\
"Got empty string for events content from html:\n%s" % html
header = ('<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>'
% (content_tag, meta))
footer = '</%s>' % content_tag
return header + events_terms.replace('\n', '') + footer | python | def get_xml(html, content_tag='ekb', fail_if_empty=False):
"""Extract the content XML from the HTML output of the TRIPS web service.
Parameters
----------
html : str
The HTML output from the TRIPS web service.
content_tag : str
The xml tag used to label the content. Default is 'ekb'.
fail_if_empty : bool
If True, and if the xml content found is an empty string, raise an
exception. Default is False.
Returns
-------
The extraction knowledge base (e.g. EKB) XML that contains the event and
term extractions.
"""
cont = re.findall(r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % {'tag': content_tag},
html, re.MULTILINE | re.DOTALL)
if cont:
events_terms = ''.join([l.strip() for l in cont[0][1].splitlines()])
if 'xmlns' in cont[0][0]:
meta = ' '.join([l.strip() for l in cont[0][0].splitlines()])
else:
meta = ''
else:
events_terms = ''
meta = ''
if fail_if_empty:
assert events_terms != '',\
"Got empty string for events content from html:\n%s" % html
header = ('<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>'
% (content_tag, meta))
footer = '</%s>' % content_tag
return header + events_terms.replace('\n', '') + footer | [
"def",
"get_xml",
"(",
"html",
",",
"content_tag",
"=",
"'ekb'",
",",
"fail_if_empty",
"=",
"False",
")",
":",
"cont",
"=",
"re",
".",
"findall",
"(",
"r'<%(tag)s(.*?)>(.*?)</%(tag)s>'",
"%",
"{",
"'tag'",
":",
"content_tag",
"}",
",",
"html",
",",
"re",
".",
"MULTILINE",
"|",
"re",
".",
"DOTALL",
")",
"if",
"cont",
":",
"events_terms",
"=",
"''",
".",
"join",
"(",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"cont",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"splitlines",
"(",
")",
"]",
")",
"if",
"'xmlns'",
"in",
"cont",
"[",
"0",
"]",
"[",
"0",
"]",
":",
"meta",
"=",
"' '",
".",
"join",
"(",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"cont",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"splitlines",
"(",
")",
"]",
")",
"else",
":",
"meta",
"=",
"''",
"else",
":",
"events_terms",
"=",
"''",
"meta",
"=",
"''",
"if",
"fail_if_empty",
":",
"assert",
"events_terms",
"!=",
"''",
",",
"\"Got empty string for events content from html:\\n%s\"",
"%",
"html",
"header",
"=",
"(",
"'<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?><%s%s>'",
"%",
"(",
"content_tag",
",",
"meta",
")",
")",
"footer",
"=",
"'</%s>'",
"%",
"content_tag",
"return",
"header",
"+",
"events_terms",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"+",
"footer"
]
| Extract the content XML from the HTML output of the TRIPS web service.
Parameters
----------
html : str
The HTML output from the TRIPS web service.
content_tag : str
The xml tag used to label the content. Default is 'ekb'.
fail_if_empty : bool
If True, and if the xml content found is an empty string, raise an
exception. Default is False.
Returns
-------
The extraction knowledge base (e.g. EKB) XML that contains the event and
term extractions. | [
"Extract",
"the",
"content",
"XML",
"from",
"the",
"HTML",
"output",
"of",
"the",
"TRIPS",
"web",
"service",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/client.py#L51-L88 | train |
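An offline sketch of the tag extraction get_xml performs, using a hand-written stand-in for the service's HTML output (the xmlns URL is made up).

from indra.sources.trips.client import get_xml

sample_html = ('<html><body>'
               '<ekb xmlns="http://example.org/ekb"><EVENT id="V1"/></ekb>'
               '</body></html>')
# Prints a standalone XML document wrapping the extracted <EVENT> content.
print(get_xml(sample_html, content_tag='ekb', fail_if_empty=True))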
sorgerlab/indra | indra/sources/trips/client.py | save_xml | def save_xml(xml_str, file_name, pretty=True):
"""Save the TRIPS EKB XML in a file.
Parameters
----------
xml_str : str
The TRIPS EKB XML string to be saved.
file_name : str
The name of the file to save the result in.
pretty : Optional[bool]
If True, the XML is pretty printed.
"""
try:
fh = open(file_name, 'wt')
except IOError:
logger.error('Could not open %s for writing.' % file_name)
return
if pretty:
xmld = xml.dom.minidom.parseString(xml_str)
xml_str_pretty = xmld.toprettyxml()
fh.write(xml_str_pretty)
else:
fh.write(xml_str)
fh.close() | python | def save_xml(xml_str, file_name, pretty=True):
"""Save the TRIPS EKB XML in a file.
Parameters
----------
xml_str : str
The TRIPS EKB XML string to be saved.
file_name : str
The name of the file to save the result in.
pretty : Optional[bool]
If True, the XML is pretty printed.
"""
try:
fh = open(file_name, 'wt')
except IOError:
logger.error('Could not open %s for writing.' % file_name)
return
if pretty:
xmld = xml.dom.minidom.parseString(xml_str)
xml_str_pretty = xmld.toprettyxml()
fh.write(xml_str_pretty)
else:
fh.write(xml_str)
fh.close() | [
"def",
"save_xml",
"(",
"xml_str",
",",
"file_name",
",",
"pretty",
"=",
"True",
")",
":",
"try",
":",
"fh",
"=",
"open",
"(",
"file_name",
",",
"'wt'",
")",
"except",
"IOError",
":",
"logger",
".",
"error",
"(",
"'Could not open %s for writing.'",
"%",
"file_name",
")",
"return",
"if",
"pretty",
":",
"xmld",
"=",
"xml",
".",
"dom",
".",
"minidom",
".",
"parseString",
"(",
"xml_str",
")",
"xml_str_pretty",
"=",
"xmld",
".",
"toprettyxml",
"(",
")",
"fh",
".",
"write",
"(",
"xml_str_pretty",
")",
"else",
":",
"fh",
".",
"write",
"(",
"xml_str",
")",
"fh",
".",
"close",
"(",
")"
]
| Save the TRIPS EKB XML in a file.
Parameters
----------
xml_str : str
The TRIPS EKB XML string to be saved.
file_name : str
The name of the file to save the result in.
pretty : Optional[bool]
If True, the XML is pretty printed. | [
"Save",
"the",
"TRIPS",
"EKB",
"XML",
"in",
"a",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/client.py#L91-L114 | train |
sorgerlab/indra | indra/sources/sofia/api.py | process_table | def process_table(fname):
"""Return processor by processing a given sheet of a spreadsheet file.
Parameters
----------
fname : str
The name of the Excel file (typically .xlsx extension) to process
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute.
"""
book = openpyxl.load_workbook(fname, read_only=True)
try:
rel_sheet = book['Relations']
except Exception:
rel_sheet = book['Causal']
event_sheet = book['Events']
entities_sheet = book['Entities']
sp = SofiaExcelProcessor(rel_sheet.rows, event_sheet.rows,
entities_sheet.rows)
return sp | python | def process_table(fname):
"""Return processor by processing a given sheet of a spreadsheet file.
Parameters
----------
fname : str
The name of the Excel file (typically .xlsx extension) to process
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute.
"""
book = openpyxl.load_workbook(fname, read_only=True)
try:
rel_sheet = book['Relations']
except Exception:
rel_sheet = book['Causal']
event_sheet = book['Events']
entities_sheet = book['Entities']
sp = SofiaExcelProcessor(rel_sheet.rows, event_sheet.rows,
entities_sheet.rows)
return sp | [
"def",
"process_table",
"(",
"fname",
")",
":",
"book",
"=",
"openpyxl",
".",
"load_workbook",
"(",
"fname",
",",
"read_only",
"=",
"True",
")",
"try",
":",
"rel_sheet",
"=",
"book",
"[",
"'Relations'",
"]",
"except",
"Exception",
"as",
"e",
":",
"rel_sheet",
"=",
"book",
"[",
"'Causal'",
"]",
"event_sheet",
"=",
"book",
"[",
"'Events'",
"]",
"entities_sheet",
"=",
"book",
"[",
"'Entities'",
"]",
"sp",
"=",
"SofiaExcelProcessor",
"(",
"rel_sheet",
".",
"rows",
",",
"event_sheet",
".",
"rows",
",",
"entities_sheet",
".",
"rows",
")",
"return",
"sp"
]
| Return processor by processing a given sheet of a spreadsheet file.
Parameters
----------
fname : str
The name of the Excel file (typically .xlsx extension) to process
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute. | [
"Return",
"processor",
"by",
"processing",
"a",
"given",
"sheet",
"of",
"a",
"spreadsheet",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sofia/api.py#L9-L32 | train |
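A usage sketch for process_table; the spreadsheet path is hypothetical and must point to a real Sofia output workbook containing Relations (or Causal), Events, and Entities sheets.

from indra.sources.sofia.api import process_table

sp = process_table('sofia_output.xlsx')  # hypothetical file name
for stmt in sp.statements:
    print(stmt)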
sorgerlab/indra | indra/sources/sofia/api.py | process_text | def process_text(text, out_file='sofia_output.json', auth=None):
"""Return processor by processing text given as a string.
Parameters
----------
text : str
A string containing the text to be processed with Sofia.
out_file : Optional[str]
The path to a file to save the reader's output into.
Default: sofia_output.json
auth : Optional[list]
A username/password pair for the Sofia web service. If not given,
the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either
the INDRA config or the environment.
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute. If the API did not process
the text, None is returned.
"""
text_json = {'text': text}
if not auth:
user, password = _get_sofia_auth()
else:
user, password = auth
if not user or not password:
raise ValueError('Could not use SOFIA web service since'
' authentication information is missing. Please'
' set SOFIA_USERNAME and SOFIA_PASSWORD in the'
' INDRA configuration file or as environment'
' variables.')
json_response, status_code, process_status = \
_text_processing(text_json=text_json, user=user, password=password)
# Check response status
if process_status != 'Done' or status_code != 200:
return None
# Cache reading output
if out_file:
with open(out_file, 'w') as fh:
json.dump(json_response, fh, indent=1)
return process_json(json_response) | python | def process_text(text, out_file='sofia_output.json', auth=None):
"""Return processor by processing text given as a string.
Parameters
----------
text : str
A string containing the text to be processed with Sofia.
out_file : Optional[str]
The path to a file to save the reader's output into.
Default: sofia_output.json
auth : Optional[list]
A username/password pair for the Sofia web service. If not given,
the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either
the INDRA config or the environment.
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute. If the API did not process
the text, None is returned.
"""
text_json = {'text': text}
if not auth:
user, password = _get_sofia_auth()
else:
user, password = auth
if not user or not password:
raise ValueError('Could not use SOFIA web service since'
' authentication information is missing. Please'
' set SOFIA_USERNAME and SOFIA_PASSWORD in the'
' INDRA configuration file or as environment'
' variables.')
json_response, status_code, process_status = \
_text_processing(text_json=text_json, user=user, password=password)
# Check response status
if process_status != 'Done' or status_code != 200:
return None
# Cache reading output
if out_file:
with open(out_file, 'w') as fh:
json.dump(json_response, fh, indent=1)
return process_json(json_response) | [
"def",
"process_text",
"(",
"text",
",",
"out_file",
"=",
"'sofia_output.json'",
",",
"auth",
"=",
"None",
")",
":",
"text_json",
"=",
"{",
"'text'",
":",
"text",
"}",
"if",
"not",
"auth",
":",
"user",
",",
"password",
"=",
"_get_sofia_auth",
"(",
")",
"else",
":",
"user",
",",
"password",
"=",
"auth",
"if",
"not",
"user",
"or",
"not",
"password",
":",
"raise",
"ValueError",
"(",
"'Could not use SOFIA web service since'",
"' authentication information is missing. Please'",
"' set SOFIA_USERNAME and SOFIA_PASSWORD in the'",
"' INDRA configuration file or as environmental'",
"' variables.'",
")",
"json_response",
",",
"status_code",
",",
"process_status",
"=",
"_text_processing",
"(",
"text_json",
"=",
"text_json",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
")",
"# Check response status",
"if",
"process_status",
"!=",
"'Done'",
"or",
"status_code",
"!=",
"200",
":",
"return",
"None",
"# Cache reading output",
"if",
"out_file",
":",
"with",
"open",
"(",
"out_file",
",",
"'w'",
")",
"as",
"fh",
":",
"json",
".",
"dump",
"(",
"json_response",
",",
"fh",
",",
"indent",
"=",
"1",
")",
"return",
"process_json",
"(",
"json_response",
")"
]
| Return processor by processing text given as a string.
Parameters
----------
text : str
A string containing the text to be processed with Sofia.
out_file : Optional[str]
The path to a file to save the reader's output into.
Default: sofia_output.json
auth : Optional[list]
A username/password pair for the Sofia web service. If not given,
the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either
the INDRA config or the environment.
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute. If the API did not process
the text, None is returned. | [
"Return",
"processor",
"by",
"processing",
"text",
"given",
"as",
"a",
"string",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sofia/api.py#L35-L80 | train |
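A usage sketch for process_text; the credentials below are placeholders (real ones come from the INDRA config or the environment), and the call requires access to the Sofia web service.

from indra.sources.sofia.api import process_text

sp = process_text('Heavy rainfall reduced crop yields.',
                  out_file='sofia_output.json',
                  auth=('my_user', 'my_password'))  # placeholder credentials
if sp is not None:  # None means the service did not finish processing
    print(sp.statements)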
sorgerlab/indra | indra/sources/ndex_cx/processor.py | _get_dict_from_list | def _get_dict_from_list(dict_key, list_of_dicts):
"""Retrieve a specific dict from a list of dicts.
Parameters
----------
dict_key : str
The (single) key of the dict to be retrieved from the list.
list_of_dicts : list
The list of dicts to search for the specific dict.
Returns
-------
dict value
The value associated with the dict_key (e.g., a list of nodes or
edges).
"""
the_dict = [cur_dict for cur_dict in list_of_dicts
if cur_dict.get(dict_key)]
if not the_dict:
raise ValueError('Could not find a dict with key %s' % dict_key)
return the_dict[0][dict_key] | python | def _get_dict_from_list(dict_key, list_of_dicts):
"""Retrieve a specific dict from a list of dicts.
Parameters
----------
dict_key : str
The (single) key of the dict to be retrieved from the list.
list_of_dicts : list
The list of dicts to search for the specific dict.
Returns
-------
dict value
The value associated with the dict_key (e.g., a list of nodes or
edges).
"""
the_dict = [cur_dict for cur_dict in list_of_dicts
if cur_dict.get(dict_key)]
if not the_dict:
raise ValueError('Could not find a dict with key %s' % dict_key)
return the_dict[0][dict_key] | [
"def",
"_get_dict_from_list",
"(",
"dict_key",
",",
"list_of_dicts",
")",
":",
"the_dict",
"=",
"[",
"cur_dict",
"for",
"cur_dict",
"in",
"list_of_dicts",
"if",
"cur_dict",
".",
"get",
"(",
"dict_key",
")",
"]",
"if",
"not",
"the_dict",
":",
"raise",
"ValueError",
"(",
"'Could not find a dict with key %s'",
"%",
"dict_key",
")",
"return",
"the_dict",
"[",
"0",
"]",
"[",
"dict_key",
"]"
]
| Retrieve a specific dict from a list of dicts.
Parameters
----------
dict_key : str
The (single) key of the dict to be retrieved from the list.
list_of_dicts : list
The list of dicts to search for the specific dict.
Returns
-------
dict value
The value associated with the dict_key (e.g., a list of nodes or
edges). | [
"Retrieve",
"a",
"specific",
"dict",
"from",
"a",
"list",
"of",
"dicts",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L22-L42 | train |
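A standalone sketch of _get_dict_from_list on a toy CX-style aspect list; the shape mirrors a real CX document, but the values are made up.

from indra.sources.ndex_cx.processor import _get_dict_from_list

cx = [{'nodes': [{'@id': 0, 'n': 'BRAF'}]},
      {'edges': [{'@id': 1, 's': 0, 't': 0, 'i': 'interacts with'}]}]
print(_get_dict_from_list('nodes', cx))  # -> [{'@id': 0, 'n': 'BRAF'}]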
sorgerlab/indra | indra/sources/ndex_cx/processor.py | NdexCxProcessor._initialize_node_agents | def _initialize_node_agents(self):
"""Initialize internal dicts containing node information."""
nodes = _get_dict_from_list('nodes', self.cx)
invalid_genes = []
for node in nodes:
id = node['@id']
cx_db_refs = self.get_aliases(node)
up_id = cx_db_refs.get('UP')
if up_id:
gene_name = uniprot_client.get_gene_name(up_id)
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
db_refs = {'UP': up_id, 'HGNC': hgnc_id, 'TEXT': gene_name}
agent = Agent(gene_name, db_refs=db_refs)
self._node_names[id] = gene_name
self._node_agents[id] = agent
continue
else:
node_name = node['n']
self._node_names[id] = node_name
hgnc_id = hgnc_client.get_hgnc_id(node_name)
db_refs = {'TEXT': node_name}
if not hgnc_id:
if not self.require_grounding:
self._node_agents[id] = \
Agent(node_name, db_refs=db_refs)
invalid_genes.append(node_name)
else:
db_refs.update({'HGNC': hgnc_id})
up_id = hgnc_client.get_uniprot_id(hgnc_id)
# It's possible that a valid HGNC ID will not have a
# Uniprot ID, as in the case of HOTAIR (HOX transcript
# antisense RNA, HGNC:33510)
if up_id:
db_refs.update({'UP': up_id})
self._node_agents[id] = Agent(node_name, db_refs=db_refs)
if invalid_genes:
verb = 'Skipped' if self.require_grounding else 'Included'
logger.info('%s invalid gene symbols: %s' %
(verb, ', '.join(invalid_genes))) | python | def _initialize_node_agents(self):
"""Initialize internal dicts containing node information."""
nodes = _get_dict_from_list('nodes', self.cx)
invalid_genes = []
for node in nodes:
id = node['@id']
cx_db_refs = self.get_aliases(node)
up_id = cx_db_refs.get('UP')
if up_id:
gene_name = uniprot_client.get_gene_name(up_id)
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
db_refs = {'UP': up_id, 'HGNC': hgnc_id, 'TEXT': gene_name}
agent = Agent(gene_name, db_refs=db_refs)
self._node_names[id] = gene_name
self._node_agents[id] = agent
continue
else:
node_name = node['n']
self._node_names[id] = node_name
hgnc_id = hgnc_client.get_hgnc_id(node_name)
db_refs = {'TEXT': node_name}
if not hgnc_id:
if not self.require_grounding:
self._node_agents[id] = \
Agent(node_name, db_refs=db_refs)
invalid_genes.append(node_name)
else:
db_refs.update({'HGNC': hgnc_id})
up_id = hgnc_client.get_uniprot_id(hgnc_id)
# It's possible that a valid HGNC ID will not have a
# Uniprot ID, as in the case of HOTAIR (HOX transcript
# antisense RNA, HGNC:33510)
if up_id:
db_refs.update({'UP': up_id})
self._node_agents[id] = Agent(node_name, db_refs=db_refs)
if invalid_genes:
verb = 'Skipped' if self.require_grounding else 'Included'
logger.info('%s invalid gene symbols: %s' %
(verb, ', '.join(invalid_genes))) | [
"def",
"_initialize_node_agents",
"(",
"self",
")",
":",
"nodes",
"=",
"_get_dict_from_list",
"(",
"'nodes'",
",",
"self",
".",
"cx",
")",
"invalid_genes",
"=",
"[",
"]",
"for",
"node",
"in",
"nodes",
":",
"id",
"=",
"node",
"[",
"'@id'",
"]",
"cx_db_refs",
"=",
"self",
".",
"get_aliases",
"(",
"node",
")",
"up_id",
"=",
"cx_db_refs",
".",
"get",
"(",
"'UP'",
")",
"if",
"up_id",
":",
"gene_name",
"=",
"uniprot_client",
".",
"get_gene_name",
"(",
"up_id",
")",
"hgnc_id",
"=",
"hgnc_client",
".",
"get_hgnc_id",
"(",
"gene_name",
")",
"db_refs",
"=",
"{",
"'UP'",
":",
"up_id",
",",
"'HGNC'",
":",
"hgnc_id",
",",
"'TEXT'",
":",
"gene_name",
"}",
"agent",
"=",
"Agent",
"(",
"gene_name",
",",
"db_refs",
"=",
"db_refs",
")",
"self",
".",
"_node_names",
"[",
"id",
"]",
"=",
"gene_name",
"self",
".",
"_node_agents",
"[",
"id",
"]",
"=",
"agent",
"continue",
"else",
":",
"node_name",
"=",
"node",
"[",
"'n'",
"]",
"self",
".",
"_node_names",
"[",
"id",
"]",
"=",
"node_name",
"hgnc_id",
"=",
"hgnc_client",
".",
"get_hgnc_id",
"(",
"node_name",
")",
"db_refs",
"=",
"{",
"'TEXT'",
":",
"node_name",
"}",
"if",
"not",
"hgnc_id",
":",
"if",
"not",
"self",
".",
"require_grounding",
":",
"self",
".",
"_node_agents",
"[",
"id",
"]",
"=",
"Agent",
"(",
"node_name",
",",
"db_refs",
"=",
"db_refs",
")",
"invalid_genes",
".",
"append",
"(",
"node_name",
")",
"else",
":",
"db_refs",
".",
"update",
"(",
"{",
"'HGNC'",
":",
"hgnc_id",
"}",
")",
"up_id",
"=",
"hgnc_client",
".",
"get_uniprot_id",
"(",
"hgnc_id",
")",
"# It's possible that a valid HGNC ID will not have a",
"# Uniprot ID, as in the case of HOTAIR (HOX transcript",
"# antisense RNA, HGNC:33510)",
"if",
"up_id",
":",
"db_refs",
".",
"update",
"(",
"{",
"'UP'",
":",
"up_id",
"}",
")",
"self",
".",
"_node_agents",
"[",
"id",
"]",
"=",
"Agent",
"(",
"node_name",
",",
"db_refs",
"=",
"db_refs",
")",
"if",
"invalid_genes",
":",
"verb",
"=",
"'Skipped'",
"if",
"self",
".",
"require_grounding",
"else",
"'Included'",
"logger",
".",
"info",
"(",
"'%s invalid gene symbols: %s'",
"%",
"(",
"verb",
",",
"', '",
".",
"join",
"(",
"invalid_genes",
")",
")",
")"
]
| Initialize internal dicts containing node information. | [
"Initialize",
"internal",
"dicts",
"containing",
"node",
"information",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L78-L116 | train |
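The per-node grounding logic above boils down to lookups like these, sketched standalone (requires INDRA's resource files; MAPK1 is just an example gene symbol).

from indra.databases import hgnc_client, uniprot_client

hgnc_id = hgnc_client.get_hgnc_id('MAPK1')   # '6871'
up_id = hgnc_client.get_uniprot_id(hgnc_id)  # 'P28482'
print(uniprot_client.get_gene_name(up_id))   # 'MAPK1'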
sorgerlab/indra | indra/sources/ndex_cx/processor.py | NdexCxProcessor.get_pmids | def get_pmids(self):
"""Get list of all PMIDs associated with edges in the network."""
pmids = []
for ea in self._edge_attributes.values():
edge_pmids = ea.get('pmids')
if edge_pmids:
pmids += edge_pmids
return list(set(pmids)) | python | def get_pmids(self):
"""Get list of all PMIDs associated with edges in the network."""
pmids = []
for ea in self._edge_attributes.values():
edge_pmids = ea.get('pmids')
if edge_pmids:
pmids += edge_pmids
return list(set(pmids)) | [
"def",
"get_pmids",
"(",
"self",
")",
":",
"pmids",
"=",
"[",
"]",
"for",
"ea",
"in",
"self",
".",
"_edge_attributes",
".",
"values",
"(",
")",
":",
"edge_pmids",
"=",
"ea",
".",
"get",
"(",
"'pmids'",
")",
"if",
"edge_pmids",
":",
"pmids",
"+=",
"edge_pmids",
"return",
"list",
"(",
"set",
"(",
"pmids",
")",
")"
]
| Get list of all PMIDs associated with edges in the network. | [
"Get",
"list",
"of",
"all",
"PMIDs",
"associated",
"with",
"edges",
"in",
"the",
"network",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L166-L173 | train |
sorgerlab/indra | indra/sources/ndex_cx/processor.py | NdexCxProcessor.get_statements | def get_statements(self):
"""Convert network edges into Statements.
Returns
-------
list of Statements
Converted INDRA Statements.
"""
edges = _get_dict_from_list('edges', self.cx)
for edge in edges:
edge_type = edge.get('i')
if not edge_type:
continue
stmt_type = _stmt_map.get(edge_type)
if stmt_type:
id = edge['@id']
source_agent = self._node_agents.get(edge['s'])
target_agent = self._node_agents.get(edge['t'])
if not source_agent or not target_agent:
logger.info("Skipping edge %s->%s: %s" %
(self._node_names[edge['s']],
self._node_names[edge['t']], edge))
continue
ev = self._create_evidence(id)
if stmt_type == Complex:
stmt = stmt_type([source_agent, target_agent], evidence=ev)
else:
stmt = stmt_type(source_agent, target_agent, evidence=ev)
self.statements.append(stmt)
return self.statements | python | def get_statements(self):
"""Convert network edges into Statements.
Returns
-------
list of Statements
Converted INDRA Statements.
"""
edges = _get_dict_from_list('edges', self.cx)
for edge in edges:
edge_type = edge.get('i')
if not edge_type:
continue
stmt_type = _stmt_map.get(edge_type)
if stmt_type:
id = edge['@id']
source_agent = self._node_agents.get(edge['s'])
target_agent = self._node_agents.get(edge['t'])
if not source_agent or not target_agent:
logger.info("Skipping edge %s->%s: %s" %
(self._node_names[edge['s']],
self._node_names[edge['t']], edge))
continue
ev = self._create_evidence(id)
if stmt_type == Complex:
stmt = stmt_type([source_agent, target_agent], evidence=ev)
else:
stmt = stmt_type(source_agent, target_agent, evidence=ev)
self.statements.append(stmt)
return self.statements | [
"def",
"get_statements",
"(",
"self",
")",
":",
"edges",
"=",
"_get_dict_from_list",
"(",
"'edges'",
",",
"self",
".",
"cx",
")",
"for",
"edge",
"in",
"edges",
":",
"edge_type",
"=",
"edge",
".",
"get",
"(",
"'i'",
")",
"if",
"not",
"edge_type",
":",
"continue",
"stmt_type",
"=",
"_stmt_map",
".",
"get",
"(",
"edge_type",
")",
"if",
"stmt_type",
":",
"id",
"=",
"edge",
"[",
"'@id'",
"]",
"source_agent",
"=",
"self",
".",
"_node_agents",
".",
"get",
"(",
"edge",
"[",
"'s'",
"]",
")",
"target_agent",
"=",
"self",
".",
"_node_agents",
".",
"get",
"(",
"edge",
"[",
"'t'",
"]",
")",
"if",
"not",
"source_agent",
"or",
"not",
"target_agent",
":",
"logger",
".",
"info",
"(",
"\"Skipping edge %s->%s: %s\"",
"%",
"(",
"self",
".",
"_node_names",
"[",
"edge",
"[",
"'s'",
"]",
"]",
",",
"self",
".",
"_node_names",
"[",
"edge",
"[",
"'t'",
"]",
"]",
",",
"edge",
")",
")",
"continue",
"ev",
"=",
"self",
".",
"_create_evidence",
"(",
"id",
")",
"if",
"stmt_type",
"==",
"Complex",
":",
"stmt",
"=",
"stmt_type",
"(",
"[",
"source_agent",
",",
"target_agent",
"]",
",",
"evidence",
"=",
"ev",
")",
"else",
":",
"stmt",
"=",
"stmt_type",
"(",
"source_agent",
",",
"target_agent",
",",
"evidence",
"=",
"ev",
")",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")",
"return",
"self",
".",
"statements"
]
| Convert network edges into Statements.
Returns
-------
list of Statements
Converted INDRA Statements. | [
"Convert",
"network",
"edges",
"into",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L175-L204 | train |
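A hand-built illustration of what one converted edge yields; the agents, groundings, source_api label, and PMID here are hypothetical.

from indra.statements import Agent, Complex, Evidence

braf = Agent('BRAF', db_refs={'HGNC': '1097', 'TEXT': 'BRAF'})
raf1 = Agent('RAF1', db_refs={'HGNC': '9829', 'TEXT': 'RAF1'})
ev = Evidence(source_api='ndex', pmid='12345')  # placeholder evidence
stmt = Complex([braf, raf1], evidence=[ev])
print(stmt)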
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.node_has_edge_with_label | def node_has_edge_with_label(self, node_name, edge_label):
"""Looks for an edge from node_name to some other node with the specified
label. Returns the node to which this edge points if it exists, or None
if it doesn't.
Parameters
----------
node_name :
Node that the edge starts at
edge_label :
The text in the relation property of the edge
"""
G = self.G
for edge in G.edges(node_name):
to = edge[1]
relation_name = G.edges[node_name, to]['relation']
if relation_name == edge_label:
return to
return None | python | def node_has_edge_with_label(self, node_name, edge_label):
"""Looks for an edge from node_name to some other node with the specified
label. Returns the node to which this edge points if it exists, or None
if it doesn't.
Parameters
----------
node_name :
Node that the edge starts at
edge_label :
The text in the relation property of the edge
"""
G = self.G
for edge in G.edges(node_name):
to = edge[1]
relation_name = G.edges[node_name, to]['relation']
if relation_name == edge_label:
return to
return None | [
"def",
"node_has_edge_with_label",
"(",
"self",
",",
"node_name",
",",
"edge_label",
")",
":",
"G",
"=",
"self",
".",
"G",
"for",
"edge",
"in",
"G",
".",
"edges",
"(",
"node_name",
")",
":",
"to",
"=",
"edge",
"[",
"1",
"]",
"relation_name",
"=",
"G",
".",
"edges",
"[",
"node_name",
",",
"to",
"]",
"[",
"'relation'",
"]",
"if",
"relation_name",
"==",
"edge_label",
":",
"return",
"to",
"return",
"None"
]
| Looks for an edge from node_name to some other node with the specified
label. Returns the node to which this edge points if it exists, or None
if it doesn't.
Parameters
----------
node_name :
Node that the edge starts at
edge_label :
The text in the relation property of the edge | [
"Looks",
"for",
"an",
"edge",
"from",
"node_name",
"to",
"some",
"other",
"node",
"with",
"the",
"specified",
"label",
".",
"Returns",
"the",
"node",
"to",
"which",
"this",
"edge",
"points",
"if",
"it",
"exists",
"or",
"None",
"if",
"it",
"doesn",
"t",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L83-L104 | train |
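The traversal above, sketched standalone on a toy networkx graph that uses the same 'relation' edge attribute; node names are arbitrary.

import networkx as nx

G = nx.DiGraph()
G.add_edge('E1', 'T1', relation='Theme')
G.add_edge('E1', 'T2', relation='Cause')

def node_with_edge_label(G, node_name, edge_label):
    # Same logic as the method, minus the processor instance.
    for _, to in G.edges(node_name):
        if G.edges[node_name, to]['relation'] == edge_label:
            return to
    return None

print(node_with_edge_label(G, 'E1', 'Cause'))  # -> 'T2'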
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.general_node_label | def general_node_label(self, node):
"""Used for debugging - gives a short text description of a
graph node."""
G = self.G
if G.node[node]['is_event']:
return 'event type=' + G.node[node]['type']
else:
return 'entity text=' + G.node[node]['text'] | python | def general_node_label(self, node):
"""Used for debugging - gives a short text description of a
graph node."""
G = self.G
if G.node[node]['is_event']:
return 'event type=' + G.node[node]['type']
else:
return 'entity text=' + G.node[node]['text'] | [
"def",
"general_node_label",
"(",
"self",
",",
"node",
")",
":",
"G",
"=",
"self",
".",
"G",
"if",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'is_event'",
"]",
":",
"return",
"'event type='",
"+",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'type'",
"]",
"else",
":",
"return",
"'entity text='",
"+",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'text'",
"]"
]
| Used for debugging - gives a short text description of a
graph node. | [
"Used",
"for",
"debugging",
"-",
"gives",
"a",
"short",
"text",
"description",
"of",
"a",
"graph",
"node",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L106-L113 | train |
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.print_parent_and_children_info | def print_parent_and_children_info(self, node):
"""Used for debugging - prints a short description of a a node, its
children, its parents, and its parents' children."""
G = self.G
parents = G.predecessors(node)
children = G.successors(node)
print(self.general_node_label(node))
tabs = '\t'
for parent in parents:
relation = G.edges[parent, node]['relation']
print(tabs + 'Parent (%s): %s' % (relation,
self.general_node_label(parent)))
for cop in G.successors(parent):
if cop != node:
relation = G.edges[parent, cop]['relation']
print(tabs + 'Child of parent (%s): %s' % (relation,
self.general_node_label(cop)))
for child in children:
relation = G.edges[node, child]['relation']
print(tabs + 'Child (%s): (%s)' % (relation,
self.general_node_label(child))) | python | def print_parent_and_children_info(self, node):
"""Used for debugging - prints a short description of a a node, its
children, its parents, and its parents' children."""
G = self.G
parents = G.predecessors(node)
children = G.successors(node)
print(self.general_node_label(node))
tabs = '\t'
for parent in parents:
relation = G.edges[parent, node]['relation']
print(tabs + 'Parent (%s): %s' % (relation,
self.general_node_label(parent)))
for cop in G.successors(parent):
if cop != node:
relation = G.edges[parent, cop]['relation']
print(tabs + 'Child of parent (%s): %s' % (relation,
self.general_node_label(cop)))
for child in children:
relation = G.edges[node, child]['relation']
print(tabs + 'Child (%s): (%s)' % (relation,
self.general_node_label(child))) | [
"def",
"print_parent_and_children_info",
"(",
"self",
",",
"node",
")",
":",
"G",
"=",
"self",
".",
"G",
"parents",
"=",
"G",
".",
"predecessors",
"(",
"node",
")",
"children",
"=",
"G",
".",
"successors",
"(",
"node",
")",
"print",
"(",
"general_node_label",
"(",
"G",
",",
"node",
")",
")",
"tabs",
"=",
"'\\t'",
"for",
"parent",
"in",
"parents",
":",
"relation",
"=",
"G",
".",
"edges",
"[",
"parent",
",",
"node",
"]",
"[",
"'relation'",
"]",
"print",
"(",
"tabs",
"+",
"'Parent (%s): %s'",
"%",
"(",
"relation",
",",
"general_node_label",
"(",
"G",
",",
"parent",
")",
")",
")",
"for",
"cop",
"in",
"G",
".",
"successors",
"(",
"parent",
")",
":",
"if",
"cop",
"!=",
"node",
":",
"relation",
"=",
"G",
".",
"edges",
"[",
"parent",
",",
"cop",
"]",
"[",
"'relation'",
"]",
"print",
"(",
"tabs",
"+",
"'Child of parent (%s): %s'",
"%",
"(",
"relation",
",",
"general_node_label",
"(",
"G",
",",
"cop",
")",
")",
")",
"for",
"child",
"in",
"children",
":",
"relation",
"=",
"G",
".",
"edges",
"[",
"node",
",",
"child",
"]",
"[",
"'relation'",
"]",
"print",
"(",
"tabs",
"+",
"'Child (%s): (%s)'",
"%",
"(",
"relation",
",",
"general_node_label",
"(",
"G",
",",
"child",
")",
")",
")"
]
| Used for debugging - prints a short description of a node, its
children, its parents, and its parents' children. | [
"Used",
"for",
"debugging",
"-",
"prints",
"a",
"short",
"description",
"of",
"a",
"a",
"node",
"its",
"children",
"its",
"parents",
"and",
"its",
"parents",
"children",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L115-L136 | train |
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.find_event_with_outgoing_edges | def find_event_with_outgoing_edges(self, event_name, desired_relations):
"""Gets a list of event nodes with the specified event_name and
outgoing edges annotated with each of the specified relations.
Parameters
----------
event_name : str
Look for event nodes with this name
desired_relations : list[str]
Look for event nodes with outgoing edges annotated with each of
these relations
Returns
-------
event_nodes : list[str]
Event nodes that fit the desired criteria
"""
G = self.G
desired_relations = set(desired_relations)
desired_event_nodes = []
for node in G.node.keys():
if G.node[node]['is_event'] and G.node[node]['type'] == event_name:
has_relations = [G.edges[node, edge[1]]['relation'] for
edge in G.edges(node)]
has_relations = set(has_relations)
# Did the outgoing edges from this node have all of the
# desired relations?
if desired_relations.issubset(has_relations):
desired_event_nodes.append(node)
return desired_event_nodes | python | def find_event_with_outgoing_edges(self, event_name, desired_relations):
"""Gets a list of event nodes with the specified event_name and
outgoing edges annotated with each of the specified relations.
Parameters
----------
event_name : str
Look for event nodes with this name
desired_relations : list[str]
Look for event nodes with outgoing edges annotated with each of
these relations
Returns
-------
event_nodes : list[str]
Event nodes that fit the desired criteria
"""
G = self.G
desired_relations = set(desired_relations)
desired_event_nodes = []
for node in G.node.keys():
if G.node[node]['is_event'] and G.node[node]['type'] == event_name:
has_relations = [G.edges[node, edge[1]]['relation'] for
edge in G.edges(node)]
has_relations = set(has_relations)
# Did the outgoing edges from this node have all of the
# desired relations?
if desired_relations.issubset(has_relations):
desired_event_nodes.append(node)
return desired_event_nodes | [
"def",
"find_event_with_outgoing_edges",
"(",
"self",
",",
"event_name",
",",
"desired_relations",
")",
":",
"G",
"=",
"self",
".",
"G",
"desired_relations",
"=",
"set",
"(",
"desired_relations",
")",
"desired_event_nodes",
"=",
"[",
"]",
"for",
"node",
"in",
"G",
".",
"node",
".",
"keys",
"(",
")",
":",
"if",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'is_event'",
"]",
"and",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'type'",
"]",
"==",
"event_name",
":",
"has_relations",
"=",
"[",
"G",
".",
"edges",
"[",
"node",
",",
"edge",
"[",
"1",
"]",
"]",
"[",
"'relation'",
"]",
"for",
"edge",
"in",
"G",
".",
"edges",
"(",
"node",
")",
"]",
"has_relations",
"=",
"set",
"(",
"has_relations",
")",
"# Did the outgoing edges from this node have all of the",
"# desired relations?",
"if",
"desired_relations",
".",
"issubset",
"(",
"has_relations",
")",
":",
"desired_event_nodes",
".",
"append",
"(",
"node",
")",
"return",
"desired_event_nodes"
]
| Gets a list of event nodes with the specified event_name and
outgoing edges annotated with each of the specified relations.
Parameters
----------
event_name : str
Look for event nodes with this name
desired_relations : list[str]
Look for event nodes with outgoing edges annotated with each of
these relations
Returns
-------
event_nodes : list[str]
Event nodes that fit the desired criteria | [
"Gets",
"a",
"list",
"of",
"event",
"nodes",
"with",
"the",
"specified",
"event_name",
"and",
"outgoing",
"edges",
"annotated",
"with",
"each",
"of",
"the",
"specified",
"relations",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L154-L186 | train |
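The filtering idea above, sketched standalone on a toy graph; the attribute names follow the method, while the node names are arbitrary.

import networkx as nx

G = nx.DiGraph()
G.add_node('B1', is_event=True, type='Binding')
G.add_edge('B1', 'T1', relation='Theme')
G.add_edge('B1', 'T2', relation='Theme2')

desired = {'Theme', 'Theme2'}
has = {G.edges[e]['relation'] for e in G.edges('B1')}
print(desired.issubset(has))  # -> True, so 'B1' would be selected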
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.get_related_node | def get_related_node(self, node, relation):
"""Looks for an edge from node to some other node, such that the edge
is annotated with the given relation. If there exists such an edge,
returns the name of the node it points to. Otherwise, returns None."""
G = self.G
for edge in G.edges(node):
to = edge[1]
to_relation = G.edges[node, to]['relation']
if to_relation == relation:
return to
return None | python | def get_related_node(self, node, relation):
"""Looks for an edge from node to some other node, such that the edge
is annotated with the given relation. If there exists such an edge,
returns the name of the node it points to. Otherwise, returns None."""
G = self.G
for edge in G.edges(node):
to = edge[1]
to_relation = G.edges[node, to]['relation']
if to_relation == relation:
return to
return None | [
"def",
"get_related_node",
"(",
"self",
",",
"node",
",",
"relation",
")",
":",
"G",
"=",
"self",
".",
"G",
"for",
"edge",
"in",
"G",
".",
"edges",
"(",
"node",
")",
":",
"to",
"=",
"edge",
"[",
"1",
"]",
"to_relation",
"=",
"G",
".",
"edges",
"[",
"node",
",",
"to",
"]",
"[",
"'relation'",
"]",
"if",
"to_relation",
"==",
"relation",
":",
"return",
"to",
"return",
"None"
]
| Looks for an edge from node to some other node, such that the edge
is annotated with the given relation. If there exists such an edge,
returns the name of the node it points to. Otherwise, returns None. | [
"Looks",
"for",
"an",
"edge",
"from",
"node",
"to",
"some",
"other",
"node",
"such",
"that",
"the",
"edge",
"is",
"annotated",
"with",
"the",
"given",
"relation",
".",
"If",
"there",
"exists",
"such",
"an",
"edge",
"returns",
"the",
"name",
"of",
"the",
"node",
"it",
"points",
"to",
".",
"Otherwise",
"returns",
"None",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L188-L199 | train |
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.get_entity_text_for_relation | def get_entity_text_for_relation(self, node, relation):
"""Looks for an edge from node to some other node, such that the edge is
annotated with the given relation. If there exists such an edge, and
the node at the other end is an entity, return that entity's text.
Otherwise, returns None."""
G = self.G
related_node = self.get_related_node(node, relation)
if related_node is not None:
if not G.node[related_node]['is_event']:
return G.node[related_node]['text']
else:
return None
else:
return None | python | def get_entity_text_for_relation(self, node, relation):
"""Looks for an edge from node to some other node, such that the edge is
annotated with the given relation. If there exists such an edge, and
the node at the other end is an entity, return that entity's text.
Otherwise, returns None."""
G = self.G
related_node = self.get_related_node(node, relation)
if related_node is not None:
if not G.node[related_node]['is_event']:
return G.node[related_node]['text']
else:
return None
else:
return None | [
"def",
"get_entity_text_for_relation",
"(",
"self",
",",
"node",
",",
"relation",
")",
":",
"G",
"=",
"self",
".",
"G",
"related_node",
"=",
"self",
".",
"get_related_node",
"(",
"node",
",",
"relation",
")",
"if",
"related_node",
"is",
"not",
"None",
":",
"if",
"not",
"G",
".",
"node",
"[",
"related_node",
"]",
"[",
"'is_event'",
"]",
":",
"return",
"G",
".",
"node",
"[",
"related_node",
"]",
"[",
"'text'",
"]",
"else",
":",
"return",
"None",
"else",
":",
"return",
"None"
]
| Looks for an edge from node to some other node, such that the edge is
annotated with the given relation. If there exists such an edge, and
the node at the other end is an entity, return that entity's text.
Otherwise, returns None. | [
"Looks",
"for",
"an",
"edge",
"from",
"node",
"to",
"some",
"other",
"node",
"such",
"that",
"the",
"edge",
"is",
"annotated",
"with",
"the",
"given",
"relation",
".",
"If",
"there",
"exists",
"such",
"an",
"edge",
"and",
"the",
"node",
"at",
"the",
"other",
"edge",
"is",
"an",
"entity",
"return",
"that",
"entity",
"s",
"text",
".",
"Otherwise",
"returns",
"None",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L201-L215 | train |
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.process_increase_expression_amount | def process_increase_expression_amount(self):
"""Looks for Positive_Regulation events with a specified Cause
and a Gene_Expression theme, and processes them into INDRA statements.
"""
statements = []
pwcs = self.find_event_parent_with_event_child(
'Positive_regulation', 'Gene_expression')
for pair in pwcs:
pos_reg = pair[0]
expression = pair[1]
cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
target = self.get_entity_text_for_relation(expression, 'Theme')
if cause is not None and target is not None:
theme_node = self.get_related_node(expression, 'Theme')
assert(theme_node is not None)
evidence = self.node_to_evidence(theme_node, is_direct=False)
statements.append(IncreaseAmount(s2a(cause), s2a(target),
evidence=evidence))
return statements | python | def process_increase_expression_amount(self):
"""Looks for Positive_Regulation events with a specified Cause
and a Gene_Expression theme, and processes them into INDRA statements.
"""
statements = []
pwcs = self.find_event_parent_with_event_child(
'Positive_regulation', 'Gene_expression')
for pair in pwcs:
pos_reg = pair[0]
expression = pair[1]
cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
target = self.get_entity_text_for_relation(expression, 'Theme')
if cause is not None and target is not None:
theme_node = self.get_related_node(expression, 'Theme')
assert(theme_node is not None)
evidence = self.node_to_evidence(theme_node, is_direct=False)
statements.append(IncreaseAmount(s2a(cause), s2a(target),
evidence=evidence))
return statements | [
"def",
"process_increase_expression_amount",
"(",
"self",
")",
":",
"statements",
"=",
"[",
"]",
"pwcs",
"=",
"self",
".",
"find_event_parent_with_event_child",
"(",
"'Positive_regulation'",
",",
"'Gene_expression'",
")",
"for",
"pair",
"in",
"pwcs",
":",
"pos_reg",
"=",
"pair",
"[",
"0",
"]",
"expression",
"=",
"pair",
"[",
"1",
"]",
"cause",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"pos_reg",
",",
"'Cause'",
")",
"target",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"expression",
",",
"'Theme'",
")",
"if",
"cause",
"is",
"not",
"None",
"and",
"target",
"is",
"not",
"None",
":",
"theme_node",
"=",
"self",
".",
"get_related_node",
"(",
"expression",
",",
"'Theme'",
")",
"assert",
"(",
"theme_node",
"is",
"not",
"None",
")",
"evidence",
"=",
"self",
".",
"node_to_evidence",
"(",
"theme_node",
",",
"is_direct",
"=",
"False",
")",
"statements",
".",
"append",
"(",
"IncreaseAmount",
"(",
"s2a",
"(",
"cause",
")",
",",
"s2a",
"(",
"target",
")",
",",
"evidence",
"=",
"evidence",
")",
")",
"return",
"statements"
]
| Looks for Positive_regulation events with a specified Cause
and a Gene_expression theme, and processes them into INDRA statements. | [
"Looks",
"for",
"Positive_Regulation",
"events",
"with",
"a",
"specified",
"Cause",
"and",
"a",
"Gene_Expression",
"theme",
"and",
"processes",
"them",
"into",
"INDRA",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L217-L239 | train |
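The kind of Statement this method emits, built by hand for illustration; the evidence text and agents are invented.

from indra.statements import Agent, Evidence, IncreaseAmount

ev = Evidence(source_api='tees', pmid=None,
              text='TP53 induces MDM2 expression.',  # invented sentence
              epistemics={'direct': False})
stmt = IncreaseAmount(Agent('TP53'), Agent('MDM2'), evidence=ev)
print(stmt)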
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.process_phosphorylation_statements | def process_phosphorylation_statements(self):
"""Looks for Phosphorylation events in the graph and extracts them into
INDRA statements.
In particular, looks for a Positive_regulation event node with a child
Phosphorylation event node.
If Positive_regulation has an outgoing Cause edge, that's the subject
If Phosphorylation has an outgoing Theme edge, that's the object
If Phosphorylation has an outgoing Site edge, that's the site
"""
G = self.G
statements = []
pwcs = self.find_event_parent_with_event_child('Positive_regulation',
'Phosphorylation')
for pair in pwcs:
(pos_reg, phos) = pair
cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
theme = self.get_entity_text_for_relation(phos, 'Theme')
# If the trigger word is dephosphorylate or similar, then we
# extract a dephosphorylation statement
trigger_word = self.get_entity_text_for_relation(phos,
'Phosphorylation')
if 'dephos' in trigger_word:
deph = True
else:
deph = False
site = self.get_entity_text_for_relation(phos, 'Site')
theme_node = self.get_related_node(phos, 'Theme')
assert(theme_node is not None)
evidence = self.node_to_evidence(theme_node, is_direct=False)
if theme is not None:
if deph:
statements.append(Dephosphorylation(s2a(cause),
s2a(theme), site, evidence=evidence))
else:
statements.append(Phosphorylation(s2a(cause),
s2a(theme), site, evidence=evidence))
return statements | python | def process_phosphorylation_statements(self):
"""Looks for Phosphorylation events in the graph and extracts them into
INDRA statements.
In particular, looks for a Positive_regulation event node with a child
Phosphorylation event node.
If Positive_regulation has an outgoing Cause edge, that's the subject
If Phosphorylation has an outgoing Theme edge, that's the object
If Phosphorylation has an outgoing Site edge, that's the site
"""
G = self.G
statements = []
pwcs = self.find_event_parent_with_event_child('Positive_regulation',
'Phosphorylation')
for pair in pwcs:
(pos_reg, phos) = pair
cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
theme = self.get_entity_text_for_relation(phos, 'Theme')
# If the trigger word is dephosphorylate or similar, then we
# extract a dephosphorylation statement
trigger_word = self.get_entity_text_for_relation(phos,
'Phosphorylation')
if 'dephos' in trigger_word:
deph = True
else:
deph = False
site = self.get_entity_text_for_relation(phos, 'Site')
theme_node = self.get_related_node(phos, 'Theme')
assert(theme_node is not None)
evidence = self.node_to_evidence(theme_node, is_direct=False)
if theme is not None:
if deph:
statements.append(Dephosphorylation(s2a(cause),
s2a(theme), site, evidence=evidence))
else:
statements.append(Phosphorylation(s2a(cause),
s2a(theme), site, evidence=evidence))
return statements | [
"def",
"process_phosphorylation_statements",
"(",
"self",
")",
":",
"G",
"=",
"self",
".",
"G",
"statements",
"=",
"[",
"]",
"pwcs",
"=",
"self",
".",
"find_event_parent_with_event_child",
"(",
"'Positive_regulation'",
",",
"'Phosphorylation'",
")",
"for",
"pair",
"in",
"pwcs",
":",
"(",
"pos_reg",
",",
"phos",
")",
"=",
"pair",
"cause",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"pos_reg",
",",
"'Cause'",
")",
"theme",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"phos",
",",
"'Theme'",
")",
"print",
"(",
"'Cause:'",
",",
"cause",
",",
"'Theme:'",
",",
"theme",
")",
"# If the trigger word is dephosphorylate or similar, then we",
"# extract a dephosphorylation statement",
"trigger_word",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"phos",
",",
"'Phosphorylation'",
")",
"if",
"'dephos'",
"in",
"trigger_word",
":",
"deph",
"=",
"True",
"else",
":",
"deph",
"=",
"False",
"site",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"phos",
",",
"'Site'",
")",
"theme_node",
"=",
"self",
".",
"get_related_node",
"(",
"phos",
",",
"'Theme'",
")",
"assert",
"(",
"theme_node",
"is",
"not",
"None",
")",
"evidence",
"=",
"self",
".",
"node_to_evidence",
"(",
"theme_node",
",",
"is_direct",
"=",
"False",
")",
"if",
"theme",
"is",
"not",
"None",
":",
"if",
"deph",
":",
"statements",
".",
"append",
"(",
"Dephosphorylation",
"(",
"s2a",
"(",
"cause",
")",
",",
"s2a",
"(",
"theme",
")",
",",
"site",
",",
"evidence",
"=",
"evidence",
")",
")",
"else",
":",
"statements",
".",
"append",
"(",
"Phosphorylation",
"(",
"s2a",
"(",
"cause",
")",
",",
"s2a",
"(",
"theme",
")",
",",
"site",
",",
"evidence",
"=",
"evidence",
")",
")",
"return",
"statements"
]
| Looks for Phosphorylation events in the graph and extracts them into
INDRA statements.
In particular, looks for a Positive_regulation event node with a child
Phosphorylation event node.
If Positive_regulation has an outgoing Cause edge, that's the subject
If Phosphorylation has an outgoing Theme edge, that's the object
If Phosphorylation has an outgoing Site edge, that's the site | [
"Looks",
"for",
"Phosphorylation",
"events",
"in",
"the",
"graph",
"and",
"extracts",
"them",
"into",
"INDRA",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L265-L309 | train |
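The Statement types constructed above, built by hand for illustration; note that the method passes the raw TEES site string positionally, whereas an explicit residue/position pair is shown here.

from indra.statements import Agent, Dephosphorylation, Phosphorylation

phos = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')
dephos = Dephosphorylation(Agent('DUSP6'), Agent('MAPK1'), 'T', '185')
print(phos)
print(dephos)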
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.process_binding_statements | def process_binding_statements(self):
"""Looks for Binding events in the graph and extracts them into INDRA
statements.
In particular, looks for a Binding event node with outgoing edges
with relations Theme and Theme2 - the entities these edges point to
are the two constituents of the Complex INDRA statement.
"""
G = self.G
statements = []
binding_nodes = self.find_event_with_outgoing_edges('Binding',
['Theme',
'Theme2'])
for node in binding_nodes:
theme1 = self.get_entity_text_for_relation(node, 'Theme')
theme1_node = self.get_related_node(node, 'Theme')
theme2 = self.get_entity_text_for_relation(node, 'Theme2')
assert(theme1 is not None)
assert(theme2 is not None)
evidence = self.node_to_evidence(theme1_node, is_direct=True)
statements.append(Complex([s2a(theme1), s2a(theme2)],
evidence=evidence))
return statements | python | def process_binding_statements(self):
"""Looks for Binding events in the graph and extracts them into INDRA
statements.
In particular, looks for a Binding event node with outgoing edges
with relations Theme and Theme2 - the entities these edges point to
are the two constituents of the Complex INDRA statement.
"""
G = self.G
statements = []
binding_nodes = self.find_event_with_outgoing_edges('Binding',
['Theme',
'Theme2'])
for node in binding_nodes:
theme1 = self.get_entity_text_for_relation(node, 'Theme')
theme1_node = self.get_related_node(node, 'Theme')
theme2 = self.get_entity_text_for_relation(node, 'Theme2')
assert(theme1 is not None)
assert(theme2 is not None)
evidence = self.node_to_evidence(theme1_node, is_direct=True)
statements.append(Complex([s2a(theme1), s2a(theme2)],
evidence=evidence))
return statements | [
"def",
"process_binding_statements",
"(",
"self",
")",
":",
"G",
"=",
"self",
".",
"G",
"statements",
"=",
"[",
"]",
"binding_nodes",
"=",
"self",
".",
"find_event_with_outgoing_edges",
"(",
"'Binding'",
",",
"[",
"'Theme'",
",",
"'Theme2'",
"]",
")",
"for",
"node",
"in",
"binding_nodes",
":",
"theme1",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"node",
",",
"'Theme'",
")",
"theme1_node",
"=",
"self",
".",
"get_related_node",
"(",
"node",
",",
"'Theme'",
")",
"theme2",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"node",
",",
"'Theme2'",
")",
"assert",
"(",
"theme1",
"is",
"not",
"None",
")",
"assert",
"(",
"theme2",
"is",
"not",
"None",
")",
"evidence",
"=",
"self",
".",
"node_to_evidence",
"(",
"theme1_node",
",",
"is_direct",
"=",
"True",
")",
"statements",
".",
"append",
"(",
"Complex",
"(",
"[",
"s2a",
"(",
"theme1",
")",
",",
"s2a",
"(",
"theme2",
")",
"]",
",",
"evidence",
"=",
"evidence",
")",
")",
"return",
"statements"
]
| Looks for Binding events in the graph and extracts them into INDRA
statements.
In particular, looks for a Binding event node with outgoing edges
with relations Theme and Theme2 - the entities these edges point to
are the two constituents of the Complex INDRA statement. | [
"Looks",
"for",
"Binding",
"events",
"in",
"the",
"graph",
"and",
"extracts",
"them",
"into",
"INDRA",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L311-L338 | train |
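A hypothetical usage sketch, assuming tp is a TEESProcessor that has already parsed a TEES output graph; each returned statement is an indra.statements.Complex whose members are the two Theme entities.

# tp is assumed to be an already-constructed TEESProcessor
stmts = tp.process_binding_statements()
for stmt in stmts:
    print([member.name for member in stmt.members], stmt.evidence[0].text)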
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.node_to_evidence | def node_to_evidence(self, entity_node, is_direct):
"""Computes an evidence object for a statement.
We assume that the entire event happens within a single statement, and
get the text of the sentence by getting the text of the sentence
containing the provided node that corresponds to one of the entities
participating in the event.
The Evidence's pmid is whatever was provided to the constructor
(perhaps None), and the annotations are the subgraph containing the
provided node, its ancestors, and its descendants.
"""
# We assume that the entire event is within a single sentence, and
# get this sentence by getting the sentence containing one of the
# entities
sentence_text = self.G.node[entity_node]['sentence_text']
# Make annotations object containing the fully connected subgraph
# containing these nodes
subgraph = self.connected_subgraph(entity_node)
edge_properties = {}
for edge in subgraph.edges():
edge_properties[edge] = subgraph.edges[edge]
annotations = {'node_properties': subgraph.node,
'edge_properties': edge_properties}
# Make evidence object
epistemics = dict()
evidence = Evidence(source_api='tees',
pmid=self.pmid,
text=sentence_text,
epistemics={'direct': is_direct},
annotations=annotations)
return evidence | python | def node_to_evidence(self, entity_node, is_direct):
"""Computes an evidence object for a statement.
We assume that the entire event happens within a single statement, and
get the text of the sentence by getting the text of the sentence
containing the provided node that corresponds to one of the entities
participating in the event.
The Evidence's pmid is whatever was provided to the constructor
(perhaps None), and the annotations are the subgraph containing the
provided node, its ancestors, and its descendants.
"""
# We assume that the entire event is within a single sentence, and
# get this sentence by getting the sentence containing one of the
# entities
sentence_text = self.G.node[entity_node]['sentence_text']
# Make annotations object containing the fully connected subgraph
# containing these nodes
subgraph = self.connected_subgraph(entity_node)
edge_properties = {}
for edge in subgraph.edges():
edge_properties[edge] = subgraph.edges[edge]
annotations = {'node_properties': subgraph.node,
'edge_properties': edge_properties}
# Make evidence object
epistemics = dict()
evidence = Evidence(source_api='tees',
pmid=self.pmid,
text=sentence_text,
epistemics={'direct': is_direct},
annotations=annotations)
return evidence | [
"def",
"node_to_evidence",
"(",
"self",
",",
"entity_node",
",",
"is_direct",
")",
":",
"# We assume that the entire event is within a single sentence, and",
"# get this sentence by getting the sentence containing one of the",
"# entities",
"sentence_text",
"=",
"self",
".",
"G",
".",
"node",
"[",
"entity_node",
"]",
"[",
"'sentence_text'",
"]",
"# Make annotations object containing the fully connected subgraph",
"# containing these nodes",
"subgraph",
"=",
"self",
".",
"connected_subgraph",
"(",
"entity_node",
")",
"edge_properties",
"=",
"{",
"}",
"for",
"edge",
"in",
"subgraph",
".",
"edges",
"(",
")",
":",
"edge_properties",
"[",
"edge",
"]",
"=",
"subgraph",
".",
"edges",
"[",
"edge",
"]",
"annotations",
"=",
"{",
"'node_properties'",
":",
"subgraph",
".",
"node",
",",
"'edge_properties'",
":",
"edge_properties",
"}",
"# Make evidence object",
"epistemics",
"=",
"dict",
"(",
")",
"evidence",
"=",
"Evidence",
"(",
"source_api",
"=",
"'tees'",
",",
"pmid",
"=",
"self",
".",
"pmid",
",",
"text",
"=",
"sentence_text",
",",
"epistemics",
"=",
"{",
"'direct'",
":",
"is_direct",
"}",
",",
"annotations",
"=",
"annotations",
")",
"return",
"evidence"
]
| Computes an evidence object for a statement.
We assume that the entire event happens within a single statement, and
get the text of the sentence by getting the text of the sentence
containing the provided node that corresponds to one of the entities
participating in the event.
The Evidence's pmid is whatever was provided to the constructor
(perhaps None), and the annotations are the subgraph containing the
provided node, its ancestors, and its descendants. | [
"Computes",
"an",
"evidence",
"object",
"for",
"a",
"statement",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L340-L375 | train |
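For reference, a minimal sketch of the Evidence object that node_to_evidence assembles; the pmid, text, and annotations values are illustrative placeholders.

from indra.statements import Evidence

ev = Evidence(source_api='tees',
              pmid='12345',  # illustrative PMID
              text='MEK phosphorylates ERK.',  # illustrative sentence
              epistemics={'direct': False},
              annotations={'node_properties': {}, 'edge_properties': {}})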
sorgerlab/indra | indra/sources/tees/processor.py | TEESProcessor.connected_subgraph | def connected_subgraph(self, node):
"""Returns the subgraph containing the given node, its ancestors, and
its descendants.
Parameters
----------
node : str
We want to create the subgraph containing this node.
Returns
-------
subgraph : networkx.DiGraph
The subgraph containing the specified node.
"""
G = self.G
subgraph_nodes = set()
subgraph_nodes.add(node)
subgraph_nodes.update(dag.ancestors(G, node))
subgraph_nodes.update(dag.descendants(G, node))
# Keep adding the ancestors and descendants of nodes of the graph
# until we can't do so any longer
graph_changed = True
while graph_changed:
initial_count = len(subgraph_nodes)
old_nodes = set(subgraph_nodes)
for n in old_nodes:
subgraph_nodes.update(dag.ancestors(G, n))
subgraph_nodes.update(dag.descendants(G, n))
current_count = len(subgraph_nodes)
graph_changed = current_count > initial_count
return G.subgraph(subgraph_nodes) | python | def connected_subgraph(self, node):
"""Returns the subgraph containing the given node, its ancestors, and
its descendants.
Parameters
----------
node : str
We want to create the subgraph containing this node.
Returns
-------
subgraph : networkx.DiGraph
The subgraph containing the specified node.
"""
G = self.G
subgraph_nodes = set()
subgraph_nodes.add(node)
subgraph_nodes.update(dag.ancestors(G, node))
subgraph_nodes.update(dag.descendants(G, node))
# Keep adding the ancestors and descendants of nodes of the graph
# until we can't do so any longer
graph_changed = True
while graph_changed:
initial_count = len(subgraph_nodes)
old_nodes = set(subgraph_nodes)
for n in old_nodes:
subgraph_nodes.update(dag.ancestors(G, n))
subgraph_nodes.update(dag.descendants(G, n))
current_count = len(subgraph_nodes)
graph_changed = current_count > initial_count
return G.subgraph(subgraph_nodes) | [
"def",
"connected_subgraph",
"(",
"self",
",",
"node",
")",
":",
"G",
"=",
"self",
".",
"G",
"subgraph_nodes",
"=",
"set",
"(",
")",
"subgraph_nodes",
".",
"add",
"(",
"node",
")",
"subgraph_nodes",
".",
"update",
"(",
"dag",
".",
"ancestors",
"(",
"G",
",",
"node",
")",
")",
"subgraph_nodes",
".",
"update",
"(",
"dag",
".",
"descendants",
"(",
"G",
",",
"node",
")",
")",
"# Keep adding the ancesotrs and descendants on nodes of the graph",
"# until we can't do so any longer",
"graph_changed",
"=",
"True",
"while",
"graph_changed",
":",
"initial_count",
"=",
"len",
"(",
"subgraph_nodes",
")",
"old_nodes",
"=",
"set",
"(",
"subgraph_nodes",
")",
"for",
"n",
"in",
"old_nodes",
":",
"subgraph_nodes",
".",
"update",
"(",
"dag",
".",
"ancestors",
"(",
"G",
",",
"n",
")",
")",
"subgraph_nodes",
".",
"update",
"(",
"dag",
".",
"descendants",
"(",
"G",
",",
"n",
")",
")",
"current_count",
"=",
"len",
"(",
"subgraph_nodes",
")",
"graph_changed",
"=",
"current_count",
">",
"initial_count",
"return",
"G",
".",
"subgraph",
"(",
"subgraph_nodes",
")"
]
| Returns the subgraph containing the given node, its ancestors, and
its descendants.
Parameters
----------
node : str
We want to create the subgraph containing this node.
Returns
-------
subgraph : networkx.DiGraph
The subgraph containing the specified node. | [
"Returns",
"the",
"subgraph",
"containing",
"the",
"given",
"node",
"its",
"ancestors",
"and",
"its",
"descendants",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L377-L412 | train |
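A self-contained sketch of the same fixpoint computation on a toy graph; the dag name used in connected_subgraph presumably refers to networkx.algorithms.dag, which is imported explicitly here.

import networkx as nx
from networkx.algorithms import dag

G = nx.DiGraph([('a', 'b'), ('b', 'c'), ('d', 'b')])
nodes = {'b'} | dag.ancestors(G, 'b') | dag.descendants(G, 'b')
changed = True
while changed:
    before = len(nodes)
    for n in set(nodes):
        nodes |= dag.ancestors(G, n) | dag.descendants(G, n)
    changed = len(nodes) > before
print(sorted(nodes))  # ['a', 'b', 'c', 'd']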
sorgerlab/indra | indra/sources/trips/api.py | process_text | def process_text(text, save_xml_name='trips_output.xml', save_xml_pretty=True,
offline=False, service_endpoint='drum'):
"""Return a TripsProcessor by processing text.
Parameters
----------
text : str
The text to be processed.
save_xml_name : Optional[str]
The name of the file to save the returned TRIPS extraction knowledge
base XML. Default: trips_output.xml
save_xml_pretty : Optional[bool]
If True, the saved XML is pretty-printed. Some third-party tools
require non-pretty-printed XMLs which can be obtained by setting this
to False. Default: True
offline : Optional[bool]
If True, offline reading is used with a local instance of DRUM, if
available. Default: False
service_endpoint : Optional[str]
Selects the TRIPS/DRUM web service endpoint to use. Is a choice between
"drum" (default) and "drum-dev", a nightly build.
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
if not offline:
html = client.send_query(text, service_endpoint)
xml = client.get_xml(html)
else:
if offline_reading:
try:
dr = DrumReader()
if dr is None:
raise Exception('DrumReader could not be instantiated.')
except BaseException as e:
logger.error(e)
logger.error('Make sure drum/bin/trips-drum is running in'
' a separate process')
return None
try:
dr.read_text(text)
dr.start()
except SystemExit:
pass
xml = dr.extractions[0]
else:
logger.error('Offline reading with TRIPS/DRUM not available.')
logger.error('Error message was: %s' % offline_err)
msg = """
To install DRUM locally, follow instructions at
https://github.com/wdebeaum/drum.
Next, install the pykqml package either from pip or from
https://github.com/bgyori/pykqml.
Once installed, run drum/bin/trips-drum in a separate process.
"""
logger.error(msg)
return None
if save_xml_name:
client.save_xml(xml, save_xml_name, save_xml_pretty)
return process_xml(xml) | python | def process_text(text, save_xml_name='trips_output.xml', save_xml_pretty=True,
offline=False, service_endpoint='drum'):
"""Return a TripsProcessor by processing text.
Parameters
----------
text : str
The text to be processed.
save_xml_name : Optional[str]
The name of the file to save the returned TRIPS extraction knowledge
base XML. Default: trips_output.xml
save_xml_pretty : Optional[bool]
If True, the saved XML is pretty-printed. Some third-party tools
require non-pretty-printed XMLs which can be obtained by setting this
to False. Default: True
offline : Optional[bool]
If True, offline reading is used with a local instance of DRUM, if
available. Default: False
service_endpoint : Optional[str]
Selects the TRIPS/DRUM web service endpoint to use. Is a choice between
"drum" (default) and "drum-dev", a nightly build.
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
if not offline:
html = client.send_query(text, service_endpoint)
xml = client.get_xml(html)
else:
if offline_reading:
try:
dr = DrumReader()
if dr is None:
raise Exception('DrumReader could not be instantiated.')
except BaseException as e:
logger.error(e)
logger.error('Make sure drum/bin/trips-drum is running in'
' a separate process')
return None
try:
dr.read_text(text)
dr.start()
except SystemExit:
pass
xml = dr.extractions[0]
else:
logger.error('Offline reading with TRIPS/DRUM not available.')
logger.error('Error message was: %s' % offline_err)
msg = """
To install DRUM locally, follow instructions at
https://github.com/wdebeaum/drum.
Next, install the pykqml package either from pip or from
https://github.com/bgyori/pykqml.
Once installed, run drum/bin/trips-drum in a separate process.
"""
logger.error(msg)
return None
if save_xml_name:
client.save_xml(xml, save_xml_name, save_xml_pretty)
return process_xml(xml) | [
"def",
"process_text",
"(",
"text",
",",
"save_xml_name",
"=",
"'trips_output.xml'",
",",
"save_xml_pretty",
"=",
"True",
",",
"offline",
"=",
"False",
",",
"service_endpoint",
"=",
"'drum'",
")",
":",
"if",
"not",
"offline",
":",
"html",
"=",
"client",
".",
"send_query",
"(",
"text",
",",
"service_endpoint",
")",
"xml",
"=",
"client",
".",
"get_xml",
"(",
"html",
")",
"else",
":",
"if",
"offline_reading",
":",
"try",
":",
"dr",
"=",
"DrumReader",
"(",
")",
"if",
"dr",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'DrumReader could not be instantiated.'",
")",
"except",
"BaseException",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
")",
"logger",
".",
"error",
"(",
"'Make sure drum/bin/trips-drum is running in'",
"' a separate process'",
")",
"return",
"None",
"try",
":",
"dr",
".",
"read_text",
"(",
"text",
")",
"dr",
".",
"start",
"(",
")",
"except",
"SystemExit",
":",
"pass",
"xml",
"=",
"dr",
".",
"extractions",
"[",
"0",
"]",
"else",
":",
"logger",
".",
"error",
"(",
"'Offline reading with TRIPS/DRUM not available.'",
")",
"logger",
".",
"error",
"(",
"'Error message was: %s'",
"%",
"offline_err",
")",
"msg",
"=",
"\"\"\"\n To install DRUM locally, follow instructions at\n https://github.com/wdebeaum/drum.\n Next, install the pykqml package either from pip or from\n https://github.com/bgyori/pykqml.\n Once installed, run drum/bin/trips-drum in a separate process.\n \"\"\"",
"logger",
".",
"error",
"(",
"msg",
")",
"return",
"None",
"if",
"save_xml_name",
":",
"client",
".",
"save_xml",
"(",
"xml",
",",
"save_xml_name",
",",
"save_xml_pretty",
")",
"return",
"process_xml",
"(",
"xml",
")"
]
| Return a TripsProcessor by processing text.
Parameters
----------
text : str
The text to be processed.
save_xml_name : Optional[str]
The name of the file to save the returned TRIPS extraction knowledge
base XML. Default: trips_output.xml
save_xml_pretty : Optional[bool]
If True, the saved XML is pretty-printed. Some third-party tools
require non-pretty-printed XMLs which can be obtained by setting this
to False. Default: True
offline : Optional[bool]
If True, offline reading is used with a local instance of DRUM, if
available. Default: False
service_endpoint : Optional[str]
Selects the TRIPS/DRUM web service endpoint to use. Is a choice between
"drum" (default) and "drum-dev", a nightly build.
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements. | [
"Return",
"a",
"TripsProcessor",
"by",
"processing",
"text",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/api.py#L18-L80 | train |
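Typical web-service usage (network access required); the input sentence is illustrative.

from indra.sources import trips

tp = trips.process_text('BRAF phosphorylates MEK1 at S218.')
if tp is not None:
    for stmt in tp.statements:
        print(stmt)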
sorgerlab/indra | indra/sources/trips/api.py | process_xml_file | def process_xml_file(file_name):
"""Return a TripsProcessor by processing a TRIPS EKB XML file.
Parameters
----------
file_name : str
Path to a TRIPS extraction knowledge base (EKB) file to be processed.
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
with open(file_name, 'rb') as fh:
ekb = fh.read().decode('utf-8')
return process_xml(ekb) | python | def process_xml_file(file_name):
"""Return a TripsProcessor by processing a TRIPS EKB XML file.
Parameters
----------
file_name : str
Path to a TRIPS extraction knowledge base (EKB) file to be processed.
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
with open(file_name, 'rb') as fh:
ekb = fh.read().decode('utf-8')
return process_xml(ekb) | [
"def",
"process_xml_file",
"(",
"file_name",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'rb'",
")",
"as",
"fh",
":",
"ekb",
"=",
"fh",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"process_xml",
"(",
"ekb",
")"
]
| Return a TripsProcessor by processing a TRIPS EKB XML file.
Parameters
----------
file_name : str
Path to a TRIPS extraction knowledge base (EKB) file to be processed.
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements. | [
"Return",
"a",
"TripsProcessor",
"by",
"processing",
"a",
"TRIPS",
"EKB",
"XML",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/api.py#L83-L99 | train |
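A hypothetical usage sketch, assuming an EKB file previously saved by process_text:

from indra.sources import trips

tp = trips.process_xml_file('trips_output.xml')
print(len(tp.statements) if tp is not None else 'processing failed')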
sorgerlab/indra | indra/sources/trips/api.py | process_xml | def process_xml(xml_string):
"""Return a TripsProcessor by processing a TRIPS EKB XML string.
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) string to be processed.
http://trips.ihmc.us/parser/api.html
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
tp = TripsProcessor(xml_string)
if tp.tree is None:
return None
tp.get_modifications_indirect()
tp.get_activations_causal()
tp.get_activations_stimulate()
tp.get_complexes()
tp.get_modifications()
tp.get_active_forms()
tp.get_active_forms_state()
tp.get_activations()
tp.get_translocation()
tp.get_regulate_amounts()
tp.get_degradations()
tp.get_syntheses()
tp.get_conversions()
tp.get_simple_increase_decrease()
return tp | python | def process_xml(xml_string):
"""Return a TripsProcessor by processing a TRIPS EKB XML string.
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) string to be processed.
http://trips.ihmc.us/parser/api.html
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
tp = TripsProcessor(xml_string)
if tp.tree is None:
return None
tp.get_modifications_indirect()
tp.get_activations_causal()
tp.get_activations_stimulate()
tp.get_complexes()
tp.get_modifications()
tp.get_active_forms()
tp.get_active_forms_state()
tp.get_activations()
tp.get_translocation()
tp.get_regulate_amounts()
tp.get_degradations()
tp.get_syntheses()
tp.get_conversions()
tp.get_simple_increase_decrease()
return tp | [
"def",
"process_xml",
"(",
"xml_string",
")",
":",
"tp",
"=",
"TripsProcessor",
"(",
"xml_string",
")",
"if",
"tp",
".",
"tree",
"is",
"None",
":",
"return",
"None",
"tp",
".",
"get_modifications_indirect",
"(",
")",
"tp",
".",
"get_activations_causal",
"(",
")",
"tp",
".",
"get_activations_stimulate",
"(",
")",
"tp",
".",
"get_complexes",
"(",
")",
"tp",
".",
"get_modifications",
"(",
")",
"tp",
".",
"get_active_forms",
"(",
")",
"tp",
".",
"get_active_forms_state",
"(",
")",
"tp",
".",
"get_activations",
"(",
")",
"tp",
".",
"get_translocation",
"(",
")",
"tp",
".",
"get_regulate_amounts",
"(",
")",
"tp",
".",
"get_degradations",
"(",
")",
"tp",
".",
"get_syntheses",
"(",
")",
"tp",
".",
"get_conversions",
"(",
")",
"tp",
".",
"get_simple_increase_decrease",
"(",
")",
"return",
"tp"
]
| Return a TripsProcessor by processing a TRIPS EKB XML string.
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) string to be processed.
http://trips.ihmc.us/parser/api.html
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements. | [
"Return",
"a",
"TripsProcessor",
"by",
"processing",
"a",
"TRIPS",
"EKB",
"XML",
"string",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/api.py#L102-L134 | train |
sorgerlab/indra | indra/belief/wm_scorer.py | load_eidos_curation_table | def load_eidos_curation_table():
"""Return a pandas table of Eidos curation data."""
url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
'src/main/resources/org/clulab/wm/eidos/english/confidence/' + \
'rule_summary.tsv'
# Load the table of scores from the URL above into a data frame
res = StringIO(requests.get(url).text)
table = pandas.read_table(res, sep='\t')
# Drop the last "Grant total" row
table = table.drop(table.index[len(table)-1])
return table | python | def load_eidos_curation_table():
"""Return a pandas table of Eidos curation data."""
url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
'src/main/resources/org/clulab/wm/eidos/english/confidence/' + \
'rule_summary.tsv'
# Load the table of scores from the URL above into a data frame
res = StringIO(requests.get(url).text)
table = pandas.read_table(res, sep='\t')
# Drop the last "Grant total" row
table = table.drop(table.index[len(table)-1])
return table | [
"def",
"load_eidos_curation_table",
"(",
")",
":",
"url",
"=",
"'https://raw.githubusercontent.com/clulab/eidos/master/'",
"+",
"'src/main/resources/org/clulab/wm/eidos/english/confidence/'",
"+",
"'rule_summary.tsv'",
"# Load the table of scores from the URL above into a data frame",
"res",
"=",
"StringIO",
"(",
"requests",
".",
"get",
"(",
"url",
")",
".",
"text",
")",
"table",
"=",
"pandas",
".",
"read_table",
"(",
"res",
",",
"sep",
"=",
"'\\t'",
")",
"# Drop the last \"Grant total\" row",
"table",
"=",
"table",
".",
"drop",
"(",
"table",
".",
"index",
"[",
"len",
"(",
"table",
")",
"-",
"1",
"]",
")",
"return",
"table"
]
| Return a pandas table of Eidos curation data. | [
"Return",
"a",
"pandas",
"table",
"of",
"Eidos",
"curation",
"data",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/wm_scorer.py#L11-L21 | train |
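A standalone sketch of the same read-and-trim pattern on an in-memory TSV; the column names mirror those referenced by the scorer functions in this module.

import io
import pandas

tsv = ('RULE\tCOUNT of RULE\tNum correct\tNum incorrect\t% correct\n'
       'ruleA\t10\t9\t1\t0.9\n'
       'Grand total\t10\t9\t1\t0.9\n')
table = pandas.read_table(io.StringIO(tsv), sep='\t')
# Drop the final total row, as load_eidos_curation_table does
table = table.drop(table.index[len(table) - 1])
print(table)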
sorgerlab/indra | indra/belief/wm_scorer.py | get_eidos_bayesian_scorer | def get_eidos_bayesian_scorer(prior_counts=None):
"""Return a BayesianScorer based on Eidos curation counts."""
table = load_eidos_curation_table()
subtype_counts = {'eidos': {r: [c, i] for r, c, i in
zip(table['RULE'], table['Num correct'],
table['Num incorrect'])}}
prior_counts = prior_counts if prior_counts else copy.deepcopy(
default_priors)
scorer = BayesianScorer(prior_counts=prior_counts,
subtype_counts=subtype_counts)
return scorer | python | def get_eidos_bayesian_scorer(prior_counts=None):
"""Return a BayesianScorer based on Eidos curation counts."""
table = load_eidos_curation_table()
subtype_counts = {'eidos': {r: [c, i] for r, c, i in
zip(table['RULE'], table['Num correct'],
table['Num incorrect'])}}
prior_counts = prior_counts if prior_counts else copy.deepcopy(
default_priors)
scorer = BayesianScorer(prior_counts=prior_counts,
subtype_counts=subtype_counts)
return scorer | [
"def",
"get_eidos_bayesian_scorer",
"(",
"prior_counts",
"=",
"None",
")",
":",
"table",
"=",
"load_eidos_curation_table",
"(",
")",
"subtype_counts",
"=",
"{",
"'eidos'",
":",
"{",
"r",
":",
"[",
"c",
",",
"i",
"]",
"for",
"r",
",",
"c",
",",
"i",
"in",
"zip",
"(",
"table",
"[",
"'RULE'",
"]",
",",
"table",
"[",
"'Num correct'",
"]",
",",
"table",
"[",
"'Num incorrect'",
"]",
")",
"}",
"}",
"prior_counts",
"=",
"prior_counts",
"if",
"prior_counts",
"else",
"copy",
".",
"deepcopy",
"(",
"default_priors",
")",
"scorer",
"=",
"BayesianScorer",
"(",
"prior_counts",
"=",
"prior_counts",
",",
"subtype_counts",
"=",
"subtype_counts",
")",
"return",
"scorer"
]
| Return a BayesianScorer based on Eidos curation counts. | [
"Return",
"a",
"BayesianScorer",
"based",
"on",
"Eidos",
"curation",
"counts",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/wm_scorer.py#L24-L35 | train |
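A hypothetical usage sketch: the scorer plugs into INDRA's BeliefEngine, which then assigns belief scores; the statements variable is assumed to hold Eidos-extracted INDRA Statements.

from indra.belief import BeliefEngine

scorer = get_eidos_bayesian_scorer()
be = BeliefEngine(scorer)
# be.set_prior_probs(statements)  # would score each statement in place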
sorgerlab/indra | indra/belief/wm_scorer.py | get_eidos_scorer | def get_eidos_scorer():
"""Return a SimpleScorer based on Eidos curated precision estimates."""
table = load_eidos_curation_table()
# Get the overall precision
total_num = table['COUNT of RULE'].sum()
weighted_sum = table['COUNT of RULE'].dot(table['% correct'])
precision = weighted_sum / total_num
# We have to divide this into a random and systematic component, for now
# in an ad-hoc manner
syst_error = 0.05
rand_error = 1 - precision - syst_error
prior_probs = {'rand': {'eidos': rand_error}, 'syst': {'eidos': syst_error}}
# Get a dict of rule-specific errors.
subtype_probs = {'eidos':
{k: 1.0-min(v, 0.95)-syst_error for k, v
in zip(table['RULE'], table['% correct'])}}
scorer = SimpleScorer(prior_probs, subtype_probs)
return scorer | python | def get_eidos_scorer():
"""Return a SimpleScorer based on Eidos curated precision estimates."""
table = load_eidos_curation_table()
# Get the overall precision
total_num = table['COUNT of RULE'].sum()
weighted_sum = table['COUNT of RULE'].dot(table['% correct'])
precision = weighted_sum / total_num
# We have to divide this into a random and systematic component, for now
# in an ad-hoc manner
syst_error = 0.05
rand_error = 1 - precision - syst_error
prior_probs = {'rand': {'eidos': rand_error}, 'syst': {'eidos': syst_error}}
# Get a dict of rule-specific errors.
subtype_probs = {'eidos':
{k: 1.0-min(v, 0.95)-syst_error for k, v
in zip(table['RULE'], table['% correct'])}}
scorer = SimpleScorer(prior_probs, subtype_probs)
return scorer | [
"def",
"get_eidos_scorer",
"(",
")",
":",
"table",
"=",
"load_eidos_curation_table",
"(",
")",
"# Get the overall precision",
"total_num",
"=",
"table",
"[",
"'COUNT of RULE'",
"]",
".",
"sum",
"(",
")",
"weighted_sum",
"=",
"table",
"[",
"'COUNT of RULE'",
"]",
".",
"dot",
"(",
"table",
"[",
"'% correct'",
"]",
")",
"precision",
"=",
"weighted_sum",
"/",
"total_num",
"# We have to divide this into a random and systematic component, for now",
"# in an ad-hoc manner",
"syst_error",
"=",
"0.05",
"rand_error",
"=",
"1",
"-",
"precision",
"-",
"syst_error",
"prior_probs",
"=",
"{",
"'rand'",
":",
"{",
"'eidos'",
":",
"rand_error",
"}",
",",
"'syst'",
":",
"{",
"'eidos'",
":",
"syst_error",
"}",
"}",
"# Get a dict of rule-specific errors.",
"subtype_probs",
"=",
"{",
"'eidos'",
":",
"{",
"k",
":",
"1.0",
"-",
"min",
"(",
"v",
",",
"0.95",
")",
"-",
"syst_error",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"table",
"[",
"'RULE'",
"]",
",",
"table",
"[",
"'% correct'",
"]",
")",
"}",
"}",
"scorer",
"=",
"SimpleScorer",
"(",
"prior_probs",
",",
"subtype_probs",
")",
"return",
"scorer"
]
| Return a SimpleScorer based on Eidos curated precision estimates. | [
"Return",
"a",
"SimpleScorer",
"based",
"on",
"Eidos",
"curated",
"precision",
"estimates",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/wm_scorer.py#L38-L57 | train |
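A worked example of the error split above, assuming an overall precision of 0.8:

precision = 0.8
syst_error = 0.05
rand_error = 1 - precision - syst_error
print(round(rand_error, 2))  # 0.15
# For a rule curated at 95% precision, the rule-specific random error is
# 1.0 - min(0.95, 0.95) - 0.05 = 0.0, leaving only the systematic error.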
sorgerlab/indra | indra/sources/trrust/api.py | process_from_web | def process_from_web():
"""Return a TrrustProcessor based on the online interaction table.
Returns
-------
TrrustProcessor
A TrrustProcessor object that has a list of INDRA Statements in its
statements attribute.
"""
logger.info('Downloading table from %s' % trrust_human_url)
res = requests.get(trrust_human_url)
res.raise_for_status()
df = pandas.read_table(io.StringIO(res.text))
tp = TrrustProcessor(df)
tp.extract_statements()
return tp | python | def process_from_web():
"""Return a TrrustProcessor based on the online interaction table.
Returns
-------
TrrustProcessor
A TrrustProcessor object that has a list of INDRA Statements in its
statements attribute.
"""
logger.info('Downloading table from %s' % trrust_human_url)
res = requests.get(trrust_human_url)
res.raise_for_status()
df = pandas.read_table(io.StringIO(res.text))
tp = TrrustProcessor(df)
tp.extract_statements()
return tp | [
"def",
"process_from_web",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'Downloading table from %s'",
"%",
"trrust_human_url",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"trrust_human_url",
")",
"res",
".",
"raise_for_status",
"(",
")",
"df",
"=",
"pandas",
".",
"read_table",
"(",
"io",
".",
"StringIO",
"(",
"res",
".",
"text",
")",
")",
"tp",
"=",
"TrrustProcessor",
"(",
"df",
")",
"tp",
".",
"extract_statements",
"(",
")",
"return",
"tp"
]
| Return a TrrustProcessor based on the online interaction table.
Returns
-------
TrrustProcessor
A TrrustProcessor object that has a list of INDRA Statements in its
statements attribute. | [
"Return",
"a",
"TrrustProcessor",
"based",
"on",
"the",
"online",
"interaction",
"table",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trrust/api.py#L18-L33 | train |
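Typical usage (downloads the TRRUST table, so network access is needed):

from indra.sources import trrust

tp = trrust.process_from_web()
print(len(tp.statements), 'TF-target regulation statements')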
sorgerlab/indra | indra/sources/rlimsp/api.py | process_from_webservice | def process_from_webservice(id_val, id_type='pmcid', source='pmc',
with_grounding=True):
"""Return an output from RLIMS-p for the given PubMed ID or PMC ID.
Parameters
----------
id_val : str
A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to
be "read".
id_type : str
Either 'pmid' or 'pmcid'. The default is 'pmcid'.
source : str
Either 'pmc' or 'medline', whether you want pmc fulltext or medline
abstracts.
with_grounding : bool
The RLIMS-P web service provides two endpoints, one pre-grounded, the
other not so much. The grounded endpoint returns far less content, and
may perform some grounding that can be handled by the grounding mapper.
Returns
-------
:py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
An RlimspProcessor which contains a list of extracted INDRA Statements
in its statements attribute.
"""
if with_grounding:
fmt = '%s.normed/%s/%s'
else:
fmt = '%s/%s/%s'
resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val))
if resp.status_code != 200:
raise RLIMSP_Error("Bad status code: %d - %s"
% (resp.status_code, resp.reason))
rp = RlimspProcessor(resp.json())
rp.extract_statements()
return rp | python | def process_from_webservice(id_val, id_type='pmcid', source='pmc',
with_grounding=True):
"""Return an output from RLIMS-p for the given PubMed ID or PMC ID.
Parameters
----------
id_val : str
A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to
be "read".
id_type : str
Either 'pmid' or 'pmcid'. The default is 'pmcid'.
source : str
Either 'pmc' or 'medline', whether you want pmc fulltext or medline
abstracts.
with_grounding : bool
The RLIMS-P web service provides two endpoints, one pre-grounded, the
other not so much. The grounded endpoint returns far less content, and
may perform some grounding that can be handled by the grounding mapper.
Returns
-------
:py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
An RlimspProcessor which contains a list of extracted INDRA Statements
in its statements attribute.
"""
if with_grounding:
fmt = '%s.normed/%s/%s'
else:
fmt = '%s/%s/%s'
resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val))
if resp.status_code != 200:
raise RLIMSP_Error("Bad status code: %d - %s"
% (resp.status_code, resp.reason))
rp = RlimspProcessor(resp.json())
rp.extract_statements()
return rp | [
"def",
"process_from_webservice",
"(",
"id_val",
",",
"id_type",
"=",
"'pmcid'",
",",
"source",
"=",
"'pmc'",
",",
"with_grounding",
"=",
"True",
")",
":",
"if",
"with_grounding",
":",
"fmt",
"=",
"'%s.normed/%s/%s'",
"else",
":",
"fmt",
"=",
"'%s/%s/%s'",
"resp",
"=",
"requests",
".",
"get",
"(",
"RLIMSP_URL",
"+",
"fmt",
"%",
"(",
"source",
",",
"id_type",
",",
"id_val",
")",
")",
"if",
"resp",
".",
"status_code",
"!=",
"200",
":",
"raise",
"RLIMSP_Error",
"(",
"\"Bad status code: %d - %s\"",
"%",
"(",
"resp",
".",
"status_code",
",",
"resp",
".",
"reason",
")",
")",
"rp",
"=",
"RlimspProcessor",
"(",
"resp",
".",
"json",
"(",
")",
")",
"rp",
".",
"extract_statements",
"(",
")",
"return",
"rp"
]
| Return an output from RLIMS-p for the given PubMed ID or PMC ID.
Parameters
----------
id_val : str
A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to
be "read".
id_type : str
Either 'pmid' or 'pmcid'. The default is 'pmcid'.
source : str
Either 'pmc' or 'medline', whether you want pmc fulltext or medline
abstracts.
with_grounding : bool
The RLIMS-P web service provides two endpoints, one pre-grounded, the
other not so much. The grounded endpoint returns far less content, and
may perform some grounding that can be handled by the grounding mapper.
Returns
-------
:py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
An RlimspProcessor which contains a list of extracted INDRA Statements
in its statements attribute. | [
"Return",
"an",
"output",
"from",
"RLIMS",
"-",
"p",
"for",
"the",
"given",
"PubMed",
"ID",
"or",
"PMC",
"ID",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/api.py#L21-L59 | train |
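A hypothetical usage sketch; the PMCID is illustrative.

from indra.sources import rlimsp

rp = rlimsp.process_from_webservice('PMC4455820')
for stmt in rp.statements:
    print(stmt)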
sorgerlab/indra | indra/sources/rlimsp/api.py | process_from_json_file | def process_from_json_file(filename, doc_id_type=None):
"""Process RLIMSP extractions from a bulk-download JSON file.
Parameters
----------
filename : str
Path to the JSON file.
doc_id_type : Optional[str]
In some cases the RLIMS-P paragraph info doesn't contain 'pmid' or
'pmcid' explicitly, instead it contains a 'docId' key. This parameter
allows defining what ID type 'docId' should be interpreted as. Its
values should be 'pmid' or 'pmcid' or None if not used.
Returns
-------
:py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
An RlimspProcessor which contains a list of extracted INDRA Statements
in its statements attribute.
"""
with open(filename, 'rt') as f:
lines = f.readlines()
json_list = []
for line in lines:
json_list.append(json.loads(line))
rp = RlimspProcessor(json_list, doc_id_type=doc_id_type)
rp.extract_statements()
return rp | python | def process_from_json_file(filename, doc_id_type=None):
"""Process RLIMSP extractions from a bulk-download JSON file.
Parameters
----------
filename : str
Path to the JSON file.
doc_id_type : Optional[str]
In some cases the RLIMS-P paragraph info doesn't contain 'pmid' or
'pmcid' explicitly, instead it contains a 'docId' key. This parameter
allows defining what ID type 'docId' should be interpreted as. Its
values should be 'pmid' or 'pmcid' or None if not used.
Returns
-------
:py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
An RlimspProcessor which contains a list of extracted INDRA Statements
in its statements attribute.
"""
with open(filename, 'rt') as f:
lines = f.readlines()
json_list = []
for line in lines:
json_list.append(json.loads(line))
rp = RlimspProcessor(json_list, doc_id_type=doc_id_type)
rp.extract_statements()
return rp | [
"def",
"process_from_json_file",
"(",
"filename",
",",
"doc_id_type",
"=",
"None",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rt'",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"json_list",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"json_list",
".",
"append",
"(",
"json",
".",
"loads",
"(",
"line",
")",
")",
"rp",
"=",
"RlimspProcessor",
"(",
"json_list",
",",
"doc_id_type",
"=",
"doc_id_type",
")",
"rp",
".",
"extract_statements",
"(",
")",
"return",
"rp"
]
| Process RLIMSP extractions from a bulk-download JSON file.
Parameters
----------
filename : str
Path to the JSON file.
doc_id_type : Optional[str]
In some cases the RLIMS-P paragraph info doesn't contain 'pmid' or
'pmcid' explicitly, instead it contains a 'docId' key. This parameter
allows defining what ID type 'docId' should be interpreted as. Its
values should be 'pmid' or 'pmcid' or None if not used.
Returns
-------
:py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
An RlimspProcessor which contains a list of extracted INDRA Statements
in its statements attribute. | [
"Process",
"RLIMSP",
"extractions",
"from",
"a",
"bulk",
"-",
"download",
"JSON",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/api.py#L62-L88 | train |
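The bulk-download format is JSON-lines, one JSON object per line; a minimal standalone sketch of the parsing step above:

import json

lines = ['{"docId": "12345"}', '{"docId": "67890"}']
json_list = [json.loads(line) for line in lines]
print(json_list)  # [{'docId': '12345'}, {'docId': '67890'}]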
sorgerlab/indra | indra/util/nested_dict.py | NestedDict.get | def get(self, key):
"Find the first value within the tree which has the key."
if key in self.keys():
return self[key]
else:
res = None
for v in self.values():
# This could get weird if the actual expected returned value
# is None, especially in the case of overlap. Any ambiguity
# would be resolved by get_path(s).
if hasattr(v, 'get'):
res = v.get(key)
if res is not None:
break
return res | python | def get(self, key):
"Find the first value within the tree which has the key."
if key in self.keys():
return self[key]
else:
res = None
for v in self.values():
# This could get weird if the actual expected returned value
# is None, especially in the case of overlap. Any ambiguity
# would be resolved by get_path(s).
if hasattr(v, 'get'):
res = v.get(key)
if res is not None:
break
return res | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"in",
"self",
".",
"keys",
"(",
")",
":",
"return",
"self",
"[",
"key",
"]",
"else",
":",
"res",
"=",
"None",
"for",
"v",
"in",
"self",
".",
"values",
"(",
")",
":",
"# This could get weird if the actual expected returned value",
"# is None, especially in teh case of overlap. Any ambiguity",
"# would be resolved by get_path(s).",
"if",
"hasattr",
"(",
"v",
",",
"'get'",
")",
":",
"res",
"=",
"v",
".",
"get",
"(",
"key",
")",
"if",
"res",
"is",
"not",
"None",
":",
"break",
"return",
"res"
]
| Find the first value within the tree which has the key. | [
"Find",
"the",
"first",
"value",
"within",
"the",
"tree",
"which",
"has",
"the",
"key",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/nested_dict.py#L58-L72 | train |
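A usage sketch, assuming NestedDict's __getitem__ (defined earlier in this module) auto-creates missing intermediate levels:

from indra.util.nested_dict import NestedDict

d = NestedDict()
d['x']['y']['z'] = 5
print(d.get('z'))  # 5, found by recursing through 'x' and 'y'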
sorgerlab/indra | indra/util/nested_dict.py | NestedDict.get_path | def get_path(self, key):
"Like `get`, but also return the path taken to the value."
if key in self.keys():
return (key,), self[key]
else:
key_path, res = (None, None)
for sub_key, v in self.items():
if isinstance(v, self.__class__):
key_path, res = v.get_path(key)
elif hasattr(v, 'get'):
res = v.get(key)
key_path = (key,) if res is not None else None
if res is not None and key_path is not None:
key_path = (sub_key,) + key_path
break
return key_path, res | python | def get_path(self, key):
"Like `get`, but also return the path taken to the value."
if key in self.keys():
return (key,), self[key]
else:
key_path, res = (None, None)
for sub_key, v in self.items():
if isinstance(v, self.__class__):
key_path, res = v.get_path(key)
elif hasattr(v, 'get'):
res = v.get(key)
key_path = (key,) if res is not None else None
if res is not None and key_path is not None:
key_path = (sub_key,) + key_path
break
return key_path, res | [
"def",
"get_path",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"in",
"self",
".",
"keys",
"(",
")",
":",
"return",
"(",
"key",
",",
")",
",",
"self",
"[",
"key",
"]",
"else",
":",
"key_path",
",",
"res",
"=",
"(",
"None",
",",
"None",
")",
"for",
"sub_key",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"self",
".",
"__class__",
")",
":",
"key_path",
",",
"res",
"=",
"v",
".",
"get_path",
"(",
"key",
")",
"elif",
"hasattr",
"(",
"v",
",",
"'get'",
")",
":",
"res",
"=",
"v",
".",
"get",
"(",
"key",
")",
"key_path",
"=",
"(",
"key",
",",
")",
"if",
"res",
"is",
"not",
"None",
"else",
"None",
"if",
"res",
"is",
"not",
"None",
"and",
"key_path",
"is",
"not",
"None",
":",
"key_path",
"=",
"(",
"sub_key",
",",
")",
"+",
"key_path",
"break",
"return",
"key_path",
",",
"res"
]
| Like `get`, but also return the path taken to the value. | [
"Like",
"get",
"but",
"also",
"return",
"the",
"path",
"taken",
"to",
"the",
"value",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/nested_dict.py#L74-L89 | train |
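Continuing under the same assumption, get_path also reports the chain of keys that led to the match (setup repeated so the sketch stands alone):

from indra.util.nested_dict import NestedDict

d = NestedDict()
d['x']['y']['z'] = 5
print(d.get_path('z'))  # (('x', 'y', 'z'), 5)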
sorgerlab/indra | indra/util/nested_dict.py | NestedDict.gets | def gets(self, key):
"Like `get`, but return all matches, not just the first."
result_list = []
if key in self.keys():
result_list.append(self[key])
for v in self.values():
if isinstance(v, self.__class__):
sub_res_list = v.gets(key)
for res in sub_res_list:
result_list.append(res)
elif isinstance(v, dict):
if key in v.keys():
result_list.append(v[key])
return result_list | python | def gets(self, key):
"Like `get`, but return all matches, not just the first."
result_list = []
if key in self.keys():
result_list.append(self[key])
for v in self.values():
if isinstance(v, self.__class__):
sub_res_list = v.gets(key)
for res in sub_res_list:
result_list.append(res)
elif isinstance(v, dict):
if key in v.keys():
result_list.append(v[key])
return result_list | [
"def",
"gets",
"(",
"self",
",",
"key",
")",
":",
"result_list",
"=",
"[",
"]",
"if",
"key",
"in",
"self",
".",
"keys",
"(",
")",
":",
"result_list",
".",
"append",
"(",
"self",
"[",
"key",
"]",
")",
"for",
"v",
"in",
"self",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"self",
".",
"__class__",
")",
":",
"sub_res_list",
"=",
"v",
".",
"gets",
"(",
"key",
")",
"for",
"res",
"in",
"sub_res_list",
":",
"result_list",
".",
"append",
"(",
"res",
")",
"elif",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"if",
"key",
"in",
"v",
".",
"keys",
"(",
")",
":",
"result_list",
".",
"append",
"(",
"v",
"[",
"key",
"]",
")",
"return",
"result_list"
]
| Like `get`, but return all matches, not just the first. | [
"Like",
"get",
"but",
"return",
"all",
"matches",
"not",
"just",
"the",
"first",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/nested_dict.py#L91-L104 | train |
sorgerlab/indra | indra/util/nested_dict.py | NestedDict.get_paths | def get_paths(self, key):
"Like `gets`, but include the paths, like `get_path` for all matches."
result_list = []
if key in self.keys():
result_list.append(((key,), self[key]))
for sub_key, v in self.items():
if isinstance(v, self.__class__):
sub_res_list = v.get_paths(key)
for key_path, res in sub_res_list:
result_list.append(((sub_key,) + key_path, res))
elif isinstance(v, dict):
if key in v.keys():
result_list.append(((sub_key, key), v[key]))
return result_list | python | def get_paths(self, key):
"Like `gets`, but include the paths, like `get_path` for all matches."
result_list = []
if key in self.keys():
result_list.append(((key,), self[key]))
for sub_key, v in self.items():
if isinstance(v, self.__class__):
sub_res_list = v.get_paths(key)
for key_path, res in sub_res_list:
result_list.append(((sub_key,) + key_path, res))
elif isinstance(v, dict):
if key in v.keys():
result_list.append(((sub_key, key), v[key]))
return result_list | [
"def",
"get_paths",
"(",
"self",
",",
"key",
")",
":",
"result_list",
"=",
"[",
"]",
"if",
"key",
"in",
"self",
".",
"keys",
"(",
")",
":",
"result_list",
".",
"append",
"(",
"(",
"(",
"key",
",",
")",
",",
"self",
"[",
"key",
"]",
")",
")",
"for",
"sub_key",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"self",
".",
"__class__",
")",
":",
"sub_res_list",
"=",
"v",
".",
"get_paths",
"(",
"key",
")",
"for",
"key_path",
",",
"res",
"in",
"sub_res_list",
":",
"result_list",
".",
"append",
"(",
"(",
"(",
"sub_key",
",",
")",
"+",
"key_path",
",",
"res",
")",
")",
"elif",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"if",
"key",
"in",
"v",
".",
"keys",
"(",
")",
":",
"result_list",
".",
"append",
"(",
"(",
"(",
"sub_key",
",",
"key",
")",
",",
"v",
"[",
"key",
"]",
")",
")",
"return",
"result_list"
]
| Like `gets`, but include the paths, like `get_path` for all matches. | [
"Like",
"gets",
"but",
"include",
"the",
"paths",
"like",
"get_path",
"for",
"all",
"matches",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/nested_dict.py#L106-L119 | train |
sorgerlab/indra | indra/util/nested_dict.py | NestedDict.get_leaves | def get_leaves(self):
"""Get the deepest entries as a flat set."""
ret_set = set()
for val in self.values():
if isinstance(val, self.__class__):
ret_set |= val.get_leaves()
elif isinstance(val, dict):
ret_set |= set(val.values())
elif isinstance(val, list):
ret_set |= set(val)
elif isinstance(val, set):
ret_set |= val
else:
ret_set.add(val)
return ret_set | python | def get_leaves(self):
"""Get the deepest entries as a flat set."""
ret_set = set()
for val in self.values():
if isinstance(val, self.__class__):
ret_set |= val.get_leaves()
elif isinstance(val, dict):
ret_set |= set(val.values())
elif isinstance(val, list):
ret_set |= set(val)
elif isinstance(val, set):
ret_set |= val
else:
ret_set.add(val)
return ret_set | [
"def",
"get_leaves",
"(",
"self",
")",
":",
"ret_set",
"=",
"set",
"(",
")",
"for",
"val",
"in",
"self",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"self",
".",
"__class__",
")",
":",
"ret_set",
"|=",
"val",
".",
"get_leaves",
"(",
")",
"elif",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"ret_set",
"|=",
"set",
"(",
"val",
".",
"values",
"(",
")",
")",
"elif",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"ret_set",
"|=",
"set",
"(",
"val",
")",
"elif",
"isinstance",
"(",
"val",
",",
"set",
")",
":",
"ret_set",
"|=",
"val",
"else",
":",
"ret_set",
".",
"add",
"(",
"val",
")",
"return",
"ret_set"
]
| Get the deepest entries as a flat set. | [
"Get",
"the",
"deepest",
"entries",
"as",
"a",
"flat",
"set",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/nested_dict.py#L121-L135 | train |
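A sketch of get_leaves flattening every terminal value into one set, under the same autovivification assumption as above:

from indra.util.nested_dict import NestedDict

d = NestedDict()
d['a']['b'] = 'leaf1'
d['a']['c'] = 'leaf2'
d['top'] = 'leaf3'
print(d.get_leaves())  # {'leaf1', 'leaf2', 'leaf3'}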
sorgerlab/indra | indra/sources/reach/processor.py | determine_reach_subtype | def determine_reach_subtype(event_name):
"""Returns the category of reach rule from the reach rule instance.
Looks at a list of regular
expressions corresponding to reach rule types, and returns the longest
regexp that matches, or None if none of them match.
Parameters
----------
event_name : str
The name of the REACH rule instance to subtype
Returns
-------
best_match : str
A regular expression corresponding to the reach rule that was used to
extract this evidence
"""
best_match_length = None
best_match = None
for ss in reach_rule_regexps:
if re.search(ss, event_name):
if best_match is None or len(ss) > best_match_length:
best_match = ss
best_match_length = len(ss)
return best_match | python | def determine_reach_subtype(event_name):
"""Returns the category of reach rule from the reach rule instance.
Looks at a list of regular
expressions corresponding to reach rule types, and returns the longest
regexp that matches, or None if none of them match.
Parameters
----------
event_name : str
The name of the REACH rule instance to subtype
Returns
-------
best_match : str
A regular expression corresponding to the reach rule that was used to
extract this evidence
"""
best_match_length = None
best_match = None
for ss in reach_rule_regexps:
if re.search(ss, event_name):
if best_match is None or len(ss) > best_match_length:
best_match = ss
best_match_length = len(ss)
return best_match | [
"def",
"determine_reach_subtype",
"(",
"event_name",
")",
":",
"best_match_length",
"=",
"None",
"best_match",
"=",
"None",
"for",
"ss",
"in",
"reach_rule_regexps",
":",
"if",
"re",
".",
"search",
"(",
"ss",
",",
"event_name",
")",
":",
"if",
"best_match",
"is",
"None",
"or",
"len",
"(",
"ss",
")",
">",
"best_match_length",
":",
"best_match",
"=",
"ss",
"best_match_length",
"=",
"len",
"(",
"ss",
")",
"return",
"best_match"
]
| Returns the category of reach rule from the reach rule instance.
Looks at a list of regular
expressions corresponding to reach rule types, and returns the longest
regexp that matches, or None if none of them match.
Parameters
----------
event_name : str
The name of the REACH rule instance to subtype
Returns
-------
best_match : str
A regular expression corresponding to the reach rule that was used to
extract this evidence | [
"Returns",
"the",
"category",
"of",
"reach",
"rule",
"from",
"the",
"reach",
"rule",
"instance",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L835-L862 | train |
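A standalone sketch of the longest-match selection; reach_rule_regexps is module-level in the real processor, so a toy list stands in here:

import re

reach_rule_regexps = ['phospho', 'phosphorylation_token']

def longest_matching_regexp(event_name):
    best_match, best_len = None, -1
    for ss in reach_rule_regexps:
        if re.search(ss, event_name) and len(ss) > best_len:
            best_match, best_len = ss, len(ss)
    return best_match

print(longest_matching_regexp('positive_phosphorylation_token_1'))
# -> 'phosphorylation_token' (the longer of the two matching patterns)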
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor.print_event_statistics | def print_event_statistics(self):
"""Print the number of events in the REACH output by type."""
logger.info('All events by type')
logger.info('-------------------')
for k, v in self.all_events.items():
logger.info('%s, %s' % (k, len(v)))
logger.info('-------------------') | python | def print_event_statistics(self):
"""Print the number of events in the REACH output by type."""
logger.info('All events by type')
logger.info('-------------------')
for k, v in self.all_events.items():
logger.info('%s, %s' % (k, len(v)))
logger.info('-------------------') | [
"def",
"print_event_statistics",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'All events by type'",
")",
"logger",
".",
"info",
"(",
"'-------------------'",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"all_events",
".",
"items",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'%s, %s'",
"%",
"(",
"k",
",",
"len",
"(",
"v",
")",
")",
")",
"logger",
".",
"info",
"(",
"'-------------------'",
")"
]
| Print the number of events in the REACH output by type. | [
"Print",
"the",
"number",
"of",
"events",
"in",
"the",
"REACH",
"output",
"by",
"type",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L52-L58 | train |
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor.get_all_events | def get_all_events(self):
"""Gather all event IDs in the REACH output by type.
These IDs are stored in the self.all_events dict.
"""
self.all_events = {}
events = self.tree.execute("$.events.frames")
if events is None:
return
for e in events:
event_type = e.get('type')
frame_id = e.get('frame_id')
try:
self.all_events[event_type].append(frame_id)
except KeyError:
self.all_events[event_type] = [frame_id] | python | def get_all_events(self):
"""Gather all event IDs in the REACH output by type.
These IDs are stored in the self.all_events dict.
"""
self.all_events = {}
events = self.tree.execute("$.events.frames")
if events is None:
return
for e in events:
event_type = e.get('type')
frame_id = e.get('frame_id')
try:
self.all_events[event_type].append(frame_id)
except KeyError:
self.all_events[event_type] = [frame_id] | [
"def",
"get_all_events",
"(",
"self",
")",
":",
"self",
".",
"all_events",
"=",
"{",
"}",
"events",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"\"$.events.frames\"",
")",
"if",
"events",
"is",
"None",
":",
"return",
"for",
"e",
"in",
"events",
":",
"event_type",
"=",
"e",
".",
"get",
"(",
"'type'",
")",
"frame_id",
"=",
"e",
".",
"get",
"(",
"'frame_id'",
")",
"try",
":",
"self",
".",
"all_events",
"[",
"event_type",
"]",
".",
"append",
"(",
"frame_id",
")",
"except",
"KeyError",
":",
"self",
".",
"all_events",
"[",
"event_type",
"]",
"=",
"[",
"frame_id",
"]"
]
| Gather all event IDs in the REACH output by type.
These IDs are stored in the self.all_events dict. | [
"Gather",
"all",
"event",
"IDs",
"in",
"the",
"REACH",
"output",
"by",
"type",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L60-L75 | train |
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor.get_modifications | def get_modifications(self):
"""Extract Modification INDRA Statements."""
# Find all event frames that are a type of protein modification
qstr = "$.events.frames[(@.type is 'protein-modification')]"
res = self.tree.execute(qstr)
if res is None:
return
# Extract each of the results when possible
for r in res:
# The subtype of the modification
modification_type = r.get('subtype')
# Skip negated events (i.e. something doesn't happen)
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
annotations, context = self._get_annot_context(r)
frame_id = r['frame_id']
args = r['arguments']
site = None
theme = None
# Find the substrate (the "theme" agent here) and the
# site and position it is modified on
for a in args:
if self._get_arg_type(a) == 'theme':
theme = a['arg']
elif self._get_arg_type(a) == 'site':
site = a['text']
theme_agent, theme_coords = self._get_agent_from_entity(theme)
if site is not None:
mods = self._parse_site_text(site)
else:
mods = [(None, None)]
for mod in mods:
# Add up to one statement for each site
residue, pos = mod
# Now we need to look for all regulation events to get to the
# enzymes (the "controller" here)
qstr = "$.events.frames[(@.type is 'regulation') and " + \
"(@.arguments[0].arg is '%s')]" % frame_id
reg_res = self.tree.execute(qstr)
reg_res = list(reg_res)
for reg in reg_res:
controller_agent, controller_coords = None, None
for a in reg['arguments']:
if self._get_arg_type(a) == 'controller':
controller = a.get('arg')
if controller is not None:
controller_agent, controller_coords = \
self._get_agent_from_entity(controller)
break
# Check the polarity of the regulation and if negative,
# flip the modification type.
# For instance, negative-regulation of a phosphorylation
# will become an (indirect) dephosphorylation
reg_subtype = reg.get('subtype')
if reg_subtype == 'negative-regulation':
modification_type = \
modtype_to_inverse.get(modification_type)
if not modification_type:
logger.warning('Unhandled modification type: %s' %
modification_type)
continue
sentence = reg['verbose-text']
annotations['agents']['coords'] = [controller_coords,
theme_coords]
ev = Evidence(source_api='reach', text=sentence,
annotations=annotations, pmid=self.citation,
context=context, epistemics=epistemics)
args = [controller_agent, theme_agent, residue, pos, ev]
# Here ModStmt is a sub-class of Modification
ModStmt = modtype_to_modclass.get(modification_type)
if ModStmt is None:
logger.warning('Unhandled modification type: %s' %
modification_type)
else:
# Handle this special case here because only
# enzyme argument is needed
if modification_type == 'autophosphorylation':
args = [theme_agent, residue, pos, ev]
self.statements.append(ModStmt(*args)) | python | def get_modifications(self):
"""Extract Modification INDRA Statements."""
# Find all event frames that are a type of protein modification
qstr = "$.events.frames[(@.type is 'protein-modification')]"
res = self.tree.execute(qstr)
if res is None:
return
# Extract each of the results when possible
for r in res:
# The subtype of the modification
modification_type = r.get('subtype')
# Skip negated events (i.e. something doesn't happen)
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
annotations, context = self._get_annot_context(r)
frame_id = r['frame_id']
args = r['arguments']
site = None
theme = None
# Find the substrate (the "theme" agent here) and the
# site and position it is modified on
for a in args:
if self._get_arg_type(a) == 'theme':
theme = a['arg']
elif self._get_arg_type(a) == 'site':
site = a['text']
theme_agent, theme_coords = self._get_agent_from_entity(theme)
if site is not None:
mods = self._parse_site_text(site)
else:
mods = [(None, None)]
for mod in mods:
# Add up to one statement for each site
residue, pos = mod
            # Now we need to look for all regulation events to get to the
# enzymes (the "controller" here)
qstr = "$.events.frames[(@.type is 'regulation') and " + \
"(@.arguments[0].arg is '%s')]" % frame_id
reg_res = self.tree.execute(qstr)
reg_res = list(reg_res)
for reg in reg_res:
controller_agent, controller_coords = None, None
for a in reg['arguments']:
if self._get_arg_type(a) == 'controller':
controller = a.get('arg')
if controller is not None:
controller_agent, controller_coords = \
self._get_agent_from_entity(controller)
break
# Check the polarity of the regulation and if negative,
# flip the modification type.
# For instance, negative-regulation of a phosphorylation
# will become an (indirect) dephosphorylation
reg_subtype = reg.get('subtype')
                if reg_subtype == 'negative-regulation':
                    mod_type = \
                        modtype_to_inverse.get(modification_type)
                    if not mod_type:
                        logger.warning('Unhandled modification type: %s' %
                                       modification_type)
                        continue
                else:
                    mod_type = modification_type
sentence = reg['verbose-text']
annotations['agents']['coords'] = [controller_coords,
theme_coords]
ev = Evidence(source_api='reach', text=sentence,
annotations=annotations, pmid=self.citation,
context=context, epistemics=epistemics)
args = [controller_agent, theme_agent, residue, pos, ev]
# Here ModStmt is a sub-class of Modification
                ModStmt = modtype_to_modclass.get(mod_type)
                if ModStmt is None:
                    logger.warning('Unhandled modification type: %s' %
                                   mod_type)
                else:
                    # Handle this special case here because only the
                    # enzyme argument is needed
                    if mod_type == 'autophosphorylation':
args = [theme_agent, residue, pos, ev]
self.statements.append(ModStmt(*args)) | [
"def",
"get_modifications",
"(",
"self",
")",
":",
"# Find all event frames that are a type of protein modification",
"qstr",
"=",
"\"$.events.frames[(@.type is 'protein-modification')]\"",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
"is",
"None",
":",
"return",
"# Extract each of the results when possible",
"for",
"r",
"in",
"res",
":",
"# The subtype of the modification",
"modification_type",
"=",
"r",
".",
"get",
"(",
"'subtype'",
")",
"# Skip negated events (i.e. something doesn't happen)",
"epistemics",
"=",
"self",
".",
"_get_epistemics",
"(",
"r",
")",
"if",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
":",
"continue",
"annotations",
",",
"context",
"=",
"self",
".",
"_get_annot_context",
"(",
"r",
")",
"frame_id",
"=",
"r",
"[",
"'frame_id'",
"]",
"args",
"=",
"r",
"[",
"'arguments'",
"]",
"site",
"=",
"None",
"theme",
"=",
"None",
"# Find the substrate (the \"theme\" agent here) and the",
"# site and position it is modified on",
"for",
"a",
"in",
"args",
":",
"if",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'theme'",
":",
"theme",
"=",
"a",
"[",
"'arg'",
"]",
"elif",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'site'",
":",
"site",
"=",
"a",
"[",
"'text'",
"]",
"theme_agent",
",",
"theme_coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"theme",
")",
"if",
"site",
"is",
"not",
"None",
":",
"mods",
"=",
"self",
".",
"_parse_site_text",
"(",
"site",
")",
"else",
":",
"mods",
"=",
"[",
"(",
"None",
",",
"None",
")",
"]",
"for",
"mod",
"in",
"mods",
":",
"# Add up to one statement for each site",
"residue",
",",
"pos",
"=",
"mod",
"# Now we need to look for all regulation event to get to the",
"# enzymes (the \"controller\" here)",
"qstr",
"=",
"\"$.events.frames[(@.type is 'regulation') and \"",
"+",
"\"(@.arguments[0].arg is '%s')]\"",
"%",
"frame_id",
"reg_res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"reg_res",
"=",
"list",
"(",
"reg_res",
")",
"for",
"reg",
"in",
"reg_res",
":",
"controller_agent",
",",
"controller_coords",
"=",
"None",
",",
"None",
"for",
"a",
"in",
"reg",
"[",
"'arguments'",
"]",
":",
"if",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'controller'",
":",
"controller",
"=",
"a",
".",
"get",
"(",
"'arg'",
")",
"if",
"controller",
"is",
"not",
"None",
":",
"controller_agent",
",",
"controller_coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"controller",
")",
"break",
"# Check the polarity of the regulation and if negative,",
"# flip the modification type.",
"# For instance, negative-regulation of a phosphorylation",
"# will become an (indirect) dephosphorylation",
"reg_subtype",
"=",
"reg",
".",
"get",
"(",
"'subtype'",
")",
"if",
"reg_subtype",
"==",
"'negative-regulation'",
":",
"modification_type",
"=",
"modtype_to_inverse",
".",
"get",
"(",
"modification_type",
")",
"if",
"not",
"modification_type",
":",
"logger",
".",
"warning",
"(",
"'Unhandled modification type: %s'",
"%",
"modification_type",
")",
"continue",
"sentence",
"=",
"reg",
"[",
"'verbose-text'",
"]",
"annotations",
"[",
"'agents'",
"]",
"[",
"'coords'",
"]",
"=",
"[",
"controller_coords",
",",
"theme_coords",
"]",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'reach'",
",",
"text",
"=",
"sentence",
",",
"annotations",
"=",
"annotations",
",",
"pmid",
"=",
"self",
".",
"citation",
",",
"context",
"=",
"context",
",",
"epistemics",
"=",
"epistemics",
")",
"args",
"=",
"[",
"controller_agent",
",",
"theme_agent",
",",
"residue",
",",
"pos",
",",
"ev",
"]",
"# Here ModStmt is a sub-class of Modification",
"ModStmt",
"=",
"modtype_to_modclass",
".",
"get",
"(",
"modification_type",
")",
"if",
"ModStmt",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"'Unhandled modification type: %s'",
"%",
"modification_type",
")",
"else",
":",
"# Handle this special case here because only",
"# enzyme argument is needed",
"if",
"modification_type",
"==",
"'autophosphorylation'",
":",
"args",
"=",
"[",
"theme_agent",
",",
"residue",
",",
"pos",
",",
"ev",
"]",
"self",
".",
"statements",
".",
"append",
"(",
"ModStmt",
"(",
"*",
"args",
")",
")"
]
| Extract Modification INDRA Statements. | [
"Extract",
"Modification",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L87-L173 | train |
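A minimal sketch of the polarity flip used in get_modifications above, assuming the modtype_to_inverse and modtype_to_modclass mappings that this processor imports from indra.statements; the input type is chosen purely for illustration:

from indra.statements import modtype_to_inverse, modtype_to_modclass

modification_type = 'phosphorylation'
# A negative-regulation wrapper flips the type to its inverse
mod_type = modtype_to_inverse.get(modification_type)
ModStmt = modtype_to_modclass.get(mod_type)
print(mod_type, ModStmt.__name__)  # dephosphorylation Dephosphorylation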
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor.get_regulate_amounts | def get_regulate_amounts(self):
"""Extract RegulateAmount INDRA Statements."""
qstr = "$.events.frames[(@.type is 'transcription')]"
res = self.tree.execute(qstr)
all_res = []
if res is not None:
all_res += list(res)
qstr = "$.events.frames[(@.type is 'amount')]"
res = self.tree.execute(qstr)
if res is not None:
all_res += list(res)
for r in all_res:
subtype = r.get('subtype')
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
annotations, context = self._get_annot_context(r)
frame_id = r['frame_id']
args = r['arguments']
theme = None
for a in args:
if self._get_arg_type(a) == 'theme':
theme = a['arg']
break
if theme is None:
continue
theme_agent, theme_coords = self._get_agent_from_entity(theme)
qstr = "$.events.frames[(@.type is 'regulation') and " + \
"(@.arguments[0].arg is '%s')]" % frame_id
reg_res = self.tree.execute(qstr)
for reg in reg_res:
controller_agent, controller_coords = None, None
for a in reg['arguments']:
if self._get_arg_type(a) == 'controller':
controller_agent, controller_coords = \
self._get_controller_agent(a)
sentence = reg['verbose-text']
annotations['agents']['coords'] = [controller_coords,
theme_coords]
ev = Evidence(source_api='reach', text=sentence,
annotations=annotations, pmid=self.citation,
context=context, epistemics=epistemics)
args = [controller_agent, theme_agent, ev]
subtype = reg.get('subtype')
if subtype == 'positive-regulation':
st = IncreaseAmount(*args)
else:
st = DecreaseAmount(*args)
self.statements.append(st) | python | def get_regulate_amounts(self):
"""Extract RegulateAmount INDRA Statements."""
qstr = "$.events.frames[(@.type is 'transcription')]"
res = self.tree.execute(qstr)
all_res = []
if res is not None:
all_res += list(res)
qstr = "$.events.frames[(@.type is 'amount')]"
res = self.tree.execute(qstr)
if res is not None:
all_res += list(res)
for r in all_res:
subtype = r.get('subtype')
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
annotations, context = self._get_annot_context(r)
frame_id = r['frame_id']
args = r['arguments']
theme = None
for a in args:
if self._get_arg_type(a) == 'theme':
theme = a['arg']
break
if theme is None:
continue
theme_agent, theme_coords = self._get_agent_from_entity(theme)
qstr = "$.events.frames[(@.type is 'regulation') and " + \
"(@.arguments[0].arg is '%s')]" % frame_id
reg_res = self.tree.execute(qstr)
for reg in reg_res:
controller_agent, controller_coords = None, None
for a in reg['arguments']:
if self._get_arg_type(a) == 'controller':
controller_agent, controller_coords = \
self._get_controller_agent(a)
sentence = reg['verbose-text']
annotations['agents']['coords'] = [controller_coords,
theme_coords]
ev = Evidence(source_api='reach', text=sentence,
annotations=annotations, pmid=self.citation,
context=context, epistemics=epistemics)
args = [controller_agent, theme_agent, ev]
subtype = reg.get('subtype')
if subtype == 'positive-regulation':
st = IncreaseAmount(*args)
else:
st = DecreaseAmount(*args)
self.statements.append(st) | [
"def",
"get_regulate_amounts",
"(",
"self",
")",
":",
"qstr",
"=",
"\"$.events.frames[(@.type is 'transcription')]\"",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"all_res",
"=",
"[",
"]",
"if",
"res",
"is",
"not",
"None",
":",
"all_res",
"+=",
"list",
"(",
"res",
")",
"qstr",
"=",
"\"$.events.frames[(@.type is 'amount')]\"",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
"is",
"not",
"None",
":",
"all_res",
"+=",
"list",
"(",
"res",
")",
"for",
"r",
"in",
"all_res",
":",
"subtype",
"=",
"r",
".",
"get",
"(",
"'subtype'",
")",
"epistemics",
"=",
"self",
".",
"_get_epistemics",
"(",
"r",
")",
"if",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
":",
"continue",
"annotations",
",",
"context",
"=",
"self",
".",
"_get_annot_context",
"(",
"r",
")",
"frame_id",
"=",
"r",
"[",
"'frame_id'",
"]",
"args",
"=",
"r",
"[",
"'arguments'",
"]",
"theme",
"=",
"None",
"for",
"a",
"in",
"args",
":",
"if",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'theme'",
":",
"theme",
"=",
"a",
"[",
"'arg'",
"]",
"break",
"if",
"theme",
"is",
"None",
":",
"continue",
"theme_agent",
",",
"theme_coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"theme",
")",
"qstr",
"=",
"\"$.events.frames[(@.type is 'regulation') and \"",
"+",
"\"(@.arguments[0].arg is '%s')]\"",
"%",
"frame_id",
"reg_res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"for",
"reg",
"in",
"reg_res",
":",
"controller_agent",
",",
"controller_coords",
"=",
"None",
",",
"None",
"for",
"a",
"in",
"reg",
"[",
"'arguments'",
"]",
":",
"if",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'controller'",
":",
"controller_agent",
",",
"controller_coords",
"=",
"self",
".",
"_get_controller_agent",
"(",
"a",
")",
"sentence",
"=",
"reg",
"[",
"'verbose-text'",
"]",
"annotations",
"[",
"'agents'",
"]",
"[",
"'coords'",
"]",
"=",
"[",
"controller_coords",
",",
"theme_coords",
"]",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'reach'",
",",
"text",
"=",
"sentence",
",",
"annotations",
"=",
"annotations",
",",
"pmid",
"=",
"self",
".",
"citation",
",",
"context",
"=",
"context",
",",
"epistemics",
"=",
"epistemics",
")",
"args",
"=",
"[",
"controller_agent",
",",
"theme_agent",
",",
"ev",
"]",
"subtype",
"=",
"reg",
".",
"get",
"(",
"'subtype'",
")",
"if",
"subtype",
"==",
"'positive-regulation'",
":",
"st",
"=",
"IncreaseAmount",
"(",
"*",
"args",
")",
"else",
":",
"st",
"=",
"DecreaseAmount",
"(",
"*",
"args",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract RegulateAmount INDRA Statements. | [
"Extract",
"RegulateAmount",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L175-L224 | train |
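The self.tree.execute(qstr) lookups above are ObjectPath queries over the REACH JSON. A self-contained sketch on an invented two-frame document (assumes the objectpath package that this processor relies on):

import objectpath

doc = {'events': {'frames': [
    {'frame_id': 'evt-1', 'type': 'transcription', 'arguments': []},
    {'frame_id': 'reg-1', 'type': 'regulation',
     'arguments': [{'arg': 'evt-1', 'type': 'controlled'}]},
]}}
tree = objectpath.Tree(doc)
qstr = "$.events.frames[(@.type is 'regulation') and " + \
       "(@.arguments[0].arg is 'evt-1')]"
# Should yield only the reg-1 frame
print([f['frame_id'] for f in tree.execute(qstr)])  # ['reg-1']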
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor.get_complexes | def get_complexes(self):
"""Extract INDRA Complex Statements."""
qstr = "$.events.frames[@.type is 'complex-assembly']"
res = self.tree.execute(qstr)
if res is None:
return
for r in res:
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
            # Due to an issue with the REACH output serialization
            # (though seemingly not with the raw mentions), a redundant
            # complex-assembly event is sometimes reported; it can be
            # recognized by its missing direct flag, which we filter
            # for here
if epistemics.get('direct') is None:
continue
annotations, context = self._get_annot_context(r)
args = r['arguments']
sentence = r['verbose-text']
members = []
agent_coordinates = []
for a in args:
agent, coords = self._get_agent_from_entity(a['arg'])
members.append(agent)
agent_coordinates.append(coords)
annotations['agents']['coords'] = agent_coordinates
ev = Evidence(source_api='reach', text=sentence,
annotations=annotations, pmid=self.citation,
context=context, epistemics=epistemics)
stmt = Complex(members, ev)
self.statements.append(stmt) | python | def get_complexes(self):
"""Extract INDRA Complex Statements."""
qstr = "$.events.frames[@.type is 'complex-assembly']"
res = self.tree.execute(qstr)
if res is None:
return
for r in res:
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
            # Due to an issue with the REACH output serialization
            # (though seemingly not with the raw mentions), a redundant
            # complex-assembly event is sometimes reported; it can be
            # recognized by its missing direct flag, which we filter
            # for here
if epistemics.get('direct') is None:
continue
annotations, context = self._get_annot_context(r)
args = r['arguments']
sentence = r['verbose-text']
members = []
agent_coordinates = []
for a in args:
agent, coords = self._get_agent_from_entity(a['arg'])
members.append(agent)
agent_coordinates.append(coords)
annotations['agents']['coords'] = agent_coordinates
ev = Evidence(source_api='reach', text=sentence,
annotations=annotations, pmid=self.citation,
context=context, epistemics=epistemics)
stmt = Complex(members, ev)
self.statements.append(stmt) | [
"def",
"get_complexes",
"(",
"self",
")",
":",
"qstr",
"=",
"\"$.events.frames[@.type is 'complex-assembly']\"",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
"is",
"None",
":",
"return",
"for",
"r",
"in",
"res",
":",
"epistemics",
"=",
"self",
".",
"_get_epistemics",
"(",
"r",
")",
"if",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
":",
"continue",
"# Due to an issue with the REACH output serialization",
"# (though seemingly not with the raw mentions), sometimes",
"# a redundant complex-assembly event is reported which can",
"# be recognized by the missing direct flag, which we can filter",
"# for here",
"if",
"epistemics",
".",
"get",
"(",
"'direct'",
")",
"is",
"None",
":",
"continue",
"annotations",
",",
"context",
"=",
"self",
".",
"_get_annot_context",
"(",
"r",
")",
"args",
"=",
"r",
"[",
"'arguments'",
"]",
"sentence",
"=",
"r",
"[",
"'verbose-text'",
"]",
"members",
"=",
"[",
"]",
"agent_coordinates",
"=",
"[",
"]",
"for",
"a",
"in",
"args",
":",
"agent",
",",
"coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"a",
"[",
"'arg'",
"]",
")",
"members",
".",
"append",
"(",
"agent",
")",
"agent_coordinates",
".",
"append",
"(",
"coords",
")",
"annotations",
"[",
"'agents'",
"]",
"[",
"'coords'",
"]",
"=",
"agent_coordinates",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'reach'",
",",
"text",
"=",
"sentence",
",",
"annotations",
"=",
"annotations",
",",
"pmid",
"=",
"self",
".",
"citation",
",",
"context",
"=",
"context",
",",
"epistemics",
"=",
"epistemics",
")",
"stmt",
"=",
"Complex",
"(",
"members",
",",
"ev",
")",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")"
]
| Extract INDRA Complex Statements. | [
"Extract",
"INDRA",
"Complex",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L226-L258 | train |
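For reference, the statement built in the loop above reduces to the following; the agent names and sentence are invented:

from indra.statements import Agent, Complex, Evidence

members = [Agent('GRB2'), Agent('SOS1')]
ev = Evidence(source_api='reach', text='GRB2 binds SOS1.')
stmt = Complex(members, ev)
print(stmt)  # Complex(GRB2(), SOS1())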
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor.get_activation | def get_activation(self):
"""Extract INDRA Activation Statements."""
qstr = "$.events.frames[@.type is 'activation']"
res = self.tree.execute(qstr)
if res is None:
return
for r in res:
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
sentence = r['verbose-text']
annotations, context = self._get_annot_context(r)
ev = Evidence(source_api='reach', text=sentence,
pmid=self.citation, annotations=annotations,
context=context, epistemics=epistemics)
            args = r['arguments']
            # Default to None in case an argument type is missing
            controller_agent, controller_coords = None, None
            controlled_agent, controlled_coords = None, None
for a in args:
if self._get_arg_type(a) == 'controller':
controller_agent, controller_coords = \
self._get_controller_agent(a)
if self._get_arg_type(a) == 'controlled':
controlled = a['arg']
controlled_agent, controlled_coords = \
self._get_agent_from_entity(controlled)
annotations['agents']['coords'] = [controller_coords,
controlled_coords]
if r['subtype'] == 'positive-activation':
st = Activation(controller_agent, controlled_agent,
evidence=ev)
else:
st = Inhibition(controller_agent, controlled_agent,
evidence=ev)
self.statements.append(st) | python | def get_activation(self):
"""Extract INDRA Activation Statements."""
qstr = "$.events.frames[@.type is 'activation']"
res = self.tree.execute(qstr)
if res is None:
return
for r in res:
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
sentence = r['verbose-text']
annotations, context = self._get_annot_context(r)
ev = Evidence(source_api='reach', text=sentence,
pmid=self.citation, annotations=annotations,
context=context, epistemics=epistemics)
            args = r['arguments']
            # Default to None in case an argument type is missing
            controller_agent, controller_coords = None, None
            controlled_agent, controlled_coords = None, None
for a in args:
if self._get_arg_type(a) == 'controller':
controller_agent, controller_coords = \
self._get_controller_agent(a)
if self._get_arg_type(a) == 'controlled':
controlled = a['arg']
controlled_agent, controlled_coords = \
self._get_agent_from_entity(controlled)
annotations['agents']['coords'] = [controller_coords,
controlled_coords]
if r['subtype'] == 'positive-activation':
st = Activation(controller_agent, controlled_agent,
evidence=ev)
else:
st = Inhibition(controller_agent, controlled_agent,
evidence=ev)
self.statements.append(st) | [
"def",
"get_activation",
"(",
"self",
")",
":",
"qstr",
"=",
"\"$.events.frames[@.type is 'activation']\"",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
"is",
"None",
":",
"return",
"for",
"r",
"in",
"res",
":",
"epistemics",
"=",
"self",
".",
"_get_epistemics",
"(",
"r",
")",
"if",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
":",
"continue",
"sentence",
"=",
"r",
"[",
"'verbose-text'",
"]",
"annotations",
",",
"context",
"=",
"self",
".",
"_get_annot_context",
"(",
"r",
")",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'reach'",
",",
"text",
"=",
"sentence",
",",
"pmid",
"=",
"self",
".",
"citation",
",",
"annotations",
"=",
"annotations",
",",
"context",
"=",
"context",
",",
"epistemics",
"=",
"epistemics",
")",
"args",
"=",
"r",
"[",
"'arguments'",
"]",
"for",
"a",
"in",
"args",
":",
"if",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'controller'",
":",
"controller_agent",
",",
"controller_coords",
"=",
"self",
".",
"_get_controller_agent",
"(",
"a",
")",
"if",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'controlled'",
":",
"controlled",
"=",
"a",
"[",
"'arg'",
"]",
"controlled_agent",
",",
"controlled_coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"controlled",
")",
"annotations",
"[",
"'agents'",
"]",
"[",
"'coords'",
"]",
"=",
"[",
"controller_coords",
",",
"controlled_coords",
"]",
"if",
"r",
"[",
"'subtype'",
"]",
"==",
"'positive-activation'",
":",
"st",
"=",
"Activation",
"(",
"controller_agent",
",",
"controlled_agent",
",",
"evidence",
"=",
"ev",
")",
"else",
":",
"st",
"=",
"Inhibition",
"(",
"controller_agent",
",",
"controlled_agent",
",",
"evidence",
"=",
"ev",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract INDRA Activation Statements. | [
"Extract",
"INDRA",
"Activation",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L260-L292 | train |
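The subtype branch above dispatches between two statement classes; a hedged equivalent with invented example agents:

from indra.statements import Agent, Activation, Inhibition

subtype = 'positive-activation'
StmtClass = Activation if subtype == 'positive-activation' else Inhibition
stmt = StmtClass(Agent('BRAF'), Agent('MAP2K1'))
print(type(stmt).__name__)  # Activation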
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor.get_translocation | def get_translocation(self):
"""Extract INDRA Translocation Statements."""
qstr = "$.events.frames[@.type is 'translocation']"
res = self.tree.execute(qstr)
if res is None:
return
for r in res:
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
sentence = r['verbose-text']
annotations, context = self._get_annot_context(r)
args = r['arguments']
            agent, theme_coords = None, None
            from_location = None
            to_location = None
            for a in args:
                if self._get_arg_type(a) == 'theme':
                    agent, theme_coords = self._get_agent_from_entity(a['arg'])
elif self._get_arg_type(a) == 'source':
from_location = self._get_location_by_id(a['arg'])
elif self._get_arg_type(a) == 'destination':
to_location = self._get_location_by_id(a['arg'])
            # Skip the event if no theme agent could be extracted
            if agent is None:
                continue
            annotations['agents']['coords'] = [theme_coords]
ev = Evidence(source_api='reach', text=sentence,
pmid=self.citation, annotations=annotations,
context=context, epistemics=epistemics)
st = Translocation(agent, from_location, to_location,
evidence=ev)
self.statements.append(st) | python | def get_translocation(self):
"""Extract INDRA Translocation Statements."""
qstr = "$.events.frames[@.type is 'translocation']"
res = self.tree.execute(qstr)
if res is None:
return
for r in res:
epistemics = self._get_epistemics(r)
if epistemics.get('negated'):
continue
sentence = r['verbose-text']
annotations, context = self._get_annot_context(r)
args = r['arguments']
            agent, theme_coords = None, None
            from_location = None
            to_location = None
            for a in args:
                if self._get_arg_type(a) == 'theme':
                    agent, theme_coords = self._get_agent_from_entity(a['arg'])
elif self._get_arg_type(a) == 'source':
from_location = self._get_location_by_id(a['arg'])
elif self._get_arg_type(a) == 'destination':
to_location = self._get_location_by_id(a['arg'])
            # Skip the event if no theme agent could be extracted
            if agent is None:
                continue
            annotations['agents']['coords'] = [theme_coords]
ev = Evidence(source_api='reach', text=sentence,
pmid=self.citation, annotations=annotations,
context=context, epistemics=epistemics)
st = Translocation(agent, from_location, to_location,
evidence=ev)
self.statements.append(st) | [
"def",
"get_translocation",
"(",
"self",
")",
":",
"qstr",
"=",
"\"$.events.frames[@.type is 'translocation']\"",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
"is",
"None",
":",
"return",
"for",
"r",
"in",
"res",
":",
"epistemics",
"=",
"self",
".",
"_get_epistemics",
"(",
"r",
")",
"if",
"epistemics",
".",
"get",
"(",
"'negated'",
")",
":",
"continue",
"sentence",
"=",
"r",
"[",
"'verbose-text'",
"]",
"annotations",
",",
"context",
"=",
"self",
".",
"_get_annot_context",
"(",
"r",
")",
"args",
"=",
"r",
"[",
"'arguments'",
"]",
"from_location",
"=",
"None",
"to_location",
"=",
"None",
"for",
"a",
"in",
"args",
":",
"if",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'theme'",
":",
"agent",
",",
"theme_coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"a",
"[",
"'arg'",
"]",
")",
"if",
"agent",
"is",
"None",
":",
"continue",
"elif",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'source'",
":",
"from_location",
"=",
"self",
".",
"_get_location_by_id",
"(",
"a",
"[",
"'arg'",
"]",
")",
"elif",
"self",
".",
"_get_arg_type",
"(",
"a",
")",
"==",
"'destination'",
":",
"to_location",
"=",
"self",
".",
"_get_location_by_id",
"(",
"a",
"[",
"'arg'",
"]",
")",
"annotations",
"[",
"'agents'",
"]",
"[",
"'coords'",
"]",
"=",
"[",
"theme_coords",
"]",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'reach'",
",",
"text",
"=",
"sentence",
",",
"pmid",
"=",
"self",
".",
"citation",
",",
"annotations",
"=",
"annotations",
",",
"context",
"=",
"context",
",",
"epistemics",
"=",
"epistemics",
")",
"st",
"=",
"Translocation",
"(",
"agent",
",",
"from_location",
",",
"to_location",
",",
"evidence",
"=",
"ev",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract INDRA Translocation Statements. | [
"Extract",
"INDRA",
"Translocation",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L294-L324 | train |
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor._get_mod_conditions | def _get_mod_conditions(self, mod_term):
"""Return a list of ModConditions given a mod term dict."""
site = mod_term.get('site')
if site is not None:
mods = self._parse_site_text(site)
else:
mods = [Site(None, None)]
mcs = []
for mod in mods:
mod_res, mod_pos = mod
mod_type_str = mod_term['type'].lower()
mod_state = agent_mod_map.get(mod_type_str)
if mod_state is not None:
mc = ModCondition(mod_state[0], residue=mod_res,
position=mod_pos, is_modified=mod_state[1])
mcs.append(mc)
else:
logger.warning('Unhandled entity modification type: %s'
% mod_type_str)
return mcs | python | def _get_mod_conditions(self, mod_term):
"""Return a list of ModConditions given a mod term dict."""
site = mod_term.get('site')
if site is not None:
mods = self._parse_site_text(site)
else:
mods = [Site(None, None)]
mcs = []
for mod in mods:
mod_res, mod_pos = mod
mod_type_str = mod_term['type'].lower()
mod_state = agent_mod_map.get(mod_type_str)
if mod_state is not None:
mc = ModCondition(mod_state[0], residue=mod_res,
position=mod_pos, is_modified=mod_state[1])
mcs.append(mc)
else:
logger.warning('Unhandled entity modification type: %s'
% mod_type_str)
return mcs | [
"def",
"_get_mod_conditions",
"(",
"self",
",",
"mod_term",
")",
":",
"site",
"=",
"mod_term",
".",
"get",
"(",
"'site'",
")",
"if",
"site",
"is",
"not",
"None",
":",
"mods",
"=",
"self",
".",
"_parse_site_text",
"(",
"site",
")",
"else",
":",
"mods",
"=",
"[",
"Site",
"(",
"None",
",",
"None",
")",
"]",
"mcs",
"=",
"[",
"]",
"for",
"mod",
"in",
"mods",
":",
"mod_res",
",",
"mod_pos",
"=",
"mod",
"mod_type_str",
"=",
"mod_term",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"mod_state",
"=",
"agent_mod_map",
".",
"get",
"(",
"mod_type_str",
")",
"if",
"mod_state",
"is",
"not",
"None",
":",
"mc",
"=",
"ModCondition",
"(",
"mod_state",
"[",
"0",
"]",
",",
"residue",
"=",
"mod_res",
",",
"position",
"=",
"mod_pos",
",",
"is_modified",
"=",
"mod_state",
"[",
"1",
"]",
")",
"mcs",
".",
"append",
"(",
"mc",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Unhandled entity modification type: %s'",
"%",
"mod_type_str",
")",
"return",
"mcs"
]
| Return a list of ModConditions given a mod term dict. | [
"Return",
"a",
"list",
"of",
"ModConditions",
"given",
"a",
"mod",
"term",
"dict",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L469-L489 | train |
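Each (residue, position) pair parsed above becomes a ModCondition; for comparison, a direct construction with invented site values:

from indra.statements import ModCondition

# Corresponds to a 'phosphorylation' entry in agent_mod_map
mc = ModCondition('phosphorylation', residue='S', position='473',
                  is_modified=True)
print(mc)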
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor._get_entity_coordinates | def _get_entity_coordinates(self, entity_term):
"""Return sentence coordinates for a given entity.
Given an entity term return the associated sentence coordinates as
a tuple of the form (int, int). Returns None if for any reason the
sentence coordinates cannot be found.
"""
# The following lines get the starting coordinate of the sentence
# containing the entity.
sent_id = entity_term.get('sentence')
if sent_id is None:
return None
qstr = "$.sentences.frames[(@.frame_id is \'%s')]" % sent_id
res = self.tree.execute(qstr)
if res is None:
return None
try:
sentence = next(res)
except StopIteration:
return None
sent_start = sentence.get('start-pos')
if sent_start is None:
return None
sent_start = sent_start.get('offset')
if sent_start is None:
return None
# Get the entity coordinate in the entire text and subtract the
# coordinate of the first character in the associated sentence to
# get the sentence coordinate of the entity. Return None if entity
# coordinates are missing
entity_start = entity_term.get('start-pos')
entity_stop = entity_term.get('end-pos')
if entity_start is None or entity_stop is None:
return None
entity_start = entity_start.get('offset')
entity_stop = entity_stop.get('offset')
if entity_start is None or entity_stop is None:
return None
return (entity_start - sent_start, entity_stop - sent_start) | python | def _get_entity_coordinates(self, entity_term):
"""Return sentence coordinates for a given entity.
Given an entity term return the associated sentence coordinates as
a tuple of the form (int, int). Returns None if for any reason the
sentence coordinates cannot be found.
"""
# The following lines get the starting coordinate of the sentence
# containing the entity.
sent_id = entity_term.get('sentence')
if sent_id is None:
return None
qstr = "$.sentences.frames[(@.frame_id is \'%s')]" % sent_id
res = self.tree.execute(qstr)
if res is None:
return None
try:
sentence = next(res)
except StopIteration:
return None
sent_start = sentence.get('start-pos')
if sent_start is None:
return None
sent_start = sent_start.get('offset')
if sent_start is None:
return None
# Get the entity coordinate in the entire text and subtract the
# coordinate of the first character in the associated sentence to
# get the sentence coordinate of the entity. Return None if entity
# coordinates are missing
entity_start = entity_term.get('start-pos')
entity_stop = entity_term.get('end-pos')
if entity_start is None or entity_stop is None:
return None
entity_start = entity_start.get('offset')
entity_stop = entity_stop.get('offset')
if entity_start is None or entity_stop is None:
return None
return (entity_start - sent_start, entity_stop - sent_start) | [
"def",
"_get_entity_coordinates",
"(",
"self",
",",
"entity_term",
")",
":",
"# The following lines get the starting coordinate of the sentence",
"# containing the entity.",
"sent_id",
"=",
"entity_term",
".",
"get",
"(",
"'sentence'",
")",
"if",
"sent_id",
"is",
"None",
":",
"return",
"None",
"qstr",
"=",
"\"$.sentences.frames[(@.frame_id is \\'%s')]\"",
"%",
"sent_id",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
"is",
"None",
":",
"return",
"None",
"try",
":",
"sentence",
"=",
"next",
"(",
"res",
")",
"except",
"StopIteration",
":",
"return",
"None",
"sent_start",
"=",
"sentence",
".",
"get",
"(",
"'start-pos'",
")",
"if",
"sent_start",
"is",
"None",
":",
"return",
"None",
"sent_start",
"=",
"sent_start",
".",
"get",
"(",
"'offset'",
")",
"if",
"sent_start",
"is",
"None",
":",
"return",
"None",
"# Get the entity coordinate in the entire text and subtract the",
"# coordinate of the first character in the associated sentence to",
"# get the sentence coordinate of the entity. Return None if entity",
"# coordinates are missing",
"entity_start",
"=",
"entity_term",
".",
"get",
"(",
"'start-pos'",
")",
"entity_stop",
"=",
"entity_term",
".",
"get",
"(",
"'end-pos'",
")",
"if",
"entity_start",
"is",
"None",
"or",
"entity_stop",
"is",
"None",
":",
"return",
"None",
"entity_start",
"=",
"entity_start",
".",
"get",
"(",
"'offset'",
")",
"entity_stop",
"=",
"entity_stop",
".",
"get",
"(",
"'offset'",
")",
"if",
"entity_start",
"is",
"None",
"or",
"entity_stop",
"is",
"None",
":",
"return",
"None",
"return",
"(",
"entity_start",
"-",
"sent_start",
",",
"entity_stop",
"-",
"sent_start",
")"
]
| Return sentence coordinates for a given entity.
Given an entity term return the associated sentence coordinates as
a tuple of the form (int, int). Returns None if for any reason the
sentence coordinates cannot be found. | [
"Return",
"sentence",
"coordinates",
"for",
"a",
"given",
"entity",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L491-L529 | train |
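The offset arithmetic at the end of the function is a shift from document-absolute to sentence-relative coordinates; with invented offsets:

sent_start = 120                       # document offset of the sentence start
entity_start, entity_stop = 132, 136   # document offsets of the entity
coords = (entity_start - sent_start, entity_stop - sent_start)
print(coords)  # (12, 16): the entity spans characters 12-16 of the sentence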
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor._get_section | def _get_section(self, event):
"""Get the section of the paper that the event is from."""
sentence_id = event.get('sentence')
section = None
if sentence_id:
qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" % sentence_id
res = self.tree.execute(qstr)
if res:
sentence_frame = list(res)[0]
passage_id = sentence_frame.get('passage')
if passage_id:
qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" % \
passage_id
res = self.tree.execute(qstr)
if res:
passage_frame = list(res)[0]
section = passage_frame.get('section-id')
        # If no section could be found, return None right away
        if section is None:
            return None
        # If the section is in the standard list, return as is
if section in self._section_list:
return section
# Next, handle a few special cases that come up in practice
elif section.startswith('fig'):
return 'figure'
elif section.startswith('supm'):
return 'supplementary'
elif section == 'article-title':
return 'title'
elif section in ['subjects|methods', 'methods|subjects']:
return 'methods'
elif section == 'conclusions':
return 'conclusion'
elif section == 'intro':
return 'introduction'
else:
return None | python | def _get_section(self, event):
"""Get the section of the paper that the event is from."""
sentence_id = event.get('sentence')
section = None
if sentence_id:
qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" % sentence_id
res = self.tree.execute(qstr)
if res:
sentence_frame = list(res)[0]
passage_id = sentence_frame.get('passage')
if passage_id:
qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" % \
passage_id
res = self.tree.execute(qstr)
if res:
passage_frame = list(res)[0]
section = passage_frame.get('section-id')
        # If no section could be found, return None right away
        if section is None:
            return None
        # If the section is in the standard list, return as is
if section in self._section_list:
return section
# Next, handle a few special cases that come up in practice
elif section.startswith('fig'):
return 'figure'
elif section.startswith('supm'):
return 'supplementary'
elif section == 'article-title':
return 'title'
elif section in ['subjects|methods', 'methods|subjects']:
return 'methods'
elif section == 'conclusions':
return 'conclusion'
elif section == 'intro':
return 'introduction'
else:
return None | [
"def",
"_get_section",
"(",
"self",
",",
"event",
")",
":",
"sentence_id",
"=",
"event",
".",
"get",
"(",
"'sentence'",
")",
"section",
"=",
"None",
"if",
"sentence_id",
":",
"qstr",
"=",
"\"$.sentences.frames[(@.frame_id is \\'%s\\')]\"",
"%",
"sentence_id",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
":",
"sentence_frame",
"=",
"list",
"(",
"res",
")",
"[",
"0",
"]",
"passage_id",
"=",
"sentence_frame",
".",
"get",
"(",
"'passage'",
")",
"if",
"passage_id",
":",
"qstr",
"=",
"\"$.sentences.frames[(@.frame_id is \\'%s\\')]\"",
"%",
"passage_id",
"res",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"qstr",
")",
"if",
"res",
":",
"passage_frame",
"=",
"list",
"(",
"res",
")",
"[",
"0",
"]",
"section",
"=",
"passage_frame",
".",
"get",
"(",
"'section-id'",
")",
"# If the section is in the standard list, return as is",
"if",
"section",
"in",
"self",
".",
"_section_list",
":",
"return",
"section",
"# Next, handle a few special cases that come up in practice",
"elif",
"section",
".",
"startswith",
"(",
"'fig'",
")",
":",
"return",
"'figure'",
"elif",
"section",
".",
"startswith",
"(",
"'supm'",
")",
":",
"return",
"'supplementary'",
"elif",
"section",
"==",
"'article-title'",
":",
"return",
"'title'",
"elif",
"section",
"in",
"[",
"'subjects|methods'",
",",
"'methods|subjects'",
"]",
":",
"return",
"'methods'",
"elif",
"section",
"==",
"'conclusions'",
":",
"return",
"'conclusion'",
"elif",
"section",
"==",
"'intro'",
":",
"return",
"'introduction'",
"else",
":",
"return",
"None"
]
| Get the section of the paper that the event is from. | [
"Get",
"the",
"section",
"of",
"the",
"paper",
"that",
"the",
"event",
"is",
"from",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L610-L644 | train |
sorgerlab/indra | indra/sources/reach/processor.py | ReachProcessor._get_controller_agent | def _get_controller_agent(self, arg):
"""Return a single or a complex controller agent."""
        controller_agent, coords = None, None
controller = arg.get('arg')
# There is either a single controller here
if controller is not None:
controller_agent, coords = self._get_agent_from_entity(controller)
# Or the controller is a complex
elif arg['argument-type'] == 'complex':
controllers = list(arg.get('args').values())
controller_agent, coords = \
self._get_agent_from_entity(controllers[0])
bound_agents = [self._get_agent_from_entity(c)[0]
for c in controllers[1:]]
bound_conditions = [BoundCondition(ba, True) for
ba in bound_agents]
controller_agent.bound_conditions = bound_conditions
return controller_agent, coords | python | def _get_controller_agent(self, arg):
"""Return a single or a complex controller agent."""
        controller_agent, coords = None, None
controller = arg.get('arg')
# There is either a single controller here
if controller is not None:
controller_agent, coords = self._get_agent_from_entity(controller)
# Or the controller is a complex
elif arg['argument-type'] == 'complex':
controllers = list(arg.get('args').values())
controller_agent, coords = \
self._get_agent_from_entity(controllers[0])
bound_agents = [self._get_agent_from_entity(c)[0]
for c in controllers[1:]]
bound_conditions = [BoundCondition(ba, True) for
ba in bound_agents]
controller_agent.bound_conditions = bound_conditions
return controller_agent, coords | [
"def",
"_get_controller_agent",
"(",
"self",
",",
"arg",
")",
":",
"controller_agent",
"=",
"None",
"controller",
"=",
"arg",
".",
"get",
"(",
"'arg'",
")",
"# There is either a single controller here",
"if",
"controller",
"is",
"not",
"None",
":",
"controller_agent",
",",
"coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"controller",
")",
"# Or the controller is a complex",
"elif",
"arg",
"[",
"'argument-type'",
"]",
"==",
"'complex'",
":",
"controllers",
"=",
"list",
"(",
"arg",
".",
"get",
"(",
"'args'",
")",
".",
"values",
"(",
")",
")",
"controller_agent",
",",
"coords",
"=",
"self",
".",
"_get_agent_from_entity",
"(",
"controllers",
"[",
"0",
"]",
")",
"bound_agents",
"=",
"[",
"self",
".",
"_get_agent_from_entity",
"(",
"c",
")",
"[",
"0",
"]",
"for",
"c",
"in",
"controllers",
"[",
"1",
":",
"]",
"]",
"bound_conditions",
"=",
"[",
"BoundCondition",
"(",
"ba",
",",
"True",
")",
"for",
"ba",
"in",
"bound_agents",
"]",
"controller_agent",
".",
"bound_conditions",
"=",
"bound_conditions",
"return",
"controller_agent",
",",
"coords"
]
| Return a single or a complex controller agent. | [
"Return",
"a",
"single",
"or",
"a",
"complex",
"controller",
"agent",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L646-L663 | train |
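When the controller is a complex, the first member carries the remaining members as bound conditions; the equivalent hand-built agent, with invented names:

from indra.statements import Agent, BoundCondition

controller = Agent('BRAF')
controller.bound_conditions = [BoundCondition(Agent('RAF1'), True)]
print(controller.bound_conditions[0].agent.name)  # RAF1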
sorgerlab/indra | indra/sources/eidos/processor.py | _sanitize | def _sanitize(text):
"""Return sanitized Eidos text field for human readability."""
d = {'-LRB-': '(', '-RRB-': ')'}
return re.sub('|'.join(d.keys()), lambda m: d[m.group(0)], text) | python | def _sanitize(text):
"""Return sanitized Eidos text field for human readability."""
d = {'-LRB-': '(', '-RRB-': ')'}
return re.sub('|'.join(d.keys()), lambda m: d[m.group(0)], text) | [
"def",
"_sanitize",
"(",
"text",
")",
":",
"d",
"=",
"{",
"'-LRB-'",
":",
"'('",
",",
"'-RRB-'",
":",
"')'",
"}",
"return",
"re",
".",
"sub",
"(",
"'|'",
".",
"join",
"(",
"d",
".",
"keys",
"(",
")",
")",
",",
"lambda",
"m",
":",
"d",
"[",
"m",
".",
"group",
"(",
"0",
")",
"]",
",",
"text",
")"
]
| Return sanitized Eidos text field for human readability. | [
"Return",
"sanitized",
"Eidos",
"text",
"field",
"for",
"human",
"readability",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L380-L383 | train |
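A usage sketch, assuming indra is importable; -LRB-/-RRB- are tokenizer artifacts standing for parentheses:

from indra.sources.eidos.processor import _sanitize

print(_sanitize('rainfall -LRB-mm per month-RRB- declined'))
# rainfall (mm per month) declined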
sorgerlab/indra | indra/sources/eidos/processor.py | ref_context_from_geoloc | def ref_context_from_geoloc(geoloc):
"""Return a RefContext object given a geoloc entry."""
text = geoloc.get('text')
geoid = geoloc.get('geoID')
rc = RefContext(name=text, db_refs={'GEOID': geoid})
return rc | python | def ref_context_from_geoloc(geoloc):
"""Return a RefContext object given a geoloc entry."""
text = geoloc.get('text')
geoid = geoloc.get('geoID')
rc = RefContext(name=text, db_refs={'GEOID': geoid})
return rc | [
"def",
"ref_context_from_geoloc",
"(",
"geoloc",
")",
":",
"text",
"=",
"geoloc",
".",
"get",
"(",
"'text'",
")",
"geoid",
"=",
"geoloc",
".",
"get",
"(",
"'geoID'",
")",
"rc",
"=",
"RefContext",
"(",
"name",
"=",
"text",
",",
"db_refs",
"=",
"{",
"'GEOID'",
":",
"geoid",
"}",
")",
"return",
"rc"
]
| Return a RefContext object given a geoloc entry. | [
"Return",
"a",
"RefContext",
"object",
"given",
"a",
"geoloc",
"entry",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L401-L406 | train |
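A round-trip on a hand-written geoloc entry; the geoID value is a placeholder rather than a verified GeoNames ID:

from indra.sources.eidos.processor import ref_context_from_geoloc

rc = ref_context_from_geoloc({'text': 'South Sudan', 'geoID': '7909807'})
print(rc.name, rc.db_refs)  # South Sudan {'GEOID': '7909807'}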
sorgerlab/indra | indra/sources/eidos/processor.py | time_context_from_timex | def time_context_from_timex(timex):
"""Return a TimeContext object given a timex entry."""
time_text = timex.get('text')
constraint = timex['intervals'][0]
start = _get_time_stamp(constraint.get('start'))
end = _get_time_stamp(constraint.get('end'))
duration = constraint['duration']
tc = TimeContext(text=time_text, start=start, end=end,
duration=duration)
return tc | python | def time_context_from_timex(timex):
"""Return a TimeContext object given a timex entry."""
time_text = timex.get('text')
constraint = timex['intervals'][0]
start = _get_time_stamp(constraint.get('start'))
end = _get_time_stamp(constraint.get('end'))
duration = constraint['duration']
tc = TimeContext(text=time_text, start=start, end=end,
duration=duration)
return tc | [
"def",
"time_context_from_timex",
"(",
"timex",
")",
":",
"time_text",
"=",
"timex",
".",
"get",
"(",
"'text'",
")",
"constraint",
"=",
"timex",
"[",
"'intervals'",
"]",
"[",
"0",
"]",
"start",
"=",
"_get_time_stamp",
"(",
"constraint",
".",
"get",
"(",
"'start'",
")",
")",
"end",
"=",
"_get_time_stamp",
"(",
"constraint",
".",
"get",
"(",
"'end'",
")",
")",
"duration",
"=",
"constraint",
"[",
"'duration'",
"]",
"tc",
"=",
"TimeContext",
"(",
"text",
"=",
"time_text",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"duration",
"=",
"duration",
")",
"return",
"tc"
]
| Return a TimeContext object given a timex entry. | [
"Return",
"a",
"TimeContext",
"object",
"given",
"a",
"timex",
"entry",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L409-L418 | train |
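The same object can also be built directly; a sketch with placeholder values, since the exact start/end stamps depend on _get_time_stamp's parsing:

from indra.statements import TimeContext

tc = TimeContext(text='June 2011', start=None, end=None, duration=2592000)
print(tc.text, tc.duration)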
sorgerlab/indra | indra/sources/eidos/processor.py | find_args | def find_args(event, arg_type):
"""Return IDs of all arguments of a given type"""
args = event.get('arguments', {})
obj_tags = [arg for arg in args if arg['type'] == arg_type]
if obj_tags:
return [o['value']['@id'] for o in obj_tags]
else:
return [] | python | def find_args(event, arg_type):
"""Return IDs of all arguments of a given type"""
args = event.get('arguments', {})
obj_tags = [arg for arg in args if arg['type'] == arg_type]
if obj_tags:
return [o['value']['@id'] for o in obj_tags]
else:
return [] | [
"def",
"find_args",
"(",
"event",
",",
"arg_type",
")",
":",
"args",
"=",
"event",
".",
"get",
"(",
"'arguments'",
",",
"{",
"}",
")",
"obj_tags",
"=",
"[",
"arg",
"for",
"arg",
"in",
"args",
"if",
"arg",
"[",
"'type'",
"]",
"==",
"arg_type",
"]",
"if",
"obj_tags",
":",
"return",
"[",
"o",
"[",
"'value'",
"]",
"[",
"'@id'",
"]",
"for",
"o",
"in",
"obj_tags",
"]",
"else",
":",
"return",
"[",
"]"
]
| Return IDs of all arguments of a given type. | [
"Return",
"IDs",
"of",
"all",
"arguments",
"of",
"a",
"given",
"type"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L430-L437 | train |
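find_args on a toy Eidos-style event; the argument structure follows the code above and the IDs are invented:

from indra.sources.eidos.processor import find_args

event = {'arguments': [
    {'type': 'source', 'value': {'@id': 'extraction-1'}},
    {'type': 'destination', 'value': {'@id': 'extraction-2'}},
]}
print(find_args(event, 'source'))       # ['extraction-1']
print(find_args(event, 'destination'))  # ['extraction-2']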
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.extract_causal_relations | def extract_causal_relations(self):
"""Extract causal relations as Statements."""
# Get the extractions that are labeled as directed and causal
relations = [e for e in self.doc.extractions if
'DirectedRelation' in e['labels'] and
'Causal' in e['labels']]
# For each relation, we try to extract an INDRA Statement and
        # save it if it's valid
for relation in relations:
stmt = self.get_causal_relation(relation)
if stmt is not None:
self.statements.append(stmt) | python | def extract_causal_relations(self):
"""Extract causal relations as Statements."""
# Get the extractions that are labeled as directed and causal
relations = [e for e in self.doc.extractions if
'DirectedRelation' in e['labels'] and
'Causal' in e['labels']]
# For each relation, we try to extract an INDRA Statement and
        # save it if it's valid
for relation in relations:
stmt = self.get_causal_relation(relation)
if stmt is not None:
self.statements.append(stmt) | [
"def",
"extract_causal_relations",
"(",
"self",
")",
":",
"# Get the extractions that are labeled as directed and causal",
"relations",
"=",
"[",
"e",
"for",
"e",
"in",
"self",
".",
"doc",
".",
"extractions",
"if",
"'DirectedRelation'",
"in",
"e",
"[",
"'labels'",
"]",
"and",
"'Causal'",
"in",
"e",
"[",
"'labels'",
"]",
"]",
"# For each relation, we try to extract an INDRA Statement and",
"# save it if its valid",
"for",
"relation",
"in",
"relations",
":",
"stmt",
"=",
"self",
".",
"get_causal_relation",
"(",
"relation",
")",
"if",
"stmt",
"is",
"not",
"None",
":",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")"
]
| Extract causal relations as Statements. | [
"Extract",
"causal",
"relations",
"as",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L27-L38 | train |
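The label filter is plain list membership; run on two stub extractions whose labels follow the Eidos convention used above (rule names invented):

extractions = [
    {'labels': ['DirectedRelation', 'Causal'], 'rule': 'rule-1'},
    {'labels': ['UndirectedRelation', 'Correlation'], 'rule': 'rule-2'},
]
relations = [e for e in extractions
             if 'DirectedRelation' in e['labels'] and
             'Causal' in e['labels']]
print([r['rule'] for r in relations])  # ['rule-1']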
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.get_evidence | def get_evidence(self, relation):
"""Return the Evidence object for the INDRA Statment."""
provenance = relation.get('provenance')
# First try looking up the full sentence through provenance
text = None
context = None
if provenance:
sentence_tag = provenance[0].get('sentence')
if sentence_tag and '@id' in sentence_tag:
sentence_id = sentence_tag['@id']
sentence = self.doc.sentences.get(sentence_id)
if sentence is not None:
text = _sanitize(sentence['text'])
# Get temporal constraints if available
timexes = sentence.get('timexes', [])
if timexes:
# We currently handle just one timex per statement
timex = timexes[0]
tc = time_context_from_timex(timex)
context = WorldContext(time=tc)
# Get geolocation if available
geolocs = sentence.get('geolocs', [])
if geolocs:
geoloc = geolocs[0]
rc = ref_context_from_geoloc(geoloc)
if context:
context.geo_location = rc
else:
context = WorldContext(geo_location=rc)
# Here we try to get the title of the document and set it
# in the provenance
doc_id = provenance[0].get('document', {}).get('@id')
if doc_id:
title = self.doc.documents.get(doc_id, {}).get('title')
if title:
provenance[0]['document']['title'] = title
annotations = {'found_by': relation.get('rule'),
'provenance': provenance}
if self.doc.dct is not None:
annotations['document_creation_time'] = self.doc.dct.to_json()
epistemics = {}
negations = self.get_negation(relation)
hedgings = self.get_hedging(relation)
if hedgings:
epistemics['hedgings'] = hedgings
if negations:
# This is the INDRA standard to show negation
epistemics['negated'] = True
# But we can also save the texts associated with the negation
# under annotations, just in case it's needed
annotations['negated_texts'] = negations
# If that fails, we can still get the text of the relation
if text is None:
            text = _sanitize(relation.get('text'))
ev = Evidence(source_api='eidos', text=text, annotations=annotations,
context=context, epistemics=epistemics)
return ev | python | def get_evidence(self, relation):
"""Return the Evidence object for the INDRA Statment."""
provenance = relation.get('provenance')
# First try looking up the full sentence through provenance
text = None
context = None
if provenance:
sentence_tag = provenance[0].get('sentence')
if sentence_tag and '@id' in sentence_tag:
sentence_id = sentence_tag['@id']
sentence = self.doc.sentences.get(sentence_id)
if sentence is not None:
text = _sanitize(sentence['text'])
# Get temporal constraints if available
timexes = sentence.get('timexes', [])
if timexes:
# We currently handle just one timex per statement
timex = timexes[0]
tc = time_context_from_timex(timex)
context = WorldContext(time=tc)
# Get geolocation if available
geolocs = sentence.get('geolocs', [])
if geolocs:
geoloc = geolocs[0]
rc = ref_context_from_geoloc(geoloc)
if context:
context.geo_location = rc
else:
context = WorldContext(geo_location=rc)
# Here we try to get the title of the document and set it
# in the provenance
doc_id = provenance[0].get('document', {}).get('@id')
if doc_id:
title = self.doc.documents.get(doc_id, {}).get('title')
if title:
provenance[0]['document']['title'] = title
annotations = {'found_by': relation.get('rule'),
'provenance': provenance}
if self.doc.dct is not None:
annotations['document_creation_time'] = self.doc.dct.to_json()
epistemics = {}
negations = self.get_negation(relation)
hedgings = self.get_hedging(relation)
if hedgings:
epistemics['hedgings'] = hedgings
if negations:
# This is the INDRA standard to show negation
epistemics['negated'] = True
# But we can also save the texts associated with the negation
# under annotations, just in case it's needed
annotations['negated_texts'] = negations
# If that fails, we can still get the text of the relation
if text is None:
            text = _sanitize(relation.get('text'))
ev = Evidence(source_api='eidos', text=text, annotations=annotations,
context=context, epistemics=epistemics)
return ev | [
"def",
"get_evidence",
"(",
"self",
",",
"relation",
")",
":",
"provenance",
"=",
"relation",
".",
"get",
"(",
"'provenance'",
")",
"# First try looking up the full sentence through provenance",
"text",
"=",
"None",
"context",
"=",
"None",
"if",
"provenance",
":",
"sentence_tag",
"=",
"provenance",
"[",
"0",
"]",
".",
"get",
"(",
"'sentence'",
")",
"if",
"sentence_tag",
"and",
"'@id'",
"in",
"sentence_tag",
":",
"sentence_id",
"=",
"sentence_tag",
"[",
"'@id'",
"]",
"sentence",
"=",
"self",
".",
"doc",
".",
"sentences",
".",
"get",
"(",
"sentence_id",
")",
"if",
"sentence",
"is",
"not",
"None",
":",
"text",
"=",
"_sanitize",
"(",
"sentence",
"[",
"'text'",
"]",
")",
"# Get temporal constraints if available",
"timexes",
"=",
"sentence",
".",
"get",
"(",
"'timexes'",
",",
"[",
"]",
")",
"if",
"timexes",
":",
"# We currently handle just one timex per statement",
"timex",
"=",
"timexes",
"[",
"0",
"]",
"tc",
"=",
"time_context_from_timex",
"(",
"timex",
")",
"context",
"=",
"WorldContext",
"(",
"time",
"=",
"tc",
")",
"# Get geolocation if available",
"geolocs",
"=",
"sentence",
".",
"get",
"(",
"'geolocs'",
",",
"[",
"]",
")",
"if",
"geolocs",
":",
"geoloc",
"=",
"geolocs",
"[",
"0",
"]",
"rc",
"=",
"ref_context_from_geoloc",
"(",
"geoloc",
")",
"if",
"context",
":",
"context",
".",
"geo_location",
"=",
"rc",
"else",
":",
"context",
"=",
"WorldContext",
"(",
"geo_location",
"=",
"rc",
")",
"# Here we try to get the title of the document and set it",
"# in the provenance",
"doc_id",
"=",
"provenance",
"[",
"0",
"]",
".",
"get",
"(",
"'document'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'@id'",
")",
"if",
"doc_id",
":",
"title",
"=",
"self",
".",
"doc",
".",
"documents",
".",
"get",
"(",
"doc_id",
",",
"{",
"}",
")",
".",
"get",
"(",
"'title'",
")",
"if",
"title",
":",
"provenance",
"[",
"0",
"]",
"[",
"'document'",
"]",
"[",
"'title'",
"]",
"=",
"title",
"annotations",
"=",
"{",
"'found_by'",
":",
"relation",
".",
"get",
"(",
"'rule'",
")",
",",
"'provenance'",
":",
"provenance",
"}",
"if",
"self",
".",
"doc",
".",
"dct",
"is",
"not",
"None",
":",
"annotations",
"[",
"'document_creation_time'",
"]",
"=",
"self",
".",
"doc",
".",
"dct",
".",
"to_json",
"(",
")",
"epistemics",
"=",
"{",
"}",
"negations",
"=",
"self",
".",
"get_negation",
"(",
"relation",
")",
"hedgings",
"=",
"self",
".",
"get_hedging",
"(",
"relation",
")",
"if",
"hedgings",
":",
"epistemics",
"[",
"'hedgings'",
"]",
"=",
"hedgings",
"if",
"negations",
":",
"# This is the INDRA standard to show negation",
"epistemics",
"[",
"'negated'",
"]",
"=",
"True",
"# But we can also save the texts associated with the negation",
"# under annotations, just in case it's needed",
"annotations",
"[",
"'negated_texts'",
"]",
"=",
"negations",
"# If that fails, we can still get the text of the relation",
"if",
"text",
"is",
"None",
":",
"text",
"=",
"_sanitize",
"(",
"event",
".",
"get",
"(",
"'text'",
")",
")",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'eidos'",
",",
"text",
"=",
"text",
",",
"annotations",
"=",
"annotations",
",",
"context",
"=",
"context",
",",
"epistemics",
"=",
"epistemics",
")",
"return",
"ev"
]
| Return the Evidence object for the INDRA Statement. | [
"Return",
"the",
"Evidence",
"object",
"for",
"the",
"INDRA",
"Statment",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L119-L181 | train |
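The context assembled above combines a time and a geolocation; built directly here with placeholder values (the GEOID is illustrative, not verified):

from indra.statements import RefContext, TimeContext, WorldContext

context = WorldContext(
    time=TimeContext(text='mid-2011'),
    geo_location=RefContext(name='Ethiopia', db_refs={'GEOID': '337996'}))
print(context.time.text, context.geo_location.name)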
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.get_negation | def get_negation(event):
"""Return negation attached to an event.
Example: "states": [{"@type": "State", "type": "NEGATION",
"text": "n't"}]
"""
states = event.get('states', [])
if not states:
return []
negs = [state for state in states
if state.get('type') == 'NEGATION']
neg_texts = [neg['text'] for neg in negs]
return neg_texts | python | def get_negation(event):
"""Return negation attached to an event.
Example: "states": [{"@type": "State", "type": "NEGATION",
"text": "n't"}]
"""
states = event.get('states', [])
if not states:
return []
negs = [state for state in states
if state.get('type') == 'NEGATION']
neg_texts = [neg['text'] for neg in negs]
return neg_texts | [
"def",
"get_negation",
"(",
"event",
")",
":",
"states",
"=",
"event",
".",
"get",
"(",
"'states'",
",",
"[",
"]",
")",
"if",
"not",
"states",
":",
"return",
"[",
"]",
"negs",
"=",
"[",
"state",
"for",
"state",
"in",
"states",
"if",
"state",
".",
"get",
"(",
"'type'",
")",
"==",
"'NEGATION'",
"]",
"neg_texts",
"=",
"[",
"neg",
"[",
"'text'",
"]",
"for",
"neg",
"in",
"negs",
"]",
"return",
"neg_texts"
]
| Return negation attached to an event.
Example: "states": [{"@type": "State", "type": "NEGATION",
"text": "n't"}] | [
"Return",
"negation",
"attached",
"to",
"an",
"event",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L184-L196 | train |
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.get_hedging | def get_hedging(event):
"""Return hedging markers attached to an event.
Example: "states": [{"@type": "State", "type": "HEDGE",
"text": "could"}
"""
states = event.get('states', [])
if not states:
return []
hedgings = [state for state in states
if state.get('type') == 'HEDGE']
hedging_texts = [hedging['text'] for hedging in hedgings]
return hedging_texts | python | def get_hedging(event):
"""Return hedging markers attached to an event.
Example: "states": [{"@type": "State", "type": "HEDGE",
"text": "could"}
"""
states = event.get('states', [])
if not states:
return []
hedgings = [state for state in states
if state.get('type') == 'HEDGE']
hedging_texts = [hedging['text'] for hedging in hedgings]
return hedging_texts | [
"def",
"get_hedging",
"(",
"event",
")",
":",
"states",
"=",
"event",
".",
"get",
"(",
"'states'",
",",
"[",
"]",
")",
"if",
"not",
"states",
":",
"return",
"[",
"]",
"hedgings",
"=",
"[",
"state",
"for",
"state",
"in",
"states",
"if",
"state",
".",
"get",
"(",
"'type'",
")",
"==",
"'HEDGE'",
"]",
"hedging_texts",
"=",
"[",
"hedging",
"[",
"'text'",
"]",
"for",
"hedging",
"in",
"hedgings",
"]",
"return",
"hedging_texts"
]
| Return hedging markers attached to an event.
Example: "states": [{"@type": "State", "type": "HEDGE",
"text": "could"} | [
"Return",
"hedging",
"markers",
"attached",
"to",
"an",
"event",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L199-L211 | train |
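Both state filters above read the same structure; with the example states from the two docstrings:

from indra.sources.eidos.processor import EidosProcessor

event = {'states': [
    {'@type': 'State', 'type': 'NEGATION', 'text': "n't"},
    {'@type': 'State', 'type': 'HEDGE', 'text': 'could'},
]}
print(EidosProcessor.get_negation(event))  # ["n't"]
print(EidosProcessor.get_hedging(event))   # ['could']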
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.get_groundings | def get_groundings(entity):
"""Return groundings as db_refs for an entity."""
def get_grounding_entries(grounding):
if not grounding:
return None
entries = []
values = grounding.get('values', [])
# Values could still have been a None entry here
if values:
for entry in values:
ont_concept = entry.get('ontologyConcept')
value = entry.get('value')
if ont_concept is None or value is None:
continue
entries.append((ont_concept, value))
return entries
# Save raw text and Eidos scored groundings as db_refs
db_refs = {'TEXT': entity['text']}
groundings = entity.get('groundings')
if not groundings:
return db_refs
for g in groundings:
entries = get_grounding_entries(g)
# Only add these groundings if there are actual values listed
if entries:
key = g['name'].upper()
if key == 'UN':
db_refs[key] = [(s[0].replace(' ', '_'), s[1])
for s in entries]
else:
db_refs[key] = entries
return db_refs | python | def get_groundings(entity):
"""Return groundings as db_refs for an entity."""
def get_grounding_entries(grounding):
if not grounding:
return None
entries = []
values = grounding.get('values', [])
# Values could still have been a None entry here
if values:
for entry in values:
ont_concept = entry.get('ontologyConcept')
value = entry.get('value')
if ont_concept is None or value is None:
continue
entries.append((ont_concept, value))
return entries
# Save raw text and Eidos scored groundings as db_refs
db_refs = {'TEXT': entity['text']}
groundings = entity.get('groundings')
if not groundings:
return db_refs
for g in groundings:
entries = get_grounding_entries(g)
# Only add these groundings if there are actual values listed
if entries:
key = g['name'].upper()
if key == 'UN':
db_refs[key] = [(s[0].replace(' ', '_'), s[1])
for s in entries]
else:
db_refs[key] = entries
return db_refs | [
"def",
"get_groundings",
"(",
"entity",
")",
":",
"def",
"get_grounding_entries",
"(",
"grounding",
")",
":",
"if",
"not",
"grounding",
":",
"return",
"None",
"entries",
"=",
"[",
"]",
"values",
"=",
"grounding",
".",
"get",
"(",
"'values'",
",",
"[",
"]",
")",
"# Values could still have been a None entry here",
"if",
"values",
":",
"for",
"entry",
"in",
"values",
":",
"ont_concept",
"=",
"entry",
".",
"get",
"(",
"'ontologyConcept'",
")",
"value",
"=",
"entry",
".",
"get",
"(",
"'value'",
")",
"if",
"ont_concept",
"is",
"None",
"or",
"value",
"is",
"None",
":",
"continue",
"entries",
".",
"append",
"(",
"(",
"ont_concept",
",",
"value",
")",
")",
"return",
"entries",
"# Save raw text and Eidos scored groundings as db_refs",
"db_refs",
"=",
"{",
"'TEXT'",
":",
"entity",
"[",
"'text'",
"]",
"}",
"groundings",
"=",
"entity",
".",
"get",
"(",
"'groundings'",
")",
"if",
"not",
"groundings",
":",
"return",
"db_refs",
"for",
"g",
"in",
"groundings",
":",
"entries",
"=",
"get_grounding_entries",
"(",
"g",
")",
"# Only add these groundings if there are actual values listed",
"if",
"entries",
":",
"key",
"=",
"g",
"[",
"'name'",
"]",
".",
"upper",
"(",
")",
"if",
"key",
"==",
"'UN'",
":",
"db_refs",
"[",
"key",
"]",
"=",
"[",
"(",
"s",
"[",
"0",
"]",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
",",
"s",
"[",
"1",
"]",
")",
"for",
"s",
"in",
"entries",
"]",
"else",
":",
"db_refs",
"[",
"key",
"]",
"=",
"entries",
"return",
"db_refs"
]
| Return groundings as db_refs for an entity. | [
"Return",
"groundings",
"as",
"db_refs",
"for",
"an",
"entity",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L243-L276 | train |
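A minimal sketch of what get_groundings yields for a hypothetical entity. The field names follow the JSON structure the method reads, but the concept path and score are invented; note that the space-to-underscore normalization is applied only under the UN key.

# Hypothetical Eidos entity fragment.
entity = {
    'text': 'food insecurity',
    'groundings': [
        {'name': 'un',
         'values': [{'ontologyConcept': 'UN/events/human/famine',
                     'value': 0.78}]},
    ]
}

# Inline restatement of the logic in get_groundings:
db_refs = {'TEXT': entity['text']}
for g in entity.get('groundings', []):
    entries = [(v['ontologyConcept'], v['value'])
               for v in (g.get('values', []) or [])
               if v.get('ontologyConcept') is not None
               and v.get('value') is not None]
    if entries:
        key = g['name'].upper()
        if key == 'UN':
            entries = [(c.replace(' ', '_'), s) for c, s in entries]
        db_refs[key] = entries
print(db_refs)
# {'TEXT': 'food insecurity', 'UN': [('UN/events/human/famine', 0.78)]}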
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.get_concept | def get_concept(entity):
"""Return Concept from an Eidos entity."""
# Use the canonical name as the name of the Concept
name = entity['canonicalName']
db_refs = EidosProcessor.get_groundings(entity)
concept = Concept(name, db_refs=db_refs)
return concept | python | def get_concept(entity):
"""Return Concept from an Eidos entity."""
# Use the canonical name as the name of the Concept
name = entity['canonicalName']
db_refs = EidosProcessor.get_groundings(entity)
concept = Concept(name, db_refs=db_refs)
return concept | [
"def",
"get_concept",
"(",
"entity",
")",
":",
"# Use the canonical name as the name of the Concept",
"name",
"=",
"entity",
"[",
"'canonicalName'",
"]",
"db_refs",
"=",
"EidosProcessor",
".",
"get_groundings",
"(",
"entity",
")",
"concept",
"=",
"Concept",
"(",
"name",
",",
"db_refs",
"=",
"db_refs",
")",
"return",
"concept"
]
| Return Concept from an Eidos entity. | [
"Return",
"Concept",
"from",
"an",
"Eidos",
"entity",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L279-L285 | train |
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.time_context_from_ref | def time_context_from_ref(self, timex):
"""Return a time context object given a timex reference entry."""
# If the timex has a value set, it means that it refers to a DCT or
# a TimeExpression e.g. "value": {"@id": "_:DCT_1"} and the parameters
# need to be taken from there
value = timex.get('value')
if value:
# Here we get the TimeContext directly from the stashed DCT
# dictionary
tc = self.doc.timexes.get(value['@id'])
return tc
return None | python | def time_context_from_ref(self, timex):
"""Return a time context object given a timex reference entry."""
# If the timex has a value set, it means that it refers to a DCT or
# a TimeExpression e.g. "value": {"@id": "_:DCT_1"} and the parameters
# need to be taken from there
value = timex.get('value')
if value:
# Here we get the TimeContext directly from the stashed DCT
# dictionary
tc = self.doc.timexes.get(value['@id'])
return tc
return None | [
"def",
"time_context_from_ref",
"(",
"self",
",",
"timex",
")",
":",
"# If the timex has a value set, it means that it refers to a DCT or",
"# a TimeExpression e.g. \"value\": {\"@id\": \"_:DCT_1\"} and the parameters",
"# need to be taken from there",
"value",
"=",
"timex",
".",
"get",
"(",
"'value'",
")",
"if",
"value",
":",
"# Here we get the TimeContext directly from the stashed DCT",
"# dictionary",
"tc",
"=",
"self",
".",
"doc",
".",
"timexes",
".",
"get",
"(",
"value",
"[",
"'@id'",
"]",
")",
"return",
"tc",
"return",
"None"
]
| Return a time context object given a timex reference entry. | [
"Return",
"a",
"time",
"context",
"object",
"given",
"a",
"timex",
"reference",
"entry",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L287-L298 | train |
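The method above only dereferences the '@id' inside the 'value' field against a dictionary of TimeContext objects stashed on the document earlier. A toy sketch of that lookup, with a plain dict standing in for the TimeContext and hypothetical values:

# Stash built while processing the document.
timexes = {'_:DCT_1': {'text': 'January 2018', 'start': '2018-01-01'}}

# Reference entry of the kind the method receives.
timex = {'value': {'@id': '_:DCT_1'}}
value = timex.get('value')
tc = timexes.get(value['@id']) if value else None
print(tc)  # {'text': 'January 2018', 'start': '2018-01-01'}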
sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.geo_context_from_ref | def geo_context_from_ref(self, ref):
"""Return a ref context object given a location reference entry."""
value = ref.get('value')
if value:
# Here we get the RefContext from the stashed geoloc dictionary
rc = self.doc.geolocs.get(value['@id'])
return rc
return None | python | def geo_context_from_ref(self, ref):
"""Return a ref context object given a location reference entry."""
value = ref.get('value')
if value:
# Here we get the RefContext from the stashed geoloc dictionary
rc = self.doc.geolocs.get(value['@id'])
return rc
return None | [
"def",
"geo_context_from_ref",
"(",
"self",
",",
"ref",
")",
":",
"value",
"=",
"ref",
".",
"get",
"(",
"'value'",
")",
"if",
"value",
":",
"# Here we get the RefContext from the stashed geoloc dictionary",
"rc",
"=",
"self",
".",
"doc",
".",
"geolocs",
".",
"get",
"(",
"value",
"[",
"'@id'",
"]",
")",
"return",
"rc",
"return",
"None"
]
| Return a ref context object given a location reference entry. | [
"Return",
"a",
"ref",
"context",
"object",
"given",
"a",
"location",
"reference",
"entry",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L300-L307 | train |
sorgerlab/indra | indra/sources/eidos/processor.py | EidosDocument.time_context_from_dct | def time_context_from_dct(dct):
"""Return a time context object given a DCT entry."""
time_text = dct.get('text')
start = _get_time_stamp(dct.get('start'))
end = _get_time_stamp(dct.get('end'))
duration = dct.get('duration')
tc = TimeContext(text=time_text, start=start, end=end,
duration=duration)
return tc | python | def time_context_from_dct(dct):
"""Return a time context object given a DCT entry."""
time_text = dct.get('text')
start = _get_time_stamp(dct.get('start'))
end = _get_time_stamp(dct.get('end'))
duration = dct.get('duration')
tc = TimeContext(text=time_text, start=start, end=end,
duration=duration)
return tc | [
"def",
"time_context_from_dct",
"(",
"dct",
")",
":",
"time_text",
"=",
"dct",
".",
"get",
"(",
"'text'",
")",
"start",
"=",
"_get_time_stamp",
"(",
"dct",
".",
"get",
"(",
"'start'",
")",
")",
"end",
"=",
"_get_time_stamp",
"(",
"dct",
".",
"get",
"(",
"'end'",
")",
")",
"duration",
"=",
"dct",
".",
"get",
"(",
"'duration'",
")",
"tc",
"=",
"TimeContext",
"(",
"text",
"=",
"time_text",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"duration",
"=",
"duration",
")",
"return",
"tc"
]
| Return a time context object given a DCT entry. | [
"Return",
"a",
"time",
"context",
"object",
"given",
"a",
"DCT",
"entry",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L369-L377 | train |
sorgerlab/indra | indra/statements/util.py | make_hash | def make_hash(s, n_bytes):
"""Make the hash from a matches key."""
raw_h = int(md5(s.encode('utf-8')).hexdigest()[:n_bytes], 16)
# Make it a signed int.
return 16**n_bytes//2 - raw_h | python | def make_hash(s, n_bytes):
"""Make the hash from a matches key."""
raw_h = int(md5(s.encode('utf-8')).hexdigest()[:n_bytes], 16)
# Make it a signed int.
return 16**n_bytes//2 - raw_h | [
"def",
"make_hash",
"(",
"s",
",",
"n_bytes",
")",
":",
"raw_h",
"=",
"int",
"(",
"md5",
"(",
"s",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"n_bytes",
"]",
",",
"16",
")",
"# Make it a signed int.",
"return",
"16",
"**",
"n_bytes",
"//",
"2",
"-",
"raw_h"
]
| Make the hash from a matches key. | [
"Make",
"the",
"hash",
"from",
"a",
"matches",
"key",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/util.py#L12-L16 | train |
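Since make_hash is shown in full, it can be exercised directly. Two details worth noting in a sketch: despite its name, n_bytes counts hex characters of the MD5 digest rather than bytes, and the final subtraction re-centers the value around zero so roughly half of all hashes come out negative. The statement string and the width of 14 below are just example inputs.

from hashlib import md5

def make_hash(s, n_bytes):
    # Truncate the hex digest to n_bytes hex characters, then shift so
    # the result is centered around zero (a "signed" hash).
    raw_h = int(md5(s.encode('utf-8')).hexdigest()[:n_bytes], 16)
    return 16**n_bytes // 2 - raw_h

h1 = make_hash('Phosphorylation(MAP2K1(), MAPK1())', 14)
h2 = make_hash('Phosphorylation(MAP2K1(), MAPK1())', 14)
assert h1 == h2  # deterministic across runs, unlike the builtin hash()
print(h1)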
sorgerlab/indra | indra/sources/tees/parse_tees.py | parse_a1 | def parse_a1(a1_text):
"""Parses an a1 file, the file TEES outputs that lists the entities in
the extracted events.
Parameters
----------
a1_text : str
Text of the TEES a1 output file, specifying the entities
Returns
-------
entities : Dictionary mapping TEES identifiers to TEESEntity objects
describing each entity. Each row of the .a1 file corresponds to one
TEESEntity object.
"""
entities = {}
for line in a1_text.split('\n'):
if len(line) == 0:
continue
tokens = line.rstrip().split('\t')
if len(tokens) != 3:
            raise Exception('Expected three tab-separated tokens per line ' +
'in the a1 file output from TEES.')
identifier = tokens[0]
entity_info = tokens[1]
entity_name = tokens[2]
info_tokens = entity_info.split()
if len(info_tokens) != 3:
            raise Exception('Expected three space-separated tokens in the ' +
                            'second column of the a1 file output from TEES.')
entity_type = info_tokens[0]
first_offset = int(info_tokens[1])
second_offset = int(info_tokens[2])
offsets = (first_offset, second_offset)
entities[identifier] = TEESEntity(
identifier,
entity_type,
entity_name,
offsets)
return entities | python | def parse_a1(a1_text):
"""Parses an a1 file, the file TEES outputs that lists the entities in
the extracted events.
Parameters
----------
a1_text : str
Text of the TEES a1 output file, specifying the entities
Returns
-------
entities : Dictionary mapping TEES identifiers to TEESEntity objects
describing each entity. Each row of the .a1 file corresponds to one
TEESEntity object.
"""
entities = {}
for line in a1_text.split('\n'):
if len(line) == 0:
continue
tokens = line.rstrip().split('\t')
if len(tokens) != 3:
            raise Exception('Expected three tab-separated tokens per line ' +
'in the a1 file output from TEES.')
identifier = tokens[0]
entity_info = tokens[1]
entity_name = tokens[2]
info_tokens = entity_info.split()
if len(info_tokens) != 3:
            raise Exception('Expected three space-separated tokens in the ' +
                            'second column of the a1 file output from TEES.')
entity_type = info_tokens[0]
first_offset = int(info_tokens[1])
second_offset = int(info_tokens[2])
offsets = (first_offset, second_offset)
entities[identifier] = TEESEntity(
identifier,
entity_type,
entity_name,
offsets)
return entities | [
"def",
"parse_a1",
"(",
"a1_text",
")",
":",
"entities",
"=",
"{",
"}",
"for",
"line",
"in",
"a1_text",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"len",
"(",
"line",
")",
"==",
"0",
":",
"continue",
"tokens",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"tokens",
")",
"!=",
"3",
":",
"raise",
"Exception",
"(",
"'Expected three tab-seperated tokens per line '",
"+",
"'in the a1 file output from TEES.'",
")",
"identifier",
"=",
"tokens",
"[",
"0",
"]",
"entity_info",
"=",
"tokens",
"[",
"1",
"]",
"entity_name",
"=",
"tokens",
"[",
"2",
"]",
"info_tokens",
"=",
"entity_info",
".",
"split",
"(",
")",
"if",
"len",
"(",
"info_tokens",
")",
"!=",
"3",
":",
"raise",
"Exception",
"(",
"'Expected three space-seperated tokens in the '",
"+",
"'second column of the a2 file output from TEES.'",
")",
"entity_type",
"=",
"info_tokens",
"[",
"0",
"]",
"first_offset",
"=",
"int",
"(",
"info_tokens",
"[",
"1",
"]",
")",
"second_offset",
"=",
"int",
"(",
"info_tokens",
"[",
"2",
"]",
")",
"offsets",
"=",
"(",
"first_offset",
",",
"second_offset",
")",
"entities",
"[",
"identifier",
"]",
"=",
"TEESEntity",
"(",
"identifier",
",",
"entity_type",
",",
"entity_name",
",",
"offsets",
")",
"return",
"entities"
]
| Parses an a1 file, the file TEES outputs that lists the entities in
the extracted events.
Parameters
----------
a1_text : str
Text of the TEES a1 output file, specifying the entities
Returns
-------
entities : Dictionary mapping TEES identifiers to TEESEntity objects
describing each entity. Each row of the .a1 file corresponds to one
TEESEntity object. | [
"Parses",
"an",
"a1",
"file",
"the",
"file",
"TEES",
"outputs",
"that",
"lists",
"the",
"entities",
"in",
"the",
"extracted",
"events",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/parse_tees.py#L71-L115 | train |
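A self-contained example of the parsing loop on a hypothetical two-line .a1 snippet. TEESEntity is defined elsewhere in parse_tees.py; a namedtuple with the four fields used above is assumed here purely for illustration.

from collections import namedtuple

# Stand-in for the TEESEntity record type (assumed field names).
TEESEntity = namedtuple('TEESEntity',
                        ['identifier', 'entity_type', 'entity_name',
                         'offsets'])

# Hypothetical .a1 content: tab-separated columns, with the middle
# column holding "<type> <start-offset> <end-offset>".
a1_text = 'T1\tProtein 0 4\tBRAF\nT2\tProtein 15 21\tMAP2K1\n'

entities = {}
for line in a1_text.split('\n'):
    if not line:
        continue
    identifier, info, name = line.rstrip().split('\t')
    etype, start, end = info.split()
    entities[identifier] = TEESEntity(identifier, etype, name,
                                      (int(start), int(end)))
print(entities['T1'])
# TEESEntity(identifier='T1', entity_type='Protein',
#            entity_name='BRAF', offsets=(0, 4))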
sorgerlab/indra | indra/sources/tees/parse_tees.py | parse_output | def parse_output(a1_text, a2_text, sentence_segmentations):
"""Parses the output of the TEES reader and returns a networkx graph
with the event information.
Parameters
----------
a1_text : str
Contents of the TEES a1 output, specifying the entities
a2_text : str
    Contents of the TEES a2 output, specifying the event graph
sentence_segmentations : str
    Contents of the TEES sentence segmentation output XML
Returns
-------
events : networkx.DiGraph
networkx graph with the entities, events, and relationship between
extracted by TEES
"""
# Parse the sentence segmentation document
tees_sentences = TEESSentences(sentence_segmentations)
# Parse the a1 (entities) file
entities = parse_a1(a1_text)
# Parse the a2 (events) file
events = parse_a2(a2_text, entities, tees_sentences)
return events | python | def parse_output(a1_text, a2_text, sentence_segmentations):
"""Parses the output of the TEES reader and returns a networkx graph
with the event information.
Parameters
----------
a1_text : str
Contents of the TEES a1 output, specifying the entities
a2_text : str
    Contents of the TEES a2 output, specifying the event graph
sentence_segmentations : str
    Contents of the TEES sentence segmentation output XML
Returns
-------
events : networkx.DiGraph
networkx graph with the entities, events, and relationship between
extracted by TEES
"""
# Parse the sentence segmentation document
tees_sentences = TEESSentences(sentence_segmentations)
# Parse the a1 (entities) file
entities = parse_a1(a1_text)
# Parse the a2 (events) file
events = parse_a2(a2_text, entities, tees_sentences)
return events | [
"def",
"parse_output",
"(",
"a1_text",
",",
"a2_text",
",",
"sentence_segmentations",
")",
":",
"# Parse the sentence segmentation document",
"tees_sentences",
"=",
"TEESSentences",
"(",
"sentence_segmentations",
")",
"# Parse the a1 (entities) file",
"entities",
"=",
"parse_a1",
"(",
"a1_text",
")",
"# Parse the a2 (events) file",
"events",
"=",
"parse_a2",
"(",
"a2_text",
",",
"entities",
",",
"tees_sentences",
")",
"return",
"events"
]
| Parses the output of the TEES reader and returns a networkx graph
with the event information.
Parameters
----------
a1_text : str
Contents of the TEES a1 output, specifying the entities
a2_text : str
    Contents of the TEES a2 output, specifying the event graph
sentence_segmentations : str
    Contents of the TEES sentence segmentation output XML
Returns
-------
events : networkx.DiGraph
networkx graph with the entities, events, and relationship between
extracted by TEES | [
"Parses",
"the",
"output",
"of",
"the",
"TEES",
"reader",
"and",
"returns",
"a",
"networkx",
"graph",
"with",
"the",
"event",
"information",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/parse_tees.py#L272-L301 | train |
sorgerlab/indra | indra/sources/tees/parse_tees.py | tees_parse_networkx_to_dot | def tees_parse_networkx_to_dot(G, output_file, subgraph_nodes):
"""Converts TEES extractions stored in a networkx graph into a graphviz
.dot file.
Parameters
----------
G : networkx.DiGraph
Graph with TEES extractions returned by run_and_parse_tees
output_file : str
Output file to which to write .dot file
subgraph_nodes : list[str]
    Only convert the connected graph that includes these nodes
"""
with codecs.open(output_file, 'w', encoding='utf-8') as f:
f.write('digraph teesParse {\n')
mentioned_nodes = set()
for from_node in subgraph_nodes:
for edge in G.edges(from_node):
to_node = edge[1]
mentioned_nodes.add(from_node)
mentioned_nodes.add(to_node)
relation = G.edges[from_node, to_node]['relation']
f.write('%s -> %s [ label = "%s" ];\n' % (from_node, to_node,
relation))
for node in mentioned_nodes:
is_event = G.node[node]['is_event']
if is_event:
node_type = G.node[node]['type']
negated = G.node[node]['negated']
speculation = G.node[node]['speculation']
# Add a tag to the label if the event is negated or speculation
if negated and speculation:
tag = ' {NS}'
elif negated:
tag = ' {N}'
elif speculation:
tag = ' {S}'
else:
tag = ''
node_label = node_type + tag
else:
node_label = G.node[node]['text']
f.write('%s [label="%s"];\n' % (node, node_label))
f.write('}\n') | python | def tees_parse_networkx_to_dot(G, output_file, subgraph_nodes):
"""Converts TEES extractions stored in a networkx graph into a graphviz
.dot file.
Parameters
----------
G : networkx.DiGraph
Graph with TEES extractions returned by run_and_parse_tees
output_file : str
Output file to which to write .dot file
subgraph_nodes : list[str]
    Only convert the connected graph that includes these nodes
"""
with codecs.open(output_file, 'w', encoding='utf-8') as f:
f.write('digraph teesParse {\n')
mentioned_nodes = set()
for from_node in subgraph_nodes:
for edge in G.edges(from_node):
to_node = edge[1]
mentioned_nodes.add(from_node)
mentioned_nodes.add(to_node)
relation = G.edges[from_node, to_node]['relation']
f.write('%s -> %s [ label = "%s" ];\n' % (from_node, to_node,
relation))
for node in mentioned_nodes:
is_event = G.node[node]['is_event']
if is_event:
node_type = G.node[node]['type']
negated = G.node[node]['negated']
speculation = G.node[node]['speculation']
# Add a tag to the label if the event is negated or speculation
if negated and speculation:
tag = ' {NS}'
elif negated:
tag = ' {N}'
elif speculation:
tag = ' {S}'
else:
tag = ''
node_label = node_type + tag
else:
node_label = G.node[node]['text']
f.write('%s [label="%s"];\n' % (node, node_label))
f.write('}\n') | [
"def",
"tees_parse_networkx_to_dot",
"(",
"G",
",",
"output_file",
",",
"subgraph_nodes",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"output_file",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'digraph teesParse {\\n'",
")",
"mentioned_nodes",
"=",
"set",
"(",
")",
"for",
"from_node",
"in",
"subgraph_nodes",
":",
"for",
"edge",
"in",
"G",
".",
"edges",
"(",
"from_node",
")",
":",
"to_node",
"=",
"edge",
"[",
"1",
"]",
"mentioned_nodes",
".",
"add",
"(",
"from_node",
")",
"mentioned_nodes",
".",
"add",
"(",
"to_node",
")",
"relation",
"=",
"G",
".",
"edges",
"[",
"from_node",
",",
"to_node",
"]",
"[",
"'relation'",
"]",
"f",
".",
"write",
"(",
"'%s -> %s [ label = \"%s\" ];\\n'",
"%",
"(",
"from_node",
",",
"to_node",
",",
"relation",
")",
")",
"for",
"node",
"in",
"mentioned_nodes",
":",
"is_event",
"=",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'is_event'",
"]",
"if",
"is_event",
":",
"node_type",
"=",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'type'",
"]",
"negated",
"=",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'negated'",
"]",
"speculation",
"=",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'speculation'",
"]",
"# Add a tag to the label if the event is negated or speculation",
"if",
"negated",
"and",
"speculation",
":",
"tag",
"=",
"' {NS}'",
"elif",
"negated",
":",
"tag",
"=",
"' {N}'",
"elif",
"speculation",
":",
"tag",
"=",
"' {S}'",
"else",
":",
"tag",
"=",
"''",
"node_label",
"=",
"node_type",
"+",
"tag",
"else",
":",
"node_label",
"=",
"G",
".",
"node",
"[",
"node",
"]",
"[",
"'text'",
"]",
"f",
".",
"write",
"(",
"'%s [label=\"%s\"];\\n'",
"%",
"(",
"node",
",",
"node_label",
")",
")",
"f",
".",
"write",
"(",
"'}\\n'",
")"
]
| Converts TEES extractions stored in a networkx graph into a graphviz
.dot file.
Parameters
----------
G : networkx.DiGraph
Graph with TEES extractions returned by run_and_parse_tees
output_file : str
Output file to which to write .dot file
subgraph_nodes : list[str]
    Only convert the connected graph that includes these nodes
"Converts",
"TEES",
"extractions",
"stored",
"in",
"a",
"networkx",
"graph",
"into",
"a",
"graphviz",
".",
"dot",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/parse_tees.py#L303-L354 | train |
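A sketch of the node and edge attributes the writer expects, on a tiny hand-built graph. One caveat: the function accesses G.node[...], which only exists in networkx 2.3 and earlier; on current networkx the equivalent accessor is G.nodes[...].

import networkx as nx

G = nx.MultiDiGraph()
# Event nodes need is_event/type/negated/speculation; entity nodes
# need is_event/text; every edge needs a relation label.
G.add_node('E1', is_event=True, type='Phosphorylation',
           negated=False, speculation=True)
G.add_node('T1', is_event=False, text='BRAF')
G.add_edge('E1', 'T1', relation='Theme')

# tees_parse_networkx_to_dot(G, 'tees_parse.dot', ['E1']) would then
# render node E1 with the label "Phosphorylation {S}".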
sorgerlab/indra | indra/sources/cwms/processor.py | CWMSProcessor._get_event | def _get_event(self, event, find_str):
"""Get a concept referred from the event by the given string."""
# Get the term with the given element id
element = event.find(find_str)
if element is None:
return None
element_id = element.attrib.get('id')
element_term = self.tree.find("*[@id='%s']" % element_id)
if element_term is None:
return None
time, location = self._extract_time_loc(element_term)
# Now see if there is a modifier like assoc-with connected
# to the main concept
assoc_with = self._get_assoc_with(element_term)
# Get the element's text and use it to construct a Concept
element_text_element = element_term.find('text')
if element_text_element is None:
return None
element_text = element_text_element.text
element_db_refs = {'TEXT': element_text}
element_name = sanitize_name(element_text)
element_type_element = element_term.find('type')
if element_type_element is not None:
element_db_refs['CWMS'] = element_type_element.text
# If there's an assoc-with, we tack it on as extra grounding
if assoc_with is not None:
element_db_refs['CWMS'] += ('|%s' % assoc_with)
concept = Concept(element_name, db_refs=element_db_refs)
if time or location:
context = WorldContext(time=time, geo_location=location)
else:
context = None
event_obj = Event(concept, context=context)
return event_obj | python | def _get_event(self, event, find_str):
"""Get a concept referred from the event by the given string."""
# Get the term with the given element id
element = event.find(find_str)
if element is None:
return None
element_id = element.attrib.get('id')
element_term = self.tree.find("*[@id='%s']" % element_id)
if element_term is None:
return None
time, location = self._extract_time_loc(element_term)
# Now see if there is a modifier like assoc-with connected
# to the main concept
assoc_with = self._get_assoc_with(element_term)
# Get the element's text and use it to construct a Concept
element_text_element = element_term.find('text')
if element_text_element is None:
return None
element_text = element_text_element.text
element_db_refs = {'TEXT': element_text}
element_name = sanitize_name(element_text)
element_type_element = element_term.find('type')
if element_type_element is not None:
element_db_refs['CWMS'] = element_type_element.text
# If there's an assoc-with, we tack it on as extra grounding
if assoc_with is not None:
element_db_refs['CWMS'] += ('|%s' % assoc_with)
concept = Concept(element_name, db_refs=element_db_refs)
if time or location:
context = WorldContext(time=time, geo_location=location)
else:
context = None
event_obj = Event(concept, context=context)
return event_obj | [
"def",
"_get_event",
"(",
"self",
",",
"event",
",",
"find_str",
")",
":",
"# Get the term with the given element id",
"element",
"=",
"event",
".",
"find",
"(",
"find_str",
")",
"if",
"element",
"is",
"None",
":",
"return",
"None",
"element_id",
"=",
"element",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"element_term",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"\"*[@id='%s']\"",
"%",
"element_id",
")",
"if",
"element_term",
"is",
"None",
":",
"return",
"None",
"time",
",",
"location",
"=",
"self",
".",
"_extract_time_loc",
"(",
"element_term",
")",
"# Now see if there is a modifier like assoc-with connected",
"# to the main concept",
"assoc_with",
"=",
"self",
".",
"_get_assoc_with",
"(",
"element_term",
")",
"# Get the element's text and use it to construct a Concept",
"element_text_element",
"=",
"element_term",
".",
"find",
"(",
"'text'",
")",
"if",
"element_text_element",
"is",
"None",
":",
"return",
"None",
"element_text",
"=",
"element_text_element",
".",
"text",
"element_db_refs",
"=",
"{",
"'TEXT'",
":",
"element_text",
"}",
"element_name",
"=",
"sanitize_name",
"(",
"element_text",
")",
"element_type_element",
"=",
"element_term",
".",
"find",
"(",
"'type'",
")",
"if",
"element_type_element",
"is",
"not",
"None",
":",
"element_db_refs",
"[",
"'CWMS'",
"]",
"=",
"element_type_element",
".",
"text",
"# If there's an assoc-with, we tack it on as extra grounding",
"if",
"assoc_with",
"is",
"not",
"None",
":",
"element_db_refs",
"[",
"'CWMS'",
"]",
"+=",
"(",
"'|%s'",
"%",
"assoc_with",
")",
"concept",
"=",
"Concept",
"(",
"element_name",
",",
"db_refs",
"=",
"element_db_refs",
")",
"if",
"time",
"or",
"location",
":",
"context",
"=",
"WorldContext",
"(",
"time",
"=",
"time",
",",
"geo_location",
"=",
"location",
")",
"else",
":",
"context",
"=",
"None",
"event_obj",
"=",
"Event",
"(",
"concept",
",",
"context",
"=",
"context",
")",
"return",
"event_obj"
]
| Get a concept referred from the event by the given string. | [
"Get",
"a",
"concept",
"referred",
"from",
"the",
"event",
"by",
"the",
"given",
"string",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/cwms/processor.py#L115-L152 | train |
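The central move in _get_event is resolving an id reference with an XPath-style attribute query over the extraction tree. A self-contained sketch of that step on a simplified, hypothetical XML fragment:

import xml.etree.ElementTree as ET

xml = '''
<ekb>
  <EVENT id="V1"><arg1 id="T1"/></EVENT>
  <TERM id="T1"><text>rainfall</text><type>ONT::RAIN</type></TERM>
</ekb>
'''
tree = ET.fromstring(xml)

# Follow the arg1 reference from the event to its term, mirroring
# self.tree.find("*[@id='%s']" % element_id) above.
arg = tree.find('EVENT').find('arg1')
term = tree.find("*[@id='%s']" % arg.attrib['id'])
print(term.find('text').text, term.find('type').text)
# rainfall ONT::RAIN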
sorgerlab/indra | indra/assemblers/cag/assembler.py | CAGAssembler.make_model | def make_model(self, grounding_ontology='UN', grounding_threshold=None):
"""Return a networkx MultiDiGraph representing a causal analysis graph.
Parameters
----------
grounding_ontology : Optional[str]
The ontology from which the grounding should be taken
(e.g. UN, FAO)
grounding_threshold : Optional[float]
Minimum threshold score for Eidos grounding.
Returns
-------
nx.MultiDiGraph
The assembled CAG.
"""
if grounding_threshold is not None:
self.grounding_threshold = grounding_threshold
self.grounding_ontology = grounding_ontology
# Filter to Influence Statements which are currently supported
statements = [stmt for stmt in self.statements if
isinstance(stmt, Influence)]
# Initialize graph
self.CAG = nx.MultiDiGraph()
# Add nodes and edges to the graph
for s in statements:
# Get standardized name of subject and object
# subj, obj = (self._node_name(s.subj), self._node_name(s.obj))
# See if both subject and object have polarities given
has_both_polarity = (s.subj.delta['polarity'] is not None and
s.obj.delta['polarity'] is not None)
# Add the nodes to the graph
for node, delta in zip((s.subj.concept, s.obj.concept),
(s.subj.delta, s.obj.delta)):
self.CAG.add_node(self._node_name(node),
simulable=has_both_polarity,
mods=delta['adjectives'])
# Edge is solid if both nodes have polarity given
linestyle = 'solid' if has_both_polarity else 'dotted'
if has_both_polarity:
same_polarity = (s.subj.delta['polarity'] ==
s.obj.delta['polarity'])
if same_polarity:
target_arrow_shape, linecolor = ('circle', 'green')
else:
target_arrow_shape, linecolor = ('tee', 'maroon')
else:
target_arrow_shape, linecolor = ('triangle', 'maroon')
# Add edge to the graph with metadata from statement
provenance = []
if s.evidence:
provenance = s.evidence[0].annotations.get('provenance', [])
if provenance:
provenance[0]['text'] = s.evidence[0].text
self.CAG.add_edge(
self._node_name(s.subj.concept),
self._node_name(s.obj.concept),
subj_polarity=s.subj.delta['polarity'],
subj_adjectives=s.subj.delta['adjectives'],
obj_polarity=s.obj.delta['polarity'],
obj_adjectives=s.obj.delta['adjectives'],
linestyle=linestyle,
linecolor=linecolor,
targetArrowShape=target_arrow_shape,
provenance=provenance,
)
return self.CAG | python | def make_model(self, grounding_ontology='UN', grounding_threshold=None):
"""Return a networkx MultiDiGraph representing a causal analysis graph.
Parameters
----------
grounding_ontology : Optional[str]
The ontology from which the grounding should be taken
(e.g. UN, FAO)
grounding_threshold : Optional[float]
Minimum threshold score for Eidos grounding.
Returns
-------
nx.MultiDiGraph
The assembled CAG.
"""
if grounding_threshold is not None:
self.grounding_threshold = grounding_threshold
self.grounding_ontology = grounding_ontology
# Filter to Influence Statements which are currently supported
statements = [stmt for stmt in self.statements if
isinstance(stmt, Influence)]
# Initialize graph
self.CAG = nx.MultiDiGraph()
# Add nodes and edges to the graph
for s in statements:
# Get standardized name of subject and object
# subj, obj = (self._node_name(s.subj), self._node_name(s.obj))
# See if both subject and object have polarities given
has_both_polarity = (s.subj.delta['polarity'] is not None and
s.obj.delta['polarity'] is not None)
# Add the nodes to the graph
for node, delta in zip((s.subj.concept, s.obj.concept),
(s.subj.delta, s.obj.delta)):
self.CAG.add_node(self._node_name(node),
simulable=has_both_polarity,
mods=delta['adjectives'])
# Edge is solid if both nodes have polarity given
linestyle = 'solid' if has_both_polarity else 'dotted'
if has_both_polarity:
same_polarity = (s.subj.delta['polarity'] ==
s.obj.delta['polarity'])
if same_polarity:
target_arrow_shape, linecolor = ('circle', 'green')
else:
target_arrow_shape, linecolor = ('tee', 'maroon')
else:
target_arrow_shape, linecolor = ('triangle', 'maroon')
# Add edge to the graph with metadata from statement
provenance = []
if s.evidence:
provenance = s.evidence[0].annotations.get('provenance', [])
if provenance:
provenance[0]['text'] = s.evidence[0].text
self.CAG.add_edge(
self._node_name(s.subj.concept),
self._node_name(s.obj.concept),
subj_polarity=s.subj.delta['polarity'],
subj_adjectives=s.subj.delta['adjectives'],
obj_polarity=s.obj.delta['polarity'],
obj_adjectives=s.obj.delta['adjectives'],
linestyle=linestyle,
linecolor=linecolor,
targetArrowShape=target_arrow_shape,
provenance=provenance,
)
return self.CAG | [
"def",
"make_model",
"(",
"self",
",",
"grounding_ontology",
"=",
"'UN'",
",",
"grounding_threshold",
"=",
"None",
")",
":",
"if",
"grounding_threshold",
"is",
"not",
"None",
":",
"self",
".",
"grounding_threshold",
"=",
"grounding_threshold",
"self",
".",
"grounding_ontology",
"=",
"grounding_ontology",
"# Filter to Influence Statements which are currently supported",
"statements",
"=",
"[",
"stmt",
"for",
"stmt",
"in",
"self",
".",
"statements",
"if",
"isinstance",
"(",
"stmt",
",",
"Influence",
")",
"]",
"# Initialize graph",
"self",
".",
"CAG",
"=",
"nx",
".",
"MultiDiGraph",
"(",
")",
"# Add nodes and edges to the graph",
"for",
"s",
"in",
"statements",
":",
"# Get standardized name of subject and object",
"# subj, obj = (self._node_name(s.subj), self._node_name(s.obj))",
"# See if both subject and object have polarities given",
"has_both_polarity",
"=",
"(",
"s",
".",
"subj",
".",
"delta",
"[",
"'polarity'",
"]",
"is",
"not",
"None",
"and",
"s",
".",
"obj",
".",
"delta",
"[",
"'polarity'",
"]",
"is",
"not",
"None",
")",
"# Add the nodes to the graph",
"for",
"node",
",",
"delta",
"in",
"zip",
"(",
"(",
"s",
".",
"subj",
".",
"concept",
",",
"s",
".",
"obj",
".",
"concept",
")",
",",
"(",
"s",
".",
"subj",
".",
"delta",
",",
"s",
".",
"obj",
".",
"delta",
")",
")",
":",
"self",
".",
"CAG",
".",
"add_node",
"(",
"self",
".",
"_node_name",
"(",
"node",
")",
",",
"simulable",
"=",
"has_both_polarity",
",",
"mods",
"=",
"delta",
"[",
"'adjectives'",
"]",
")",
"# Edge is solid if both nodes have polarity given",
"linestyle",
"=",
"'solid'",
"if",
"has_both_polarity",
"else",
"'dotted'",
"if",
"has_both_polarity",
":",
"same_polarity",
"=",
"(",
"s",
".",
"subj",
".",
"delta",
"[",
"'polarity'",
"]",
"==",
"s",
".",
"obj",
".",
"delta",
"[",
"'polarity'",
"]",
")",
"if",
"same_polarity",
":",
"target_arrow_shape",
",",
"linecolor",
"=",
"(",
"'circle'",
",",
"'green'",
")",
"else",
":",
"target_arrow_shape",
",",
"linecolor",
"=",
"(",
"'tee'",
",",
"'maroon'",
")",
"else",
":",
"target_arrow_shape",
",",
"linecolor",
"=",
"(",
"'triangle'",
",",
"'maroon'",
")",
"# Add edge to the graph with metadata from statement",
"provenance",
"=",
"[",
"]",
"if",
"s",
".",
"evidence",
":",
"provenance",
"=",
"s",
".",
"evidence",
"[",
"0",
"]",
".",
"annotations",
".",
"get",
"(",
"'provenance'",
",",
"[",
"]",
")",
"if",
"provenance",
":",
"provenance",
"[",
"0",
"]",
"[",
"'text'",
"]",
"=",
"s",
".",
"evidence",
"[",
"0",
"]",
".",
"text",
"self",
".",
"CAG",
".",
"add_edge",
"(",
"self",
".",
"_node_name",
"(",
"s",
".",
"subj",
".",
"concept",
")",
",",
"self",
".",
"_node_name",
"(",
"s",
".",
"obj",
".",
"concept",
")",
",",
"subj_polarity",
"=",
"s",
".",
"subj",
".",
"delta",
"[",
"'polarity'",
"]",
",",
"subj_adjectives",
"=",
"s",
".",
"subj",
".",
"delta",
"[",
"'adjectives'",
"]",
",",
"obj_polarity",
"=",
"s",
".",
"obj",
".",
"delta",
"[",
"'polarity'",
"]",
",",
"obj_adjectives",
"=",
"s",
".",
"obj",
".",
"delta",
"[",
"'adjectives'",
"]",
",",
"linestyle",
"=",
"linestyle",
",",
"linecolor",
"=",
"linecolor",
",",
"targetArrowShape",
"=",
"target_arrow_shape",
",",
"provenance",
"=",
"provenance",
",",
")",
"return",
"self",
".",
"CAG"
]
| Return a networkx MultiDiGraph representing a causal analysis graph.
Parameters
----------
grounding_ontology : Optional[str]
The ontology from which the grounding should be taken
(e.g. UN, FAO)
grounding_threshold : Optional[float]
Minimum threshold score for Eidos grounding.
Returns
-------
nx.MultiDiGraph
The assembled CAG. | [
"Return",
"a",
"networkx",
"MultiDiGraph",
"representing",
"a",
"causal",
"analysis",
"graph",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cag/assembler.py#L49-L124 | train |
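The edge styling rules in make_model can be restated compactly. The helper below is only an illustrative summary, not INDRA code: matching polarities give a solid green edge with a circle arrowhead, opposing polarities a solid maroon tee, and a missing polarity a dotted maroon triangle.

def edge_style(subj_polarity, obj_polarity):
    # Returns (linestyle, target arrow shape, line color).
    has_both = subj_polarity is not None and obj_polarity is not None
    if not has_both:
        return 'dotted', 'triangle', 'maroon'
    if subj_polarity == obj_polarity:
        return 'solid', 'circle', 'green'
    return 'solid', 'tee', 'maroon'

print(edge_style(1, 1))     # ('solid', 'circle', 'green')
print(edge_style(1, -1))    # ('solid', 'tee', 'maroon')
print(edge_style(None, 1))  # ('dotted', 'triangle', 'maroon')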
sorgerlab/indra | indra/assemblers/cag/assembler.py | CAGAssembler.export_to_cytoscapejs | def export_to_cytoscapejs(self):
"""Return CAG in format readable by CytoscapeJS.
Return
------
dict
A JSON-like dict representing the graph for use with
CytoscapeJS.
"""
def _create_edge_data_dict(e):
"""Return a dict from a MultiDiGraph edge for CytoscapeJS export."""
# A hack to get rid of the redundant 'Provenance' label.
if e[3].get('provenance'):
tooltip = e[3]['provenance'][0]
if tooltip.get('@type'):
del tooltip['@type']
else:
tooltip = None
edge_data_dict = {
'id' : e[0]+'_'+e[1],
'source' : e[0],
'target' : e[1],
'linestyle' : e[3]["linestyle"],
'linecolor' : e[3]["linecolor"],
'targetArrowShape' : e[3]["targetArrowShape"],
'subj_adjectives' : e[3]["subj_adjectives"],
'subj_polarity' : e[3]["subj_polarity"],
'obj_adjectives' : e[3]["obj_adjectives"],
'obj_polarity' : e[3]["obj_polarity"],
'tooltip' : tooltip,
'simulable' : False if (
e[3]['obj_polarity'] is None or
e[3]['subj_polarity'] is None) else True,
}
return edge_data_dict
return {
'nodes': [{'data': {
'id': n[0],
'simulable': n[1]['simulable'],
'tooltip': 'Modifiers: '+json.dumps(n[1]['mods'])}
} for n in self.CAG.nodes(data=True)],
'edges': [{'data': _create_edge_data_dict(e)}
for e in self.CAG.edges(data=True, keys=True)]
} | python | def export_to_cytoscapejs(self):
"""Return CAG in format readable by CytoscapeJS.
Return
------
dict
A JSON-like dict representing the graph for use with
CytoscapeJS.
"""
def _create_edge_data_dict(e):
"""Return a dict from a MultiDiGraph edge for CytoscapeJS export."""
# A hack to get rid of the redundant 'Provenance' label.
if e[3].get('provenance'):
tooltip = e[3]['provenance'][0]
if tooltip.get('@type'):
del tooltip['@type']
else:
tooltip = None
edge_data_dict = {
'id' : e[0]+'_'+e[1],
'source' : e[0],
'target' : e[1],
'linestyle' : e[3]["linestyle"],
'linecolor' : e[3]["linecolor"],
'targetArrowShape' : e[3]["targetArrowShape"],
'subj_adjectives' : e[3]["subj_adjectives"],
'subj_polarity' : e[3]["subj_polarity"],
'obj_adjectives' : e[3]["obj_adjectives"],
'obj_polarity' : e[3]["obj_polarity"],
'tooltip' : tooltip,
'simulable' : False if (
e[3]['obj_polarity'] is None or
e[3]['subj_polarity'] is None) else True,
}
return edge_data_dict
return {
'nodes': [{'data': {
'id': n[0],
'simulable': n[1]['simulable'],
'tooltip': 'Modifiers: '+json.dumps(n[1]['mods'])}
} for n in self.CAG.nodes(data=True)],
'edges': [{'data': _create_edge_data_dict(e)}
for e in self.CAG.edges(data=True, keys=True)]
} | [
"def",
"export_to_cytoscapejs",
"(",
"self",
")",
":",
"def",
"_create_edge_data_dict",
"(",
"e",
")",
":",
"\"\"\"Return a dict from a MultiDiGraph edge for CytoscapeJS export.\"\"\"",
"# A hack to get rid of the redundant 'Provenance' label.",
"if",
"e",
"[",
"3",
"]",
".",
"get",
"(",
"'provenance'",
")",
":",
"tooltip",
"=",
"e",
"[",
"3",
"]",
"[",
"'provenance'",
"]",
"[",
"0",
"]",
"if",
"tooltip",
".",
"get",
"(",
"'@type'",
")",
":",
"del",
"tooltip",
"[",
"'@type'",
"]",
"else",
":",
"tooltip",
"=",
"None",
"edge_data_dict",
"=",
"{",
"'id'",
":",
"e",
"[",
"0",
"]",
"+",
"'_'",
"+",
"e",
"[",
"1",
"]",
",",
"'source'",
":",
"e",
"[",
"0",
"]",
",",
"'target'",
":",
"e",
"[",
"1",
"]",
",",
"'linestyle'",
":",
"e",
"[",
"3",
"]",
"[",
"\"linestyle\"",
"]",
",",
"'linecolor'",
":",
"e",
"[",
"3",
"]",
"[",
"\"linecolor\"",
"]",
",",
"'targetArrowShape'",
":",
"e",
"[",
"3",
"]",
"[",
"\"targetArrowShape\"",
"]",
",",
"'subj_adjectives'",
":",
"e",
"[",
"3",
"]",
"[",
"\"subj_adjectives\"",
"]",
",",
"'subj_polarity'",
":",
"e",
"[",
"3",
"]",
"[",
"\"subj_polarity\"",
"]",
",",
"'obj_adjectives'",
":",
"e",
"[",
"3",
"]",
"[",
"\"obj_adjectives\"",
"]",
",",
"'obj_polarity'",
":",
"e",
"[",
"3",
"]",
"[",
"\"obj_polarity\"",
"]",
",",
"'tooltip'",
":",
"tooltip",
",",
"'simulable'",
":",
"False",
"if",
"(",
"e",
"[",
"3",
"]",
"[",
"'obj_polarity'",
"]",
"is",
"None",
"or",
"e",
"[",
"3",
"]",
"[",
"'subj_polarity'",
"]",
"is",
"None",
")",
"else",
"True",
",",
"}",
"return",
"edge_data_dict",
"return",
"{",
"'nodes'",
":",
"[",
"{",
"'data'",
":",
"{",
"'id'",
":",
"n",
"[",
"0",
"]",
",",
"'simulable'",
":",
"n",
"[",
"1",
"]",
"[",
"'simulable'",
"]",
",",
"'tooltip'",
":",
"'Modifiers: '",
"+",
"json",
".",
"dumps",
"(",
"n",
"[",
"1",
"]",
"[",
"'mods'",
"]",
")",
"}",
"}",
"for",
"n",
"in",
"self",
".",
"CAG",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
"]",
",",
"'edges'",
":",
"[",
"{",
"'data'",
":",
"_create_edge_data_dict",
"(",
"e",
")",
"}",
"for",
"e",
"in",
"self",
".",
"CAG",
".",
"edges",
"(",
"data",
"=",
"True",
",",
"keys",
"=",
"True",
")",
"]",
"}"
]
| Return CAG in a format readable by CytoscapeJS.
Return
------
dict
A JSON-like dict representing the graph for use with
CytoscapeJS. | [
"Return",
"CAG",
"in",
"format",
"readable",
"by",
"CytoscapeJS",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cag/assembler.py#L203-L248 | train |
sorgerlab/indra | indra/assemblers/cag/assembler.py | CAGAssembler.generate_jupyter_js | def generate_jupyter_js(self, cyjs_style=None, cyjs_layout=None):
"""Generate Javascript from a template to run in Jupyter notebooks.
Parameters
----------
cyjs_style : Optional[dict]
A dict that sets CytoscapeJS style as specified in
https://github.com/cytoscape/cytoscape.js/blob/master/documentation/md/style.md.
cyjs_layout : Optional[dict]
A dict that sets CytoscapeJS
`layout parameters <http://js.cytoscape.org/#core/layout>`_.
Returns
-------
str
A Javascript string to be rendered in a Jupyter notebook cell.
"""
# First, export the CAG to CyJS
cyjs_elements = self.export_to_cytoscapejs()
# Load the Javascript template
tempf = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cag_template.js')
with open(tempf, 'r') as fh:
template = fh.read()
# Load the default style and layout
stylef = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cag_style.json')
with open(stylef, 'r') as fh:
style = json.load(fh)
# Apply style and layout only if arg wasn't passed in
if cyjs_style is None:
cyjs_style = style['style']
if cyjs_layout is None:
cyjs_layout = style['layout']
# Now fill in the template
formatted_args = tuple(json.dumps(x, indent=2) for x in
(cyjs_elements, cyjs_style, cyjs_layout))
js_str = template % formatted_args
return js_str | python | def generate_jupyter_js(self, cyjs_style=None, cyjs_layout=None):
"""Generate Javascript from a template to run in Jupyter notebooks.
Parameters
----------
cyjs_style : Optional[dict]
A dict that sets CytoscapeJS style as specified in
https://github.com/cytoscape/cytoscape.js/blob/master/documentation/md/style.md.
cyjs_layout : Optional[dict]
A dict that sets CytoscapeJS
`layout parameters <http://js.cytoscape.org/#core/layout>`_.
Returns
-------
str
A Javascript string to be rendered in a Jupyter notebook cell.
"""
# First, export the CAG to CyJS
cyjs_elements = self.export_to_cytoscapejs()
# Load the Javascript template
tempf = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cag_template.js')
with open(tempf, 'r') as fh:
template = fh.read()
# Load the default style and layout
stylef = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cag_style.json')
with open(stylef, 'r') as fh:
style = json.load(fh)
# Apply style and layout only if arg wasn't passed in
if cyjs_style is None:
cyjs_style = style['style']
if cyjs_layout is None:
cyjs_layout = style['layout']
# Now fill in the template
formatted_args = tuple(json.dumps(x, indent=2) for x in
(cyjs_elements, cyjs_style, cyjs_layout))
js_str = template % formatted_args
return js_str | [
"def",
"generate_jupyter_js",
"(",
"self",
",",
"cyjs_style",
"=",
"None",
",",
"cyjs_layout",
"=",
"None",
")",
":",
"# First, export the CAG to CyJS",
"cyjs_elements",
"=",
"self",
".",
"export_to_cytoscapejs",
"(",
")",
"# Load the Javascript template",
"tempf",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'cag_template.js'",
")",
"with",
"open",
"(",
"tempf",
",",
"'r'",
")",
"as",
"fh",
":",
"template",
"=",
"fh",
".",
"read",
"(",
")",
"# Load the default style and layout",
"stylef",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'cag_style.json'",
")",
"with",
"open",
"(",
"stylef",
",",
"'r'",
")",
"as",
"fh",
":",
"style",
"=",
"json",
".",
"load",
"(",
"fh",
")",
"# Apply style and layout only if arg wasn't passed in",
"if",
"cyjs_style",
"is",
"None",
":",
"cyjs_style",
"=",
"style",
"[",
"'style'",
"]",
"if",
"cyjs_layout",
"is",
"None",
":",
"cyjs_layout",
"=",
"style",
"[",
"'layout'",
"]",
"# Now fill in the template",
"formatted_args",
"=",
"tuple",
"(",
"json",
".",
"dumps",
"(",
"x",
",",
"indent",
"=",
"2",
")",
"for",
"x",
"in",
"(",
"cyjs_elements",
",",
"cyjs_style",
",",
"cyjs_layout",
")",
")",
"js_str",
"=",
"template",
"%",
"formatted_args",
"return",
"js_str"
]
| Generate Javascript from a template to run in Jupyter notebooks.
Parameters
----------
cyjs_style : Optional[dict]
A dict that sets CytoscapeJS style as specified in
https://github.com/cytoscape/cytoscape.js/blob/master/documentation/md/style.md.
cyjs_layout : Optional[dict]
A dict that sets CytoscapeJS
`layout parameters <http://js.cytoscape.org/#core/layout>`_.
Returns
-------
str
A Javascript string to be rendered in a Jupyter notebook cell. | [
"Generate",
"Javascript",
"from",
"a",
"template",
"to",
"run",
"in",
"Jupyter",
"notebooks",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cag/assembler.py#L250-L289 | train |
sorgerlab/indra | indra/assemblers/cag/assembler.py | CAGAssembler._node_name | def _node_name(self, concept):
"""Return a standardized name for a node given a Concept."""
if (# grounding threshold is specified
self.grounding_threshold is not None
# The particular eidos ontology grounding (un/wdi/fao) is present
and concept.db_refs[self.grounding_ontology]
# The grounding score is above the grounding threshold
and (concept.db_refs[self.grounding_ontology][0][1] >
self.grounding_threshold)):
entry = concept.db_refs[self.grounding_ontology][0][0]
return entry.split('/')[-1].replace('_', ' ').capitalize()
else:
return concept.name.capitalize() | python | def _node_name(self, concept):
"""Return a standardized name for a node given a Concept."""
if (# grounding threshold is specified
self.grounding_threshold is not None
# The particular eidos ontology grounding (un/wdi/fao) is present
and concept.db_refs[self.grounding_ontology]
# The grounding score is above the grounding threshold
and (concept.db_refs[self.grounding_ontology][0][1] >
self.grounding_threshold)):
entry = concept.db_refs[self.grounding_ontology][0][0]
return entry.split('/')[-1].replace('_', ' ').capitalize()
else:
return concept.name.capitalize() | [
"def",
"_node_name",
"(",
"self",
",",
"concept",
")",
":",
"if",
"(",
"# grounding threshold is specified",
"self",
".",
"grounding_threshold",
"is",
"not",
"None",
"# The particular eidos ontology grounding (un/wdi/fao) is present",
"and",
"concept",
".",
"db_refs",
"[",
"self",
".",
"grounding_ontology",
"]",
"# The grounding score is above the grounding threshold",
"and",
"(",
"concept",
".",
"db_refs",
"[",
"self",
".",
"grounding_ontology",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
">",
"self",
".",
"grounding_threshold",
")",
")",
":",
"entry",
"=",
"concept",
".",
"db_refs",
"[",
"self",
".",
"grounding_ontology",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"return",
"entry",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"capitalize",
"(",
")",
"else",
":",
"return",
"concept",
".",
"name",
".",
"capitalize",
"(",
")"
]
| Return a standardized name for a node given a Concept. | [
"Return",
"a",
"standardized",
"name",
"for",
"a",
"node",
"given",
"a",
"Concept",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cag/assembler.py#L291-L303 | train |
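A standalone sketch of the name standardization: when a grounding entry (ontology path, score) clears the threshold, the last path component becomes the node label. The entry, score, and threshold below are hypothetical.

entry, score = 'UN/entities/human/food_insecurity', 0.85
threshold = 0.7
concept_text = 'food insecurity shocks'  # fallback concept name

if score > threshold:
    name = entry.split('/')[-1].replace('_', ' ').capitalize()
else:
    name = concept_text.capitalize()
print(name)  # Food insecurity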
sorgerlab/indra | indra/sources/bel/rdf_processor.py | term_from_uri | def term_from_uri(uri):
"""Removes prepended URI information from terms."""
if uri is None:
return None
    # This ensures that if we get a Literal with an integer value (as we
# do for modification positions), it will get converted to a string,
# not an integer.
if isinstance(uri, rdflib.Literal):
uri = str(uri.toPython())
# This is to handle URIs like
# http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family
# or
# http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family
# In the current implementation, the order of the patterns
# matters.
patterns = ['http://www.openbel.org/bel/namespace//(.*)',
'http://www.openbel.org/vocabulary//(.*)',
'http://www.openbel.org/bel//(.*)',
'http://www.openbel.org/bel/namespace/(.*)',
'http://www.openbel.org/vocabulary/(.*)',
'http://www.openbel.org/bel/(.*)']
for pr in patterns:
match = re.match(pr, uri)
if match is not None:
term = match.groups()[0]
term = unquote(term)
return term
# If none of the patterns match then the URI is actually a simple term
# for instance a site: "341" or a substitution: "sub(V,600,E)"
return uri | python | def term_from_uri(uri):
"""Removes prepended URI information from terms."""
if uri is None:
return None
    # This ensures that if we get a Literal with an integer value (as we
# do for modification positions), it will get converted to a string,
# not an integer.
if isinstance(uri, rdflib.Literal):
uri = str(uri.toPython())
# This is to handle URIs like
# http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family
# or
# http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family
# In the current implementation, the order of the patterns
# matters.
patterns = ['http://www.openbel.org/bel/namespace//(.*)',
'http://www.openbel.org/vocabulary//(.*)',
'http://www.openbel.org/bel//(.*)',
'http://www.openbel.org/bel/namespace/(.*)',
'http://www.openbel.org/vocabulary/(.*)',
'http://www.openbel.org/bel/(.*)']
for pr in patterns:
match = re.match(pr, uri)
if match is not None:
term = match.groups()[0]
term = unquote(term)
return term
# If none of the patterns match then the URI is actually a simple term
# for instance a site: "341" or a substitution: "sub(V,600,E)"
return uri | [
"def",
"term_from_uri",
"(",
"uri",
")",
":",
"if",
"uri",
"is",
"None",
":",
"return",
"None",
"# This insures that if we get a Literal with an integer value (as we",
"# do for modification positions), it will get converted to a string,",
"# not an integer.",
"if",
"isinstance",
"(",
"uri",
",",
"rdflib",
".",
"Literal",
")",
":",
"uri",
"=",
"str",
"(",
"uri",
".",
"toPython",
"(",
")",
")",
"# This is to handle URIs like",
"# http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family",
"# or",
"# http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family",
"# In the current implementation, the order of the patterns",
"# matters.",
"patterns",
"=",
"[",
"'http://www.openbel.org/bel/namespace//(.*)'",
",",
"'http://www.openbel.org/vocabulary//(.*)'",
",",
"'http://www.openbel.org/bel//(.*)'",
",",
"'http://www.openbel.org/bel/namespace/(.*)'",
",",
"'http://www.openbel.org/vocabulary/(.*)'",
",",
"'http://www.openbel.org/bel/(.*)'",
"]",
"for",
"pr",
"in",
"patterns",
":",
"match",
"=",
"re",
".",
"match",
"(",
"pr",
",",
"uri",
")",
"if",
"match",
"is",
"not",
"None",
":",
"term",
"=",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"term",
"=",
"unquote",
"(",
"term",
")",
"return",
"term",
"# If none of the patterns match then the URI is actually a simple term",
"# for instance a site: \"341\" or a substitution: \"sub(V,600,E)\"",
"return",
"uri"
]
| Removes prepended URI information from terms. | [
"Removes",
"prepended",
"URI",
"information",
"from",
"terms",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L37-L66 | train |
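Exercising one of the URI patterns directly (Python 3 shown; the module's own unquote import is outside this excerpt). The URI comes from the comment in the function; percent-escapes are removed with unquote.

import re
from urllib.parse import unquote

uri = 'http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family'
match = re.match('http://www.openbel.org/bel/namespace//(.*)', uri)
print(unquote(match.groups()[0]))  # MAPK Erk1/3 Family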
sorgerlab/indra | indra/sources/bel/rdf_processor.py | BelRdfProcessor.get_activating_mods | def get_activating_mods(self):
"""Extract INDRA ActiveForm Statements with a single mod from BEL.
The SPARQL pattern used for extraction from BEL looks for a
ModifiedProteinAbundance as subject and an Activiy of a
ProteinAbundance as object.
Examples:
proteinAbundance(HGNC:INSR,proteinModification(P,Y))
directlyIncreases
kinaseActivity(proteinAbundance(HGNC:INSR))
"""
q_mods = prefixes + """
SELECT ?speciesName ?actType ?mod ?pos ?rel ?stmt ?species
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?object belvoc:hasActivityType ?actType .
?object belvoc:hasChild ?species .
?species a belvoc:ProteinAbundance .
?species belvoc:hasConcept ?speciesName .
?subject a belvoc:ModifiedProteinAbundance .
?subject belvoc:hasModificationType ?mod .
?subject belvoc:hasChild ?species .
OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . }
FILTER (?rel = belvoc:DirectlyIncreases ||
?rel = belvoc:DirectlyDecreases)
}
"""
# Now make the PySB for the phosphorylation
res_mods = self.g.query(q_mods)
for stmt in res_mods:
evidence = self._get_evidence(stmt[5])
# Parse out the elements of the query
species = self._get_agent(stmt[0], stmt[6])
act_type = term_from_uri(stmt[1]).lower()
mod = term_from_uri(stmt[2])
mod_pos = term_from_uri(stmt[3])
mc = self._get_mod_condition(mod, mod_pos)
species.mods = [mc]
rel = term_from_uri(stmt[4])
if rel == 'DirectlyDecreases':
is_active = False
else:
is_active = True
stmt_str = strip_statement(stmt[5])
# Mark this as a converted statement
self.converted_direct_stmts.append(stmt_str)
st = ActiveForm(species, act_type, is_active, evidence)
self.statements.append(st) | python | def get_activating_mods(self):
"""Extract INDRA ActiveForm Statements with a single mod from BEL.
The SPARQL pattern used for extraction from BEL looks for a
ModifiedProteinAbundance as subject and an Activity of a
ProteinAbundance as object.
Examples:
proteinAbundance(HGNC:INSR,proteinModification(P,Y))
directlyIncreases
kinaseActivity(proteinAbundance(HGNC:INSR))
"""
q_mods = prefixes + """
SELECT ?speciesName ?actType ?mod ?pos ?rel ?stmt ?species
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?object belvoc:hasActivityType ?actType .
?object belvoc:hasChild ?species .
?species a belvoc:ProteinAbundance .
?species belvoc:hasConcept ?speciesName .
?subject a belvoc:ModifiedProteinAbundance .
?subject belvoc:hasModificationType ?mod .
?subject belvoc:hasChild ?species .
OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . }
FILTER (?rel = belvoc:DirectlyIncreases ||
?rel = belvoc:DirectlyDecreases)
}
"""
# Now make the PySB for the phosphorylation
res_mods = self.g.query(q_mods)
for stmt in res_mods:
evidence = self._get_evidence(stmt[5])
# Parse out the elements of the query
species = self._get_agent(stmt[0], stmt[6])
act_type = term_from_uri(stmt[1]).lower()
mod = term_from_uri(stmt[2])
mod_pos = term_from_uri(stmt[3])
mc = self._get_mod_condition(mod, mod_pos)
species.mods = [mc]
rel = term_from_uri(stmt[4])
if rel == 'DirectlyDecreases':
is_active = False
else:
is_active = True
stmt_str = strip_statement(stmt[5])
# Mark this as a converted statement
self.converted_direct_stmts.append(stmt_str)
st = ActiveForm(species, act_type, is_active, evidence)
self.statements.append(st) | [
"def",
"get_activating_mods",
"(",
"self",
")",
":",
"q_mods",
"=",
"prefixes",
"+",
"\"\"\"\n SELECT ?speciesName ?actType ?mod ?pos ?rel ?stmt ?species\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?object .\n ?object belvoc:hasActivityType ?actType .\n ?object belvoc:hasChild ?species .\n ?species a belvoc:ProteinAbundance .\n ?species belvoc:hasConcept ?speciesName .\n ?subject a belvoc:ModifiedProteinAbundance .\n ?subject belvoc:hasModificationType ?mod .\n ?subject belvoc:hasChild ?species .\n OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . }\n FILTER (?rel = belvoc:DirectlyIncreases ||\n ?rel = belvoc:DirectlyDecreases)\n }\n \"\"\"",
"# Now make the PySB for the phosphorylation",
"res_mods",
"=",
"self",
".",
"g",
".",
"query",
"(",
"q_mods",
")",
"for",
"stmt",
"in",
"res_mods",
":",
"evidence",
"=",
"self",
".",
"_get_evidence",
"(",
"stmt",
"[",
"5",
"]",
")",
"# Parse out the elements of the query",
"species",
"=",
"self",
".",
"_get_agent",
"(",
"stmt",
"[",
"0",
"]",
",",
"stmt",
"[",
"6",
"]",
")",
"act_type",
"=",
"term_from_uri",
"(",
"stmt",
"[",
"1",
"]",
")",
".",
"lower",
"(",
")",
"mod",
"=",
"term_from_uri",
"(",
"stmt",
"[",
"2",
"]",
")",
"mod_pos",
"=",
"term_from_uri",
"(",
"stmt",
"[",
"3",
"]",
")",
"mc",
"=",
"self",
".",
"_get_mod_condition",
"(",
"mod",
",",
"mod_pos",
")",
"species",
".",
"mods",
"=",
"[",
"mc",
"]",
"rel",
"=",
"term_from_uri",
"(",
"stmt",
"[",
"4",
"]",
")",
"if",
"rel",
"==",
"'DirectlyDecreases'",
":",
"is_active",
"=",
"False",
"else",
":",
"is_active",
"=",
"True",
"stmt_str",
"=",
"strip_statement",
"(",
"stmt",
"[",
"5",
"]",
")",
"# Mark this as a converted statement",
"self",
".",
"converted_direct_stmts",
".",
"append",
"(",
"stmt_str",
")",
"st",
"=",
"ActiveForm",
"(",
"species",
",",
"act_type",
",",
"is_active",
",",
"evidence",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract INDRA ActiveForm Statements with a single mod from BEL.
The SPARQL pattern used for extraction from BEL looks for a
ModifiedProteinAbundance as subject and an Activity of a
ProteinAbundance as object.
Examples:
proteinAbundance(HGNC:INSR,proteinModification(P,Y))
directlyIncreases
kinaseActivity(proteinAbundance(HGNC:INSR)) | [
"Extract",
"INDRA",
"ActiveForm",
"Statements",
"with",
"a",
"single",
"mod",
"from",
"BEL",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L225-L279 | train |
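A hedged usage sketch for the get_activating_mods record above. The import path follows the record's path field (indra/sources/bel/rdf_processor.py), but the BelRdfProcessor constructor signature and the RDF file name are assumptions; only the method itself and its .statements attribute are confirmed by the code shown.

import rdflib
from indra.sources.bel.rdf_processor import BelRdfProcessor

g = rdflib.Graph()
g.parse('bel_corpus.rdf')   # hypothetical BEL RDF dump
bp = BelRdfProcessor(g)     # assumed constructor: wraps the parsed graph
bp.get_activating_mods()    # appends ActiveForm statements to bp.statements
for st in bp.statements:
    print(st)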
sorgerlab/indra | indra/sources/bel/rdf_processor.py | BelRdfProcessor.get_complexes | def get_complexes(self):
"""Extract INDRA Complex Statements from BEL.
The SPARQL query used to extract Complexes looks for ComplexAbundance
terms and their constituents. This pattern is distinct from other
patterns in this processor in that it queries for terms, not
full statements.
Examples:
complexAbundance(proteinAbundance(HGNC:PPARG),
proteinAbundance(HGNC:RXRA))
decreases
biologicalProcess(MESHPP:"Insulin Resistance")
"""
q_cmplx = prefixes + """
SELECT ?complexTerm ?childName ?child ?stmt
WHERE {
{
{?stmt belvoc:hasSubject ?complexTerm}
UNION
{?stmt belvoc:hasObject ?complexTerm .}
UNION
{?stmt belvoc:hasSubject ?term .
?term belvoc:hasChild ?complexTerm .}
UNION
{?stmt belvoc:hasObject ?term .
?term belvoc:hasChild ?complexTerm .}
}
?complexTerm a belvoc:Term .
?complexTerm a belvoc:ComplexAbundance .
?complexTerm belvoc:hasChild ?child .
?child belvoc:hasConcept ?childName .
}
"""
# Run the query
res_cmplx = self.g.query(q_cmplx)
# Store the members of each complex in a dict of lists, keyed by the
# term for the complex
cmplx_dict = collections.defaultdict(list)
cmplx_ev = {}
for stmt in res_cmplx:
stmt_uri = stmt[3]
ev = self._get_evidence(stmt_uri)
for e in ev:
e.epistemics['direct'] = True
cmplx_name = term_from_uri(stmt[0])
cmplx_id = stmt_uri + '#' + cmplx_name
child = self._get_agent(stmt[1], stmt[2])
cmplx_dict[cmplx_id].append(child)
# This might be written multiple times but with the same
# evidence
cmplx_ev[cmplx_id] = ev
# Now iterate over the stored complex information and create binding
# statements
for cmplx_id, cmplx_list in cmplx_dict.items():
if len(cmplx_list) < 2:
msg = 'Complex %s has less than 2 members! Skipping.' % \
                      cmplx_id
logger.warning(msg)
else:
self.statements.append(Complex(cmplx_list,
evidence=cmplx_ev[cmplx_id])) | python | def get_complexes(self):
"""Extract INDRA Complex Statements from BEL.
The SPARQL query used to extract Complexes looks for ComplexAbundance
terms and their constituents. This pattern is distinct from other
patterns in this processor in that it queries for terms, not
full statements.
Examples:
complexAbundance(proteinAbundance(HGNC:PPARG),
proteinAbundance(HGNC:RXRA))
decreases
biologicalProcess(MESHPP:"Insulin Resistance")
"""
q_cmplx = prefixes + """
SELECT ?complexTerm ?childName ?child ?stmt
WHERE {
{
{?stmt belvoc:hasSubject ?complexTerm}
UNION
{?stmt belvoc:hasObject ?complexTerm .}
UNION
{?stmt belvoc:hasSubject ?term .
?term belvoc:hasChild ?complexTerm .}
UNION
{?stmt belvoc:hasObject ?term .
?term belvoc:hasChild ?complexTerm .}
}
?complexTerm a belvoc:Term .
?complexTerm a belvoc:ComplexAbundance .
?complexTerm belvoc:hasChild ?child .
?child belvoc:hasConcept ?childName .
}
"""
# Run the query
res_cmplx = self.g.query(q_cmplx)
# Store the members of each complex in a dict of lists, keyed by the
# term for the complex
cmplx_dict = collections.defaultdict(list)
cmplx_ev = {}
for stmt in res_cmplx:
stmt_uri = stmt[3]
ev = self._get_evidence(stmt_uri)
for e in ev:
e.epistemics['direct'] = True
cmplx_name = term_from_uri(stmt[0])
cmplx_id = stmt_uri + '#' + cmplx_name
child = self._get_agent(stmt[1], stmt[2])
cmplx_dict[cmplx_id].append(child)
# This might be written multiple times but with the same
# evidence
cmplx_ev[cmplx_id] = ev
# Now iterate over the stored complex information and create binding
# statements
for cmplx_id, cmplx_list in cmplx_dict.items():
if len(cmplx_list) < 2:
msg = 'Complex %s has less than 2 members! Skipping.' % \
                  cmplx_id
logger.warning(msg)
else:
self.statements.append(Complex(cmplx_list,
evidence=cmplx_ev[cmplx_id])) | [
"def",
"get_complexes",
"(",
"self",
")",
":",
"q_cmplx",
"=",
"prefixes",
"+",
"\"\"\"\n SELECT ?complexTerm ?childName ?child ?stmt\n WHERE {\n {\n {?stmt belvoc:hasSubject ?complexTerm}\n UNION\n {?stmt belvoc:hasObject ?complexTerm .}\n UNION\n {?stmt belvoc:hasSubject ?term .\n ?term belvoc:hasChild ?complexTerm .}\n UNION\n {?stmt belvoc:hasObject ?term .\n ?term belvoc:hasChild ?complexTerm .}\n }\n ?complexTerm a belvoc:Term .\n ?complexTerm a belvoc:ComplexAbundance .\n ?complexTerm belvoc:hasChild ?child .\n ?child belvoc:hasConcept ?childName .\n }\n \"\"\"",
"# Run the query",
"res_cmplx",
"=",
"self",
".",
"g",
".",
"query",
"(",
"q_cmplx",
")",
"# Store the members of each complex in a dict of lists, keyed by the",
"# term for the complex",
"cmplx_dict",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"cmplx_ev",
"=",
"{",
"}",
"for",
"stmt",
"in",
"res_cmplx",
":",
"stmt_uri",
"=",
"stmt",
"[",
"3",
"]",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"stmt_uri",
")",
"for",
"e",
"in",
"ev",
":",
"e",
".",
"epistemics",
"[",
"'direct'",
"]",
"=",
"True",
"cmplx_name",
"=",
"term_from_uri",
"(",
"stmt",
"[",
"0",
"]",
")",
"cmplx_id",
"=",
"stmt_uri",
"+",
"'#'",
"+",
"cmplx_name",
"child",
"=",
"self",
".",
"_get_agent",
"(",
"stmt",
"[",
"1",
"]",
",",
"stmt",
"[",
"2",
"]",
")",
"cmplx_dict",
"[",
"cmplx_id",
"]",
".",
"append",
"(",
"child",
")",
"# This might be written multiple times but with the same",
"# evidence",
"cmplx_ev",
"[",
"cmplx_id",
"]",
"=",
"ev",
"# Now iterate over the stored complex information and create binding",
"# statements",
"for",
"cmplx_id",
",",
"cmplx_list",
"in",
"cmplx_dict",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"cmplx_list",
")",
"<",
"2",
":",
"msg",
"=",
"'Complex %s has less than 2 members! Skipping.'",
"%",
"cmplx_name",
"logger",
".",
"warning",
"(",
"msg",
")",
"else",
":",
"self",
".",
"statements",
".",
"append",
"(",
"Complex",
"(",
"cmplx_list",
",",
"evidence",
"=",
"cmplx_ev",
"[",
"cmplx_id",
"]",
")",
")"
]
| Extract INDRA Complex Statements from BEL.
The SPARQL query used to extract Complexes looks for ComplexAbundance
terms and their constituents. This pattern is distinct from other
patterns in this processor in that it queries for terms, not
full statements.
Examples:
complexAbundance(proteinAbundance(HGNC:PPARG),
proteinAbundance(HGNC:RXRA))
decreases
biologicalProcess(MESHPP:"Insulin Resistance") | [
"Extract",
"INDRA",
"Complex",
"Statements",
"from",
"BEL",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L281-L344 | train |
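A minimal standalone sketch of the grouping step inside get_complexes: rows returned by the SPARQL query are accumulated by complex identifier before any Complex statement is built, and single-member complexes are skipped. The row tuples below are invented for the example.

import collections

rows = [
    ('stmt1#complex(PPARG,RXRA)', 'PPARG'),
    ('stmt1#complex(PPARG,RXRA)', 'RXRA'),
    ('stmt2#complex(TP53)', 'TP53'),   # degenerate: only one member
]
cmplx_dict = collections.defaultdict(list)
for cmplx_id, child in rows:
    cmplx_dict[cmplx_id].append(child)
for cmplx_id, members in cmplx_dict.items():
    if len(members) < 2:
        print('Skipping %s: fewer than 2 members' % cmplx_id)
    else:
        print('Complex members:', members)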
sorgerlab/indra | indra/sources/bel/rdf_processor.py | BelRdfProcessor.get_activating_subs | def get_activating_subs(self):
"""Extract INDRA ActiveForm Statements based on a mutation from BEL.
    The SPARQL pattern used to extract ActiveForms due to mutations looks
for a ProteinAbundance as a subject which has a child encoding the
amino acid substitution. The object of the statement is an
ActivityType of the same ProteinAbundance, which is either increased
or decreased.
Examples:
proteinAbundance(HGNC:NRAS,substitution(Q,61,K))
directlyIncreases
gtpBoundActivity(proteinAbundance(HGNC:NRAS))
proteinAbundance(HGNC:TP53,substitution(F,134,I))
directlyDecreases
transcriptionalActivity(proteinAbundance(HGNC:TP53))
"""
q_mods = prefixes + """
SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?subject a belvoc:ProteinAbundance .
?subject belvoc:hasConcept ?enzyme_name .
?subject belvoc:hasChild ?sub_expr .
?sub_expr rdfs:label ?sub_label .
?object a belvoc:AbundanceActivity .
?object belvoc:hasActivityType ?act_type .
?object belvoc:hasChild ?enzyme .
?enzyme a belvoc:ProteinAbundance .
?enzyme belvoc:hasConcept ?enzyme_name .
}
"""
    # Run the query and build ActiveForm statements
res_mods = self.g.query(q_mods)
for stmt in res_mods:
evidence = self._get_evidence(stmt[4])
# Parse out the elements of the query
enz = self._get_agent(stmt[0], stmt[5])
sub_expr = term_from_uri(stmt[1])
act_type = term_from_uri(stmt[2]).lower()
# Parse the WT and substituted residues from the node label.
# Strangely, the RDF for substituted residue doesn't break the
# terms of the BEL expression down into their meaning, as happens
# for modified protein abundances. Instead, the substitution
# just comes back as a string, e.g., "sub(V,600,E)". This code
# parses the arguments back out using a regular expression.
        match = re.match(r'sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr)
if match:
matches = match.groups()
wt_residue = matches[0]
position = matches[1]
sub_residue = matches[2]
else:
logger.warning("Could not parse substitution expression %s" %
sub_expr)
continue
mc = MutCondition(position, wt_residue, sub_residue)
enz.mutations = [mc]
rel = strip_statement(stmt[3])
if rel == 'DirectlyDecreases':
is_active = False
else:
is_active = True
stmt_str = strip_statement(stmt[4])
# Mark this as a converted statement
self.converted_direct_stmts.append(stmt_str)
st = ActiveForm(enz, act_type, is_active, evidence)
self.statements.append(st) | python | def get_activating_subs(self):
"""Extract INDRA ActiveForm Statements based on a mutation from BEL.
    The SPARQL pattern used to extract ActiveForms due to mutations looks
for a ProteinAbundance as a subject which has a child encoding the
amino acid substitution. The object of the statement is an
ActivityType of the same ProteinAbundance, which is either increased
or decreased.
Examples:
proteinAbundance(HGNC:NRAS,substitution(Q,61,K))
directlyIncreases
gtpBoundActivity(proteinAbundance(HGNC:NRAS))
proteinAbundance(HGNC:TP53,substitution(F,134,I))
directlyDecreases
transcriptionalActivity(proteinAbundance(HGNC:TP53))
"""
q_mods = prefixes + """
SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?object .
?subject a belvoc:ProteinAbundance .
?subject belvoc:hasConcept ?enzyme_name .
?subject belvoc:hasChild ?sub_expr .
?sub_expr rdfs:label ?sub_label .
?object a belvoc:AbundanceActivity .
?object belvoc:hasActivityType ?act_type .
?object belvoc:hasChild ?enzyme .
?enzyme a belvoc:ProteinAbundance .
?enzyme belvoc:hasConcept ?enzyme_name .
}
"""
    # Run the query and build ActiveForm statements
res_mods = self.g.query(q_mods)
for stmt in res_mods:
evidence = self._get_evidence(stmt[4])
# Parse out the elements of the query
enz = self._get_agent(stmt[0], stmt[5])
sub_expr = term_from_uri(stmt[1])
act_type = term_from_uri(stmt[2]).lower()
# Parse the WT and substituted residues from the node label.
# Strangely, the RDF for substituted residue doesn't break the
# terms of the BEL expression down into their meaning, as happens
# for modified protein abundances. Instead, the substitution
# just comes back as a string, e.g., "sub(V,600,E)". This code
# parses the arguments back out using a regular expression.
        match = re.match(r'sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr)
if match:
matches = match.groups()
wt_residue = matches[0]
position = matches[1]
sub_residue = matches[2]
else:
logger.warning("Could not parse substitution expression %s" %
sub_expr)
continue
mc = MutCondition(position, wt_residue, sub_residue)
enz.mutations = [mc]
rel = strip_statement(stmt[3])
if rel == 'DirectlyDecreases':
is_active = False
else:
is_active = True
stmt_str = strip_statement(stmt[4])
# Mark this as a converted statement
self.converted_direct_stmts.append(stmt_str)
st = ActiveForm(enz, act_type, is_active, evidence)
self.statements.append(st) | [
"def",
"get_activating_subs",
"(",
"self",
")",
":",
"q_mods",
"=",
"prefixes",
"+",
"\"\"\"\n SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?object .\n ?subject a belvoc:ProteinAbundance .\n ?subject belvoc:hasConcept ?enzyme_name .\n ?subject belvoc:hasChild ?sub_expr .\n ?sub_expr rdfs:label ?sub_label .\n ?object a belvoc:AbundanceActivity .\n ?object belvoc:hasActivityType ?act_type .\n ?object belvoc:hasChild ?enzyme .\n ?enzyme a belvoc:ProteinAbundance .\n ?enzyme belvoc:hasConcept ?enzyme_name .\n }\n \"\"\"",
"# Now make the PySB for the phosphorylation",
"res_mods",
"=",
"self",
".",
"g",
".",
"query",
"(",
"q_mods",
")",
"for",
"stmt",
"in",
"res_mods",
":",
"evidence",
"=",
"self",
".",
"_get_evidence",
"(",
"stmt",
"[",
"4",
"]",
")",
"# Parse out the elements of the query",
"enz",
"=",
"self",
".",
"_get_agent",
"(",
"stmt",
"[",
"0",
"]",
",",
"stmt",
"[",
"5",
"]",
")",
"sub_expr",
"=",
"term_from_uri",
"(",
"stmt",
"[",
"1",
"]",
")",
"act_type",
"=",
"term_from_uri",
"(",
"stmt",
"[",
"2",
"]",
")",
".",
"lower",
"(",
")",
"# Parse the WT and substituted residues from the node label.",
"# Strangely, the RDF for substituted residue doesn't break the",
"# terms of the BEL expression down into their meaning, as happens",
"# for modified protein abundances. Instead, the substitution",
"# just comes back as a string, e.g., \"sub(V,600,E)\". This code",
"# parses the arguments back out using a regular expression.",
"match",
"=",
"re",
".",
"match",
"(",
"'sub\\(([A-Z]),([0-9]*),([A-Z])\\)'",
",",
"sub_expr",
")",
"if",
"match",
":",
"matches",
"=",
"match",
".",
"groups",
"(",
")",
"wt_residue",
"=",
"matches",
"[",
"0",
"]",
"position",
"=",
"matches",
"[",
"1",
"]",
"sub_residue",
"=",
"matches",
"[",
"2",
"]",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Could not parse substitution expression %s\"",
"%",
"sub_expr",
")",
"continue",
"mc",
"=",
"MutCondition",
"(",
"position",
",",
"wt_residue",
",",
"sub_residue",
")",
"enz",
".",
"mutations",
"=",
"[",
"mc",
"]",
"rel",
"=",
"strip_statement",
"(",
"stmt",
"[",
"3",
"]",
")",
"if",
"rel",
"==",
"'DirectlyDecreases'",
":",
"is_active",
"=",
"False",
"else",
":",
"is_active",
"=",
"True",
"stmt_str",
"=",
"strip_statement",
"(",
"stmt",
"[",
"4",
"]",
")",
"# Mark this as a converted statement",
"self",
".",
"converted_direct_stmts",
".",
"append",
"(",
"stmt_str",
")",
"st",
"=",
"ActiveForm",
"(",
"enz",
",",
"act_type",
",",
"is_active",
",",
"evidence",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract INDRA ActiveForm Statements based on a mutation from BEL.
The SPARQL pattern used to extract ActiveForms due to mutations looks
for a ProteinAbundance as a subject which has a child encoding the
amino acid substitution. The object of the statement is an
ActivityType of the same ProteinAbundance, which is either increased
or decreased.
Examples:
proteinAbundance(HGNC:NRAS,substitution(Q,61,K))
directlyIncreases
gtpBoundActivity(proteinAbundance(HGNC:NRAS))
proteinAbundance(HGNC:TP53,substitution(F,134,I))
directlyDecreases
transcriptionalActivity(proteinAbundance(HGNC:TP53)) | [
"Extract",
"INDRA",
"ActiveForm",
"Statements",
"based",
"on",
"a",
"mutation",
"from",
"BEL",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L346-L421 | train |
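The substitution parsing in get_activating_subs can be checked in isolation; this sketch applies the same regular expression to a BRAF V600E-style label and recovers the wild-type residue, position, and substituted residue as strings.

import re

match = re.match(r'sub\(([A-Z]),([0-9]*),([A-Z])\)', 'sub(V,600,E)')
wt_residue, position, sub_residue = match.groups()
assert (wt_residue, position, sub_residue) == ('V', '600', 'E')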
sorgerlab/indra | indra/sources/bel/rdf_processor.py | BelRdfProcessor.get_conversions | def get_conversions(self):
"""Extract Conversion INDRA Statements from BEL.
The SPARQL query used to extract Conversions searches for
a subject (controller) which is an AbundanceActivity
which directlyIncreases a Reaction with a given list of
Reactants and Products.
Examples:
catalyticActivity(proteinAbundance(HGNC:HMOX1))
directlyIncreases
reaction(reactants(abundance(CHEBI:heme)),
products(abundance(SCHEM:Biliverdine),
abundance(CHEBI:"carbon monoxide")))
"""
query = prefixes + """
SELECT DISTINCT ?controller ?controllerName ?controllerActivity
?product ?productName ?reactant ?reactantName ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?rxn .
?subject a belvoc:AbundanceActivity .
?subject belvoc:hasActivityType ?controllerActivity .
?subject belvoc:hasChild ?controller .
?controller belvoc:hasConcept ?controllerName .
?rxn a belvoc:Reaction .
?rxn belvoc:hasChild ?reactants .
?reactants rdfs:label ?reactLabel .
FILTER (regex(?reactLabel, "^reactants.*"))
?rxn belvoc:hasChild ?products .
?products rdfs:label ?prodLabel .
FILTER (regex(?prodLabel, "^products.*"))
?reactants belvoc:hasChild ?reactant .
?products belvoc:hasChild ?product .
?reactant belvoc:hasConcept ?reactantName .
?product belvoc:hasConcept ?productName .
}
"""
res = self.g.query(query)
# We need to collect all pieces of the same statement so that we can
# collect multiple reactants and products
stmt_map = collections.defaultdict(list)
for stmt in res:
stmt_map[stmt[-1]].append(stmt)
for stmts in stmt_map.values():
# First we get the shared part of the Statement
stmt = stmts[0]
subj = self._get_agent(stmt[1], stmt[0])
evidence = self._get_evidence(stmt[-1])
stmt_str = strip_statement(stmt[-1])
# Now we collect the participants
obj_from_map = {}
obj_to_map = {}
for stmt in stmts:
reactant_name = stmt[6]
product_name = stmt[4]
if reactant_name not in obj_from_map:
obj_from_map[reactant_name] = \
self._get_agent(stmt[6], stmt[5])
if product_name not in obj_to_map:
obj_to_map[product_name] = \
self._get_agent(stmt[4], stmt[3])
obj_from = list(obj_from_map.values())
obj_to = list(obj_to_map.values())
st = Conversion(subj, obj_from, obj_to, evidence=evidence)
# If we've matched a pattern, mark this as a converted statement
self.statements.append(st)
self.converted_direct_stmts.append(stmt_str) | python | def get_conversions(self):
"""Extract Conversion INDRA Statements from BEL.
The SPARQL query used to extract Conversions searches for
a subject (controller) which is an AbundanceActivity
which directlyIncreases a Reaction with a given list of
Reactants and Products.
Examples:
catalyticActivity(proteinAbundance(HGNC:HMOX1))
directlyIncreases
reaction(reactants(abundance(CHEBI:heme)),
products(abundance(SCHEM:Biliverdine),
abundance(CHEBI:"carbon monoxide")))
"""
query = prefixes + """
SELECT DISTINCT ?controller ?controllerName ?controllerActivity
?product ?productName ?reactant ?reactantName ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasRelationship ?rel .
?stmt belvoc:hasSubject ?subject .
?stmt belvoc:hasObject ?rxn .
?subject a belvoc:AbundanceActivity .
?subject belvoc:hasActivityType ?controllerActivity .
?subject belvoc:hasChild ?controller .
?controller belvoc:hasConcept ?controllerName .
?rxn a belvoc:Reaction .
?rxn belvoc:hasChild ?reactants .
?reactants rdfs:label ?reactLabel .
FILTER (regex(?reactLabel, "^reactants.*"))
?rxn belvoc:hasChild ?products .
?products rdfs:label ?prodLabel .
FILTER (regex(?prodLabel, "^products.*"))
?reactants belvoc:hasChild ?reactant .
?products belvoc:hasChild ?product .
?reactant belvoc:hasConcept ?reactantName .
?product belvoc:hasConcept ?productName .
}
"""
res = self.g.query(query)
# We need to collect all pieces of the same statement so that we can
# collect multiple reactants and products
stmt_map = collections.defaultdict(list)
for stmt in res:
stmt_map[stmt[-1]].append(stmt)
for stmts in stmt_map.values():
# First we get the shared part of the Statement
stmt = stmts[0]
subj = self._get_agent(stmt[1], stmt[0])
evidence = self._get_evidence(stmt[-1])
stmt_str = strip_statement(stmt[-1])
# Now we collect the participants
obj_from_map = {}
obj_to_map = {}
for stmt in stmts:
reactant_name = stmt[6]
product_name = stmt[4]
if reactant_name not in obj_from_map:
obj_from_map[reactant_name] = \
self._get_agent(stmt[6], stmt[5])
if product_name not in obj_to_map:
obj_to_map[product_name] = \
self._get_agent(stmt[4], stmt[3])
obj_from = list(obj_from_map.values())
obj_to = list(obj_to_map.values())
st = Conversion(subj, obj_from, obj_to, evidence=evidence)
# If we've matched a pattern, mark this as a converted statement
self.statements.append(st)
self.converted_direct_stmts.append(stmt_str) | [
"def",
"get_conversions",
"(",
"self",
")",
":",
"query",
"=",
"prefixes",
"+",
"\"\"\"\n SELECT DISTINCT ?controller ?controllerName ?controllerActivity\n ?product ?productName ?reactant ?reactantName ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?rxn .\n ?subject a belvoc:AbundanceActivity .\n ?subject belvoc:hasActivityType ?controllerActivity .\n ?subject belvoc:hasChild ?controller .\n ?controller belvoc:hasConcept ?controllerName .\n ?rxn a belvoc:Reaction .\n ?rxn belvoc:hasChild ?reactants .\n ?reactants rdfs:label ?reactLabel .\n FILTER (regex(?reactLabel, \"^reactants.*\"))\n ?rxn belvoc:hasChild ?products .\n ?products rdfs:label ?prodLabel .\n FILTER (regex(?prodLabel, \"^products.*\"))\n ?reactants belvoc:hasChild ?reactant .\n ?products belvoc:hasChild ?product .\n ?reactant belvoc:hasConcept ?reactantName .\n ?product belvoc:hasConcept ?productName .\n }\n \"\"\"",
"res",
"=",
"self",
".",
"g",
".",
"query",
"(",
"query",
")",
"# We need to collect all pieces of the same statement so that we can",
"# collect multiple reactants and products",
"stmt_map",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"stmt",
"in",
"res",
":",
"stmt_map",
"[",
"stmt",
"[",
"-",
"1",
"]",
"]",
".",
"append",
"(",
"stmt",
")",
"for",
"stmts",
"in",
"stmt_map",
".",
"values",
"(",
")",
":",
"# First we get the shared part of the Statement",
"stmt",
"=",
"stmts",
"[",
"0",
"]",
"subj",
"=",
"self",
".",
"_get_agent",
"(",
"stmt",
"[",
"1",
"]",
",",
"stmt",
"[",
"0",
"]",
")",
"evidence",
"=",
"self",
".",
"_get_evidence",
"(",
"stmt",
"[",
"-",
"1",
"]",
")",
"stmt_str",
"=",
"strip_statement",
"(",
"stmt",
"[",
"-",
"1",
"]",
")",
"# Now we collect the participants",
"obj_from_map",
"=",
"{",
"}",
"obj_to_map",
"=",
"{",
"}",
"for",
"stmt",
"in",
"stmts",
":",
"reactant_name",
"=",
"stmt",
"[",
"6",
"]",
"product_name",
"=",
"stmt",
"[",
"4",
"]",
"if",
"reactant_name",
"not",
"in",
"obj_from_map",
":",
"obj_from_map",
"[",
"reactant_name",
"]",
"=",
"self",
".",
"_get_agent",
"(",
"stmt",
"[",
"6",
"]",
",",
"stmt",
"[",
"5",
"]",
")",
"if",
"product_name",
"not",
"in",
"obj_to_map",
":",
"obj_to_map",
"[",
"product_name",
"]",
"=",
"self",
".",
"_get_agent",
"(",
"stmt",
"[",
"4",
"]",
",",
"stmt",
"[",
"3",
"]",
")",
"obj_from",
"=",
"list",
"(",
"obj_from_map",
".",
"values",
"(",
")",
")",
"obj_to",
"=",
"list",
"(",
"obj_to_map",
".",
"values",
"(",
")",
")",
"st",
"=",
"Conversion",
"(",
"subj",
",",
"obj_from",
",",
"obj_to",
",",
"evidence",
"=",
"evidence",
")",
"# If we've matched a pattern, mark this as a converted statement",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")",
"self",
".",
"converted_direct_stmts",
".",
"append",
"(",
"stmt_str",
")"
]
| Extract Conversion INDRA Statements from BEL.
The SPARQL query used to extract Conversions searches for
a subject (controller) which is an AbundanceActivity
which directlyIncreases a Reaction with a given list of
Reactants and Products.
Examples:
catalyticActivity(proteinAbundance(HGNC:HMOX1))
directlyIncreases
reaction(reactants(abundance(CHEBI:heme)),
products(abundance(SCHEM:Biliverdine),
abundance(CHEBI:"carbon monoxide"))) | [
"Extract",
"Conversion",
"INDRA",
"Statements",
"from",
"BEL",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L654-L725 | train |
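A standalone sketch of the row aggregation in get_conversions: the SPARQL SELECT yields one row per (reactant, product) combination, so rows are grouped by the statement URI (the last column) and participants are deduplicated by name, mirroring the obj_from_map/obj_to_map logic. The row values are invented; the tuple layout follows the SELECT clause.

import collections

rows = [
    # (controller, controllerName, activity, product, productName,
    #  reactant, reactantName, stmt)
    (None, 'HMOX1', 'cat', None, 'Biliverdine', None, 'heme', 'stmt1'),
    (None, 'HMOX1', 'cat', None, 'carbon monoxide', None, 'heme', 'stmt1'),
]
stmt_map = collections.defaultdict(list)
for row in rows:
    stmt_map[row[-1]].append(row)
for stmt_rows in stmt_map.values():
    reactants = sorted({row[6] for row in stmt_rows})
    products = sorted({row[4] for row in stmt_rows})
    print(reactants, '->', products)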
sorgerlab/indra | indra/sources/bel/rdf_processor.py | BelRdfProcessor.get_degenerate_statements | def get_degenerate_statements(self):
"""Get all degenerate BEL statements.
Stores the results of the query in self.degenerate_stmts.
"""
logger.info("Checking for 'degenerate' statements...\n")
# Get rules of type protein X -> activity Y
q_stmts = prefixes + """
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasSubject ?subj .
?stmt belvoc:hasObject ?obj .
{
{ ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
}
{
{ ?subj a belvoc:ProteinAbundance . }
UNION
{ ?subj a belvoc:ModifiedProteinAbundance . }
}
?subj belvoc:hasConcept ?xName .
{
{
?obj a belvoc:ProteinAbundance .
?obj belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:ModifiedProteinAbundance .
?obj belvoc:hasChild ?proteinY .
?proteinY belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:AbundanceActivity .
?obj belvoc:hasChild ?objChild .
?objChild a belvoc:ProteinAbundance .
?objChild belvoc:hasConcept ?yName .
}
}
FILTER (?xName != ?yName)
}
"""
res_stmts = self.g.query(q_stmts)
logger.info("Protein -> Protein/Activity statements:")
logger.info("---------------------------------------")
for stmt in res_stmts:
stmt_str = strip_statement(stmt[0])
logger.info(stmt_str)
self.degenerate_stmts.append(stmt_str) | python | def get_degenerate_statements(self):
"""Get all degenerate BEL statements.
Stores the results of the query in self.degenerate_stmts.
"""
logger.info("Checking for 'degenerate' statements...\n")
# Get rules of type protein X -> activity Y
q_stmts = prefixes + """
SELECT ?stmt
WHERE {
?stmt a belvoc:Statement .
?stmt belvoc:hasSubject ?subj .
?stmt belvoc:hasObject ?obj .
{
{ ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
UNION
{ ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
}
{
{ ?subj a belvoc:ProteinAbundance . }
UNION
{ ?subj a belvoc:ModifiedProteinAbundance . }
}
?subj belvoc:hasConcept ?xName .
{
{
?obj a belvoc:ProteinAbundance .
?obj belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:ModifiedProteinAbundance .
?obj belvoc:hasChild ?proteinY .
?proteinY belvoc:hasConcept ?yName .
}
UNION
{
?obj a belvoc:AbundanceActivity .
?obj belvoc:hasChild ?objChild .
?objChild a belvoc:ProteinAbundance .
?objChild belvoc:hasConcept ?yName .
}
}
FILTER (?xName != ?yName)
}
"""
res_stmts = self.g.query(q_stmts)
logger.info("Protein -> Protein/Activity statements:")
logger.info("---------------------------------------")
for stmt in res_stmts:
stmt_str = strip_statement(stmt[0])
logger.info(stmt_str)
self.degenerate_stmts.append(stmt_str) | [
"def",
"get_degenerate_statements",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Checking for 'degenerate' statements...\\n\"",
")",
"# Get rules of type protein X -> activity Y",
"q_stmts",
"=",
"prefixes",
"+",
"\"\"\"\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasSubject ?subj .\n ?stmt belvoc:hasObject ?obj .\n {\n { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }\n UNION\n { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }\n }\n {\n { ?subj a belvoc:ProteinAbundance . }\n UNION\n { ?subj a belvoc:ModifiedProteinAbundance . }\n }\n ?subj belvoc:hasConcept ?xName .\n {\n {\n ?obj a belvoc:ProteinAbundance .\n ?obj belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:ModifiedProteinAbundance .\n ?obj belvoc:hasChild ?proteinY .\n ?proteinY belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:AbundanceActivity .\n ?obj belvoc:hasChild ?objChild .\n ?objChild a belvoc:ProteinAbundance .\n ?objChild belvoc:hasConcept ?yName .\n }\n }\n FILTER (?xName != ?yName)\n }\n \"\"\"",
"res_stmts",
"=",
"self",
".",
"g",
".",
"query",
"(",
"q_stmts",
")",
"logger",
".",
"info",
"(",
"\"Protein -> Protein/Activity statements:\"",
")",
"logger",
".",
"info",
"(",
"\"---------------------------------------\"",
")",
"for",
"stmt",
"in",
"res_stmts",
":",
"stmt_str",
"=",
"strip_statement",
"(",
"stmt",
"[",
"0",
"]",
")",
"logger",
".",
"info",
"(",
"stmt_str",
")",
"self",
".",
"degenerate_stmts",
".",
"append",
"(",
"stmt_str",
")"
]
| Get all degenerate BEL statements.
Stores the results of the query in self.degenerate_stmts. | [
"Get",
"all",
"degenerate",
"BEL",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L774-L827 | train |
sorgerlab/indra | indra/sources/bel/rdf_processor.py | BelRdfProcessor.print_statement_coverage | def print_statement_coverage(self):
"""Display how many of the direct statements have been converted.
    Also print how many are considered 'degenerate' and not converted."""
if not self.all_direct_stmts:
self.get_all_direct_statements()
if not self.degenerate_stmts:
self.get_degenerate_statements()
if not self.all_indirect_stmts:
self.get_all_indirect_statements()
logger.info('')
logger.info("Total indirect statements: %d" %
len(self.all_indirect_stmts))
logger.info("Converted indirect statements: %d" %
len(self.converted_indirect_stmts))
logger.info(">> Unhandled indirect statements: %d" %
(len(self.all_indirect_stmts) -
len(self.converted_indirect_stmts)))
logger.info('')
logger.info("Total direct statements: %d" % len(self.all_direct_stmts))
logger.info("Converted direct statements: %d" %
len(self.converted_direct_stmts))
logger.info("Degenerate direct statements: %d" %
len(self.degenerate_stmts))
logger.info(">> Unhandled direct statements: %d" %
(len(self.all_direct_stmts) -
len(self.converted_direct_stmts) -
len(self.degenerate_stmts)))
logger.info('')
logger.info("--- Unhandled direct statements ---------")
for stmt in self.all_direct_stmts:
if not (stmt in self.converted_direct_stmts or
stmt in self.degenerate_stmts):
logger.info(stmt)
logger.info('')
logger.info("--- Unhandled indirect statements ---------")
for stmt in self.all_indirect_stmts:
if not (stmt in self.converted_indirect_stmts or
stmt in self.degenerate_stmts):
logger.info(stmt) | python | def print_statement_coverage(self):
"""Display how many of the direct statements have been converted.
    Also print how many are considered 'degenerate' and not converted."""
if not self.all_direct_stmts:
self.get_all_direct_statements()
if not self.degenerate_stmts:
self.get_degenerate_statements()
if not self.all_indirect_stmts:
self.get_all_indirect_statements()
logger.info('')
logger.info("Total indirect statements: %d" %
len(self.all_indirect_stmts))
logger.info("Converted indirect statements: %d" %
len(self.converted_indirect_stmts))
logger.info(">> Unhandled indirect statements: %d" %
(len(self.all_indirect_stmts) -
len(self.converted_indirect_stmts)))
logger.info('')
logger.info("Total direct statements: %d" % len(self.all_direct_stmts))
logger.info("Converted direct statements: %d" %
len(self.converted_direct_stmts))
logger.info("Degenerate direct statements: %d" %
len(self.degenerate_stmts))
logger.info(">> Unhandled direct statements: %d" %
(len(self.all_direct_stmts) -
len(self.converted_direct_stmts) -
len(self.degenerate_stmts)))
logger.info('')
logger.info("--- Unhandled direct statements ---------")
for stmt in self.all_direct_stmts:
if not (stmt in self.converted_direct_stmts or
stmt in self.degenerate_stmts):
logger.info(stmt)
logger.info('')
logger.info("--- Unhandled indirect statements ---------")
for stmt in self.all_indirect_stmts:
if not (stmt in self.converted_indirect_stmts or
stmt in self.degenerate_stmts):
logger.info(stmt) | [
"def",
"print_statement_coverage",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"all_direct_stmts",
":",
"self",
".",
"get_all_direct_statements",
"(",
")",
"if",
"not",
"self",
".",
"degenerate_stmts",
":",
"self",
".",
"get_degenerate_statements",
"(",
")",
"if",
"not",
"self",
".",
"all_indirect_stmts",
":",
"self",
".",
"get_all_indirect_statements",
"(",
")",
"logger",
".",
"info",
"(",
"''",
")",
"logger",
".",
"info",
"(",
"\"Total indirect statements: %d\"",
"%",
"len",
"(",
"self",
".",
"all_indirect_stmts",
")",
")",
"logger",
".",
"info",
"(",
"\"Converted indirect statements: %d\"",
"%",
"len",
"(",
"self",
".",
"converted_indirect_stmts",
")",
")",
"logger",
".",
"info",
"(",
"\">> Unhandled indirect statements: %d\"",
"%",
"(",
"len",
"(",
"self",
".",
"all_indirect_stmts",
")",
"-",
"len",
"(",
"self",
".",
"converted_indirect_stmts",
")",
")",
")",
"logger",
".",
"info",
"(",
"''",
")",
"logger",
".",
"info",
"(",
"\"Total direct statements: %d\"",
"%",
"len",
"(",
"self",
".",
"all_direct_stmts",
")",
")",
"logger",
".",
"info",
"(",
"\"Converted direct statements: %d\"",
"%",
"len",
"(",
"self",
".",
"converted_direct_stmts",
")",
")",
"logger",
".",
"info",
"(",
"\"Degenerate direct statements: %d\"",
"%",
"len",
"(",
"self",
".",
"degenerate_stmts",
")",
")",
"logger",
".",
"info",
"(",
"\">> Unhandled direct statements: %d\"",
"%",
"(",
"len",
"(",
"self",
".",
"all_direct_stmts",
")",
"-",
"len",
"(",
"self",
".",
"converted_direct_stmts",
")",
"-",
"len",
"(",
"self",
".",
"degenerate_stmts",
")",
")",
")",
"logger",
".",
"info",
"(",
"''",
")",
"logger",
".",
"info",
"(",
"\"--- Unhandled direct statements ---------\"",
")",
"for",
"stmt",
"in",
"self",
".",
"all_direct_stmts",
":",
"if",
"not",
"(",
"stmt",
"in",
"self",
".",
"converted_direct_stmts",
"or",
"stmt",
"in",
"self",
".",
"degenerate_stmts",
")",
":",
"logger",
".",
"info",
"(",
"stmt",
")",
"logger",
".",
"info",
"(",
"''",
")",
"logger",
".",
"info",
"(",
"\"--- Unhandled indirect statements ---------\"",
")",
"for",
"stmt",
"in",
"self",
".",
"all_indirect_stmts",
":",
"if",
"not",
"(",
"stmt",
"in",
"self",
".",
"converted_indirect_stmts",
"or",
"stmt",
"in",
"self",
".",
"degenerate_stmts",
")",
":",
"logger",
".",
"info",
"(",
"stmt",
")"
]
| Display how many of the direct statements have been converted.
Also print how many are considered 'degenerate' and not converted. | [
"Display",
"how",
"many",
"of",
"the",
"direct",
"statements",
"have",
"been",
"converted",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L829-L871 | train |
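The coverage report in print_statement_coverage reduces to set arithmetic over statement strings; a standalone equivalent with invented inputs:

all_direct = ['s1', 's2', 's3', 's4']
converted = ['s1', 's3']
degenerate = ['s4']

unhandled = [s for s in all_direct
             if s not in converted and s not in degenerate]
print('Total: %d, converted: %d, degenerate: %d, unhandled: %d'
      % (len(all_direct), len(converted), len(degenerate), len(unhandled)))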
sorgerlab/indra | indra/sources/bel/rdf_processor.py | BelRdfProcessor.print_statements | def print_statements(self):
"""Print all extracted INDRA Statements."""
logger.info('--- Direct INDRA statements ----------')
for i, stmt in enumerate(self.statements):
logger.info("%s: %s" % (i, stmt))
logger.info('--- Indirect INDRA statements ----------')
for i, stmt in enumerate(self.indirect_stmts):
logger.info("%s: %s" % (i, stmt)) | python | def print_statements(self):
"""Print all extracted INDRA Statements."""
logger.info('--- Direct INDRA statements ----------')
for i, stmt in enumerate(self.statements):
logger.info("%s: %s" % (i, stmt))
logger.info('--- Indirect INDRA statements ----------')
for i, stmt in enumerate(self.indirect_stmts):
logger.info("%s: %s" % (i, stmt)) | [
"def",
"print_statements",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'--- Direct INDRA statements ----------'",
")",
"for",
"i",
",",
"stmt",
"in",
"enumerate",
"(",
"self",
".",
"statements",
")",
":",
"logger",
".",
"info",
"(",
"\"%s: %s\"",
"%",
"(",
"i",
",",
"stmt",
")",
")",
"logger",
".",
"info",
"(",
"'--- Indirect INDRA statements ----------'",
")",
"for",
"i",
",",
"stmt",
"in",
"enumerate",
"(",
"self",
".",
"indirect_stmts",
")",
":",
"logger",
".",
"info",
"(",
"\"%s: %s\"",
"%",
"(",
"i",
",",
"stmt",
")",
")"
]
| Print all extracted INDRA Statements. | [
"Print",
"all",
"extracted",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L873-L880 | train |
sorgerlab/indra | indra/sources/medscan/api.py | process_directory_statements_sorted_by_pmid | def process_directory_statements_sorted_by_pmid(directory_name):
"""Processes a directory filled with CSXML files, first normalizing the
character encoding to utf-8, and then processing into INDRA statements
sorted by pmid.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
Returns
-------
pmid_dict : dict
A dictionary mapping pmids to a list of statements corresponding to
that pmid
"""
s_dict = defaultdict(list)
mp = process_directory(directory_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | python | def process_directory_statements_sorted_by_pmid(directory_name):
"""Processes a directory filled with CSXML files, first normalizing the
character encoding to utf-8, and then processing into INDRA statements
sorted by pmid.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
Returns
-------
pmid_dict : dict
A dictionary mapping pmids to a list of statements corresponding to
that pmid
"""
s_dict = defaultdict(list)
mp = process_directory(directory_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | [
"def",
"process_directory_statements_sorted_by_pmid",
"(",
"directory_name",
")",
":",
"s_dict",
"=",
"defaultdict",
"(",
"list",
")",
"mp",
"=",
"process_directory",
"(",
"directory_name",
",",
"lazy",
"=",
"True",
")",
"for",
"statement",
"in",
"mp",
".",
"iter_statements",
"(",
")",
":",
"s_dict",
"[",
"statement",
".",
"evidence",
"[",
"0",
"]",
".",
"pmid",
"]",
".",
"append",
"(",
"statement",
")",
"return",
"s_dict"
]
| Processes a directory filled with CSXML files, first normalizing the
character encoding to utf-8, and then processing into INDRA statements
sorted by pmid.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
Returns
-------
pmid_dict : dict
A dictionary mapping pmids to a list of statements corresponding to
that pmid | [
"Processes",
"a",
"directory",
"filled",
"with",
"CSXML",
"files",
"first",
"normalizing",
"the",
"character",
"encoding",
"to",
"utf",
"-",
"8",
"and",
"then",
"processing",
"into",
"INDRA",
"statements",
"sorted",
"by",
"pmid",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/api.py#L9-L30 | train |
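Usage sketch for process_directory_statements_sorted_by_pmid; the import path follows the record's path field (indra/sources/medscan/api.py), and the directory name is hypothetical.

from indra.sources.medscan.api import \
    process_directory_statements_sorted_by_pmid

stmts_by_pmid = process_directory_statements_sorted_by_pmid('csxml_dir')
for pmid, stmts in stmts_by_pmid.items():
    print(pmid, len(stmts))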
sorgerlab/indra | indra/sources/medscan/api.py | process_directory | def process_directory(directory_name, lazy=False):
"""Processes a directory filled with CSXML files, first normalizing the
character encodings to utf-8, and then processing into a list of INDRA
statements.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
lazy : bool
If True, the statements will not be generated immediately, but rather
a generator will be formulated, and statements can be retrieved by
using `iter_statements`. If False, the `statements` attribute will be
populated immediately. Default is False.
Returns
-------
mp : indra.sources.medscan.processor.MedscanProcessor
A MedscanProcessor populated with INDRA statements extracted from the
csxml files
"""
# Parent Medscan processor containing extractions from all files
mp = MedscanProcessor()
mp.process_directory(directory_name, lazy)
return mp | python | def process_directory(directory_name, lazy=False):
"""Processes a directory filled with CSXML files, first normalizing the
character encodings to utf-8, and then processing into a list of INDRA
statements.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
lazy : bool
If True, the statements will not be generated immediately, but rather
a generator will be formulated, and statements can be retrieved by
using `iter_statements`. If False, the `statements` attribute will be
populated immediately. Default is False.
Returns
-------
mp : indra.sources.medscan.processor.MedscanProcessor
A MedscanProcessor populated with INDRA statements extracted from the
csxml files
"""
# Parent Medscan processor containing extractions from all files
mp = MedscanProcessor()
mp.process_directory(directory_name, lazy)
return mp | [
"def",
"process_directory",
"(",
"directory_name",
",",
"lazy",
"=",
"False",
")",
":",
"# Parent Medscan processor containing extractions from all files",
"mp",
"=",
"MedscanProcessor",
"(",
")",
"mp",
".",
"process_directory",
"(",
"directory_name",
",",
"lazy",
")",
"return",
"mp"
]
| Processes a directory filled with CSXML files, first normalizing the
character encodings to utf-8, and then processing into a list of INDRA
statements.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
lazy : bool
If True, the statements will not be generated immediately, but rather
a generator will be formulated, and statements can be retrieved by
using `iter_statements`. If False, the `statements` attribute will be
populated immediately. Default is False.
Returns
-------
mp : indra.sources.medscan.processor.MedscanProcessor
A MedscanProcessor populated with INDRA statements extracted from the
csxml files | [
"Processes",
"a",
"directory",
"filled",
"with",
"CSXML",
"files",
"first",
"normalizing",
"the",
"character",
"encodings",
"to",
"utf",
"-",
"8",
"and",
"then",
"processing",
"into",
"a",
"list",
"of",
"INDRA",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/api.py#L33-L58 | train |
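A sketch contrasting lazy and eager use of process_directory, as the docstring describes: with lazy=True the returned processor yields statements through iter_statements() instead of populating .statements up front. The directory name is hypothetical.

from indra.sources.medscan.api import process_directory

mp = process_directory('csxml_dir', lazy=True)
for stmt in mp.iter_statements():   # statements are generated on demand
    print(stmt)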
sorgerlab/indra | indra/sources/medscan/api.py | process_file_sorted_by_pmid | def process_file_sorted_by_pmid(file_name):
"""Processes a file and returns a dictionary mapping pmids to a list of
statements corresponding to that pmid.
Parameters
----------
file_name : str
A csxml file to process
Returns
-------
s_dict : dict
Dictionary mapping pmids to a list of statements corresponding to
that pmid
"""
s_dict = defaultdict(list)
mp = process_file(file_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | python | def process_file_sorted_by_pmid(file_name):
"""Processes a file and returns a dictionary mapping pmids to a list of
statements corresponding to that pmid.
Parameters
----------
file_name : str
A csxml file to process
Returns
-------
s_dict : dict
Dictionary mapping pmids to a list of statements corresponding to
that pmid
"""
s_dict = defaultdict(list)
mp = process_file(file_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | [
"def",
"process_file_sorted_by_pmid",
"(",
"file_name",
")",
":",
"s_dict",
"=",
"defaultdict",
"(",
"list",
")",
"mp",
"=",
"process_file",
"(",
"file_name",
",",
"lazy",
"=",
"True",
")",
"for",
"statement",
"in",
"mp",
".",
"iter_statements",
"(",
")",
":",
"s_dict",
"[",
"statement",
".",
"evidence",
"[",
"0",
"]",
".",
"pmid",
"]",
".",
"append",
"(",
"statement",
")",
"return",
"s_dict"
]
| Processes a file and returns a dictionary mapping pmids to a list of
statements corresponding to that pmid.
Parameters
----------
file_name : str
A csxml file to process
Returns
-------
s_dict : dict
Dictionary mapping pmids to a list of statements corresponding to
that pmid | [
"Processes",
"a",
"file",
"and",
"returns",
"a",
"dictionary",
"mapping",
"pmids",
"to",
"a",
"list",
"of",
"statements",
"corresponding",
"to",
"that",
"pmid",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/api.py#L61-L81 | train |
sorgerlab/indra | indra/sources/medscan/api.py | process_file | def process_file(filename, interval=None, lazy=False):
"""Process a CSXML file for its relevant information.
Consider running the fix_csxml_character_encoding.py script in
indra/sources/medscan to fix any encoding issues in the input file before
processing.
    Parameters
----------
filename : str
The csxml file, containing Medscan XML, to process
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, the statements will not be generated immediately, but rather
a generator will be formulated, and statements can be retrieved by
using `iter_statements`. If False, the `statements` attribute will be
populated immediately. Default is False.
Returns
-------
mp : MedscanProcessor
A MedscanProcessor object containing extracted statements
"""
mp = MedscanProcessor()
mp.process_csxml_file(filename, interval, lazy)
return mp | python | def process_file(filename, interval=None, lazy=False):
"""Process a CSXML file for its relevant information.
Consider running the fix_csxml_character_encoding.py script in
indra/sources/medscan to fix any encoding issues in the input file before
processing.
    Parameters
----------
filename : str
The csxml file, containing Medscan XML, to process
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, the statements will not be generated immediately, but rather
a generator will be formulated, and statements can be retrieved by
using `iter_statements`. If False, the `statements` attribute will be
populated immediately. Default is False.
Returns
-------
mp : MedscanProcessor
A MedscanProcessor object containing extracted statements
"""
mp = MedscanProcessor()
mp.process_csxml_file(filename, interval, lazy)
return mp | [
"def",
"process_file",
"(",
"filename",
",",
"interval",
"=",
"None",
",",
"lazy",
"=",
"False",
")",
":",
"mp",
"=",
"MedscanProcessor",
"(",
")",
"mp",
".",
"process_csxml_file",
"(",
"filename",
",",
"interval",
",",
"lazy",
")",
"return",
"mp"
]
| Process a CSXML file for its relevant information.
Consider running the fix_csxml_character_encoding.py script in
indra/sources/medscan to fix any encoding issues in the input file before
processing.
Parameters
----------
filename : str
The csxml file, containing Medscan XML, to process
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, the statements will not be generated immediately, but rather
a generator will be formulated, and statements can be retrieved by
using `iter_statements`. If False, the `statements` attribute will be
populated immediately. Default is False.
Returns
-------
mp : MedscanProcessor
A MedscanProcessor object containing extracted statements | [
"Process",
"a",
"CSXML",
"file",
"for",
"its",
"relevant",
"information",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/api.py#L84-L114 | train |
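Usage sketch for process_file with the interval parameter: read only the 100th through 199th documents of a (hypothetical) csxml file, then inspect the eagerly populated statements list.

from indra.sources.medscan.api import process_file

mp = process_file('extractions.csxml', interval=(100, 200))
print(len(mp.statements))   # lazy defaults to False, so this is populated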
sorgerlab/indra | indra/explanation/reporting.py | stmts_from_path | def stmts_from_path(path, model, stmts):
"""Return source Statements corresponding to a path in a model.
Parameters
----------
path : list[tuple[str, int]]
A list of tuples where the first element of the tuple is the
name of a rule, and the second is the associated polarity along
a path.
model : pysb.core.Model
A PySB model which contains the rules along the path.
stmts : list[indra.statements.Statement]
A list of INDRA Statements from which the model was assembled.
Returns
-------
path_stmts : list[indra.statements.Statement]
The Statements from which the rules along the path were obtained.
"""
path_stmts = []
for path_rule, sign in path:
for rule in model.rules:
if rule.name == path_rule:
stmt = stmt_from_rule(path_rule, model, stmts)
assert stmt is not None
path_stmts.append(stmt)
return path_stmts | python | def stmts_from_path(path, model, stmts):
"""Return source Statements corresponding to a path in a model.
Parameters
----------
path : list[tuple[str, int]]
A list of tuples where the first element of the tuple is the
name of a rule, and the second is the associated polarity along
a path.
model : pysb.core.Model
A PySB model which contains the rules along the path.
stmts : list[indra.statements.Statement]
A list of INDRA Statements from which the model was assembled.
Returns
-------
path_stmts : list[indra.statements.Statement]
The Statements from which the rules along the path were obtained.
"""
path_stmts = []
for path_rule, sign in path:
for rule in model.rules:
if rule.name == path_rule:
stmt = stmt_from_rule(path_rule, model, stmts)
assert stmt is not None
path_stmts.append(stmt)
return path_stmts | [
"def",
"stmts_from_path",
"(",
"path",
",",
"model",
",",
"stmts",
")",
":",
"path_stmts",
"=",
"[",
"]",
"for",
"path_rule",
",",
"sign",
"in",
"path",
":",
"for",
"rule",
"in",
"model",
".",
"rules",
":",
"if",
"rule",
".",
"name",
"==",
"path_rule",
":",
"stmt",
"=",
"stmt_from_rule",
"(",
"path_rule",
",",
"model",
",",
"stmts",
")",
"assert",
"stmt",
"is",
"not",
"None",
"path_stmts",
".",
"append",
"(",
"stmt",
")",
"return",
"path_stmts"
]
| Return source Statements corresponding to a path in a model.
Parameters
----------
path : list[tuple[str, int]]
A list of tuples where the first element of the tuple is the
name of a rule, and the second is the associated polarity along
a path.
model : pysb.core.Model
A PySB model which contains the rules along the path.
stmts : list[indra.statements.Statement]
A list of INDRA Statements from which the model was assembled.
Returns
-------
path_stmts : list[indra.statements.Statement]
The Statements from which the rules along the path were obtained. | [
"Return",
"source",
"Statements",
"corresponding",
"to",
"a",
"path",
"in",
"a",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/reporting.py#L3-L29 | train |
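A standalone illustration of the lookup stmts_from_path performs: each rule name along the path is resolved back to the statement it was assembled from. A plain dict stands in for the stmt_from_rule call; all names below are invented.

rule_to_stmt = {
    'BRAF_phosphorylates_MAP2K1': 'Phosphorylation(BRAF(), MAP2K1())',
    'MAP2K1_phosphorylates_MAPK1': 'Phosphorylation(MAP2K1(), MAPK1())',
}
path = [('BRAF_phosphorylates_MAP2K1', 1),
        ('MAP2K1_phosphorylates_MAPK1', 1)]
path_stmts = [rule_to_stmt[rule] for rule, sign in path]
print(path_stmts)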
sorgerlab/indra | indra/sources/bel/processor.py | extract_context | def extract_context(annotations, annot_manager):
"""Return a BioContext object extracted from the annotations.
The entries that are extracted into the BioContext are popped from the
annotations.
Parameters
----------
annotations : dict
PyBEL annotations dict
annot_manager : AnnotationManager
        An annotation manager to get name/db reference mappings for each of the
annotation types.
Returns
-------
bc : BioContext
An INDRA BioContext object
"""
def get_annot(annotations, key):
"""Return a specific annotation given a key."""
val = annotations.pop(key, None)
if val:
val_list = [v for v, tf in val.items() if tf]
if len(val_list) > 1:
logger.warning('More than one "%s" in annotations' % key)
elif not val_list:
return None
return val_list[0]
return None
bc = BioContext()
species = get_annot(annotations, 'Species')
if species:
name = annot_manager.get_mapping('Species', species)
bc.species = RefContext(name=name, db_refs={'TAXONOMY': species})
mappings = (('CellLine', 'cell_line', None),
('Disease', 'disease', None),
('Anatomy', 'organ', None),
('Cell', 'cell_type', None),
('CellStructure', 'location', 'MESH'))
for bel_name, indra_name, ns in mappings:
ann = get_annot(annotations, bel_name)
if ann:
ref = annot_manager.get_mapping(bel_name, ann)
if ref is None:
continue
if not ns:
db_ns, db_id = ref.split('_', 1)
else:
db_ns, db_id = ns, ref
setattr(bc, indra_name,
RefContext(name=ann, db_refs={db_ns: db_id}))
# Overwrite blank BioContext
if not bc:
bc = None
return bc | python | def extract_context(annotations, annot_manager):
"""Return a BioContext object extracted from the annotations.
The entries that are extracted into the BioContext are popped from the
annotations.
Parameters
----------
annotations : dict
PyBEL annotations dict
annot_manager : AnnotationManager
        An annotation manager to get name/db reference mappings for each of the
annotation types.
Returns
-------
bc : BioContext
An INDRA BioContext object
"""
def get_annot(annotations, key):
"""Return a specific annotation given a key."""
val = annotations.pop(key, None)
if val:
val_list = [v for v, tf in val.items() if tf]
if len(val_list) > 1:
logger.warning('More than one "%s" in annotations' % key)
elif not val_list:
return None
return val_list[0]
return None
bc = BioContext()
species = get_annot(annotations, 'Species')
if species:
name = annot_manager.get_mapping('Species', species)
bc.species = RefContext(name=name, db_refs={'TAXONOMY': species})
mappings = (('CellLine', 'cell_line', None),
('Disease', 'disease', None),
('Anatomy', 'organ', None),
('Cell', 'cell_type', None),
('CellStructure', 'location', 'MESH'))
for bel_name, indra_name, ns in mappings:
ann = get_annot(annotations, bel_name)
if ann:
ref = annot_manager.get_mapping(bel_name, ann)
if ref is None:
continue
if not ns:
db_ns, db_id = ref.split('_', 1)
else:
db_ns, db_id = ns, ref
setattr(bc, indra_name,
RefContext(name=ann, db_refs={db_ns: db_id}))
# Overwrite blank BioContext
if not bc:
bc = None
return bc | [
"def",
"extract_context",
"(",
"annotations",
",",
"annot_manager",
")",
":",
"def",
"get_annot",
"(",
"annotations",
",",
"key",
")",
":",
"\"\"\"Return a specific annotation given a key.\"\"\"",
"val",
"=",
"annotations",
".",
"pop",
"(",
"key",
",",
"None",
")",
"if",
"val",
":",
"val_list",
"=",
"[",
"v",
"for",
"v",
",",
"tf",
"in",
"val",
".",
"items",
"(",
")",
"if",
"tf",
"]",
"if",
"len",
"(",
"val_list",
")",
">",
"1",
":",
"logger",
".",
"warning",
"(",
"'More than one \"%s\" in annotations'",
"%",
"key",
")",
"elif",
"not",
"val_list",
":",
"return",
"None",
"return",
"val_list",
"[",
"0",
"]",
"return",
"None",
"bc",
"=",
"BioContext",
"(",
")",
"species",
"=",
"get_annot",
"(",
"annotations",
",",
"'Species'",
")",
"if",
"species",
":",
"name",
"=",
"annot_manager",
".",
"get_mapping",
"(",
"'Species'",
",",
"species",
")",
"bc",
".",
"species",
"=",
"RefContext",
"(",
"name",
"=",
"name",
",",
"db_refs",
"=",
"{",
"'TAXONOMY'",
":",
"species",
"}",
")",
"mappings",
"=",
"(",
"(",
"'CellLine'",
",",
"'cell_line'",
",",
"None",
")",
",",
"(",
"'Disease'",
",",
"'disease'",
",",
"None",
")",
",",
"(",
"'Anatomy'",
",",
"'organ'",
",",
"None",
")",
",",
"(",
"'Cell'",
",",
"'cell_type'",
",",
"None",
")",
",",
"(",
"'CellStructure'",
",",
"'location'",
",",
"'MESH'",
")",
")",
"for",
"bel_name",
",",
"indra_name",
",",
"ns",
"in",
"mappings",
":",
"ann",
"=",
"get_annot",
"(",
"annotations",
",",
"bel_name",
")",
"if",
"ann",
":",
"ref",
"=",
"annot_manager",
".",
"get_mapping",
"(",
"bel_name",
",",
"ann",
")",
"if",
"ref",
"is",
"None",
":",
"continue",
"if",
"not",
"ns",
":",
"db_ns",
",",
"db_id",
"=",
"ref",
".",
"split",
"(",
"'_'",
",",
"1",
")",
"else",
":",
"db_ns",
",",
"db_id",
"=",
"ns",
",",
"ref",
"setattr",
"(",
"bc",
",",
"indra_name",
",",
"RefContext",
"(",
"name",
"=",
"ann",
",",
"db_refs",
"=",
"{",
"db_ns",
":",
"db_id",
"}",
")",
")",
"# Overwrite blank BioContext",
"if",
"not",
"bc",
":",
"bc",
"=",
"None",
"return",
"bc"
]
| Return a BioContext object extracted from the annotations.
The entries that are extracted into the BioContext are popped from the
annotations.
Parameters
----------
annotations : dict
PyBEL annotations dict
annot_manager : AnnotationManager
An annotation manager to get name/db reference mappings for each of the
annotation types.
Returns
-------
bc : BioContext
An INDRA BioContext object | [
"Return",
"a",
"BioContext",
"object",
"extracted",
"from",
"the",
"annotations",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/processor.py#L492-L549 | train |
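A standalone version of the inner get_annot helper in extract_context: PyBEL annotation values map each candidate to a boolean, only true entries count, and the key is popped so extraction removes it from the annotations as a side effect. The sample annotations are invented.

def get_annot(annotations, key):
    val = annotations.pop(key, None)
    if val:
        val_list = [v for v, tf in val.items() if tf]
        if len(val_list) > 1:
            print('More than one "%s" in annotations' % key)
        elif not val_list:
            return None
        return val_list[0]
    return None

annotations = {'Species': {'9606': True}, 'Disease': {'doid:1612': False}}
assert get_annot(annotations, 'Species') == '9606'
assert get_annot(annotations, 'Disease') is None
assert 'Species' not in annotations   # popped during extraction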
sorgerlab/indra | indra/util/plot_formatting.py | format_axis | def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
"""Set standardized axis formatting for figure."""
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position(yticks_position)
ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.labelpad = label_padding
ax.yaxis.labelpad = label_padding
ax.xaxis.label.set_size(fontsize)
ax.yaxis.label.set_size(fontsize) | python | def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
"""Set standardized axis formatting for figure."""
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position(yticks_position)
ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.labelpad = label_padding
ax.yaxis.labelpad = label_padding
ax.xaxis.label.set_size(fontsize)
ax.yaxis.label.set_size(fontsize) | [
"def",
"format_axis",
"(",
"ax",
",",
"label_padding",
"=",
"2",
",",
"tick_padding",
"=",
"0",
",",
"yticks_position",
"=",
"'left'",
")",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"yticks_position",
")",
"ax",
".",
"yaxis",
".",
"set_tick_params",
"(",
"which",
"=",
"'both'",
",",
"direction",
"=",
"'out'",
",",
"labelsize",
"=",
"fontsize",
",",
"pad",
"=",
"tick_padding",
",",
"length",
"=",
"2",
",",
"width",
"=",
"0.5",
")",
"ax",
".",
"xaxis",
".",
"set_tick_params",
"(",
"which",
"=",
"'both'",
",",
"direction",
"=",
"'out'",
",",
"labelsize",
"=",
"fontsize",
",",
"pad",
"=",
"tick_padding",
",",
"length",
"=",
"2",
",",
"width",
"=",
"0.5",
")",
"ax",
".",
"xaxis",
".",
"labelpad",
"=",
"label_padding",
"ax",
".",
"yaxis",
".",
"labelpad",
"=",
"label_padding",
"ax",
".",
"xaxis",
".",
"label",
".",
"set_size",
"(",
"fontsize",
")",
"ax",
".",
"yaxis",
".",
"label",
".",
"set_size",
"(",
"fontsize",
")"
]
| Set standardized axis formatting for figure. | [
"Set",
"standardized",
"axis",
"formatting",
"for",
"figure",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/plot_formatting.py#L17-L28 | train |
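A hedged sketch of typical format_axis use. The tick and label sizes come from a module-level fontsize constant in indra.util.plot_formatting, which the function reads implicitly; the figure contents below are placeholders.

import matplotlib
matplotlib.use('Agg')  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
from indra.util.plot_formatting import format_axis

fig, ax = plt.subplots(figsize=(2, 2))
ax.plot([0, 1, 2], [0, 1, 4])
ax.set_xlabel('x')
ax.set_ylabel('y')
format_axis(ax)  # default keeps y ticks on the left; pass
                 # yticks_position='right' e.g. for twinned axes
fig.savefig('formatted_example.pdf')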
sorgerlab/indra | indra/assemblers/html/assembler.py | HtmlAssembler.make_model | def make_model(self):
"""Return the assembled HTML content as a string.
Returns
-------
str
The assembled HTML as a string.
"""
stmts_formatted = []
stmt_rows = group_and_sort_statements(self.statements,
self.ev_totals if self.ev_totals else None)
for key, verb, stmts in stmt_rows:
# This will now be ordered by prevalence and entity pairs.
stmt_info_list = []
for stmt in stmts:
stmt_hash = stmt.get_hash(shallow=True)
ev_list = self._format_evidence_text(stmt)
english = self._format_stmt_text(stmt)
if self.ev_totals:
total_evidence = self.ev_totals.get(int(stmt_hash), '?')
if total_evidence == '?':
logger.warning('The hash %s was not found in the '
'evidence totals dict.' % stmt_hash)
evidence_count_str = '%s / %s' % (len(ev_list), total_evidence)
else:
evidence_count_str = str(len(ev_list))
stmt_info_list.append({
'hash': stmt_hash,
'english': english,
'evidence': ev_list,
'evidence_count': evidence_count_str})
short_name = make_string_from_sort_key(key, verb)
short_name_key = str(uuid.uuid4())
stmts_formatted.append((short_name, short_name_key, stmt_info_list))
metadata = {k.replace('_', ' ').title(): v
for k, v in self.metadata.items()}
if self.db_rest_url and not self.db_rest_url.endswith('statements'):
db_rest_url = self.db_rest_url + '/statements'
else:
db_rest_url = '.'
self.model = template.render(stmt_data=stmts_formatted,
metadata=metadata, title=self.title,
db_rest_url=db_rest_url)
return self.model | python | def make_model(self):
"""Return the assembled HTML content as a string.
Returns
-------
str
The assembled HTML as a string.
"""
stmts_formatted = []
stmt_rows = group_and_sort_statements(self.statements,
self.ev_totals if self.ev_totals else None)
for key, verb, stmts in stmt_rows:
# This will now be ordered by prevalence and entity pairs.
stmt_info_list = []
for stmt in stmts:
stmt_hash = stmt.get_hash(shallow=True)
ev_list = self._format_evidence_text(stmt)
english = self._format_stmt_text(stmt)
if self.ev_totals:
total_evidence = self.ev_totals.get(int(stmt_hash), '?')
if total_evidence == '?':
logger.warning('The hash %s was not found in the '
'evidence totals dict.' % stmt_hash)
evidence_count_str = '%s / %s' % (len(ev_list), total_evidence)
else:
evidence_count_str = str(len(ev_list))
stmt_info_list.append({
'hash': stmt_hash,
'english': english,
'evidence': ev_list,
'evidence_count': evidence_count_str})
short_name = make_string_from_sort_key(key, verb)
short_name_key = str(uuid.uuid4())
stmts_formatted.append((short_name, short_name_key, stmt_info_list))
metadata = {k.replace('_', ' ').title(): v
for k, v in self.metadata.items()}
if self.db_rest_url and not self.db_rest_url.endswith('statements'):
db_rest_url = self.db_rest_url + '/statements'
else:
db_rest_url = '.'
self.model = template.render(stmt_data=stmts_formatted,
metadata=metadata, title=self.title,
db_rest_url=db_rest_url)
return self.model | [
"def",
"make_model",
"(",
"self",
")",
":",
"stmts_formatted",
"=",
"[",
"]",
"stmt_rows",
"=",
"group_and_sort_statements",
"(",
"self",
".",
"statements",
",",
"self",
".",
"ev_totals",
"if",
"self",
".",
"ev_totals",
"else",
"None",
")",
"for",
"key",
",",
"verb",
",",
"stmts",
"in",
"stmt_rows",
":",
"# This will now be ordered by prevalence and entity pairs.",
"stmt_info_list",
"=",
"[",
"]",
"for",
"stmt",
"in",
"stmts",
":",
"stmt_hash",
"=",
"stmt",
".",
"get_hash",
"(",
"shallow",
"=",
"True",
")",
"ev_list",
"=",
"self",
".",
"_format_evidence_text",
"(",
"stmt",
")",
"english",
"=",
"self",
".",
"_format_stmt_text",
"(",
"stmt",
")",
"if",
"self",
".",
"ev_totals",
":",
"total_evidence",
"=",
"self",
".",
"ev_totals",
".",
"get",
"(",
"int",
"(",
"stmt_hash",
")",
",",
"'?'",
")",
"if",
"total_evidence",
"==",
"'?'",
":",
"logger",
".",
"warning",
"(",
"'The hash %s was not found in the '",
"'evidence totals dict.'",
"%",
"stmt_hash",
")",
"evidence_count_str",
"=",
"'%s / %s'",
"%",
"(",
"len",
"(",
"ev_list",
")",
",",
"total_evidence",
")",
"else",
":",
"evidence_count_str",
"=",
"str",
"(",
"len",
"(",
"ev_list",
")",
")",
"stmt_info_list",
".",
"append",
"(",
"{",
"'hash'",
":",
"stmt_hash",
",",
"'english'",
":",
"english",
",",
"'evidence'",
":",
"ev_list",
",",
"'evidence_count'",
":",
"evidence_count_str",
"}",
")",
"short_name",
"=",
"make_string_from_sort_key",
"(",
"key",
",",
"verb",
")",
"short_name_key",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"stmts_formatted",
".",
"append",
"(",
"(",
"short_name",
",",
"short_name_key",
",",
"stmt_info_list",
")",
")",
"metadata",
"=",
"{",
"k",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"title",
"(",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"metadata",
".",
"items",
"(",
")",
"}",
"if",
"self",
".",
"db_rest_url",
"and",
"not",
"self",
".",
"db_rest_url",
".",
"endswith",
"(",
"'statements'",
")",
":",
"db_rest_url",
"=",
"self",
".",
"db_rest_url",
"+",
"'/statements'",
"else",
":",
"db_rest_url",
"=",
"'.'",
"self",
".",
"model",
"=",
"template",
".",
"render",
"(",
"stmt_data",
"=",
"stmts_formatted",
",",
"metadata",
"=",
"metadata",
",",
"title",
"=",
"self",
".",
"title",
",",
"db_rest_url",
"=",
"db_rest_url",
")",
"return",
"self",
".",
"model"
]
| Return the assembled HTML content as a string.
Returns
-------
str
The assembled HTML as a string. | [
"Return",
"the",
"assembled",
"HTML",
"content",
"as",
"a",
"string",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/html/assembler.py#L99-L142 | train |
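A sketch of driving make_model; the statement and evidence below are invented, and constructor options such as the title, metadata and evidence totals are left at their defaults.

from indra.statements import Agent, Phosphorylation, Evidence
from indra.assemblers.html import HtmlAssembler

ev = Evidence(source_api='reach', pmid='12345',
              text='MEK1 phosphorylates ERK2.')
stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), evidence=[ev])
ha = HtmlAssembler([stmt])
html = ha.make_model()  # the rendered page; also cached on ha.model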
sorgerlab/indra | indra/assemblers/html/assembler.py | HtmlAssembler.append_warning | def append_warning(self, msg):
"""Append a warning message to the model to expose issues."""
assert self.model is not None, "You must already have run make_model!"
addendum = ('\t<span style="color:red;">(CAUTION: %s occurred when '
'creating this page.)</span>' % msg)
self.model = self.model.replace(self.title, self.title + addendum)
return self.model | python | def append_warning(self, msg):
"""Append a warning message to the model to expose issues."""
assert self.model is not None, "You must already have run make_model!"
addendum = ('\t<span style="color:red;">(CAUTION: %s occurred when '
'creating this page.)</span>' % msg)
self.model = self.model.replace(self.title, self.title + addendum)
return self.model | [
"def",
"append_warning",
"(",
"self",
",",
"msg",
")",
":",
"assert",
"self",
".",
"model",
"is",
"not",
"None",
",",
"\"You must already have run make_model!\"",
"addendum",
"=",
"(",
"'\\t<span style=\"color:red;\">(CAUTION: %s occurred when '",
"'creating this page.)</span>'",
"%",
"msg",
")",
"self",
".",
"model",
"=",
"self",
".",
"model",
".",
"replace",
"(",
"self",
".",
"title",
",",
"self",
".",
"title",
"+",
"addendum",
")",
"return",
"self",
".",
"model"
]
| Append a warning message to the model to expose issues. | [
"Append",
"a",
"warning",
"message",
"to",
"the",
"model",
"to",
"expose",
"issues",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/html/assembler.py#L144-L150 | train |
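Since append_warning asserts that the model has already been rendered, it has to follow make_model; a minimal sketch with an empty statement list:

from indra.assemblers.html import HtmlAssembler

ha = HtmlAssembler([])
ha.make_model()
ha.append_warning('a database timeout')
# The title line now carries a red '(CAUTION: a database timeout
# occurred when creating this page.)' note.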
sorgerlab/indra | indra/assemblers/html/assembler.py | HtmlAssembler.save_model | def save_model(self, fname):
"""Save the assembled HTML into a file.
Parameters
----------
fname : str
The path to the file to save the HTML into.
"""
if self.model is None:
self.make_model()
with open(fname, 'wb') as fh:
fh.write(self.model.encode('utf-8')) | python | def save_model(self, fname):
"""Save the assembled HTML into a file.
Parameters
----------
fname : str
The path to the file to save the HTML into.
"""
if self.model is None:
self.make_model()
with open(fname, 'wb') as fh:
fh.write(self.model.encode('utf-8')) | [
"def",
"save_model",
"(",
"self",
",",
"fname",
")",
":",
"if",
"self",
".",
"model",
"is",
"None",
":",
"self",
".",
"make_model",
"(",
")",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"self",
".",
"model",
".",
"encode",
"(",
"'utf-8'",
")",
")"
]
| Save the assembled HTML into a file.
Parameters
----------
fname : str
The path to the file to save the HTML into. | [
"Save",
"the",
"assembled",
"HTML",
"into",
"a",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/html/assembler.py#L152-L164 | train |
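save_model builds the model lazily, so an explicit make_model call is optional; a sketch with an invented statement:

from indra.statements import Agent, Activation
from indra.assemblers.html import HtmlAssembler

ha = HtmlAssembler([Activation(Agent('BRAF'), Agent('MAP2K1'))])
ha.save_model('braf_statements.html')  # renders the HTML first if needed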
sorgerlab/indra | indra/assemblers/html/assembler.py | HtmlAssembler._format_evidence_text | def _format_evidence_text(stmt):
"""Returns evidence metadata with highlighted evidence text.
Parameters
----------
stmt : indra.Statement
The Statement with Evidence to be formatted.
Returns
-------
list of dicts
List of dictionaries corresponding to each Evidence object in the
Statement's evidence list. Each dictionary has keys 'source_api',
'pmid' and 'text', drawn from the corresponding fields in the
Evidence objects. The text entry of the dict includes
`<span>` tags identifying the agents referenced by the Statement.
"""
def get_role(ag_ix):
if isinstance(stmt, Complex) or \
isinstance(stmt, SelfModification) or \
isinstance(stmt, ActiveForm) or isinstance(stmt, Conversion) or\
isinstance(stmt, Translocation):
return 'other'
else:
assert len(stmt.agent_list()) == 2, (len(stmt.agent_list()),
type(stmt))
return 'subject' if ag_ix == 0 else 'object'
ev_list = []
for ix, ev in enumerate(stmt.evidence):
# Expand the source api to include the sub-database
if ev.source_api == 'biopax' and \
'source_sub_id' in ev.annotations and \
ev.annotations['source_sub_id']:
source_api = '%s:%s' % (ev.source_api,
ev.annotations['source_sub_id'])
else:
source_api = ev.source_api
# Prepare the evidence text
if ev.text is None:
format_text = None
else:
indices = []
for ix, ag in enumerate(stmt.agent_list()):
if ag is None:
continue
# If the statement has been preassembled, it will have
# this entry in annotations
try:
ag_text = ev.annotations['agents']['raw_text'][ix]
if ag_text is None:
raise KeyError
# Otherwise we try to get the agent text from db_refs
except KeyError:
ag_text = ag.db_refs.get('TEXT')
if ag_text is None:
continue
role = get_role(ix)
# Get the tag with the correct badge
tag_start = '<span class="badge badge-%s">' % role
tag_close = '</span>'
# Build up a set of indices
indices += [(m.start(), m.start() + len(ag_text),
ag_text, tag_start, tag_close)
for m in re.finditer(re.escape(ag_text),
ev.text)]
format_text = tag_text(ev.text, indices)
ev_list.append({'source_api': source_api,
'pmid': ev.pmid,
'text_refs': ev.text_refs,
'text': format_text,
'source_hash': ev.source_hash })
return ev_list | python | def _format_evidence_text(stmt):
"""Returns evidence metadata with highlighted evidence text.
Parameters
----------
stmt : indra.Statement
The Statement with Evidence to be formatted.
Returns
-------
list of dicts
List of dictionaries corresponding to each Evidence object in the
Statement's evidence list. Each dictionary has keys 'source_api',
'pmid' and 'text', drawn from the corresponding fields in the
Evidence objects. The text entry of the dict includes
`<span>` tags identifying the agents referenced by the Statement.
"""
def get_role(ag_ix):
if isinstance(stmt, Complex) or \
isinstance(stmt, SelfModification) or \
isinstance(stmt, ActiveForm) or isinstance(stmt, Conversion) or\
isinstance(stmt, Translocation):
return 'other'
else:
assert len(stmt.agent_list()) == 2, (len(stmt.agent_list()),
type(stmt))
return 'subject' if ag_ix == 0 else 'object'
ev_list = []
for ix, ev in enumerate(stmt.evidence):
# Expand the source api to include the sub-database
if ev.source_api == 'biopax' and \
'source_sub_id' in ev.annotations and \
ev.annotations['source_sub_id']:
source_api = '%s:%s' % (ev.source_api,
ev.annotations['source_sub_id'])
else:
source_api = ev.source_api
# Prepare the evidence text
if ev.text is None:
format_text = None
else:
indices = []
for ix, ag in enumerate(stmt.agent_list()):
if ag is None:
continue
# If the statement has been preassembled, it will have
# this entry in annotations
try:
ag_text = ev.annotations['agents']['raw_text'][ix]
if ag_text is None:
raise KeyError
# Otherwise we try to get the agent text from db_refs
except KeyError:
ag_text = ag.db_refs.get('TEXT')
if ag_text is None:
continue
role = get_role(ix)
# Get the tag with the correct badge
tag_start = '<span class="badge badge-%s">' % role
tag_close = '</span>'
# Build up a set of indices
indices += [(m.start(), m.start() + len(ag_text),
ag_text, tag_start, tag_close)
for m in re.finditer(re.escape(ag_text),
ev.text)]
format_text = tag_text(ev.text, indices)
ev_list.append({'source_api': source_api,
'pmid': ev.pmid,
'text_refs': ev.text_refs,
'text': format_text,
'source_hash': ev.source_hash })
return ev_list | [
"def",
"_format_evidence_text",
"(",
"stmt",
")",
":",
"def",
"get_role",
"(",
"ag_ix",
")",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"Complex",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"SelfModification",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"ActiveForm",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"Conversion",
")",
"or",
"isinstance",
"(",
"stmt",
",",
"Translocation",
")",
":",
"return",
"'other'",
"else",
":",
"assert",
"len",
"(",
"stmt",
".",
"agent_list",
"(",
")",
")",
"==",
"2",
",",
"(",
"len",
"(",
"stmt",
".",
"agent_list",
"(",
")",
")",
",",
"type",
"(",
"stmt",
")",
")",
"return",
"'subject'",
"if",
"ag_ix",
"==",
"0",
"else",
"'object'",
"ev_list",
"=",
"[",
"]",
"for",
"ix",
",",
"ev",
"in",
"enumerate",
"(",
"stmt",
".",
"evidence",
")",
":",
"# Expand the source api to include the sub-database",
"if",
"ev",
".",
"source_api",
"==",
"'biopax'",
"and",
"'source_sub_id'",
"in",
"ev",
".",
"annotations",
"and",
"ev",
".",
"annotations",
"[",
"'source_sub_id'",
"]",
":",
"source_api",
"=",
"'%s:%s'",
"%",
"(",
"ev",
".",
"source_api",
",",
"ev",
".",
"annotations",
"[",
"'source_sub_id'",
"]",
")",
"else",
":",
"source_api",
"=",
"ev",
".",
"source_api",
"# Prepare the evidence text",
"if",
"ev",
".",
"text",
"is",
"None",
":",
"format_text",
"=",
"None",
"else",
":",
"indices",
"=",
"[",
"]",
"for",
"ix",
",",
"ag",
"in",
"enumerate",
"(",
"stmt",
".",
"agent_list",
"(",
")",
")",
":",
"if",
"ag",
"is",
"None",
":",
"continue",
"# If the statement has been preassembled, it will have",
"# this entry in annotations",
"try",
":",
"ag_text",
"=",
"ev",
".",
"annotations",
"[",
"'agents'",
"]",
"[",
"'raw_text'",
"]",
"[",
"ix",
"]",
"if",
"ag_text",
"is",
"None",
":",
"raise",
"KeyError",
"# Otherwise we try to get the agent text from db_refs",
"except",
"KeyError",
":",
"ag_text",
"=",
"ag",
".",
"db_refs",
".",
"get",
"(",
"'TEXT'",
")",
"if",
"ag_text",
"is",
"None",
":",
"continue",
"role",
"=",
"get_role",
"(",
"ix",
")",
"# Get the tag with the correct badge",
"tag_start",
"=",
"'<span class=\"badge badge-%s\">'",
"%",
"role",
"tag_close",
"=",
"'</span>'",
"# Build up a set of indices",
"indices",
"+=",
"[",
"(",
"m",
".",
"start",
"(",
")",
",",
"m",
".",
"start",
"(",
")",
"+",
"len",
"(",
"ag_text",
")",
",",
"ag_text",
",",
"tag_start",
",",
"tag_close",
")",
"for",
"m",
"in",
"re",
".",
"finditer",
"(",
"re",
".",
"escape",
"(",
"ag_text",
")",
",",
"ev",
".",
"text",
")",
"]",
"format_text",
"=",
"tag_text",
"(",
"ev",
".",
"text",
",",
"indices",
")",
"ev_list",
".",
"append",
"(",
"{",
"'source_api'",
":",
"source_api",
",",
"'pmid'",
":",
"ev",
".",
"pmid",
",",
"'text_refs'",
":",
"ev",
".",
"text_refs",
",",
"'text'",
":",
"format_text",
",",
"'source_hash'",
":",
"ev",
".",
"source_hash",
"}",
")",
"return",
"ev_list"
]
| Returns evidence metadata with highlighted evidence text.
Parameters
----------
stmt : indra.Statement
The Statement with Evidence to be formatted.
Returns
-------
list of dicts
List of dictionaries corresponding to each Evidence object in the
Statement's evidence list. Each dictionary has keys 'source_api',
'pmid' and 'text', drawn from the corresponding fields in the
Evidence objects. The text entry of the dict includes
`<span>` tags identifying the agents referenced by the Statement. | [
"Returns",
"evidence",
"metadata",
"with",
"highlighted",
"evidence",
"text",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/html/assembler.py#L167-L241 | train |
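A sketch of what _format_evidence_text returns, assuming the staticmethod can be called directly on the class; the statement, evidence text and raw_text annotations (which mirror what preassembly records) are all invented:

from indra.statements import Agent, Phosphorylation, Evidence
from indra.assemblers.html.assembler import HtmlAssembler

ev = Evidence(source_api='reach', pmid='12345',
              text='MEK1 phosphorylates ERK2.',
              annotations={'agents': {'raw_text': ['MEK1', 'ERK2']}})
stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), evidence=[ev])
ev_list = HtmlAssembler._format_evidence_text(stmt)
# Each dict carries source_api, pmid, text_refs, text and source_hash;
# here 'MEK1' gets a subject badge span and 'ERK2' an object one.
print(ev_list[0]['text'])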
sorgerlab/indra | indra/sources/reach/api.py | process_pmc | def process_pmc(pmc_id, offline=False, output_fname=default_output_fname):
"""Return a ReachProcessor by processing a paper with a given PMC id.
Uses the PMC client to obtain the full text. If it's not available,
None is returned.
Parameters
----------
pmc_id : str
The ID of a PubMed Central article. The string may start with PMC but
passing just the ID also works.
Examples: 3717945, PMC3717945
https://www.ncbi.nlm.nih.gov/pmc/
offline : Optional[bool]
If set to True, the REACH system is ran offline. Otherwise (by default)
the web service is called. Default: False
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
"""
xml_str = pmc_client.get_xml(pmc_id)
if xml_str is None:
return None
fname = pmc_id + '.nxml'
with open(fname, 'wb') as fh:
fh.write(xml_str.encode('utf-8'))
ids = id_lookup(pmc_id, 'pmcid')
pmid = ids.get('pmid')
rp = process_nxml_file(fname, citation=pmid, offline=offline,
output_fname=output_fname)
return rp | python | def process_pmc(pmc_id, offline=False, output_fname=default_output_fname):
"""Return a ReachProcessor by processing a paper with a given PMC id.
Uses the PMC client to obtain the full text. If it's not available,
None is returned.
Parameters
----------
pmc_id : str
The ID of a PubMed Central article. The string may start with PMC but
passing just the ID also works.
Examples: 3717945, PMC3717945
https://www.ncbi.nlm.nih.gov/pmc/
offline : Optional[bool]
If set to True, the REACH system is ran offline. Otherwise (by default)
the web service is called. Default: False
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
"""
xml_str = pmc_client.get_xml(pmc_id)
if xml_str is None:
return None
fname = pmc_id + '.nxml'
with open(fname, 'wb') as fh:
fh.write(xml_str.encode('utf-8'))
ids = id_lookup(pmc_id, 'pmcid')
pmid = ids.get('pmid')
rp = process_nxml_file(fname, citation=pmid, offline=offline,
output_fname=output_fname)
return rp | [
"def",
"process_pmc",
"(",
"pmc_id",
",",
"offline",
"=",
"False",
",",
"output_fname",
"=",
"default_output_fname",
")",
":",
"xml_str",
"=",
"pmc_client",
".",
"get_xml",
"(",
"pmc_id",
")",
"if",
"xml_str",
"is",
"None",
":",
"return",
"None",
"fname",
"=",
"pmc_id",
"+",
"'.nxml'",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"xml_str",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"ids",
"=",
"id_lookup",
"(",
"pmc_id",
",",
"'pmcid'",
")",
"pmid",
"=",
"ids",
".",
"get",
"(",
"'pmid'",
")",
"rp",
"=",
"process_nxml_file",
"(",
"fname",
",",
"citation",
"=",
"pmid",
",",
"offline",
"=",
"offline",
",",
"output_fname",
"=",
"output_fname",
")",
"return",
"rp"
]
| Return a ReachProcessor by processing a paper with a given PMC id.
Uses the PMC client to obtain the full text. If it's not available,
None is returned.
Parameters
----------
pmc_id : str
The ID of a PubMed Central article. The string may start with PMC but
passing just the ID also works.
Examples: 3717945, PMC3717945
https://www.ncbi.nlm.nih.gov/pmc/
offline : Optional[bool]
If set to True, the REACH system is ran offline. Otherwise (by default)
the web service is called. Default: False
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements. | [
"Return",
"a",
"ReachProcessor",
"by",
"processing",
"a",
"paper",
"with",
"a",
"given",
"PMC",
"id",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L41-L74 | train |
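A short sketch of process_pmc; it needs network access to both PMC and the REACH web service, and the ID is the one cited in the docstring:

from indra.sources import reach

rp = reach.process_pmc('PMC3717945')  # None if full text is unavailable
if rp is not None:
    print('%d statements extracted' % len(rp.statements))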