Please provide a description of the function:def stmts_to_json(stmts_in, use_sbo=False):
if not isinstance(stmts_in, list):
json_dict = stmts_in.to_json(use_sbo=use_sbo)
return json_dict
else:
json_dict = [st.to_json(use_sbo=use_sbo) for st in stmts_in]
return json_dict | [
"Return the JSON-serialized form of one or more INDRA Statements.\n\n Parameters\n ----------\n stmts_in : Statement or list[Statement]\n A Statement or list of Statement objects to serialize into JSON.\n use_sbo : Optional[bool]\n If True, SBO annotations are added to each applicable element of the\n JSON. Default: False\n\n Returns\n -------\n json_dict : dict\n JSON-serialized INDRA Statements.\n "
]
|
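Example usage, as a minimal sketch assuming an INDRA installation where this function and the core Statement classes are importable from indra.statements:

from indra.statements import Agent, Phosphorylation, stmts_to_json

# Build a toy Statement and serialize it both ways
mek = Agent('MAP2K1', db_refs={'HGNC': '6840'})
erk = Agent('MAPK1', db_refs={'HGNC': '6871'})
stmt = Phosphorylation(mek, erk)
json_dict = stmts_to_json(stmt)    # a single JSON dict
json_list = stmts_to_json([stmt])  # a list with one JSON dict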
Please provide a description of the function:def _promote_support(sup_list, uuid_dict, on_missing='handle'):
valid_handling_choices = ['handle', 'error', 'ignore']
if on_missing not in valid_handling_choices:
raise InputError('Invalid option for `on_missing_support`: \'%s\'\n'
'Choices are: %s.'
% (on_missing, str(valid_handling_choices)))
resolved = []
for uuid in sup_list:
    if uuid in uuid_dict:
        resolved.append(uuid_dict[uuid])
    elif on_missing == 'handle':
        resolved.append(Unresolved(uuid))
    elif on_missing == 'error':
        raise UnresolvedUuidError("Uuid %s not found in stmt jsons."
                                  % uuid)
    # on_missing == 'ignore': drop the missing uuid
# Rebuild the list in place; calling remove() while iterating over the
# list would skip elements
sup_list[:] = resolved
return | [
"Promote the list of support-related uuids to Statements, if possible."
]
|
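To illustrate the three on_missing modes, a hedged sketch where uuid_dict maps each Statement's uuid to the Statement itself (stmts is assumed given):

uuid_dict = {stmt.uuid: stmt for stmt in stmts}
sup = [stmts[0].uuid, 'missing-uuid']
_promote_support(sup, uuid_dict, on_missing='handle')
# sup now holds [<Statement>, Unresolved('missing-uuid')]; with
# 'ignore' the unknown uuid is dropped from the list, and with
# 'error' an UnresolvedUuidError is raised.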
Please provide a description of the function:def draw_stmt_graph(stmts):
import networkx
try:
import matplotlib.pyplot as plt
except Exception:
logger.error('Could not import matplotlib, not drawing graph.')
return
try:  # networkx needs pygraphviz for the graphviz layout used below
import pygraphviz
except Exception:
logger.error('Could not import pygraphviz, not drawing graph.')
return
import numpy
g = networkx.compose_all([stmt.to_graph() for stmt in stmts])
plt.figure()
plt.ion()
g.graph['graph'] = {'rankdir': 'LR'}
pos = networkx.drawing.nx_agraph.graphviz_layout(g, prog='dot')
g = g.to_undirected()
# Draw nodes
options = {
'marker': 'o',
's': 200,
'c': [0.85, 0.85, 1],
'facecolor': '0.5',
'lw': 0,
}
ax = plt.gca()
nodelist = list(g)
xy = numpy.asarray([pos[v] for v in nodelist])
node_collection = ax.scatter(xy[:, 0], xy[:, 1], **options)
node_collection.set_zorder(2)
# Draw edges
networkx.draw_networkx_edges(g, pos, arrows=False, edge_color='0.5')
# Draw labels
edge_labels = {(e[0], e[1]): e[2].get('label') for e in g.edges(data=True)}
networkx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels)
node_labels = {n[0]: n[1].get('label') for n in g.nodes(data=True)}
for key, label in node_labels.items():
if len(label) > 25:
parts = label.split(' ')
parts.insert(int(len(parts)/2), '\n')
label = ' '.join(parts)
node_labels[key] = label
networkx.draw_networkx_labels(g, pos, labels=node_labels)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show() | [
"Render the attributes of a list of Statements as directed graphs.\n\n The layout works well for a single Statement or a few Statements at a time.\n This function displays the plot of the graph using plt.show().\n\n Parameters\n ----------\n stmts : list[indra.statements.Statement]\n A list of one or more INDRA Statements whose attribute graph should\n be drawn.\n "
]
|
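Usage sketch; requires matplotlib and pygraphviz, and assumes Statement.to_graph() is available as in standard INDRA releases:

from indra.statements import Agent, Phosphorylation

stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))
draw_stmt_graph([stmt])  # opens an interactive matplotlib figure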
Please provide a description of the function:def _fix_json_agents(ag_obj):
if isinstance(ag_obj, str):
logger.info("Fixing string agent: %s." % ag_obj)
ret = {'name': ag_obj, 'db_refs': {'TEXT': ag_obj}}
elif isinstance(ag_obj, list):
# Recursive for complexes and similar.
ret = [_fix_json_agents(ag) for ag in ag_obj]
elif isinstance(ag_obj, dict) and 'TEXT' in ag_obj.keys():
    ret = deepcopy(ag_obj)
    text = ret.pop('TEXT')
    # Tolerate malformed agents that lack a db_refs dict
    ret.setdefault('db_refs', {})['TEXT'] = text
else:
ret = ag_obj
return ret | [
"Fix the json representation of an agent."
]
|
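The normalization, sketched on hypothetical inputs:

_fix_json_agents('BRAF')
# -> {'name': 'BRAF', 'db_refs': {'TEXT': 'BRAF'}}
_fix_json_agents({'name': 'BRAF', 'TEXT': 'BRAF', 'db_refs': {}})
# -> {'name': 'BRAF', 'db_refs': {'TEXT': 'BRAF'}}
_fix_json_agents(['BRAF', 'RAF1'])  # recurses over complex members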
Please provide a description of the function:def set_statements_pmid(self, pmid):
# Replace PMID value in JSON dict first
for stmt in self.json_stmts:
evs = stmt.get('evidence', [])
for ev in evs:
ev['pmid'] = pmid
# Replace PMID value in extracted Statements next
for stmt in self.statements:
for ev in stmt.evidence:
ev.pmid = pmid | [
"Set the evidence PMID of Statements that have been extracted.\n\n Parameters\n ----------\n pmid : str or None\n The PMID to be used in the Evidence objects of the Statements\n that were extracted by the processor.\n "
]
|
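Usage sketch, assuming `processor` is an instance exposing json_stmts and statements as above:

processor.set_statements_pmid('12345678')
assert all(ev.pmid == '12345678'
           for stmt in processor.statements
           for ev in stmt.evidence)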
Please provide a description of the function:def get_args(node):
arg_roles = {}
args = node.findall('arg') + \
[node.find('arg1'), node.find('arg2'), node.find('arg3')]
for arg in args:
if arg is not None:
id = arg.attrib.get('id')
if id is not None:
arg_roles[arg.attrib['role']] = (arg.attrib['id'], arg)
# Now look at possible inevent links
if node.find('features') is not None:
inevents = node.findall('features/inevent')
for inevent in inevents:
if 'id' in inevent.attrib:
arg_roles['inevent'] = (inevent.attrib['id'], inevent)
ptms = node.findall('features/ptm') + node.findall('features/no-ptm')
for ptm in ptms:
    if 'id' in ptm.attrib:
        arg_roles['ptm'] = (ptm.attrib['id'], ptm)
# And also look for assoc-with links
aw = node.find('assoc-with')
if aw is not None:
aw_id = aw.attrib['id']
arg_roles['assoc-with'] = (aw_id, aw)
return arg_roles | [
"Return the arguments of a node in the event graph."
]
|
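A toy call on a synthetic EKB fragment (the role names here are illustrative; TRIPS uses its own role vocabulary):

import xml.etree.ElementTree as ET

node = ET.fromstring(
    '<EVENT id="E1">'
    '<arg1 id="T1" role=":AGENT"/>'
    '<arg2 id="T2" role=":AFFECTED"/>'
    '</EVENT>')
args = get_args(node)
# {':AGENT': ('T1', <Element 'arg1'>), ':AFFECTED': ('T2', <Element 'arg2'>)}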
Please provide a description of the function:def type_match(a, b):
# If the types are the same, return True
if a['type'] == b['type']:
return True
# Otherwise, look at some special cases
eq_groups = [
{'ONT::GENE-PROTEIN', 'ONT::GENE', 'ONT::PROTEIN'},
{'ONT::PHARMACOLOGIC-SUBSTANCE', 'ONT::CHEMICAL'}
]
for eq_group in eq_groups:
if a['type'] in eq_group and b['type'] in eq_group:
return True
return False | [
"Return True of the types of a and b are compatible, False otherwise."
]
|
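For example:

a = {'type': 'ONT::GENE'}
b = {'type': 'ONT::PROTEIN'}
c = {'type': 'ONT::CHEMICAL'}
assert type_match(a, b)      # same equivalence group
assert not type_match(a, c)  # no group contains both types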
Please provide a description of the function:def add_graph(patterns, G):
if not patterns:
patterns.append([G])
return
for i, graphs in enumerate(patterns):
if networkx.is_isomorphic(graphs[0], G, node_match=type_match,
edge_match=type_match):
patterns[i].append(G)
return
patterns.append([G]) | [
"Add a graph to a set of unique patterns."
]
|
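A sketch of the bucketing behavior, reusing type_match from above:

import networkx

g1 = networkx.DiGraph()
g1.add_node('n1', type='ONT::GENE')
g2 = networkx.DiGraph()
g2.add_node('n2', type='ONT::PROTEIN')  # type-equivalent to g1's node
patterns = []
add_graph(patterns, g1)
add_graph(patterns, g2)  # isomorphic under type_match: same bucket
assert len(patterns) == 1 and len(patterns[0]) == 2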
Please provide a description of the function:def draw(graph, fname):
ag = networkx.nx_agraph.to_agraph(graph)
ag.draw(fname, prog='dot') | [
"Draw a graph and save it into a file"
]
|
Please provide a description of the function:def build_patterns(fnames):
patterns = []
for fn in fnames:
et = ET.parse(fn)
res = et.findall('CC') + et.findall('EVENT')
for event in res:
G = networkx.DiGraph()
build_event_graph(G, et, event)
add_graph(patterns, G)
patterns = sorted(patterns, key=lambda x: len(x[0]), reverse=True)
return patterns | [
"Return a list of CC/EVENT graph patterns from a list of EKB files"
]
|
Please provide a description of the function:def build_event_graph(graph, tree, node):
# If we have already added this node then let's return
if node_key(node) in graph:
return
type = get_type(node)
text = get_text(node)
label = '%s (%s)' % (type, text)
graph.add_node(node_key(node), type=type, label=label, text=text)
args = get_args(node)
for arg_role, (arg_id, arg_tag) in args.items():
arg = get_node_by_id(tree, arg_id)
if arg is None:
arg = arg_tag
build_event_graph(graph, tree, arg)
graph.add_edge(node_key(node), node_key(arg), type=arg_role,
label=arg_role) | [
"Return a DiGraph of a specific event structure, built recursively"
]
|
Please provide a description of the function:def get_extracted_events(fnames):
event_list = []
for fn in fnames:
tp = trips.process_xml_file(fn)
ed = tp.extracted_events
for k, v in ed.items():
event_list += v
return event_list | [
"Get a full list of all extracted event IDs from a list of EKB files"
]
|
Please provide a description of the function:def check_event_coverage(patterns, event_list):
proportions = []
for pattern_list in patterns:
proportion = 0
for pattern in pattern_list:
for node in pattern.nodes():
if node in event_list:
proportion += 1.0 / len(pattern_list)
break
proportions.append(proportion)
return proportions | [
"Calculate the ratio of patterns that were extracted."
]
|
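An end-to-end sketch tying the helpers above together (the file glob is hypothetical):

import glob

fnames = glob.glob('ekb_output/*.ekb')
patterns = build_patterns(fnames)
events = get_extracted_events(fnames)
# One value per pattern group: the fraction of its graphs containing
# at least one extracted event node
coverage = check_event_coverage(patterns, events)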
Please provide a description of the function:def _load_wm_map(exclude_auto=None):
exclude_auto = [] if not exclude_auto else exclude_auto
path_here = os.path.dirname(os.path.abspath(__file__))
ontomap_file = os.path.join(path_here, '../resources/wm_ontomap.tsv')
mappings = {}
def make_hume_prefix_map():
hume_ont = os.path.join(path_here, '../sources/hume/hume_ontology.rdf')
graph = rdflib.Graph()
graph.parse(os.path.abspath(hume_ont), format='nt')
entry_map = {}
for node in graph.all_nodes():
entry = node.split('#')[1]
# Handle "event" and other top-level entries
if '/' not in entry:
entry_map[entry] = None
continue
parts = entry.split('/')
prefix, real_entry = parts[0], '/'.join(parts[1:])
entry_map[real_entry] = prefix
return entry_map
hume_prefix_map = make_hume_prefix_map()
def add_hume_prefix(hume_entry):
prefix = hume_prefix_map[hume_entry]
return '%s/%s' % (prefix, hume_entry)
def map_entry(reader, entry):
if reader == 'eidos':
namespace = 'UN'
entry = entry.replace(' ', '_')
entry_id = entry
elif reader == 'BBN':
namespace = 'HUME'
entry = entry.replace(' ', '_')
entry_id = add_hume_prefix(entry)
elif reader == 'sofia':
namespace = 'SOFIA'
# First chop off the Event/Entity prefix
parts = entry.split('/')[1:]
# Now we split each part by underscore and capitalize
# each piece of each part
parts = ['_'.join([p.capitalize() for p in part.split('_')])
for part in parts]
# Finally we stick the entry back together separated by slashes
entry_id = '/'.join(parts)
else:
return reader, entry
return namespace, entry_id
with open(ontomap_file, 'r') as fh:
for line in fh.readlines():
# Get each entry from the line
s, se, t, te, score = line.strip().split('\t')
score = float(score)
# Map the entries to our internal naming standards
s, se = map_entry(s, se)
t, te = map_entry(t, te)
# Skip automated mappings when they should be excluded
if (s, t) not in exclude_auto:
# We first do the forward mapping
if (s, se, t) in mappings:
if mappings[(s, se, t)][1] < score:
mappings[(s, se, t)] = ((t, te), score)
else:
mappings[(s, se, t)] = ((t, te), score)
# Then we add the reverse mapping
if (t, s) not in exclude_auto:
if (t, te, s) in mappings:
if mappings[(t, te, s)][1] < score:
mappings[(t, te, s)] = ((s, se), score)
else:
mappings[(t, te, s)] = ((s, se), score)
ontomap = []
for s, ts in mappings.items():
ontomap.append(((s[0], s[1]), ts[0], ts[1]))
# Now apply the Hume -> Eidos override
override_file = os.path.join(path_here, '../resources/wm_ontomap.bbn.tsv')
override_mappings = []
with open(override_file, 'r') as fh:
for row in fh.readlines():
if 'BBN' not in row:
continue
# Order is target first, source second
_, te, _, se = row.strip().split('\t')
# Map the entries to our internal naming standards
s = 'HUME'
t = 'UN'
se = se.replace(' ', '_')
te = te.replace(' ', '_')
if se.startswith('/'):
se = se[1:]
override_mappings.append((s, se, t, te))
for s, se, t, te in override_mappings:
found = False
for idx, ((so, seo), (eo, teo), score) in enumerate(ontomap):
if (s, se, t) == (so, seo, eo):
# Override when a match is found
ontomap[idx] = ((s, se), (t, te), 1.0)
found = True
if not found:
ontomap.append(((s, se), (t, te), 1.0))
return ontomap | [
"Load an ontology map for world models.\n\n exclude_auto : None or list[tuple]\n A list of ontology mappings for which automated mappings should be\n excluded, e.g. [(HUME, UN)] would result in not using mappings\n from HUME to UN.\n ",
"We need to do this because the HUME prefixes are missing",
"Remap the readers and entries to match our internal standards."
]
|
Please provide a description of the function:def map_statements(self):
for stmt in self.statements:
for agent in stmt.agent_list():
if agent is None:
continue
all_mappings = []
for db_name, db_id in agent.db_refs.items():
if isinstance(db_id, list):
db_id = db_id[0][0]
mappings = self._map_id(db_name, db_id)
all_mappings += mappings
for map_db_name, map_db_id, score, orig_db_name in all_mappings:
if map_db_name in agent.db_refs:
continue
if self.scored:
# If the original one is a scored grounding,
# we take that score and multiply it with the mapping
# score. Otherwise we assume the original score is 1.
try:
orig_score = agent.db_refs[orig_db_name][0][1]
except Exception:
orig_score = 1.0
agent.db_refs[map_db_name] = \
[(map_db_id, score * orig_score)]
else:
if map_db_name in ('UN', 'HUME'):
agent.db_refs[map_db_name] = [(map_db_id, 1.0)]
else:
agent.db_refs[map_db_name] = map_db_id | [
"Run the ontology mapping on the statements."
]
|
Please provide a description of the function:def load_grounding_map(grounding_map_path, ignore_path=None,
lineterminator='\r\n'):
g_map = {}
map_rows = read_unicode_csv(grounding_map_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
                            lineterminator=lineterminator)
if ignore_path and os.path.exists(ignore_path):
ignore_rows = read_unicode_csv(ignore_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
lineterminator=lineterminator)
else:
ignore_rows = []
csv_rows = chain(map_rows, ignore_rows)
for row in csv_rows:
key = row[0]
db_refs = {'TEXT': key}
keys = [entry for entry in row[1::2] if entry != '']
values = [entry for entry in row[2::2] if entry != '']
if len(keys) != len(values):
logger.info('ERROR: Mismatched keys and values in row %s' %
str(row))
continue
else:
db_refs.update(dict(zip(keys, values)))
if len(db_refs.keys()) > 1:
g_map[key] = db_refs
else:
g_map[key] = None
return g_map | [
"Return a grounding map dictionary loaded from a csv file.\n\n In the file pointed to by grounding_map_path, the number of name_space ID\n pairs can vary per row and commas are\n used to pad out entries containing fewer than the maximum amount of\n name spaces appearing in the file. Lines should be terminated with \\r\\n\n both a carriage return and a new line by default.\n\n Optionally, one can specify another csv file (pointed to by ignore_path)\n containing agent texts that are degenerate and should be filtered out.\n\n Parameters\n ----------\n grounding_map_path : str\n Path to csv file containing grounding map information. Rows of the file\n should be of the form <agent_text>,<name_space_1>,<ID_1>,...\n <name_space_n>,<ID_n>\n ignore_path : Optional[str]\n Path to csv file containing terms that should be filtered out during\n the grounding mapping process. The file Should be of the form\n <agent_text>,,..., where the number of commas that\n appear is the same as in the csv file at grounding_map_path.\n Default: None\n lineterminator : Optional[str]\n Line terminator used in input csv file. Default: \\r\\n\n\n Returns\n -------\n g_map : dict\n The grounding map constructed from the given files.\n "
]
|
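A worked sketch of the expected csv format (file contents are hypothetical):

# grounding_map.csv, padded with commas to a uniform width:
#   ERK,FPLX,ERK,,
#   XYZ,,,,
g_map = load_grounding_map('grounding_map.csv')
# -> {'ERK': {'TEXT': 'ERK', 'FPLX': 'ERK'}, 'XYZ': None}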
Please provide a description of the function:def all_agents(stmts):
agents = []
for stmt in stmts:
for agent in stmt.agent_list():
# Agents don't always have a TEXT db_refs entry (for instance
# in the case of Statements from databases) so we check for this.
if agent is not None and agent.db_refs.get('TEXT') is not None:
agents.append(agent)
return agents | [
"Return a list of all of the agents from a list of statements.\n\n Only agents that are not None and have a TEXT entry are returned.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n\n Returns\n -------\n agents : list of :py:class:`indra.statements.Agent`\n List of agents that appear in the input list of indra statements.\n "
]
|
Please provide a description of the function:def get_sentences_for_agent(text, stmts, max_sentences=None):
sentences = []
for stmt in stmts:
for agent in stmt.agent_list():
if agent is not None and agent.db_refs.get('TEXT') == text:
sentences.append((stmt.evidence[0].pmid,
stmt.evidence[0].text))
if max_sentences is not None and \
len(sentences) >= max_sentences:
return sentences
return sentences | [
"Returns evidence sentences with a given agent text from a list of statements\n\n Parameters\n ----------\n text : str\n An agent text\n\n stmts : list of :py:class:`indra.statements.Statement`\n INDRA Statements to search in for evidence statements.\n\n max_sentences : Optional[int/None]\n Cap on the number of evidence sentences to return. Default: None\n\n Returns\n -------\n sentences : list of str\n Evidence sentences from the list of statements containing\n the given agent text.\n "
]
|
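Usage sketch, assuming stmts is a list of INDRA Statements:

for pmid, sentence in get_sentences_for_agent('ERK', stmts,
                                              max_sentences=10):
    print(pmid, sentence)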
Please provide a description of the function:def agent_texts_with_grounding(stmts):
allag = all_agents(stmts)
# Convert PFAM-DEF lists into tuples so that they are hashable and can
# be tabulated with a Counter
for ag in allag:
pfam_def = ag.db_refs.get('PFAM-DEF')
if pfam_def is not None:
ag.db_refs['PFAM-DEF'] = tuple(pfam_def)
refs = [tuple(ag.db_refs.items()) for ag in allag]
refs_counter = Counter(refs)
refs_counter_dict = [(dict(entry[0]), entry[1])
for entry in refs_counter.items()]
# First, sort by text so that we can do a groupby
refs_counter_dict.sort(key=lambda x: x[0].get('TEXT'))
# Then group by text
grouped_by_text = []
for k, g in groupby(refs_counter_dict, key=lambda x: x[0].get('TEXT')):
# Total occurrences of this agent text
total = 0
entry = [k]
db_ref_list = []
for db_refs, count in g:
# Check if TEXT is our only key, indicating no grounding
if list(db_refs.keys()) == ['TEXT']:
db_ref_list.append((None, None, count))
# Add any other db_refs (not TEXT)
for db, db_id in db_refs.items():
if db == 'TEXT':
continue
else:
db_ref_list.append((db, db_id, count))
total += count
# Sort the db_ref_list by the occurrences of each grounding
entry.append(tuple(sorted(db_ref_list, key=lambda x: x[2],
reverse=True)))
# Now add the total frequency to the entry
entry.append(total)
# And add the entry to the overall list
grouped_by_text.append(tuple(entry))
# Sort the list by the total number of occurrences of each unique key
grouped_by_text.sort(key=lambda x: x[2], reverse=True)
return grouped_by_text | [
"Return agent text groundings in a list of statements with their counts\n\n Parameters\n ----------\n stmts: list of :py:class:`indra.statements.Statement`\n\n Returns\n -------\n list of tuple\n List of tuples of the form\n (text: str, ((name_space: str, ID: str, count: int)...),\n total_count: int)\n\n Where the counts within the tuple of groundings give the number of\n times an agent with the given agent_text appears grounded with the\n particular name space and ID. The total_count gives the total number\n of times an agent with text appears in the list of statements.\n "
]
|
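The shape of the return value, sketched on a hypothetical statement list:

grouped = agent_texts_with_grounding(stmts)
# Each entry: (text, ((db, id, count), ...), total_count), e.g.
# ('ERK', (('FPLX', 'ERK', 5), (None, None, 2)), 7)
text, groundings, total = grouped[0]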
Please provide a description of the function:def ungrounded_texts(stmts):
ungrounded = [ag.db_refs['TEXT']
for s in stmts
for ag in s.agent_list()
if ag is not None and list(ag.db_refs.keys()) == ['TEXT']]
ungroundc = Counter(ungrounded)
ungroundc = ungroundc.items()
ungroundc = sorted(ungroundc, key=lambda x: x[1], reverse=True)
return ungroundc | [
"Return a list of all ungrounded entities ordered by number of mentions\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n\n Returns\n -------\n ungroundc : list of tuple\n list of tuples of the form (text: str, count: int) sorted in descending\n order by count.\n "
]
|
Please provide a description of the function:def get_agents_with_name(name, stmts):
return [ag for stmt in stmts for ag in stmt.agent_list()
if ag is not None and ag.name == name] | [
"Return all agents within a list of statements with a particular name."
]
|
Please provide a description of the function:def save_base_map(filename, grouped_by_text):
rows = []
for group in grouped_by_text:
text_string = group[0]
for db, db_id, count in group[1]:
if db == 'UP':
name = uniprot_client.get_mnemonic(db_id)
else:
name = ''
row = [text_string, db, db_id, count, name]
rows.append(row)
write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | [
"Dump a list of agents along with groundings and counts into a csv file\n\n Parameters\n ----------\n filename : str\n Filepath for output file\n grouped_by_text : list of tuple\n List of tuples of the form output by agent_texts_with_grounding\n "
]
|
Please provide a description of the function:def protein_map_from_twg(twg):
protein_map = {}
unmatched = 0
matched = 0
logger.info('Building grounding map for human proteins')
for agent_text, grounding_list, _ in twg:
# If 'UP' (Uniprot) not one of the grounding entries for this text,
# then we skip it.
if 'UP' not in [entry[0] for entry in grounding_list]:
continue
# Otherwise, collect all the Uniprot IDs for this protein.
uniprot_ids = [entry[1] for entry in grounding_list
if entry[0] == 'UP']
# For each Uniprot ID, look up the species
for uniprot_id in uniprot_ids:
# If it's not a human protein, skip it
mnemonic = uniprot_client.get_mnemonic(uniprot_id)
if mnemonic is None or not mnemonic.endswith('_HUMAN'):
continue
# Otherwise, look up the gene name in HGNC and match against the
# agent text
gene_name = uniprot_client.get_gene_name(uniprot_id)
if gene_name is None:
unmatched += 1
continue
if agent_text.upper() == gene_name.upper():
matched += 1
protein_map[agent_text] = {'TEXT': agent_text,
'UP': uniprot_id}
else:
unmatched += 1
logger.info('Exact matches for %d proteins' % matched)
logger.info('No match (or no gene name) for %d proteins' % unmatched)
return protein_map | [
"Build map of entity texts to validate protein grounding.\n\n Looks at the grounding of the entity texts extracted from the statements\n and finds proteins where there is grounding to a human protein that maps to\n an HGNC name that is an exact match to the entity text. Returns a dict that\n can be used to update/expand the grounding map.\n\n Parameters\n ----------\n twg : list of tuple\n list of tuples of the form output by agent_texts_with_grounding\n\n Returns\n -------\n protein_map : dict\n dict keyed on agent text with associated values\n {'TEXT': agent_text, 'UP': uniprot_id}. Entries are for agent texts\n where the grounding map was able to find human protein grounded to\n this agent_text in Uniprot.\n "
]
|
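Typical use, chained onto agent_texts_with_grounding (stmts assumed given):

twg = agent_texts_with_grounding(stmts)
protein_map = protein_map_from_twg(twg)
# e.g. {'MAPK1': {'TEXT': 'MAPK1', 'UP': 'P28482'}}, for agent texts
# that exactly match the gene name of a human UniProt entry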
Please provide a description of the function:def save_sentences(twg, stmts, filename, agent_limit=300):
sentences = []
unmapped_texts = [t[0] for t in twg]
counter = 0
logger.info('Getting sentences for top %d unmapped agent texts.' %
agent_limit)
for text in unmapped_texts:
agent_sentences = get_sentences_for_agent(text, stmts)
sentences += map(lambda tup: (text,) + tup, agent_sentences)
counter += 1
if counter >= agent_limit:
break
# Write sentences to CSV file
write_unicode_csv(filename, sentences, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | [
"Write evidence sentences for stmts with ungrounded agents to csv file.\n\n Parameters\n ----------\n twg: list of tuple\n list of tuples of ungrounded agent_texts with counts of the\n number of times they are mentioned in the list of statements.\n Should be sorted in descending order by the counts.\n This is of the form output by the function ungrounded texts.\n\n stmts: list of :py:class:`indra.statements.Statement`\n\n filename : str\n Path to output file\n\n agent_limit : Optional[int]\n Number of agents to include in output file. Takes the top agents\n by count.\n "
]
|
Please provide a description of the function:def _get_text_for_grounding(stmt, agent_text):
text = None
# First we will try to get content from the DB
try:
from indra_db.util.content_scripts \
import get_text_content_from_text_refs
from indra.literature.deft_tools import universal_extract_text
refs = stmt.evidence[0].text_refs
# Prioritize the pmid attribute if given
if stmt.evidence[0].pmid:
refs['PMID'] = stmt.evidence[0].pmid
logger.info('Obtaining text for disambiguation with refs: %s' %
refs)
content = get_text_content_from_text_refs(refs)
text = universal_extract_text(content, contains=agent_text)
if text:
return text
except Exception as e:
logger.info('Could not get text for disambiguation from DB.')
# If that doesn't work, we try PubMed next
if text is None:
from indra.literature import pubmed_client
pmid = stmt.evidence[0].pmid
if pmid:
        logger.info('Obtaining abstract for disambiguation for '
                    'PMID %s' % pmid)
text = pubmed_client.get_abstract(pmid)
if text:
return text
# Finally, fall back on the evidence sentence
if not text:
    logger.info('Falling back on sentence-based disambiguation')
    text = stmt.evidence[0].text
return text
"Get text context for Deft disambiguation\n\n If the INDRA database is available, attempts to get the fulltext from\n which the statement was extracted. If the fulltext is not available, the\n abstract is returned. If the indra database is not available, uses the\n pubmed client to get the abstract. If no abstract can be found, falls back\n on returning the evidence text for the statement.\n\n Parameters\n ----------\n stmt : py:class:`indra.statements.Statement`\n Statement with agent we seek to disambiguate.\n\n agent_text : str\n Agent text that needs to be disambiguated\n\n Returns\n -------\n text : str\n Text for Feft disambiguation\n "
]
|
Please provide a description of the function:def update_agent_db_refs(self, agent, agent_text, do_rename=True):
map_db_refs = deepcopy(self.gm.get(agent_text))
self.standardize_agent_db_refs(agent, map_db_refs, do_rename) | [
"Update db_refs of agent using the grounding map\n\n If the grounding map is missing one of the HGNC symbol or Uniprot ID,\n attempts to reconstruct one from the other.\n\n Parameters\n ----------\n agent : :py:class:`indra.statements.Agent`\n The agent whose db_refs will be updated\n agent_text : str\n The agent_text to find a grounding for in the grounding map\n dictionary. Typically this will be agent.db_refs['TEXT'] but\n there may be situations where a different value should be used.\n do_rename: Optional[bool]\n If True, the Agent name is updated based on the mapped grounding.\n If do_rename is True the priority for setting the name is\n FamPlex ID, HGNC symbol, then the gene name\n from Uniprot. Default: True\n\n Raises\n ------\n ValueError\n If the the grounding map contains and HGNC symbol for\n agent_text but no HGNC ID can be found for it.\n ValueError\n If the grounding map contains both an HGNC symbol and a\n Uniprot ID, but the HGNC symbol and the gene name associated with\n the gene in Uniprot do not match or if there is no associated gene\n name in Uniprot.\n "
]
|
Please provide a description of the function:def map_agents_for_stmt(self, stmt, do_rename=True):
mapped_stmt = deepcopy(stmt)
# Iterate over the agents
# Update agents directly participating in the statement
agent_list = mapped_stmt.agent_list()
for idx, agent in enumerate(agent_list):
if agent is None:
continue
agent_txt = agent.db_refs.get('TEXT')
if agent_txt is None:
continue
new_agent, maps_to_none = self.map_agent(agent, do_rename)
# Check if a deft model exists for agent text
if self.use_deft and agent_txt in deft_disambiguators:
try:
run_deft_disambiguation(mapped_stmt, agent_list, idx,
new_agent, agent_txt)
except Exception as e:
logger.error('There was an error during Deft'
' disambiguation.')
logger.error(e)
if maps_to_none:
# Skip the entire statement if the agent maps to None in the
# grounding map
return None
# If the old agent had bound conditions, but the new agent does
# not, copy the bound conditions over
if new_agent is not None and len(new_agent.bound_conditions) == 0:
new_agent.bound_conditions = agent.bound_conditions
agent_list[idx] = new_agent
mapped_stmt.set_agent_list(agent_list)
# Update agents in the bound conditions
for agent in agent_list:
if agent is not None:
for bc in agent.bound_conditions:
bc.agent, maps_to_none = self.map_agent(bc.agent,
do_rename)
if maps_to_none:
# Skip the entire statement if the agent maps to None
# in the grounding map
return None
return mapped_stmt | [
"Return a new Statement whose agents have been grounding mapped.\n\n Parameters\n ----------\n stmt : :py:class:`indra.statements.Statement`\n The Statement whose agents need mapping.\n do_rename: Optional[bool]\n If True, the Agent name is updated based on the mapped grounding.\n If do_rename is True the priority for setting the name is\n FamPlex ID, HGNC symbol, then the gene name\n from Uniprot. Default: True\n\n Returns\n -------\n mapped_stmt : :py:class:`indra.statements.Statement`\n The mapped Statement.\n "
]
|
Please provide a description of the function:def map_agent(self, agent, do_rename):
agent_text = agent.db_refs.get('TEXT')
mapped_to_agent_json = self.agent_map.get(agent_text)
if mapped_to_agent_json:
mapped_to_agent = \
Agent._from_json(mapped_to_agent_json['agent'])
return mapped_to_agent, False
# Look this string up in the grounding map
# If not in the map, leave agent alone and continue
if agent_text in self.gm.keys():
map_db_refs = self.gm[agent_text]
else:
return agent, False
# If it's in the map but it maps to None, then filter out
# this statement by skipping it
if map_db_refs is None:
# Increase counter if this statement has not already
# been skipped via another agent
logger.debug("Skipping %s" % agent_text)
return None, True
# If it has a value that's not None, map it and add it
else:
# Otherwise, update the agent's db_refs field
self.update_agent_db_refs(agent, agent_text, do_rename)
return agent, False | [
"Return the given Agent with its grounding mapped.\n\n This function grounds a single agent. It returns the new Agent object\n (which might be a different object if we load a new agent state\n from json) or the same object otherwise.\n\n Parameters\n ----------\n agent : :py:class:`indra.statements.Agent`\n The Agent to map.\n do_rename: bool\n If True, the Agent name is updated based on the mapped grounding.\n If do_rename is True the priority for setting the name is\n FamPlex ID, HGNC symbol, then the gene name\n from Uniprot.\n\n Returns\n -------\n grounded_agent : :py:class:`indra.statements.Agent`\n The grounded Agent.\n maps_to_none : bool\n True if the Agent is in the grounding map and maps to None.\n "
]
|
Please provide a description of the function:def map_agents(self, stmts, do_rename=True):
# Make a copy of the stmts
mapped_stmts = []
num_skipped = 0
# Iterate over the statements
for stmt in stmts:
mapped_stmt = self.map_agents_for_stmt(stmt, do_rename)
# Check if we should skip the statement
if mapped_stmt is not None:
mapped_stmts.append(mapped_stmt)
else:
num_skipped += 1
logger.info('%s statements filtered out' % num_skipped)
return mapped_stmts | [
"Return a new list of statements whose agents have been mapped\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n The statements whose agents need mapping\n do_rename: Optional[bool]\n If True, the Agent name is updated based on the mapped grounding.\n If do_rename is True the priority for setting the name is\n FamPlex ID, HGNC symbol, then the gene name\n from Uniprot. Default: True\n\n Returns\n -------\n mapped_stmts : list of :py:class:`indra.statements.Statement`\n A list of statements given by mapping the agents from each\n statement in the input list\n "
]
|
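A usage sketch, assuming these names live in indra.preassembler.grounding_mapper as in older INDRA releases:

from indra.preassembler.grounding_mapper import \
    GroundingMapper, load_grounding_map

gm = load_grounding_map('grounding_map.csv')
mapper = GroundingMapper(gm)
mapped_stmts = mapper.map_agents(stmts)  # stmts assumed given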
Please provide a description of the function:def rename_agents(self, stmts):
# Make a copy of the stmts
mapped_stmts = deepcopy(stmts)
# Iterate over the statements
for _, stmt in enumerate(mapped_stmts):
# Iterate over the agents
for agent in stmt.agent_list():
if agent is None:
continue
# If there's a FamPlex ID, prefer that for the name
if agent.db_refs.get('FPLX'):
agent.name = agent.db_refs.get('FPLX')
# Take a HGNC name from Uniprot next
elif agent.db_refs.get('UP'):
# Try for the gene name
gene_name = uniprot_client.get_gene_name(
agent.db_refs.get('UP'),
web_fallback=False)
if gene_name:
agent.name = gene_name
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
agent.db_refs['HGNC'] = hgnc_id
# Take the text string
#if agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
# If this fails, then we continue with no change
# Fall back to the text string
#elif agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
return mapped_stmts | [
"Return a list of mapped statements with updated agent names.\n\n Creates a new list of statements without modifying the original list.\n\n The agents in a statement should be renamed if the grounding map has\n updated their db_refs. If an agent contains a FamPlex grounding, the\n FamPlex ID is used as a name. Otherwise if it contains a Uniprot ID,\n an attempt is made to find the associated HGNC gene name. If one can\n be found it is used as the agent name and the associated HGNC ID is\n added as an entry to the db_refs. If neither a FamPlex ID or HGNC name\n can be found, falls back to the original name.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n List of statements whose Agents need their names updated.\n\n Returns\n -------\n mapped_stmts : list of :py:class:`indra.statements.Statement`\n A new list of Statements with updated Agent names\n "
]
|
Please provide a description of the function:def get_complexes(self, cplx_df):
# Group the agents for the complex
logger.info('Processing complexes...')
for cplx_id, this_cplx in cplx_df.groupby('CPLX_ID'):
agents = []
for hprd_id in this_cplx.HPRD_ID:
ag = self._make_agent(hprd_id)
if ag is not None:
agents.append(ag)
# Make sure we got some agents!
if not agents:
continue
# Get evidence info from first member of complex
row0 = this_cplx.iloc[0]
isoform_id = '%s_1' % row0.HPRD_ID
ev_list = self._get_evidence(row0.HPRD_ID, isoform_id, row0.PMIDS,
row0.EVIDENCE, 'interactions')
stmt = Complex(agents, evidence=ev_list)
self.statements.append(stmt) | [
"Generate Complex Statements from the HPRD protein complexes data.\n\n Parameters\n ----------\n cplx_df : pandas.DataFrame\n DataFrame loaded from the PROTEIN_COMPLEXES.txt file.\n "
]
|
Please provide a description of the function:def get_ptms(self, ptm_df):
logger.info('Processing PTMs...')
# Iterate over the rows of the dataframe
for ix, row in ptm_df.iterrows():
# Check the modification type; if we can't make an INDRA statement
# for it, then skip it
ptm_class = _ptm_map[row['MOD_TYPE']]
if ptm_class is None:
continue
# Use the Refseq protein ID for the substrate to make sure that
# we get the right Uniprot ID for the isoform
sub_ag = self._make_agent(row['HPRD_ID'],
refseq_id=row['REFSEQ_PROTEIN'])
# If we couldn't get the substrate, skip the statement
if sub_ag is None:
continue
enz_id = _nan_to_none(row['ENZ_HPRD_ID'])
enz_ag = self._make_agent(enz_id)
res = _nan_to_none(row['RESIDUE'])
pos = _nan_to_none(row['POSITION'])
if pos is not None and ';' in pos:
pos, dash = pos.split(';')
assert dash == '-'
# As a fallback for later site mapping, we also get the protein
# sequence information in case there was a problem with the
# RefSeq->Uniprot mapping
assert res
assert pos
motif_dict = self._get_seq_motif(row['REFSEQ_PROTEIN'], res, pos)
# Get evidence
ev_list = self._get_evidence(
row['HPRD_ID'], row['HPRD_ISOFORM'], row['PMIDS'],
row['EVIDENCE'], 'ptms', motif_dict)
stmt = ptm_class(enz_ag, sub_ag, res, pos, evidence=ev_list)
self.statements.append(stmt) | [
"Generate Modification statements from the HPRD PTM data.\n\n Parameters\n ----------\n ptm_df : pandas.DataFrame\n DataFrame loaded from the POST_TRANSLATIONAL_MODIFICATIONS.txt file.\n "
]
|
Please provide a description of the function:def get_ppis(self, ppi_df):
logger.info('Processing PPIs...')
for ix, row in ppi_df.iterrows():
agA = self._make_agent(row['HPRD_ID_A'])
agB = self._make_agent(row['HPRD_ID_B'])
# If don't get valid agents for both, skip this PPI
if agA is None or agB is None:
continue
isoform_id = '%s_1' % row['HPRD_ID_A']
ev_list = self._get_evidence(
row['HPRD_ID_A'], isoform_id, row['PMIDS'],
row['EVIDENCE'], 'interactions')
stmt = Complex([agA, agB], evidence=ev_list)
self.statements.append(stmt) | [
"Generate Complex Statements from the HPRD PPI data.\n\n Parameters\n ----------\n ppi_df : pandas.DataFrame\n DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt\n file.\n "
]
|
Please provide a description of the function:def _build_verb_statement_mapping():
path_this = os.path.dirname(os.path.abspath(__file__))
map_path = os.path.join(path_this, 'isi_verb_to_indra_statement_type.tsv')
with open(map_path, 'r') as f:
first_line = True
verb_to_statement_type = {}
for line in f:
if not first_line:
line = line[:-1]
tokens = line.split('\t')
if len(tokens) == 2 and len(tokens[1]) > 0:
verb = tokens[0]
s_type = tokens[1]
try:
statement_class = getattr(ist, s_type)
verb_to_statement_type[verb] = statement_class
except Exception:
pass
else:
first_line = False
return verb_to_statement_type | [
"Build the mapping between ISI verb strings and INDRA statement classes.\n\n Looks up the INDRA statement class name, if any, in a resource file,\n and resolves this class name to a class.\n\n Returns\n -------\n verb_to_statement_type : dict\n Dictionary mapping verb name to an INDRA statment class\n "
]
|
Please provide a description of the function:def get_statements(self):
for k, v in self.reader_output.items():
for interaction in v['interactions']:
self._process_interaction(k, interaction, v['text'], self.pmid,
self.extra_annotations) | [
"Process reader output to produce INDRA Statements."
]
|
Please provide a description of the function:def _process_interaction(self, source_id, interaction, text, pmid,
extra_annotations):
verb = interaction[0].lower()
subj = interaction[-2]
obj = interaction[-1]
# Make ungrounded agent objects for the subject and object
# Grounding will happen after all statements are extracted in __init__
subj = self._make_agent(subj)
obj = self._make_agent(obj)
# Make an evidence object
annotations = deepcopy(extra_annotations)
if 'interaction' in extra_annotations:
logger.warning("'interaction' key of extra_annotations ignored" +
" since this is reserved for storing the raw ISI " +
"input.")
annotations['source_id'] = source_id
annotations['interaction'] = interaction
ev = ist.Evidence(source_api='isi',
pmid=pmid,
text=text.rstrip(),
annotations=annotations)
# For binding interactions, a catalyst might also be specified. We
# don't use this for now, but extract it in case we want to in the
# future
catalyst_specified = False
if len(interaction) == 4:
    catalyst = interaction[1]
    if catalyst is not None:
        catalyst_specified = True
self.verbs.add(verb)
statement = None
if verb in verb_to_statement_type:
statement_class = verb_to_statement_type[verb]
if statement_class == ist.Complex:
statement = ist.Complex([subj, obj], evidence=ev)
else:
statement = statement_class(subj, obj, evidence=ev)
if statement is not None:
# For Complex statements, the ISI reader produces two events:
# binds(A, B) and binds(B, A)
# We want only one Complex statement for each sentence, so check
# to see if we already have a Complex for this source_id with the
# same members
already_have = False
if type(statement) == ist.Complex:
for old_s in self.statements:
old_id = statement.evidence[0].source_id
new_id = old_s.evidence[0].source_id
if type(old_s) == ist.Complex and old_id == new_id:
old_statement_members = \
[m.db_refs['TEXT'] for m in old_s.members]
old_statement_members = sorted(old_statement_members)
new_statement_members = [m.db_refs['TEXT']
for m in statement.members]
new_statement_members = sorted(new_statement_members)
if old_statement_members == new_statement_members:
already_have = True
break
if not already_have:
self.statements.append(statement) | [
"Process an interaction JSON tuple from the ISI output, and adds up\n to one statement to the list of extracted statements.\n\n Parameters\n ----------\n source_id : str\n the JSON key corresponding to the sentence in the ISI output\n interaction: the JSON list with subject/verb/object information\n about the event in the ISI output\n text : str\n the text of the sentence\n pmid : str\n the PMID of the article from which the information was extracted\n extra_annotations : dict\n Additional annotations to add to the statement's evidence,\n potentially containing metadata about the source. Annotations\n with the key \"interaction\" will be overridden by the JSON\n interaction tuple from the ISI output\n "
]
|
Please provide a description of the function:def make_annotation(self):
annotation = dict()
# Put all properties of the action object into the annotation
for item in dir(self):
if len(item) > 0 and item[0] != '_' and \
not inspect.ismethod(getattr(self, item)):
annotation[item] = getattr(self, item)
return annotation | [
"Returns a dictionary with all properties of the action mention."
]
|
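A self-contained toy replicating the same reflection logic:

import inspect

class ToyAction(object):
    def __init__(self):
        self.verb = 'phosphorylates'
        self.score = 0.9
        self._hidden = True  # skipped: leading underscore

    def make_annotation(self):
        # Collect every public, non-method attribute
        return {item: getattr(self, item) for item in dir(self)
                if not item.startswith('_')
                and not inspect.ismethod(getattr(self, item))}

print(ToyAction().make_annotation())
# {'score': 0.9, 'verb': 'phosphorylates'}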
Please provide a description of the function:def _match_to_array(m):
return [_cast_biopax_element(m.get(i)) for i in range(m.varSize())] | [
" Returns an array consisting of the elements obtained from a pattern\n search cast into their appropriate classes. "
]
|
Please provide a description of the function:def _is_complex(pe):
val = isinstance(pe, _bp('Complex')) or \
isinstance(pe, _bpimpl('Complex'))
return val | [
"Return True if the physical entity is a complex"
]
|
Please provide a description of the function:def _is_protein(pe):
val = isinstance(pe, _bp('Protein')) or \
isinstance(pe, _bpimpl('Protein')) or \
isinstance(pe, _bp('ProteinReference')) or \
isinstance(pe, _bpimpl('ProteinReference'))
return val | [
"Return True if the element is a protein"
]
|
Please provide a description of the function:def _is_rna(pe):
val = isinstance(pe, _bp('Rna')) or isinstance(pe, _bpimpl('Rna'))
return val | [
"Return True if the element is an RNA"
]
|
Please provide a description of the function:def _is_small_molecule(pe):
val = isinstance(pe, _bp('SmallMolecule')) or \
isinstance(pe, _bpimpl('SmallMolecule')) or \
isinstance(pe, _bp('SmallMoleculeReference')) or \
isinstance(pe, _bpimpl('SmallMoleculeReference'))
return val | [
"Return True if the element is a small molecule"
]
|
Please provide a description of the function:def _is_physical_entity(pe):
val = isinstance(pe, _bp('PhysicalEntity')) or \
isinstance(pe, _bpimpl('PhysicalEntity'))
return val | [
"Return True if the element is a physical entity"
]
|
Please provide a description of the function:def _is_modification_or_activity(feature):
if not (isinstance(feature, _bp('ModificationFeature')) or \
isinstance(feature, _bpimpl('ModificationFeature'))):
return None
mf_type = feature.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
for term in mf_type_terms:
if term in ('residue modification, active',
'residue modification, inactive',
'active', 'inactive'):
return 'activity'
return 'modification' | [
"Return True if the feature is a modification"
]
|
Please provide a description of the function:def _is_reference(bpe):
if isinstance(bpe, _bp('ProteinReference')) or \
isinstance(bpe, _bpimpl('ProteinReference')) or \
isinstance(bpe, _bp('SmallMoleculeReference')) or \
isinstance(bpe, _bpimpl('SmallMoleculeReference')) or \
isinstance(bpe, _bp('RnaReference')) or \
isinstance(bpe, _bpimpl('RnaReference')) or \
isinstance(bpe, _bp('EntityReference')) or \
isinstance(bpe, _bpimpl('EntityReference')):
return True
else:
return False | [
"Return True if the element is an entity reference."
]
|
Please provide a description of the function:def _is_entity(bpe):
if isinstance(bpe, _bp('Protein')) or \
isinstance(bpe, _bpimpl('Protein')) or \
isinstance(bpe, _bp('SmallMolecule')) or \
isinstance(bpe, _bpimpl('SmallMolecule')) or \
isinstance(bpe, _bp('Complex')) or \
isinstance(bpe, _bpimpl('Complex')) or \
isinstance(bpe, _bp('Rna')) or \
isinstance(bpe, _bpimpl('Rna')) or \
isinstance(bpe, _bp('RnaRegion')) or \
isinstance(bpe, _bpimpl('RnaRegion')) or \
isinstance(bpe, _bp('DnaRegion')) or \
isinstance(bpe, _bpimpl('DnaRegion')) or \
isinstance(bpe, _bp('PhysicalEntity')) or \
isinstance(bpe, _bpimpl('PhysicalEntity')):
return True
else:
return False | [
"Return True if the element is a physical entity."
]
|
Please provide a description of the function:def _is_catalysis(bpe):
if isinstance(bpe, _bp('Catalysis')) or \
isinstance(bpe, _bpimpl('Catalysis')):
return True
else:
return False | [
"Return True if the element is Catalysis."
]
|
Please provide a description of the function:def print_statements(self):
for i, stmt in enumerate(self.statements):
print("%s: %s" % (i, stmt)) | [
"Print all INDRA Statements collected by the processors."
]
|
Please provide a description of the function:def save_model(self, file_name=None):
if file_name is None:
logger.error('Missing file name')
return
pcc.model_to_owl(self.model, file_name) | [
"Save the BioPAX model object in an OWL file.\n\n Parameters\n ----------\n file_name : Optional[str]\n The name of the OWL file to save the model in.\n "
]
|
Please provide a description of the function:def eliminate_exact_duplicates(self):
# Here we use the deep hash of each Statement, and by making a dict,
# we effectively keep only one Statement with a given deep hash
self.statements = list({stmt.get_hash(shallow=False, refresh=True): stmt
for stmt in self.statements}.values()) | [
"Eliminate Statements that were extracted multiple times.\n\n Due to the way the patterns are implemented, they can sometimes yield\n the same Statement information multiple times, in which case,\n we end up with redundant Statements that aren't from independent\n underlying entries. To avoid this, here, we filter out such\n duplicates.\n "
]
|
Please provide a description of the function:def get_complexes(self):
for obj in self.model.getObjects().toArray():
bpe = _cast_biopax_element(obj)
if not _is_complex(bpe):
continue
ev = self._get_evidence(bpe)
members = self._get_complex_members(bpe)
if members is not None:
if len(members) > 10:
logger.debug('Skipping complex with more than 10 members.')
continue
complexes = _get_combinations(members)
for c in complexes:
self.statements.append(decode_obj(Complex(c, ev),
encoding='utf-8')) | [
"Extract INDRA Complex Statements from the BioPAX model.\n\n This method searches for org.biopax.paxtools.model.level3.Complex\n objects which represent molecular complexes. It doesn't reuse\n BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith\n query since that retrieves pairs of complex members rather than\n the full complex.\n "
]
|
Please provide a description of the function:def get_modifications(self):
for modtype, modclass in modtype_to_modclass.items():
# TODO: we could possibly try to also extract generic
# modifications here
if modtype == 'modification':
continue
stmts = self._get_generic_modification(modclass)
self.statements += stmts | [
"Extract INDRA Modification Statements from the BioPAX model.\n\n To extract Modifications, this method reuses the structure of\n BioPAX Pattern's\n org.biopax.paxtools.pattern.PatternBox.constrolsStateChange pattern\n with additional constraints to specify the type of state change\n occurring (phosphorylation, deubiquitination, etc.).\n "
]
|
Please provide a description of the function:def get_activity_modification(self):
mod_filter = 'residue modification, active'
for is_active in [True, False]:
p = self._construct_modification_pattern()
rel = mcct.GAIN if is_active else mcct.LOSS
p.add(mcc(rel, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
reaction = r[p.indexOf('Conversion')]
activity = 'activity'
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
mod_shared = _get_mod_intersection(mod_in, mod_out)
gained_mods = _get_mod_difference(mod_out, mod_in)
# Here we get the evidence for the BiochemicalReaction
ev = self._get_evidence(reaction)
agents = self._get_agents_from_entity(output_spe)
for agent in _listify(agents):
static_mods = _get_mod_difference(agent.mods,
gained_mods)
# NOTE: with the ActiveForm representation we cannot
# separate static_mods and gained_mods. We assume here
# that the static_mods are inconsequential and therefore
# are not mentioned as an Agent condition, following
# don't care don't write semantics. Therefore only the
# gained_mods are listed in the ActiveForm as Agent
# conditions.
if gained_mods:
agent.mods = gained_mods
stmt = ActiveForm(agent, activity, is_active,
evidence=ev)
self.statements.append(decode_obj(stmt,
encoding='utf-8')) | [
"Extract INDRA ActiveForm statements from the BioPAX model.\n\n This method extracts ActiveForm Statements that are due to\n protein modifications. This method reuses the structure of\n BioPAX Pattern's\n org.biopax.paxtools.pattern.PatternBox.constrolsStateChange pattern\n with additional constraints to specify the gain or loss of a\n modification occurring (phosphorylation, deubiquitination, etc.)\n and the gain or loss of activity due to the modification state\n change.\n "
]
|
Please provide a description of the function:def get_regulate_activities(self):
mcc = _bpp('constraint.ModificationChangeConstraint')
mcct = _bpp('constraint.ModificationChangeConstraint$Type')
mod_filter = 'residue modification, active'
# Start with a generic modification pattern
p = BiopaxProcessor._construct_modification_pattern()
stmts = []
for act_class, gain_loss in zip([Activation, Inhibition],
[mcct.GAIN, mcct.LOSS]):
p.add(mcc(gain_loss, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
if not _is_catalysis(control):
continue
cat_dir = control.getCatalysisDirection()
if cat_dir is not None and cat_dir.name() != 'LEFT_TO_RIGHT':
logger.debug('Unexpected catalysis direction: %s.' % \
control.getCatalysisDirection())
continue
subjs = BiopaxProcessor._get_primary_controller(controller_pe)
if not subjs:
continue
'''
if _is_complex(input_pe):
# TODO: It is possible to find which member of the complex
# is actually activated. That member will be the substrate
# and all other members of the complex will be bound to it.
logger.info('Cannot handle complex subjects.')
continue
'''
objs = BiopaxProcessor._get_agents_from_entity(input_spe,
expand_pe=False)
ev = self._get_evidence(control)
for subj, obj in itertools.product(_listify(subjs),
_listify(objs)):
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
# We assume if modifications change then this is not really
# a pure activation event
gained_mods = _get_mod_difference(mod_out, mod_in)
lost_mods = _get_mod_difference(mod_in, mod_out)
if gained_mods or lost_mods:
continue
stmt = act_class(subj, obj, 'activity', evidence=ev)
self.statements.append(decode_obj(stmt, encoding='utf-8')) | [
"Get Activation/Inhibition INDRA Statements from the BioPAX model.\n\n This method extracts Activation/Inhibition Statements and reuses the\n structure of BioPAX Pattern's\n org.biopax.paxtools.pattern.PatternBox.constrolsStateChange pattern\n with additional constraints to specify the gain or loss of\n activity state but assuring that the activity change is not due to\n a modification state change (which are extracted by get_modifications\n and get_activity_modification).\n "
]
|
Please provide a description of the function:def get_regulate_amounts(self):
p = pb.controlsExpressionWithTemplateReac()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
stmts = []
for res in res_array:
# FIXME: for some reason labels are not accessible
# for these queries. It would be more reliable
# to get results by label instead of index.
'''
controller_er = res[p.indexOf('controller ER')]
generic_controller_er = res[p.indexOf('generic controller ER')]
controller_simple_pe = res[p.indexOf('controller simple PE')]
controller_pe = res[p.indexOf('controller PE')]
control = res[p.indexOf('Control')]
conversion = res[p.indexOf('Conversion')]
input_pe = res[p.indexOf('input PE')]
input_simple_pe = res[p.indexOf('input simple PE')]
changed_generic_er = res[p.indexOf('changed generic ER')]
output_pe = res[p.indexOf('output PE')]
output_simple_pe = res[p.indexOf('output simple PE')]
changed_er = res[p.indexOf('changed ER')]
'''
# TODO: here, res[3] is the complex physical entity
# for instance http://pathwaycommons.org/pc2/
# Complex_43c6b8330562c1b411d21e9d1185bae9
# consists of 3 components: JUN, FOS and NFAT
# where NFAT further contains 3 member physical entities.
#
# However, res[2] iterates over all 5 member physical entities
# of the complex which doesn't represent the underlying
# structure faithfully. It would be better to use res[3]
# (the complex itself) and look at components and then
# members. However, then, it would not be clear how to
# construct an INDRA Agent for the controller.
controller = self._get_agents_from_entity(res[2])
controlled_pe = res[6]
controlled = self._get_agents_from_entity(controlled_pe)
conversion = res[5]
direction = conversion.getTemplateDirection()
if direction is not None:
direction = direction.name()
if direction != 'FORWARD':
logger.warning('Unhandled conversion direction %s' %
direction)
continue
# Sometimes interaction type is annotated as
# term=='TRANSCRIPTION'. Other times this is not
# annotated.
int_type = conversion.getInteractionType().toArray()
if int_type:
for it in int_type:
for term in it.getTerm().toArray():
pass
control = res[4]
control_type = control.getControlType()
if control_type:
control_type = control_type.name()
ev = self._get_evidence(control)
for subj, obj in itertools.product(_listify(controller),
_listify(controlled)):
subj_act = ActivityCondition('transcription', True)
subj.activity = subj_act
if control_type == 'ACTIVATION':
st = IncreaseAmount(subj, obj, evidence=ev)
elif control_type == 'INHIBITION':
st = DecreaseAmount(subj, obj, evidence=ev)
else:
logger.warning('Unhandled control type %s' % control_type)
continue
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | [
"Extract INDRA RegulateAmount Statements from the BioPAX model.\n\n This method extracts IncreaseAmount/DecreaseAmount Statements from\n the BioPAX model. It fully reuses BioPAX Pattern's\n org.biopax.paxtools.pattern.PatternBox.controlsExpressionWithTemplateReac\n pattern to find TemplateReactions which control the expression of\n a protein.\n "
]
|
Please provide a description of the function:def get_conversions(self):
# NOTE: This pattern gets all reactions in which a protein is the
# controller and chemicals are converted. But with this pattern only
# a single chemical is extracted from each side. This can be misleading
# since we want to capture all inputs and all outputs of the
# conversion. So we need to step back to the conversion itself and
# enumerate all inputs/outputs, make sure they constitute the kind
# of conversion we can capture here and then extract as a Conversion
# Statement. Another issue here is that the same reaction will be
# extracted multiple times if there is more then one input or output.
# Therefore we need to cache the ID of the reactions that have already
# been handled.
p = _bpp('Pattern')(_bpimpl('PhysicalEntity')().getModelInterface(),
'controller PE')
# Getting the control itself
p.add(cb.peToControl(), "controller PE", "Control")
# Make sure the controller is a protein
# TODO: possibly allow Complex too
p.add(tp(_bpimpl('Protein')().getModelInterface()), "controller PE")
# Link the control to the conversion that it controls
p.add(cb.controlToConv(), "Control", "Conversion")
# Make sure this is a BiochemicalRection (as opposed to, for instance,
# ComplexAssembly)
p.add(tp(_bpimpl('BiochemicalReaction')().getModelInterface()),
"Conversion")
# The controller shouldn't be a participant of the conversion
p.add(_bpp('constraint.NOT')(cb.participant()),
"Conversion", "controller PE")
# Get the input participant of the conversion
p.add(pt(rt.INPUT, True), "Control", "Conversion", "input PE")
# Link to the other side of the conversion
p.add(cs(cst.OTHER_SIDE), "input PE", "Conversion", "output PE")
# Make sure the two sides are not the same
p.add(_bpp('constraint.Equality')(False), "input PE", "output PE")
# Make sure the input/output is a chemical
p.add(tp(_bpimpl('SmallMolecule')().getModelInterface()), "input PE")
p.add(tp(_bpimpl('SmallMolecule')().getModelInterface()), "output PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
stmts = []
reaction_extracted = set()
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
input_pe = r[p.indexOf('input PE')]
output_pe = r[p.indexOf('output PE')]
if control.getUri() in reaction_extracted:
continue
# Get controller
subj_list = self._get_agents_from_entity(controller_pe)
# Get inputs and outputs
left = reaction.getLeft().toArray()
right = reaction.getRight().toArray()
# Skip this if not all participants are chemicals
if any([not _is_small_molecule(e) for e in left]):
continue
if any([not _is_small_molecule(e) for e in right]):
continue
obj_left = []
obj_right = []
for participant in left:
agent = self._get_agents_from_entity(participant)
if isinstance(agent, list):
obj_left += agent
else:
obj_left.append(agent)
for participant in right:
agent = self._get_agents_from_entity(participant)
if isinstance(agent, list):
obj_right += agent
else:
obj_right.append(agent)
ev = self._get_evidence(control)
for subj in _listify(subj_list):
st = Conversion(subj, obj_left, obj_right, evidence=ev)
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec)
reaction_extracted.add(control.getUri()) | [
"Extract Conversion INDRA Statements from the BioPAX model.\n\n This method uses a custom BioPAX Pattern\n (one that is not implemented PatternBox) to query for\n BiochemicalReactions whose left and right hand sides are collections\n of SmallMolecules. This pattern thereby extracts metabolic\n conversions as well as signaling processes via small molecules\n (e.g. lipid phosphorylation or cleavage).\n "
]
|
Please provide a description of the function:def get_gef(self):
p = self._gef_gap_base()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_pe = r[p.indexOf('output PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
# Make sure the GEF is not a complex
# TODO: it could be possible to extract certain complexes here, for
# instance ones that only have a single protein
if _is_complex(controller_pe):
continue
members_in = self._get_complex_members(input_pe)
members_out = self._get_complex_members(output_pe)
if not (members_in and members_out):
continue
# Make sure the outgoing complex has exactly 2 members
# TODO: by finding matching proteins on either side, in principle
# it would be possible to find Gef relationships in complexes
# with more members
if len(members_out) != 2:
continue
# Make sure complex starts with GDP that becomes GTP
gdp_in = False
for member in members_in:
if isinstance(member, Agent) and member.name == 'GDP':
gdp_in = True
gtp_out = False
for member in members_out:
if isinstance(member, Agent) and member.name == 'GTP':
gtp_out = True
if not (gdp_in and gtp_out):
continue
ras_list = self._get_agents_from_entity(input_spe)
gef_list = self._get_agents_from_entity(controller_pe)
ev = self._get_evidence(control)
for gef, ras in itertools.product(_listify(gef_list),
_listify(ras_list)):
st = Gef(gef, ras, evidence=ev)
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | [
"Extract Gef INDRA Statements from the BioPAX model.\n\n This method uses a custom BioPAX Pattern\n (one that is not implemented PatternBox) to query for controlled\n BiochemicalReactions in which the same protein is in complex with\n GDP on the left hand side and in complex with GTP on the\n right hand side. This implies that the controller is a GEF for the\n GDP/GTP-bound protein.\n "
]
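For reference, a minimal sketch of the kind of Statement this method appends, assuming SOS1 has been matched as the exchange factor for KRAS (agent names are hypothetical):

from indra.statements import Agent, Gef

# Equivalent of one Statement appended to self.statements by get_gef()
gef_stmt = Gef(Agent('SOS1'), Agent('KRAS'))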
|
Please provide a description of the function:def get_gap(self):
p = self._gef_gap_base()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_pe = r[p.indexOf('output PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
# Make sure the GAP is not a complex
# TODO: it could be possible to extract certain complexes here, for
# instance ones that only have a single protein
if _is_complex(controller_pe):
continue
members_in = self._get_complex_members(input_pe)
members_out = self._get_complex_members(output_pe)
if not (members_in and members_out):
continue
# Make sure the outgoing complex has exactly 2 members
# TODO: by finding matching proteins on either side, in principle
# it would be possible to find Gap relationships in complexes
# with more members
if len(members_out) != 2:
continue
# Make sure complex starts with GTP that becomes GDP
gtp_in = False
for member in members_in:
if isinstance(member, Agent) and member.name == 'GTP':
gtp_in = True
gdp_out = False
for member in members_out:
if isinstance(member, Agent) and member.name == 'GDP':
gdp_out = True
if not (gtp_in and gdp_out):
continue
ras_list = self._get_agents_from_entity(input_spe)
gap_list = self._get_agents_from_entity(controller_pe)
ev = self._get_evidence(control)
for gap, ras in itertools.product(_listify(gap_list),
_listify(ras_list)):
st = Gap(gap, ras, evidence=ev)
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | [
"Extract Gap INDRA Statements from the BioPAX model.\n\n This method uses a custom BioPAX Pattern\n (one that is not implemented PatternBox) to query for controlled\n BiochemicalReactions in which the same protein is in complex with\n GTP on the left hand side and in complex with GDP on the\n right hand side. This implies that the controller is a GAP for the\n GDP/GTP-bound protein.\n "
]
|
Please provide a description of the function:def _get_entity_mods(bpe):
if _is_entity(bpe):
features = bpe.getFeature().toArray()
else:
features = bpe.getEntityFeature().toArray()
mods = []
for feature in features:
if not _is_modification(feature):
continue
mc = BiopaxProcessor._extract_mod_from_feature(feature)
if mc is not None:
mods.append(mc)
return mods | [
"Get all the modifications of an entity in INDRA format"
]
|
Please provide a description of the function:def _get_generic_modification(self, mod_class):
mod_type = modclass_to_modtype[mod_class]
if issubclass(mod_class, RemoveModification):
mod_gain_const = mcct.LOSS
mod_type = modtype_to_inverse[mod_type]
else:
mod_gain_const = mcct.GAIN
mod_filter = mod_type[:5]
# Start with a generic modification pattern
p = BiopaxProcessor._construct_modification_pattern()
p.add(mcc(mod_gain_const, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
stmts = []
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
if not _is_catalysis(control):
continue
cat_dir = control.getCatalysisDirection()
if cat_dir is not None and cat_dir.name() != 'LEFT_TO_RIGHT':
logger.debug('Unexpected catalysis direction: %s.' % \
control.getCatalysisDirection())
continue
enzs = BiopaxProcessor._get_primary_controller(controller_pe)
if not enzs:
continue
'''
if _is_complex(input_pe):
sub_members_in = self._get_complex_members(input_pe)
sub_members_out = self._get_complex_members(output_pe)
# TODO: It is possible to find which member of the complex is
# actually modified. That member will be the substrate and
# all other members of the complex will be bound to it.
logger.info('Cannot handle complex substrates.')
continue
'''
subs = BiopaxProcessor._get_agents_from_entity(input_spe,
expand_pe=False)
ev = self._get_evidence(control)
for enz, sub in itertools.product(_listify(enzs), _listify(subs)):
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
sub.mods = _get_mod_intersection(mod_in, mod_out)
if issubclass(mod_class, AddModification):
gained_mods = _get_mod_difference(mod_out, mod_in)
else:
gained_mods = _get_mod_difference(mod_in, mod_out)
for mod in gained_mods:
# Is it guaranteed that these are all modifications
# of the type we are extracting?
if mod.mod_type not in (mod_type,
modtype_to_inverse[mod_type]):
continue
stmt = mod_class(enz, sub, mod.residue, mod.position,
evidence=ev)
stmts.append(decode_obj(stmt, encoding='utf-8'))
return stmts | [
"Get all modification reactions given a Modification class."
]
|
Please provide a description of the function:def _construct_modification_pattern():
# The following constraints were pieced together based on the
# following two higher level constrains: pb.controlsStateChange(),
# pb.controlsPhosphorylation().
p = _bpp('Pattern')(_bpimpl('PhysicalEntity')().getModelInterface(),
'controller PE')
# Getting the control itself
p.add(cb.peToControl(), "controller PE", "Control")
# Link the control to the conversion that it controls
p.add(cb.controlToConv(), "Control", "Conversion")
# The controller shouldn't be a participant of the conversion
p.add(_bpp('constraint.NOT')(cb.participant()),
"Conversion", "controller PE")
# Get the input participant of the conversion
p.add(pt(rt.INPUT, True), "Control", "Conversion", "input PE")
# Get the specific PhysicalEntity
p.add(cb.linkToSpecific(), "input PE", "input simple PE")
# Link to ER
p.add(cb.peToER(), "input simple PE", "input simple ER")
# Make sure the participant is a protein
p.add(tp(_bpimpl('Protein')().getModelInterface()), "input simple PE")
# Link to the other side of the conversion
p.add(cs(cst.OTHER_SIDE), "input PE", "Conversion", "output PE")
# Make sure the two sides are not the same
p.add(_bpp('constraint.Equality')(False), "input PE", "output PE")
# Get the specific PhysicalEntity
p.add(cb.linkToSpecific(), "output PE", "output simple PE")
# Link to ER
p.add(cb.peToER(), "output simple PE", "output simple ER")
p.add(_bpp('constraint.Equality')(True), "input simple ER",
"output simple ER")
# Make sure the output is a Protein
p.add(tp(_bpimpl('Protein')().getModelInterface()), "output simple PE")
p.add(_bpp('constraint.NOT')(cb.linkToSpecific()),
"input PE", "output simple PE")
p.add(_bpp('constraint.NOT')(cb.linkToSpecific()),
"output PE", "input simple PE")
return p | [
"Construct the BioPAX pattern to extract modification reactions."
]
|
Please provide a description of the function:def _extract_mod_from_feature(mf):
# ModificationFeature / SequenceModificationVocabulary
mf_type = mf.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
known_mf_type = None
for t in mf_type_terms:
if t.startswith('MOD_RES '):
t = t[8:]
mf_type_indra = _mftype_dict.get(t)
if mf_type_indra is not None:
known_mf_type = mf_type_indra
break
if not known_mf_type:
logger.debug('Skipping modification with unknown terms: %s' %
', '.join(mf_type_terms))
return None
mod_type, residue = known_mf_type
# getFeatureLocation returns SequenceLocation, which is the
# generic parent class of SequenceSite and SequenceInterval.
# Here we need to cast to SequenceSite in order to get to
# the sequence position.
mf_pos = mf.getFeatureLocation()
if mf_pos is not None:
# If it is not a SequenceSite we can't handle it
if not mf_pos.modelInterface.getName() == \
'org.biopax.paxtools.model.level3.SequenceSite':
mod_pos = None
else:
mf_site = cast(_bp('SequenceSite'), mf_pos)
mf_pos_status = mf_site.getPositionStatus()
if mf_pos_status is None:
mod_pos = None
elif mf_pos_status and mf_pos_status.toString() != 'EQUAL':
logger.debug('Modification site position is %s' %
mf_pos_status.toString())
# No exact position is available in this case
mod_pos = None
else:
mod_pos = mf_site.getSequencePosition()
mod_pos = '%s' % mod_pos
else:
mod_pos = None
mc = ModCondition(mod_type, residue, mod_pos, True)
return mc | [
"Extract the type of modification and the position from\n a ModificationFeature object in the INDRA format."
]
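The ModCondition returned above is INDRA's standard representation of a single modification state. A minimal sketch, assuming a phosphothreonine at position 185 was read from the feature:

from indra.statements import ModCondition

# Arguments: mod_type, residue, position (stored as a string), is_modified
mc = ModCondition('phosphorylation', 'T', '185', True)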
|
Please provide a description of the function:def _get_entref(bpe):
if not _is_reference(bpe):
try:
er = bpe.getEntityReference()
except AttributeError:
return None
return er
else:
return bpe | [
"Returns the entity reference of an entity if it exists or\n return the entity reference that was passed in as argument."
]
|
Please provide a description of the function:def _stmt_location_to_agents(stmt, location):
if location is None:
return
agents = stmt.agent_list()
for a in agents:
if a is not None:
a.location = location | [
"Apply an event location to the Agents in the corresponding Statement.\n\n If a Statement is in a given location we represent that by requiring all\n Agents in the Statement to be in that location.\n "
]
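A small usage sketch, assuming a two-agent Statement and the location string 'nucleus':

from indra.statements import Agent, Phosphorylation

stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))
_stmt_location_to_agents(stmt, 'nucleus')
# Every non-None Agent now carries the event location
assert all(a.location == 'nucleus' for a in stmt.agent_list())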
|
Please provide a description of the function:def _get_db_refs(term):
db_refs = {}
# Here we extract the text name of the Agent
# There are two relevant tags to consider here.
# The <text> tag typically contains a larger phrase surrounding the
# term but it contains the term in a raw, non-canonicalized form.
# The <name> tag only contains the name of the entity but it is
# canonicalized. For instance, MAP2K1 appears as MAP-2-K-1.
agent_text_tag = term.find('name')
if agent_text_tag is not None:
db_refs['TEXT'] = agent_text_tag.text
# If we have some drum-terms, the matched-name of the first
# drum-term (e.g. "MAP2K1") is a better value for TEXT than
# the name of the TERM (e.g. "MAP-2-K-1") so we put that in there
drum_terms = term.findall('drum-terms/drum-term')
if drum_terms:
matched_name = drum_terms[0].attrib.get('matched-name')
if matched_name:
db_refs['TEXT'] = matched_name
# We make a list of scored grounding terms from the DRUM terms
grounding_terms = _get_grounding_terms(term)
if not grounding_terms:
# This is for backwards compatibility with EKBs without drum-term
# scored entries. It is important to keep for Bioagents
# compatibility.
dbid = term.attrib.get('dbid')
if dbid:
dbids = dbid.split('|')
for dbname, dbid in [d.split(':') for d in dbids]:
if not db_refs.get(dbname):
db_refs[dbname] = dbid
return db_refs, None, []
# This is the INDRA prioritization of grounding name spaces. A lower
# priority value takes precedence.
ns_priority = {
'HGNC': 1,
'UP': 1,
'FPLX': 2,
'CHEBI': 3,
'PC': 3,
'GO': 4,
'FA': 5,
'XFAM': 5,
'NCIT': 5
}
# We get the top priority entry from each score group
score_groups = itertools.groupby(grounding_terms, lambda x: x['score'])
top_per_score_group = []
ambiguities = []
for score, group in score_groups:
entries = list(group)
for entry in entries:
priority = 100
for ref_ns, ref_id in entry['refs'].items():
# Skip etc UP entries
if ref_ns == 'UP' and ref_id == 'etc':
continue
try:
priority = min(priority, ns_priority[ref_ns])
except KeyError:
pass
if ref_ns == 'UP':
if not up_client.is_human(ref_id):
priority = 4
entry['priority'] = priority
if len(entries) > 1:
top_entry = entries[0]
top_idx = 0
for i, entry in enumerate(entries):
# We take the lowest priority entry within the score group
# as the top entry
if entry['priority'] < top_entry['priority']:
# This is a corner case in which a protein family
# should be prioritized over a specific protein,
# specifically when HGNC was mapped from NCIT but
# FPLX was not mapped from NCIT, the HGNC shouldn't
# take precedence.
if entry.get('comment') == 'HGNC_FROM_NCIT' and \
'FPLX' in top_entry['refs'] and \
top_entry.get('comment') != 'FPLX_FROM_NCIT':
continue
top_entry = entry
top_idx = i
for i, entry in enumerate(entries):
if i == top_idx:
continue
if (entry['priority'] - top_entry['priority']) <= 1:
ambiguities.append((top_entry, entry))
else:
top_entry = entries[0]
top_per_score_group.append(top_entry)
# Get the top priority for each score group
priorities = [entry['priority'] for entry in top_per_score_group]
# By default, we choose the top priority entry from the highest score group
top_grounding = top_per_score_group[0]
# Sometimes the top grounding has much lower priority and not much higher
# score than the second grounding. Typically 1.0 vs 0.82857 and 5 vs 2.
# In this case we take the second entry. A special case is handled where
# a FPLX entry was mapped from FA, in which case priority difference of < 2
# is also accepted.
if len(top_per_score_group) > 1:
score_diff = top_per_score_group[0]['score'] - \
top_per_score_group[1]['score']
priority_diff = top_per_score_group[0]['priority'] - \
top_per_score_group[1]['priority']
if score_diff < 0.2 and (priority_diff >= 2 or \
top_per_score_group[0].get('comment') == 'FPLX_FROM_FA'):
top_grounding = top_per_score_group[1]
relevant_ambiguities = []
for amb in ambiguities:
if top_grounding not in amb:
continue
if top_grounding == amb[0]:
relevant_ambiguities.append({'preferred': amb[0],
'alternative': amb[1]})
else:
relevant_ambiguities.append({'preferred': amb[1],
'alternative': amb[0]})
for k, v in top_grounding['refs'].items():
db_refs[k] = v
# Now standardize db_refs to the INDRA standards
# We need to add a prefix for CHEBI
chebi_id = db_refs.get('CHEBI')
if chebi_id and not chebi_id.startswith('CHEBI:'):
db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id
# We need to strip the trailing version number for XFAM and rename to PF
pfam_id = db_refs.get('XFAM')
if pfam_id:
pfam_id = pfam_id.split('.')[0]
db_refs.pop('XFAM', None)
db_refs['PF'] = pfam_id
# We need to add GO prefix if it is missing
go_id = db_refs.get('GO')
if go_id:
if not go_id.startswith('GO:'):
db_refs['GO'] = 'GO:%s' % go_id
# We need to deal with Nextprot families
nxp_id = db_refs.get('FA')
if nxp_id:
db_refs.pop('FA', None)
db_refs['NXPFA'] = nxp_id
# We need to rename PC to PUBCHEM
pc_id = db_refs.get('PC')
if pc_id:
db_refs.pop('PC', None)
db_refs['PUBCHEM'] = pc_id
# Here we also get and return the type, which is a TRIPS
# ontology type. This is to be used in the context of
# Bioagents.
ont_type = top_grounding['type']
return db_refs, ont_type, relevant_ambiguities | [
"Extract database references for a TERM."
]
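A sketch of the return values for a TERM grounded to a human kinase; the concrete IDs below are hypothetical and depend on the DRUM terms present in the EKB:

db_refs, ont_type, ambiguities = _get_db_refs(term)
# db_refs, e.g.: {'TEXT': 'MAP2K1', 'HGNC': '6840', 'UP': 'Q02750'}
# ont_type, e.g.: 'ONT::GENE-PROTEIN' (a TRIPS ontology type)
# ambiguities: list of {'preferred': ..., 'alternative': ...} dicts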
|
Please provide a description of the function:def get_all_events(self):
self.all_events = {}
events = self.tree.findall('EVENT')
events += self.tree.findall('CC')
for e in events:
event_id = e.attrib['id']
if event_id in self._static_events:
continue
event_type = e.find('type').text
try:
self.all_events[event_type].append(event_id)
except KeyError:
self.all_events[event_type] = [event_id] | [
"Make a list of all events in the TRIPS EKB.\n\n The events are stored in self.all_events.\n "
]
|
Please provide a description of the function:def get_activations(self):
act_events = self.tree.findall("EVENT/[type='ONT::ACTIVATE']")
inact_events = self.tree.findall("EVENT/[type='ONT::DEACTIVATE']")
inact_events += self.tree.findall("EVENT/[type='ONT::INHIBIT']")
for event in (act_events + inact_events):
event_id = event.attrib['id']
if event_id in self._static_events:
continue
# Get the activating agent in the event
agent = event.find(".//*[@role=':AGENT']")
if agent is None:
continue
agent_id = agent.attrib.get('id')
if agent_id is None:
logger.debug(
'Skipping activation with missing activator agent')
continue
activator_agent = self._get_agent_by_id(agent_id, event_id)
if activator_agent is None:
continue
# Get the activated agent in the event
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
affected_id = affected.attrib.get('id')
if affected_id is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
affected_agent = self._get_agent_by_id(affected_id, event_id)
if affected_agent is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
is_activation = True
if _is_type(event, 'ONT::ACTIVATE'):
self._add_extracted('ONT::ACTIVATE', event.attrib['id'])
elif _is_type(event, 'ONT::INHIBIT'):
is_activation = False
self._add_extracted('ONT::INHIBIT', event.attrib['id'])
elif _is_type(event, 'ONT::DEACTIVATE'):
is_activation = False
self._add_extracted('ONT::DEACTIVATE', event.attrib['id'])
ev = self._get_evidence(event)
location = self._get_event_location(event)
for a1, a2 in _agent_list_product((activator_agent,
affected_agent)):
if is_activation:
st = Activation(a1, a2, evidence=[deepcopy(ev)])
else:
st = Inhibition(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | [
"Extract direct Activation INDRA Statements."
]
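In practice this method runs as part of the standard TRIPS extraction pipeline. A usage sketch, assuming a TRIPS/DRUM service is reachable:

from indra.sources import trips

# process_text runs the extraction methods, including get_activations()
tp = trips.process_text('BRAF activates MAP2K1.')
# Expected (roughly): [Activation(BRAF(), MAP2K1())]
print(tp.statements)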
|
Please provide a description of the function:def get_activations_causal(self):
# Search for causal connectives of type ONT::CAUSE
ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
for cc in ccs:
factor = cc.find("arg/[@role=':FACTOR']")
outcome = cc.find("arg/[@role=':OUTCOME']")
# If either the factor or the outcome is missing, skip
if factor is None or outcome is None:
continue
factor_id = factor.attrib.get('id')
# Here, implicitly, we require that the factor is a TERM
# and not an EVENT
factor_term = self.tree.find("TERM/[@id='%s']" % factor_id)
outcome_id = outcome.attrib.get('id')
# Here it is implicit that the outcome is an event not
# a TERM
outcome_event = self.tree.find("EVENT/[@id='%s']" % outcome_id)
if factor_term is None or outcome_event is None:
continue
factor_term_type = factor_term.find('type')
# The factor term must be a molecular entity
if factor_term_type is None or \
factor_term_type.text not in molecule_types:
continue
factor_agent = self._get_agent_by_id(factor_id, None)
if factor_agent is None:
continue
outcome_event_type = outcome_event.find('type')
if outcome_event_type is None:
continue
# Construct evidence
ev = self._get_evidence(cc)
ev.epistemics['direct'] = False
location = self._get_event_location(outcome_event)
if outcome_event_type.text in ['ONT::ACTIVATE', 'ONT::ACTIVITY',
'ONT::DEACTIVATE']:
if outcome_event_type.text in ['ONT::ACTIVATE',
'ONT::DEACTIVATE']:
agent_tag = outcome_event.find(".//*[@role=':AFFECTED']")
elif outcome_event_type.text == 'ONT::ACTIVITY':
agent_tag = outcome_event.find(".//*[@role=':AGENT']")
if agent_tag is None or agent_tag.attrib.get('id') is None:
continue
outcome_agent = self._get_agent_by_id(agent_tag.attrib['id'],
outcome_id)
if outcome_agent is None:
continue
if outcome_event_type.text == 'ONT::DEACTIVATE':
is_activation = False
else:
is_activation = True
for a1, a2 in _agent_list_product((factor_agent,
outcome_agent)):
if is_activation:
st = Activation(a1, a2, evidence=[deepcopy(ev)])
else:
st = Inhibition(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | [
"Extract causal Activation INDRA Statements."
]
|
Please provide a description of the function:def get_activations_stimulate(self):
# TODO: extract to other patterns:
# - Stimulation by EGF activates ERK
# - Stimulation by EGF leads to ERK activation
# Search for stimulation event
stim_events = self.tree.findall("EVENT/[type='ONT::STIMULATE']")
for event in stim_events:
event_id = event.attrib.get('id')
if event_id in self._static_events:
continue
controller = event.find("arg1/[@role=':AGENT']")
affected = event.find("arg2/[@role=':AFFECTED']")
# If either the controller or the affected is missing, skip
if controller is None or affected is None:
continue
controller_id = controller.attrib.get('id')
# Here, implicitly, we require that the controller is a TERM
# and not an EVENT
controller_term = self.tree.find("TERM/[@id='%s']" % controller_id)
affected_id = affected.attrib.get('id')
# Here it is implicit that the affected is an event not
# a TERM
affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
if controller_term is None or affected_event is None:
continue
controller_term_type = controller_term.find('type')
# The controller term must be a molecular entity
if controller_term_type is None or \
controller_term_type.text not in molecule_types:
continue
controller_agent = self._get_agent_by_id(controller_id, None)
if controller_agent is None:
continue
affected_event_type = affected_event.find('type')
if affected_event_type is None:
continue
# Construct evidence
ev = self._get_evidence(event)
ev.epistemics['direct'] = False
location = self._get_event_location(affected_event)
if affected_event_type.text == 'ONT::ACTIVATE':
affected = affected_event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_agent = self._get_agent_by_id(affected.attrib['id'],
affected_id)
if affected_agent is None:
continue
for a1, a2 in _agent_list_product((controller_agent,
affected_agent)):
st = Activation(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st)
elif affected_event_type.text == 'ONT::ACTIVITY':
agent_tag = affected_event.find(".//*[@role=':AGENT']")
if agent_tag is None:
continue
affected_agent = self._get_agent_by_id(agent_tag.attrib['id'],
affected_id)
if affected_agent is None:
continue
for a1, a2 in _agent_list_product((controller_agent,
affected_agent)):
st = Activation(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | [
"Extract Activation INDRA Statements via stimulation."
]
|
Please provide a description of the function:def get_degradations(self):
deg_events = self.tree.findall("EVENT/[type='ONT::CONSUME']")
for event in deg_events:
if event.attrib['id'] in self._static_events:
continue
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
msg = 'Skipping degradation event with no affected term.'
logger.debug(msg)
continue
# Make sure the degradation is affecting a molecule type
# Temporarily removed for CwC compatibility with no type tag
#affected_type = affected.find('type')
#if affected_type is None or \
# affected_type.text not in molecule_types:
# continue
affected_id = affected.attrib.get('id')
if affected_id is None:
logger.debug(
'Skipping degradation event with missing affected agent')
continue
affected_agent = self._get_agent_by_id(affected_id,
event.attrib['id'])
if affected_agent is None:
logger.debug(
'Skipping degradation event with missing affected agent')
continue
agent = event.find(".//*[@role=':AGENT']")
if agent is None:
agent_agent = None
else:
agent_id = agent.attrib.get('id')
if agent_id is None:
agent_agent = None
else:
agent_agent = self._get_agent_by_id(agent_id,
event.attrib['id'])
ev = self._get_evidence(event)
location = self._get_event_location(event)
for subj, obj in \
_agent_list_product((agent_agent, affected_agent)):
st = DecreaseAmount(subj, obj, evidence=deepcopy(ev))
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted(_get_type(event), event.attrib['id']) | [
"Extract Degradation INDRA Statements."
]
|
Please provide a description of the function:def get_regulate_amounts(self):
pos_events = []
neg_events = []
pattern = "EVENT/[type='ONT::STIMULATE']/arg2/[type='ONT::TRANSCRIBE']/.."
pos_events += self.tree.findall(pattern)
pattern = "EVENT/[type='ONT::INCREASE']/arg2/[type='ONT::TRANSCRIBE']/.."
pos_events += self.tree.findall(pattern)
pattern = "EVENT/[type='ONT::INHIBIT']/arg2/[type='ONT::TRANSCRIBE']/.."
neg_events += self.tree.findall(pattern)
pattern = "EVENT/[type='ONT::DECREASE']/arg2/[type='ONT::TRANSCRIBE']/.."
neg_events += self.tree.findall(pattern)
# Look at polarity
pattern = "EVENT/[type='ONT::MODULATE']/arg2/[type='ONT::TRANSCRIBE']/.."
mod_events = self.tree.findall(pattern)
for event in mod_events:
pol = event.find('polarity')
if pol is not None:
if pol.text == 'ONT::POSITIVE':
pos_events.append(event)
elif pol.text == 'ONT::NEGATIVE':
neg_events.append(event)
combs = zip([pos_events, neg_events], [IncreaseAmount, DecreaseAmount])
for events, cls in combs:
for event in events:
if event.attrib['id'] in self._static_events:
continue
if event.attrib['id'] in self._subsumed_events:
continue
# The agent has to exist and be a protein type
agent = event.find(".//*[@role=':AGENT']")
if agent is None:
continue
if agent.find('type') is None or \
(agent.find('type').text not in protein_types):
continue
agent_id = agent.attrib.get('id')
if agent_id is None:
continue
agent_agent = self._get_agent_by_id(agent_id,
event.attrib['id'])
# The affected, we already know is ONT::TRANSCRIPTION
affected_arg = event.find(".//*[@role=':AFFECTED']")
if affected_arg is None:
continue
affected_id = affected_arg.attrib.get('id')
affected_event = self.tree.find("EVENT/[@id='%s']" %
affected_id)
if affected_event is None:
continue
affected = \
affected_event.find(".//*[@role=':AFFECTED-RESULT']")
if affected is None:
affected = \
affected_event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_id = affected.attrib.get('id')
if affected_id is None:
continue
affected_agent = \
self._get_agent_by_id(affected_id,
affected_event.attrib['id'])
ev = self._get_evidence(event)
location = self._get_event_location(event)
for subj, obj in \
_agent_list_product((agent_agent, affected_agent)):
if obj is None:
continue
st = cls(subj, obj, evidence=deepcopy(ev))
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted(_get_type(event), event.attrib['id'])
self._subsumed_events.append(affected_event.attrib['id']) | [
"Extract Increase/DecreaseAmount Statements."
]
|
Please provide a description of the function:def get_active_forms(self):
act_events = self.tree.findall("EVENT/[type='ONT::ACTIVATE']")
def _agent_is_basic(agent):
if not agent.mods and not agent.mutations \
and not agent.bound_conditions and not agent.location:
return True
return False
for event in act_events:
if event.attrib['id'] in self._static_events:
continue
agent = event.find(".//*[@role=':AGENT']")
if agent is not None:
# In this case this is not an ActiveForm statement
continue
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
msg = 'Skipping active form event with no affected term.'
logger.debug(msg)
continue
affected_id = affected.attrib.get('id')
if affected_id is None:
logger.debug(
'Skipping active form event with missing affected agent')
continue
affected_agent = self._get_agent_by_id(affected_id,
event.attrib['id'])
# If it is a list of agents, skip them for now
if not isinstance(affected_agent, Agent):
continue
if _agent_is_basic(affected_agent):
continue
# The affected agent has to be protein-like type
affected_type = affected.find('type')
if affected_type is None or \
affected_type.text not in protein_types:
continue
# If the Agent state is at the base state then this is not an
# ActiveForm statement
if _is_base_agent_state(affected_agent):
continue
ev = self._get_evidence(event)
location = self._get_event_location(event)
st = ActiveForm(affected_agent, 'activity', True, evidence=ev)
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted('ONT::ACTIVATE', event.attrib['id']) | [
"Extract ActiveForm INDRA Statements."
]
|
Please provide a description of the function:def get_active_forms_state(self):
for term in self._isolated_terms:
act = term.find('features/active')
if act is None:
continue
if act.text == 'TRUE':
is_active = True
elif act.text == 'FALSE':
is_active = False
else:
logger.warning('Unhandled term activity feature %s' % act.text)
continue
agent = self._get_agent_by_id(term.attrib['id'], None)
# Skip aggregates for now
if not isinstance(agent, Agent):
continue
# If the Agent state is at the base state then this is not an
# ActiveForm statement
if _is_base_agent_state(agent):
continue
# Remove the activity flag since it's irrelevant here
agent.activity = None
text_term = term.find('text')
if text_term is not None:
ev_text = text_term.text
else:
ev_text = None
ev = Evidence(source_api='trips', text=ev_text, pmid=self.doc_id)
st = ActiveForm(agent, 'activity', is_active, evidence=[ev])
self.statements.append(st) | [
"Extract ActiveForm INDRA Statements."
]
|
Please provide a description of the function:def get_complexes(self):
bind_events = self.tree.findall("EVENT/[type='ONT::BIND']")
bind_events += self.tree.findall("EVENT/[type='ONT::INTERACT']")
for event in bind_events:
if event.attrib['id'] in self._static_events:
continue
arg1 = event.find("arg1")
arg2 = event.find("arg2")
# EKB-AGENT
if arg1 is None and arg2 is None:
args = list(event.findall('arg'))
if len(args) < 2:
continue
arg1 = args[0]
arg2 = args[1]
if (arg1 is None or arg1.attrib.get('id') is None) or \
(arg2 is None or arg2.attrib.get('id') is None):
logger.debug('Skipping complex with less than 2 members')
continue
agent1 = self._get_agent_by_id(arg1.attrib['id'],
event.attrib['id'])
agent2 = self._get_agent_by_id(arg2.attrib['id'],
event.attrib['id'])
if agent1 is None or agent2 is None:
logger.debug('Skipping complex with less than 2 members')
continue
# Information on binding site is either attached to the agent term
# in a features/site tag or attached to the event itself in
# a site tag
'''
site_feature = self._find_in_term(arg1.attrib['id'], 'features/site')
if site_feature is not None:
sites, positions = self._get_site_by_id(site_id)
print sites, positions
site_feature = self._find_in_term(arg2.attrib['id'], 'features/site')
if site_feature is not None:
sites, positions = self._get_site_by_id(site_id)
print sites, positions
site = event.find("site")
if site is not None:
sites, positions = self._get_site_by_id(site.attrib['id'])
print sites, positions
'''
ev = self._get_evidence(event)
location = self._get_event_location(event)
for a1, a2 in _agent_list_product((agent1, agent2)):
st = Complex([a1, a2], evidence=deepcopy(ev))
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted(_get_type(event), event.attrib['id']) | [
"Extract Complex INDRA Statements."
]
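A sketch of the output for a simple binding sentence; the Statement below is roughly what a 'BRAF binds RAF1.' event would produce:

from indra.statements import Agent, Complex

cplx = Complex([Agent('BRAF'), Agent('RAF1')])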
|
Please provide a description of the function:def get_modifications(self):
# Get all the specific mod types
mod_event_types = list(ont_to_mod_type.keys())
# Add ONT::PTMs as a special case
mod_event_types += ['ONT::PTM']
mod_events = []
for mod_event_type in mod_event_types:
events = self.tree.findall("EVENT/[type='%s']" % mod_event_type)
mod_extracted = self.extracted_events.get(mod_event_type, [])
for event in events:
event_id = event.attrib.get('id')
if event_id not in mod_extracted:
mod_events.append(event)
# Iterate over all modification events
for event in mod_events:
stmts = self._get_modification_event(event)
if stmts:
for stmt in stmts:
self.statements.append(stmt) | [
"Extract all types of Modification INDRA Statements."
]
|
Please provide a description of the function:def get_modifications_indirect(self):
# Get all the specific mod types
mod_event_types = list(ont_to_mod_type.keys())
# Add ONT::PTMs as a special case
mod_event_types += ['ONT::PTM']
def get_increase_events(mod_event_types):
mod_events = []
events = self.tree.findall("EVENT/[type='ONT::INCREASE']")
for event in events:
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_id = affected.attrib.get('id')
if not affected_id:
continue
pattern = "EVENT/[@id='%s']" % affected_id
affected_event = self.tree.find(pattern)
if affected_event is not None:
affected_type = affected_event.find('type')
if affected_type is not None and \
affected_type.text in mod_event_types:
mod_events.append(event)
return mod_events
def get_cause_events(mod_event_types):
mod_events = []
ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
for cc in ccs:
outcome = cc.find(".//*[@role=':OUTCOME']")
if outcome is None:
continue
outcome_id = outcome.attrib.get('id')
if not outcome_id:
continue
pattern = "EVENT/[@id='%s']" % outcome_id
outcome_event = self.tree.find(pattern)
if outcome_event is not None:
outcome_type = outcome_event.find('type')
if outcome_type is not None and \
outcome_type.text in mod_event_types:
mod_events.append(cc)
return mod_events
mod_events = get_increase_events(mod_event_types)
mod_events += get_cause_events(mod_event_types)
# Iterate over all modification events
for event in mod_events:
event_id = event.attrib['id']
if event_id in self._static_events:
continue
event_type = _get_type(event)
# Get enzyme Agent
enzyme = event.find(".//*[@role=':AGENT']")
if enzyme is None:
enzyme = event.find(".//*[@role=':FACTOR']")
if enzyme is None:
continue
enzyme_id = enzyme.attrib.get('id')
if enzyme_id is None:
continue
enzyme_agent = self._get_agent_by_id(enzyme_id, event_id)
affected_event_tag = event.find(".//*[@role=':AFFECTED']")
if affected_event_tag is None:
affected_event_tag = event.find(".//*[@role=':OUTCOME']")
if affected_event_tag is None:
continue
affected_id = affected_event_tag.attrib.get('id')
if not affected_id:
continue
affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
if affected_event is None:
continue
# Iterate over all enzyme agents if there are multiple ones
for enz_t in _agent_list_product((enzyme_agent, )):
# enz_t comes out as a tuple so we need to take the first
# element here
enz = enz_t[0]
# Note that we re-run the extraction code here potentially
# multiple times. This is mainly to make sure each Statement
# object created here is independent (i.e. has different UUIDs)
# without having to manipulate it after creation.
stmts = self._get_modification_event(affected_event)
stmts_to_make = []
if stmts:
for stmt in stmts:
# The affected event should have no enzyme but should
# have a substrate
if stmt.enz is None and stmt.sub is not None:
stmts_to_make.append(stmt)
for stmt in stmts_to_make:
stmt.enz = enz
for ev in stmt.evidence:
ev.epistemics['direct'] = False
self.statements.append(stmt)
self._add_extracted(event_type, event.attrib['id'])
self._add_extracted(affected_event.find('type').text, affected_id) | [
"Extract indirect Modification INDRA Statements."
]
|
Please provide a description of the function:def get_agents(self):
agents_dict = self.get_term_agents()
agents = [a for a in agents_dict.values() if a is not None]
return agents | [
"Return list of INDRA Agents corresponding to TERMs in the EKB.\n\n This is meant to be used when entities e.g. \"phosphorylated ERK\",\n rather than events need to be extracted from processed natural\n language. These entities with their respective states are represented\n as INDRA Agents.\n\n Returns\n -------\n agents : list[indra.statements.Agent]\n List of INDRA Agents extracted from EKB.\n "
]
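A usage sketch, assuming a TRIPS service and an entity-only input:

from indra.sources import trips

tp = trips.process_text('phosphorylated ERK')
agents = tp.get_agents()
# Expected (roughly): one Agent for ERK carrying a
# phosphorylation ModCondition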
|
Please provide a description of the function:def get_term_agents(self):
terms = self.tree.findall('TERM')
agents = {}
assoc_links = []
for term in terms:
term_id = term.attrib.get('id')
if term_id:
agent = self._get_agent_by_id(term_id, None)
agents[term_id] = agent
# Handle assoc-with links
aw = term.find('assoc-with')
if aw is not None:
aw_id = aw.attrib.get('id')
if aw_id:
assoc_links.append((term_id, aw_id))
# We only keep the target end of assoc with links if both
# source and target are in the list
for source, target in assoc_links:
if target in agents and source in agents:
agents.pop(source)
return agents | [
"Return dict of INDRA Agents keyed by corresponding TERMs in the EKB.\n\n This is meant to be used when entities e.g. \"phosphorylated ERK\",\n rather than events need to be extracted from processed natural\n language. These entities with their respective states are represented\n as INDRA Agents. Further, each key of the dictionary corresponds to\n the ID assigned by TRIPS to the given TERM that the Agent was\n extracted from.\n\n Returns\n -------\n agents : dict[str, indra.statements.Agent]\n Dict of INDRA Agents extracted from EKB.\n "
]
|
Please provide a description of the function:def _get_evidence_text(self, event_tag):
par_id = event_tag.attrib.get('paragraph')
uttnum = event_tag.attrib.get('uttnum')
event_text = event_tag.find('text')
if self.sentences is not None and uttnum is not None:
sentence = self.sentences[uttnum]
elif event_text is not None:
sentence = event_text.text
else:
sentence = None
return sentence | [
"Extract the evidence for an event.\n\n Pieces of text linked to an EVENT are fragments of a sentence. The\n EVENT refers to the paragraph ID and the \"uttnum\", which corresponds\n to a sentence ID. Here we find and return the full sentence from which\n the event was taken.\n "
]
|
Please provide a description of the function:def _get_agent_grounding(agent):
def _get_id(_agent, key):
_id = _agent.db_refs.get(key)
if isinstance(_id, list):
_id = _id[0]
return _id
hgnc_id = _get_id(agent, 'HGNC')
if hgnc_id:
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
if not hgnc_name:
logger.warning('Agent %s with HGNC ID %s has no HGNC name.',
agent, hgnc_id)
return
return protein('HGNC', hgnc_name)
uniprot_id = _get_id(agent, 'UP')
if uniprot_id:
return protein('UP', uniprot_id)
fplx_id = _get_id(agent, 'FPLX')
if fplx_id:
return protein('FPLX', fplx_id)
pfam_id = _get_id(agent, 'PF')
if pfam_id:
return protein('PFAM', pfam_id)
ip_id = _get_id(agent, 'IP')
if ip_id:
return protein('IP', ip_id)
fa_id = _get_id(agent, 'FA')
if fa_id:
return protein('NXPFA', fa_id)
chebi_id = _get_id(agent, 'CHEBI')
if chebi_id:
if chebi_id.startswith('CHEBI:'):
chebi_id = chebi_id[len('CHEBI:'):]
return abundance('CHEBI', chebi_id)
pubchem_id = _get_id(agent, 'PUBCHEM')
if pubchem_id:
return abundance('PUBCHEM', pubchem_id)
go_id = _get_id(agent, 'GO')
if go_id:
return bioprocess('GO', go_id)
mesh_id = _get_id(agent, 'MESH')
if mesh_id:
return bioprocess('MESH', mesh_id)
return | [
"Convert an agent to the corresponding PyBEL DSL object (to be filled with variants later)."
]
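A sketch of the mapping for an Agent with an HGNC grounding; HGNC:6871 is MAPK1, and the result is a PyBEL DSL protein node:

from indra.statements import Agent

node = _get_agent_grounding(Agent('MAPK1', db_refs={'HGNC': '6871'}))
# node is equivalent to protein('HGNC', 'MAPK1')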
|
Please provide a description of the function:def get_causal_edge(stmt, activates):
any_contact = any(
evidence.epistemics.get('direct', False)
for evidence in stmt.evidence
)
if any_contact:
return pc.DIRECTLY_INCREASES if activates else pc.DIRECTLY_DECREASES
return pc.INCREASES if activates else pc.DECREASES | [
"Returns the causal, polar edge with the correct \"contact\"."
]
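A minimal sketch, assuming an Activation whose single Evidence is marked direct:

from indra.statements import Activation, Agent, Evidence

ev = Evidence(source_api='trips', epistemics={'direct': True})
stmt = Activation(Agent('MAP2K1'), Agent('MAPK1'), evidence=[ev])
rel = get_causal_edge(stmt, activates=True)
# rel == pc.DIRECTLY_INCREASES; with no direct evidence it would
# be pc.INCREASES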
|
Please provide a description of the function:def to_database(self, manager=None):
network = pybel.to_database(self.model, manager=manager)
return network | [
"Send the model to the PyBEL database\n\n This function wraps :py:func:`pybel.to_database`.\n\n Parameters\n ----------\n manager : Optional[pybel.manager.Manager]\n A PyBEL database manager. If none, first checks the PyBEL\n configuration for ``PYBEL_CONNECTION`` then checks the\n environment variable ``PYBEL_REMOTE_HOST``. Finally,\n defaults to using SQLite database in PyBEL data directory\n (automatically configured by PyBEL)\n\n Returns\n -------\n network : Optional[pybel.manager.models.Network]\n The SQLAlchemy model representing the network that was uploaded.\n Returns None if upload fails.\n "
]
|
Please provide a description of the function:def to_web(self, host=None, user=None, password=None):
response = pybel.to_web(self.model, host=host, user=user,
password=password)
return response | [
"Send the model to BEL Commons by wrapping :py:func:`pybel.to_web`\n\n The parameters ``host``, ``user``, and ``password`` all check the\n PyBEL configuration, which is located at\n ``~/.config/pybel/config.json`` by default\n\n Parameters\n ----------\n host : Optional[str]\n The host name to use. If none, first checks the PyBEL\n configuration entry ``PYBEL_REMOTE_HOST``, then the\n environment variable ``PYBEL_REMOTE_HOST``. Finally, defaults\n to https://bel-commons.scai.fraunhofer.de.\n user : Optional[str]\n The username (email) to use. If none, first checks the\n PyBEL configuration entry ``PYBEL_REMOTE_USER``,\n then the environment variable ``PYBEL_REMOTE_USER``.\n password : Optional[str]\n The password to use. If none, first checks the PyBEL configuration\n entry ``PYBEL_REMOTE_PASSWORD``, then the environment variable\n ``PYBEL_REMOTE_PASSWORD``.\n\n Returns\n -------\n response : requests.Response\n The response from the BEL Commons network upload endpoint.\n "
]
|
Please provide a description of the function:def save_model(self, path, output_format=None):
if output_format == 'pickle':
pybel.to_pickle(self.model, path)
else:
with open(path, 'w') as fh:
if output_format == 'json':
pybel.to_json_file(self.model, fh)
elif output_format == 'cx':
pybel.to_cx_file(self.model, fh)
else: # output_format == 'bel':
pybel.to_bel(self.model, fh) | [
"Save the :class:`pybel.BELGraph` using one of the outputs from\n :py:mod:`pybel`\n\n Parameters\n ----------\n path : str\n The path to output to\n output_format : Optional[str]\n Output format as ``cx``, ``pickle``, ``json`` or defaults to ``bel``\n "
]
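Usage sketch, assuming pba is an assembler whose model has already been built (file names hypothetical):

pba.save_model('model.bel')                         # BEL script (default)
pba.save_model('model.json', output_format='json')  # JSON output
pba.save_model('model.pkl', output_format='pickle') # pickled BELGraph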
|
Please provide a description of the function:def _add_nodes_edges(self, subj_agent, obj_agent, relation, evidences):
subj_data, subj_edge = _get_agent_node(subj_agent)
obj_data, obj_edge = _get_agent_node(obj_agent)
# If we failed to create nodes for subject or object, skip it
if subj_data is None or obj_data is None:
return
subj_node = self.model.add_node_from_data(subj_data)
obj_node = self.model.add_node_from_data(obj_data)
edge_data_list = \
_combine_edge_data(relation, subj_edge, obj_edge, evidences)
for edge_data in edge_data_list:
self.model.add_edge(subj_node, obj_node, **edge_data) | [
"Given subj/obj agents, relation, and evidence, add nodes/edges."
]
|
Please provide a description of the function:def _assemble_regulate_activity(self, stmt):
act_obj = deepcopy(stmt.obj)
act_obj.activity = stmt._get_activity_condition()
# We set is_active to True here since the polarity is encoded
# in the edge (decreases/increases)
act_obj.activity.is_active = True
activates = isinstance(stmt, Activation)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.subj, act_obj, relation, stmt.evidence) | [
"Example: p(HGNC:MAP2K1) => act(p(HGNC:MAPK1))"
]
|
Please provide a description of the function:def _assemble_modification(self, stmt):
sub_agent = deepcopy(stmt.sub)
sub_agent.mods.append(stmt._get_mod_condition())
activates = isinstance(stmt, AddModification)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.enz, sub_agent, relation, stmt.evidence) | [
"Example: p(HGNC:MAP2K1) => p(HGNC:MAPK1, pmod(Ph, Thr, 185))"
]
|
Please provide a description of the function:def _assemble_regulate_amount(self, stmt):
activates = isinstance(stmt, IncreaseAmount)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.subj, stmt.obj, relation, stmt.evidence) | [
"Example: p(HGNC:ELK1) => p(HGNC:FOS)"
]
|
Please provide a description of the function:def _assemble_gef(self, stmt):
gef = deepcopy(stmt.gef)
gef.activity = ActivityCondition('gef', True)
ras = deepcopy(stmt.ras)
ras.activity = ActivityCondition('gtpbound', True)
self._add_nodes_edges(gef, ras, pc.DIRECTLY_INCREASES, stmt.evidence) | [
"Example: act(p(HGNC:SOS1), ma(gef)) => act(p(HGNC:KRAS), ma(gtp))"
]
|
Please provide a description of the function:def _assemble_gap(self, stmt):
gap = deepcopy(stmt.gap)
gap.activity = ActivityCondition('gap', True)
ras = deepcopy(stmt.ras)
ras.activity = ActivityCondition('gtpbound', True)
self._add_nodes_edges(gap, ras, pc.DIRECTLY_DECREASES, stmt.evidence) | [
"Example: act(p(HGNC:RASA1), ma(gap)) =| act(p(HGNC:KRAS), ma(gtp))"
]
|
Please provide a description of the function:def _assemble_active_form(self, stmt):
act_agent = Agent(stmt.agent.name, db_refs=stmt.agent.db_refs)
act_agent.activity = ActivityCondition(stmt.activity, True)
activates = stmt.is_active
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.agent, act_agent, relation, stmt.evidence) | [
"Example: p(HGNC:ELK1, pmod(Ph)) => act(p(HGNC:ELK1), ma(tscript))"
]
|
Please provide a description of the function:def _assemble_complex(self, stmt):
complex_data, _ = _get_complex_node(stmt.members)
if complex_data is None:
logger.info('skip adding complex with no members: %s', stmt.members)
return
self.model.add_node_from_data(complex_data) | [
"Example: complex(p(HGNC:MAPK14), p(HGNC:TAB1))"
]
|
Please provide a description of the function:def _assemble_conversion(self, stmt):
pybel_lists = ([], [])
for pybel_list, agent_list in \
zip(pybel_lists, (stmt.obj_from, stmt.obj_to)):
for agent in agent_list:
node = _get_agent_grounding(agent)
# TODO check for missing grounding?
pybel_list.append(node)
rxn_node_data = reaction(
reactants=pybel_lists[0],
products=pybel_lists[1],
)
obj_node = self.model.add_node_from_data(rxn_node_data)
obj_edge = None # TODO: Any edge information possible here?
# Add node for controller, if there is one
if stmt.subj is not None:
subj_attr, subj_edge = _get_agent_node(stmt.subj)
subj_node = self.model.add_node_from_data(subj_attr)
edge_data_list = _combine_edge_data(pc.DIRECTLY_INCREASES,
subj_edge, obj_edge, stmt.evidence)
for edge_data in edge_data_list:
self.model.add_edge(subj_node, obj_node, **edge_data) | [
"Example: p(HGNC:HK1) => rxn(reactants(a(CHEBI:\"CHEBI:17634\")),\n products(a(CHEBI:\"CHEBI:4170\")))"
]
|
Please provide a description of the function:def _assemble_autophosphorylation(self, stmt):
sub_agent = deepcopy(stmt.enz)
mc = stmt._get_mod_condition()
sub_agent.mods.append(mc)
# FIXME Ignore any bound conditions on the substrate!!!
# This is because if they are included, a complex node will be returned,
# which (at least currently) won't incorporate any protein
# modifications.
sub_agent.bound_conditions = []
# FIXME
self._add_nodes_edges(stmt.enz, sub_agent, pc.DIRECTLY_INCREASES,
stmt.evidence) | [
"Example: complex(p(HGNC:MAPK14), p(HGNC:TAB1)) =>\n p(HGNC:MAPK14, pmod(Ph, Tyr, 100))"
]
|
Please provide a description of the function:def _assemble_transphosphorylation(self, stmt):
# Check our assumptions about the bound condition of the enzyme
assert len(stmt.enz.bound_conditions) == 1
assert stmt.enz.bound_conditions[0].is_bound
# Create a modified protein node for the bound target
sub_agent = deepcopy(stmt.enz.bound_conditions[0].agent)
sub_agent.mods.append(stmt._get_mod_condition())
self._add_nodes_edges(stmt.enz, sub_agent, pc.DIRECTLY_INCREASES,
stmt.evidence) | [
"Example: complex(p(HGNC:EGFR)) =>\n p(HGNC:EGFR, pmod(Ph, Tyr, 1173))"
]
|
Please provide a description of the function:def get_binding_site_name(agent):
# Try to construct a binding site name based on parent
grounding = agent.get_grounding()
if grounding != (None, None):
uri = hierarchies['entity'].get_uri(grounding[0], grounding[1])
# Get highest level parents in hierarchy
parents = hierarchies['entity'].get_parents(uri, 'top')
if parents:
# Choose the first parent if there are more than one
parent_uri = sorted(parents)[0]
parent_agent = _agent_from_uri(parent_uri)
binding_site = _n(parent_agent.name).lower()
return binding_site
# Fall back to Agent's own name if one from parent can't be constructed
binding_site = _n(agent.name).lower()
return binding_site | [
"Return a binding site name from a given agent."
]
|
Please provide a description of the function:def get_mod_site_name(mod_condition):
if mod_condition.residue is None:
mod_str = abbrevs[mod_condition.mod_type]
else:
mod_str = mod_condition.residue
mod_pos = mod_condition.position if \
mod_condition.position is not None else ''
name = ('%s%s' % (mod_str, mod_pos))
return name | [
"Return site names for a modification."
]
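A short sketch of the naming convention, assuming a phosphothreonine at position 185:

from indra.statements import ModCondition

get_mod_site_name(ModCondition('phosphorylation', 'T', '185'))  # 'T185'
# Without a residue, the mod-type abbreviation from the abbrevs
# table is used instead, e.g. 'phospho185'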
|
Please provide a description of the function:def process_flat_files(id_mappings_file, complexes_file=None, ptm_file=None,
ppi_file=None, seq_file=None, motif_window=7):
id_df = pd.read_csv(id_mappings_file, delimiter='\t', names=_hprd_id_cols,
dtype='str')
id_df = id_df.set_index('HPRD_ID')
if complexes_file is None and ptm_file is None and ppi_file is None:
raise ValueError('At least one of complexes_file, ptm_file, or '
'ppi_file must be given.')
if ptm_file and not seq_file:
raise ValueError('If ptm_file is given, seq_file must also be given.')
# Load complexes into dataframe
cplx_df = None
if complexes_file:
cplx_df = pd.read_csv(complexes_file, delimiter='\t', names=_cplx_cols,
dtype='str', na_values=['-', 'None'])
# Load ptm data into dataframe
ptm_df = None
seq_dict = None
if ptm_file:
ptm_df = pd.read_csv(ptm_file, delimiter='\t', names=_ptm_cols,
dtype='str', na_values='-')
# Load protein sequences as a dict keyed by RefSeq ID
seq_dict = load_fasta_sequences(seq_file, id_index=2)
# Load the PPI data into dataframe
ppi_df = None
if ppi_file:
ppi_df = pd.read_csv(ppi_file, delimiter='\t', names=_ppi_cols,
dtype='str')
# Create the processor
return HprdProcessor(id_df, cplx_df, ptm_df, ppi_df, seq_dict, motif_window) | [
"Get INDRA Statements from HPRD data.\n\n Of the arguments, `id_mappings_file` is required, and at least one of\n `complexes_file`, `ptm_file`, and `ppi_file` must also be given. If\n `ptm_file` is given, `seq_file` must also be given.\n\n Note that many proteins (> 1,600) in the HPRD content are associated with\n outdated RefSeq IDs that cannot be mapped to Uniprot IDs. For these, the\n Uniprot ID obtained from the HGNC ID (itself obtained from the Entrez ID)\n is used. Because the sequence referenced by the Uniprot ID obtained this\n way may be different from the (outdated) RefSeq sequence included with the\n HPRD content, it is possible that this will lead to invalid site positions\n with respect to the Uniprot IDs.\n\n To allow these site positions to be mapped during assembly, the\n Modification statements produced by the HprdProcessor include an additional\n key in the `annotations` field of their Evidence object. The annotations\n field is called 'site_motif' and it maps to a dictionary with three\n elements: 'motif', 'respos', and 'off_by_one'. 'motif' gives the peptide\n sequence obtained from the RefSeq sequence included with HPRD. 'respos'\n indicates the position in the peptide sequence containing the residue.\n Note that these positions are ONE-INDEXED (not zero-indexed). Finally, the\n 'off-by-one' field contains a boolean value indicating whether the correct\n position was inferred as being an off-by-one (methionine cleavage) error.\n If True, it means that the given residue could not be found in the HPRD\n RefSeq sequence at the given position, but a matching residue was found at\n position+1, suggesting a sequence numbering based on the methionine-cleaved\n sequence. The peptide included in the 'site_motif' dictionary is based on\n this updated position.\n\n Parameters\n ----------\n id_mappings_file : str\n Path to HPRD_ID_MAPPINGS.txt file.\n complexes_file : Optional[str]\n Path to PROTEIN_COMPLEXES.txt file.\n ptm_file : Optional[str]\n Path to POST_TRANSLATIONAL_MODIFICATIONS.txt file.\n ppi_file : Optional[str]\n Path to BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt file.\n seq_file : Optional[str]\n Path to PROTEIN_SEQUENCES.txt file.\n motif_window : int\n Number of flanking amino acids to include on each side of the\n PTM target residue in the 'site_motif' annotations field of the\n Evidence for Modification Statements. Default is 7.\n\n Returns\n -------\n HprdProcessor\n An HprdProcessor object which contains a list of extracted INDRA\n Statements in its statements attribute.\n "
]
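Usage sketch with the HPRD flat-file names mentioned in the docstring (local paths hypothetical):

from indra.sources import hprd

hp = hprd.process_flat_files(
    'HPRD_ID_MAPPINGS.txt',
    ptm_file='POST_TRANSLATIONAL_MODIFICATIONS.txt',
    seq_file='PROTEIN_SEQUENCES.txt')
stmts = hp.statements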
|
Please provide a description of the function:def _gather_active_forms(self):
for stmt in self.statements:
if isinstance(stmt, ActiveForm):
base_agent = self.agent_set.get_create_base_agent(stmt.agent)
# Handle the case where an activity flag is set
agent_to_add = stmt.agent
if stmt.agent.activity:
new_agent = fast_deepcopy(stmt.agent)
new_agent.activity = None
agent_to_add = new_agent
base_agent.add_activity_form(agent_to_add, stmt.is_active) | [
"Collect all the active forms of each Agent in the Statements."
]
|
Please provide a description of the function:def replace_activities(self):
logger.debug('Running PySB Preassembler replace activities')
# TODO: handle activity hierarchies
new_stmts = []
def has_agent_activity(stmt):
for agent in stmt.agent_list():
if isinstance(agent, Agent) and agent.activity is not None:
return True
return False
# First collect all explicit active forms
self._gather_active_forms()
# Iterate over all statements
for j, stmt in enumerate(self.statements):
logger.debug('%d/%d %s' % (j + 1, len(self.statements), stmt))
# If the Statement doesn't have any activities, we can just
# keep it and move on
if not has_agent_activity(stmt):
new_stmts.append(stmt)
continue
stmt_agents = stmt.agent_list()
num_agents = len(stmt_agents)
# Make a list with an empty list for each Agent so that later
# we can build combinations of Agent forms
agent_forms = [[] for a in stmt_agents]
for i, agent in enumerate(stmt_agents):
# This is the case where there is an activity flag on an
# Agent which we will attempt to replace with an explicit
# active form
if agent is not None and isinstance(agent, Agent) and \
agent.activity is not None:
base_agent = self.agent_set.get_create_base_agent(agent)
# If it is an "active" state
if agent.activity.is_active:
active_forms = base_agent.active_forms
# If no explicit active forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent]
# If it is an "inactive" state
else:
active_forms = base_agent.inactive_forms
# If no explicit inactive forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent]
# We now iterate over the active agent forms and create
# new agents
for af in active_forms:
new_agent = fast_deepcopy(agent)
self._set_agent_context(af, new_agent)
agent_forms[i].append(new_agent)
# Otherwise we just copy over the agent as is
else:
agent_forms[i].append(agent)
# Now create all possible combinations of the agents and create new
# statements as needed
agent_combs = itertools.product(*agent_forms)
for agent_comb in agent_combs:
new_stmt = fast_deepcopy(stmt)
new_stmt.set_agent_list(agent_comb)
new_stmts.append(new_stmt)
self.statements = new_stmts | [
"Replace ative flags with Agent states when possible.",
"Return True if any agents in the Statement have activity."
]
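A conceptual sketch of the rewrite, assuming phosphorylated MAP2K1 was registered as an active form by an earlier ActiveForm Statement:

from indra.statements import (ActiveForm, Activation, Agent,
                              ActivityCondition, ModCondition)

# Registered active form: phosphorylated MAP2K1
af = ActiveForm(Agent('MAP2K1', mods=[ModCondition('phosphorylation')]),
                'activity', True)
# Input Statement whose subject carries an activity flag
act = Activation(Agent('MAP2K1',
                       activity=ActivityCondition('activity', True)),
                 Agent('MAPK1'))
# After replace_activities(), the subject's activity flag is replaced
# by the explicit phosphorylated form registered above.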
|