Code | Summary
---|---|
Please provide a description of the function:def get_groundings(entity):
def get_grounding_entries(grounding):
if not grounding:
return None
entries = []
values = grounding.get('values', [])
# Values could still have been a None entry here
if values:
for entry in values:
ont_concept = entry.get('ontologyConcept')
value = entry.get('value')
if ont_concept is None or value is None:
continue
entries.append((ont_concept, value))
return entries
# Save raw text and Eidos scored groundings as db_refs
db_refs = {'TEXT': entity['text']}
groundings = entity.get('groundings')
if not groundings:
return db_refs
for g in groundings:
entries = get_grounding_entries(g)
# Only add these groundings if there are actual values listed
if entries:
key = g['name'].upper()
if key == 'UN':
db_refs[key] = [(s[0].replace(' ', '_'), s[1])
for s in entries]
else:
db_refs[key] = entries
return db_refs | [
"Return groundings as db_refs for an entity."
]
|
Please provide a description of the function:def get_concept(entity):
# Use the canonical name as the name of the Concept
name = entity['canonicalName']
db_refs = EidosProcessor.get_groundings(entity)
concept = Concept(name, db_refs=db_refs)
return concept | [
"Return Concept from an Eidos entity."
]
|
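For reference, a minimal sketch of the entity dictionary these two helpers consume, inferred from the code above. The concept path, score, and grounding name 'un' are made up, and the import path is assumed to be the usual indra.sources.eidos.processor module.

from indra.sources.eidos.processor import EidosProcessor

# Hypothetical Eidos entity fragment, shaped the way get_groundings reads it
entity = {
    'text': 'food insecurity',
    'canonicalName': 'food insecurity',
    'groundings': [
        {'name': 'un',
         'values': [{'ontologyConcept': 'UN/events/food insecurity',
                     'value': 0.83}]},
    ],
}

db_refs = EidosProcessor.get_groundings(entity)
# {'TEXT': 'food insecurity', 'UN': [('UN/events/food_insecurity', 0.83)]}
concept = EidosProcessor.get_concept(entity)  # Concept named 'food insecurity'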
Please provide a description of the function:def time_context_from_ref(self, timex):
# If the timex has a value set, it means that it refers to a DCT or
# a TimeExpression e.g. "value": {"@id": "_:DCT_1"} and the parameters
# need to be taken from there
value = timex.get('value')
if value:
# Here we get the TimeContext directly from the stashed DCT
# dictionary
tc = self.doc.timexes.get(value['@id'])
return tc
return None | [
"Return a time context object given a timex reference entry."
]
|
Please provide a description of the function:def geo_context_from_ref(self, ref):
value = ref.get('value')
if value:
# Here we get the RefContext from the stashed geoloc dictionary
rc = self.doc.geolocs.get(value['@id'])
return rc
return None | [
"Return a ref context object given a location reference entry."
]
|
Please provide a description of the function:def time_context_from_dct(dct):
time_text = dct.get('text')
start = _get_time_stamp(dct.get('start'))
end = _get_time_stamp(dct.get('end'))
duration = dct.get('duration')
tc = TimeContext(text=time_text, start=start, end=end,
duration=duration)
return tc | [
"Return a time context object given a DCT entry."
]
|
Please provide a description of the function:def make_hash(s, n_bytes):
raw_h = int(md5(s.encode('utf-8')).hexdigest()[:n_bytes], 16)
# Make it a signed int.
return 16**n_bytes//2 - raw_h | [
"Make the hash from a matches key."
]
|
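A self-contained sketch of how the signed-hash trick above behaves; the matches-key string is hypothetical and the function body is simply restated so the snippet runs on its own.

from hashlib import md5

def make_hash(s, n_bytes):
    # Same logic as above; note that n_bytes actually counts hex digits
    # of the md5 digest, not bytes.
    raw_h = int(md5(s.encode('utf-8')).hexdigest()[:n_bytes], 16)
    return 16**n_bytes // 2 - raw_h

h = make_hash('Phosphorylation MAP2K1 MAPK1 S 222', 14)
# raw_h lies in [0, 16**14), so the result lies in (-16**14 // 2, 16**14 // 2],
# i.e. it behaves like a signed integer centered on zero.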
Please provide a description of the function:def parse_a1(a1_text):
entities = {}
for line in a1_text.split('\n'):
if len(line) == 0:
continue
tokens = line.rstrip().split('\t')
if len(tokens) != 3:
raise Exception('Expected three tab-separated tokens per line ' +
'in the a1 file output from TEES.')
identifier = tokens[0]
entity_info = tokens[1]
entity_name = tokens[2]
info_tokens = entity_info.split()
if len(info_tokens) != 3:
raise Exception('Expected three space-separated tokens in the ' +
'second column of the a1 file output from TEES.')
entity_type = info_tokens[0]
first_offset = int(info_tokens[1])
second_offset = int(info_tokens[2])
offsets = (first_offset, second_offset)
entities[identifier] = TEESEntity(
identifier,
entity_type,
entity_name,
offsets)
return entities | [
"Parses an a1 file, the file TEES outputs that lists the entities in\n the extracted events.\n\n Parameters\n ----------\n a1_text : str\n Text of the TEES a1 output file, specifying the entities\n\n Returns\n -------\n entities : Dictionary mapping TEES identifiers to TEESEntity objects\n describing each entity. Each row of the .a1 file corresponds to one\n TEESEntity object.\n "
]
|
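A quick usage sketch for parse_a1 with a hypothetical two-line .a1 snippet; the entity names and offsets are invented, and TEESEntity is assumed to be the record type used alongside this function.

# identifier <TAB> "type start_offset end_offset" <TAB> entity text
a1_text = ("T1\tProtein 0 4\tEGFR\n"
           "T2\tProtein 24 28\tGRB2\n")

entities = parse_a1(a1_text)
# entities['T1'] -> TEESEntity('T1', 'Protein', 'EGFR', (0, 4))
# entities['T2'] -> TEESEntity('T2', 'Protein', 'GRB2', (24, 28))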
Please provide a description of the function:def parse_a2(a2_text, entities, tees_sentences):
G = nx.DiGraph()
event_names = set()
# Put entities into the graph
for entity_name in entities.keys():
offset0 = entities[entity_name].offsets[0]
G.add_node(entity_name, text=entities[entity_name].entity_name,
type=entities[entity_name].entity_type, is_event=False,
sentence_text=tees_sentences.index_to_sentence(offset0))
for line in a2_text.split('\n'):
if len(line) == 0:
continue
if line[0] == 'T': # New text
tokens = line.rstrip().split('\t')
identifier = tokens[0]
text = tokens[2]
if identifier not in G.node:
G.add_node(identifier)
G.node[identifier]['text'] = text
G.node[identifier]['is_event'] = False
elif line[0] == 'E': # New event
tokens = line.rstrip().split('\t')
if len(tokens) != 2:
raise Exception('Expected two tab-separated tokens per line ' +
'in TEES a2 file.')
event_identifier = tokens[0]
# In the second tab-separated token, we have a series of keys
# and values separated by the colon
key_value_pairs = tokens[1].split()
event_name = key_value_pairs[0].split(':')[0]
properties = dict()
for pair in key_value_pairs:
key_and_value = pair.split(':')
if len(key_and_value) != 2:
raise Exception('Expected two colon-separated tokens ' +
'in the second column of the a2 file ' +
'output from TEES.')
properties[key_and_value[0]] = key_and_value[1]
# Add event to the graph if we haven't added it yet
if event_identifier not in G.node:
G.add_node(event_identifier)
# Add edges
for key in properties.keys():
G.add_edge(event_identifier, properties[key],
relation=key)
# We assume that node is not negated unless a event modifier
# later says otherwise
G.node[event_identifier]['negated'] = False
G.node[event_identifier]['speculation'] = False
G.node[event_identifier]['type'] = event_name
G.node[event_identifier]['is_event'] = True
event_names.add(event_name)
elif line[0] == 'M': # Event modification
tokens = line.split('\t')
if len(tokens) != 2:
raise Exception('Expected two tab-separated tokens per line ' +
'in the a2 file output from TEES.')
tokens2 = tokens[1].split()
if len(tokens2) != 2:
raise Exception('Expected two space-separated tokens per ' +
'line in the a2 file output from TEES.')
modification_type = tokens2[0]
modified = tokens2[1]
# But assuming this is a negation modifier, we'll need to
# handle it
if modification_type == 'Negation':
G.node[modified]['negated'] = True
elif modification_type == 'Speculation':
G.node[modified]['speculation'] = True
else:
# I've only seen negation event modifiers in these outputs
# If there are other types of modifications,
# we'll need to handle them, since it could
# affect whether we want to process them into statements
print('Unknown event modification: %s' % line)
assert(False)
return G | [
"Extracts events from a TEES a2 output into a networkx directed graph.\n\n Parameters\n ----------\n a2_text : str\n Text of the TEES a2 file output, specifying the event graph\n sentences_xml_gz : str\n Filename with the TEES sentence segmentation in a gzipped xml format\n\n Returns\n -------\n events :\n A networkx graph of events. Node names are entity and event labels\n in the original A2 file (such as \"E2\" or \"T1\") and edges between nodes\n are the various properties. Text nodes (ex. \"T1\") have a text node\n property that gives the text.\n "
]
|
Please provide a description of the function:def parse_output(a1_text, a2_text, sentence_segmentations):
# Parse the sentence segmentation document
tees_sentences = TEESSentences(sentence_segmentations)
# Parse the a1 (entities) file
entities = parse_a1(a1_text)
# Parse the a2 (events) file
events = parse_a2(a2_text, entities, tees_sentences)
return events | [
"Parses the output of the TEES reader and returns a networkx graph\n with the event information.\n\n Parameters\n ----------\n a1_text : str\n Contents of the TEES a1 output, specifying the entities\n a1_text : str\n Contents of the TEES a2 output, specifying the event graph\n sentence_segmentations : str\n Concents of the TEES sentence segmentation output XML\n\n Returns\n -------\n events : networkx.DiGraph\n networkx graph with the entities, events, and relationship between\n extracted by TEES\n "
]
|
Please provide a description of the function:def tees_parse_networkx_to_dot(G, output_file, subgraph_nodes):
with codecs.open(output_file, 'w', encoding='utf-8') as f:
f.write('digraph teesParse {\n')
mentioned_nodes = set()
for from_node in subgraph_nodes:
for edge in G.edges(from_node):
to_node = edge[1]
mentioned_nodes.add(from_node)
mentioned_nodes.add(to_node)
relation = G.edges[from_node, to_node]['relation']
f.write('%s -> %s [ label = "%s" ];\n' % (from_node, to_node,
relation))
for node in mentioned_nodes:
is_event = G.node[node]['is_event']
if is_event:
node_type = G.node[node]['type']
negated = G.node[node]['negated']
speculation = G.node[node]['speculation']
# Add a tag to the label if the event is negated or speculation
if negated and speculation:
tag = ' {NS}'
elif negated:
tag = ' {N}'
elif speculation:
tag = ' {S}'
else:
tag = ''
node_label = node_type + tag
else:
node_label = G.node[node]['text']
f.write('%s [label="%s"];\n' % (node, node_label))
f.write('}\n') | [
"Converts TEES extractions stored in a networkx graph into a graphviz\n .dot file.\n\n Parameters\n ----------\n G : networkx.DiGraph\n Graph with TEES extractions returned by run_and_parse_tees\n output_file : str\n Output file to which to write .dot file\n subgraph_nodes : list[str]\n Only convert the connected graph that includes these ndoes\n "
]
|
Please provide a description of the function:def _get_event(self, event, find_str):
# Get the term with the given element id
element = event.find(find_str)
if element is None:
return None
element_id = element.attrib.get('id')
element_term = self.tree.find("*[@id='%s']" % element_id)
if element_term is None:
return None
time, location = self._extract_time_loc(element_term)
# Now see if there is a modifier like assoc-with connected
# to the main concept
assoc_with = self._get_assoc_with(element_term)
# Get the element's text and use it to construct a Concept
element_text_element = element_term.find('text')
if element_text_element is None:
return None
element_text = element_text_element.text
element_db_refs = {'TEXT': element_text}
element_name = sanitize_name(element_text)
element_type_element = element_term.find('type')
if element_type_element is not None:
element_db_refs['CWMS'] = element_type_element.text
# If there's an assoc-with, we tack it on as extra grounding
if assoc_with is not None:
element_db_refs['CWMS'] += ('|%s' % assoc_with)
concept = Concept(element_name, db_refs=element_db_refs)
if time or location:
context = WorldContext(time=time, geo_location=location)
else:
context = None
event_obj = Event(concept, context=context)
return event_obj | [
"Get a concept referred from the event by the given string."
]
|
Please provide a description of the function:def _extract_time_loc(self, term):
loc = term.find('location')
if loc is None:
loc_context = None
else:
loc_id = loc.attrib.get('id')
loc_term = self.tree.find("*[@id='%s']" % loc_id)
text = loc_term.findtext('text')
name = loc_term.findtext('name')
loc_context = RefContext(name=text)
time = term.find('time')
if time is None:
time_context = None
else:
time_id = time.attrib.get('id')
time_term = self.tree.find("*[@id='%s']" % time_id)
if time_term is not None:
text = time_term.findtext('text')
timex = time_term.find('timex')
if timex is not None:
year = timex.findtext('year')
try:
year = int(year)
except Exception:
year = None
month = timex.findtext('month')
day = timex.findtext('day')
if year and (month or day):
try:
month = int(month)
except Exception:
month = 1
try:
day = int(day)
except Exception:
day = 1
start = datetime(year, month, day)
time_context = TimeContext(text=text, start=start)
else:
time_context = TimeContext(text=text)
else:
time_context = TimeContext(text=text)
else:
time_context = None
return time_context, loc_context | [
"Get the location from a term (CC or TERM)"
]
|
Please provide a description of the function:def make_model(self, grounding_ontology='UN', grounding_threshold=None):
if grounding_threshold is not None:
self.grounding_threshold = grounding_threshold
self.grounding_ontology = grounding_ontology
# Filter to Influence Statements which are currently supported
statements = [stmt for stmt in self.statements if
isinstance(stmt, Influence)]
# Initialize graph
self.CAG = nx.MultiDiGraph()
# Add nodes and edges to the graph
for s in statements:
# Get standardized name of subject and object
# subj, obj = (self._node_name(s.subj), self._node_name(s.obj))
# See if both subject and object have polarities given
has_both_polarity = (s.subj.delta['polarity'] is not None and
s.obj.delta['polarity'] is not None)
# Add the nodes to the graph
for node, delta in zip((s.subj.concept, s.obj.concept),
(s.subj.delta, s.obj.delta)):
self.CAG.add_node(self._node_name(node),
simulable=has_both_polarity,
mods=delta['adjectives'])
# Edge is solid if both nodes have polarity given
linestyle = 'solid' if has_both_polarity else 'dotted'
if has_both_polarity:
same_polarity = (s.subj.delta['polarity'] ==
s.obj.delta['polarity'])
if same_polarity:
target_arrow_shape, linecolor = ('circle', 'green')
else:
target_arrow_shape, linecolor = ('tee', 'maroon')
else:
target_arrow_shape, linecolor = ('triangle', 'maroon')
# Add edge to the graph with metadata from statement
provenance = []
if s.evidence:
provenance = s.evidence[0].annotations.get('provenance', [])
if provenance:
provenance[0]['text'] = s.evidence[0].text
self.CAG.add_edge(
self._node_name(s.subj.concept),
self._node_name(s.obj.concept),
subj_polarity=s.subj.delta['polarity'],
subj_adjectives=s.subj.delta['adjectives'],
obj_polarity=s.obj.delta['polarity'],
obj_adjectives=s.obj.delta['adjectives'],
linestyle=linestyle,
linecolor=linecolor,
targetArrowShape=target_arrow_shape,
provenance=provenance,
)
return self.CAG | [
"Return a networkx MultiDiGraph representing a causal analysis graph.\n\n Parameters\n ----------\n grounding_ontology : Optional[str]\n The ontology from which the grounding should be taken\n (e.g. UN, FAO)\n grounding_threshold : Optional[float]\n Minimum threshold score for Eidos grounding.\n\n Returns\n -------\n nx.MultiDiGraph\n The assembled CAG.\n "
]
|
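A hedged end-to-end sketch of how make_model and the two export methods above are typically chained; the import paths (indra.sources.eidos, indra.assemblers.cag) and the example sentence are assumptions, and Eidos reading requires a running Eidos service or JAR.

from indra.sources import eidos
from indra.assemblers.cag import CAGAssembler

ep = eidos.process_text('Poor rainfall causes increased food insecurity.')
ca = CAGAssembler(ep.statements)
cag = ca.make_model(grounding_ontology='UN', grounding_threshold=0.7)

cyjs = ca.export_to_cytoscapejs()   # dict with 'nodes' and 'edges' lists
js_str = ca.generate_jupyter_js()   # Javascript for a Jupyter notebook cell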
Please provide a description of the function:def export_to_cytoscapejs(self):
def _create_edge_data_dict(e):
# A hack to get rid of the redundant 'Provenance' label.
if e[3].get('provenance'):
tooltip = e[3]['provenance'][0]
if tooltip.get('@type'):
del tooltip['@type']
else:
tooltip = None
edge_data_dict = {
'id' : e[0]+'_'+e[1],
'source' : e[0],
'target' : e[1],
'linestyle' : e[3]["linestyle"],
'linecolor' : e[3]["linecolor"],
'targetArrowShape' : e[3]["targetArrowShape"],
'subj_adjectives' : e[3]["subj_adjectives"],
'subj_polarity' : e[3]["subj_polarity"],
'obj_adjectives' : e[3]["obj_adjectives"],
'obj_polarity' : e[3]["obj_polarity"],
'tooltip' : tooltip,
'simulable' : False if (
e[3]['obj_polarity'] is None or
e[3]['subj_polarity'] is None) else True,
}
return edge_data_dict
return {
'nodes': [{'data': {
'id': n[0],
'simulable': n[1]['simulable'],
'tooltip': 'Modifiers: '+json.dumps(n[1]['mods'])}
} for n in self.CAG.nodes(data=True)],
'edges': [{'data': _create_edge_data_dict(e)}
for e in self.CAG.edges(data=True, keys=True)]
} | [
"Return CAG in format readable by CytoscapeJS.\n\n Return\n ------\n dict\n A JSON-like dict representing the graph for use with\n CytoscapeJS.\n ",
"Return a dict from a MultiDiGraph edge for CytoscapeJS export."
]
|
Please provide a description of the function:def generate_jupyter_js(self, cyjs_style=None, cyjs_layout=None):
# First, export the CAG to CyJS
cyjs_elements = self.export_to_cytoscapejs()
# Load the Javascript template
tempf = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cag_template.js')
with open(tempf, 'r') as fh:
template = fh.read()
# Load the default style and layout
stylef = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cag_style.json')
with open(stylef, 'r') as fh:
style = json.load(fh)
# Apply style and layout only if arg wasn't passed in
if cyjs_style is None:
cyjs_style = style['style']
if cyjs_layout is None:
cyjs_layout = style['layout']
# Now fill in the template
formatted_args = tuple(json.dumps(x, indent=2) for x in
(cyjs_elements, cyjs_style, cyjs_layout))
js_str = template % formatted_args
return js_str | [
"Generate Javascript from a template to run in Jupyter notebooks.\n\n Parameters\n ----------\n cyjs_style : Optional[dict]\n A dict that sets CytoscapeJS style as specified in\n https://github.com/cytoscape/cytoscape.js/blob/master/documentation/md/style.md.\n\n cyjs_layout : Optional[dict]\n A dict that sets CytoscapeJS\n `layout parameters <http://js.cytoscape.org/#core/layout>`_.\n\n Returns\n -------\n str\n A Javascript string to be rendered in a Jupyter notebook cell.\n "
]
|
Please provide a description of the function:def _node_name(self, concept):
if (# grounding threshold is specified
self.grounding_threshold is not None
# The particular eidos ontology grounding (un/wdi/fao) is present
and concept.db_refs[self.grounding_ontology]
# The grounding score is above the grounding threshold
and (concept.db_refs[self.grounding_ontology][0][1] >
self.grounding_threshold)):
entry = concept.db_refs[self.grounding_ontology][0][0]
return entry.split('/')[-1].replace('_', ' ').capitalize()
else:
return concept.name.capitalize() | [
"Return a standardized name for a node given a Concept."
]
|
Please provide a description of the function:def namespace_from_uri(uri):
patterns = ['http://www.openbel.org/bel/[pragm]_([A-Za-z]+)_.*',
'http://www.openbel.org/bel/[a-z]+_[pr]_([A-Za-z]+)_.*',
'http://www.openbel.org/bel/[a-z]+_complex_([A-Za-z]+)_.*',
'http://www.openbel.org/bel/complex_([A-Za-z]+)_.*']
for pr in patterns:
match = re.match(pr, uri)
if match is not None:
return match.groups()[0]
return None | [
"Return the entity namespace from the URI. Examples:\n http://www.openbel.org/bel/p_HGNC_RAF1 -> HGNC\n http://www.openbel.org/bel/p_RGD_Raf1 -> RGD\n http://www.openbel.org/bel/p_PFH_MEK1/2_Family -> PFH\n "
]
|
Please provide a description of the function:def term_from_uri(uri):
if uri is None:
return None
# This insures that if we get a Literal with an integer value (as we
# do for modification positions), it will get converted to a string,
# not an integer.
if isinstance(uri, rdflib.Literal):
uri = str(uri.toPython())
# This is to handle URIs like
# http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family
# or
# http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family
# In the current implementation, the order of the patterns
# matters.
patterns = ['http://www.openbel.org/bel/namespace//(.*)',
'http://www.openbel.org/vocabulary//(.*)',
'http://www.openbel.org/bel//(.*)',
'http://www.openbel.org/bel/namespace/(.*)',
'http://www.openbel.org/vocabulary/(.*)',
'http://www.openbel.org/bel/(.*)']
for pr in patterns:
match = re.match(pr, uri)
if match is not None:
term = match.groups()[0]
term = unquote(term)
return term
# If none of the patterns match then the URI is actually a simple term
# for instance a site: "341" or a substitution: "sub(V,600,E)"
return uri | [
"Removes prepended URI information from terms."
]
|
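A few illustrative calls to the two URI helpers, using the example URIs from the docstrings above; the outputs shown in comments are what the regular expressions yield.

namespace_from_uri('http://www.openbel.org/bel/p_HGNC_RAF1')
# -> 'HGNC'

term_from_uri('http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family')
# -> 'MAPK Erk1/3 Family'   (prefix stripped, percent-encoding unquoted)

term_from_uri('341')
# -> '341'   (no pattern matches, so the plain site term is returned as-is)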
Please provide a description of the function:def get_modifications(self):
# Get statements where the subject is an activity
q_phospho1 = prefixes +
# Get statements where the subject is a protein abundance
q_phospho2 = prefixes +
for q_phospho in (q_phospho1, q_phospho2):
# Run the query
res_phospho = self.g.query(q_phospho)
for stmt in res_phospho:
# Parse out the elements of the query
evidence = self._get_evidence(stmt[4])
enz = self._get_agent(stmt[0], stmt[5])
#act_type = name_from_uri(stmt[1])
sub = self._get_agent(stmt[1], stmt[6])
mod = term_from_uri(stmt[2])
residue = self._get_residue(mod)
mod_pos = term_from_uri(stmt[3])
stmt_str = strip_statement(stmt[4])
# Get the relationship (increases/decreases, etc.)
rel = term_from_uri(stmt[7])
if rel == 'DirectlyIncreases' or rel == 'DirectlyDecreases':
is_direct = True
else:
is_direct = False
# Build the INDRA statement
# Handle PhosphorylationSerine, etc.
if mod.startswith('Phosphorylation'):
modtype = 'phosphorylation'
else:
modtype = mod.lower()
# Get the class and invert if needed
modclass = modtype_to_modclass[modtype]
if rel == 'DirectlyDecreases' or rel == 'Decreases':
modclass = modclass_to_inverse[modclass]
stmt = modclass(enz, sub, residue, mod_pos, evidence)
if is_direct:
self.statements.append(stmt)
self.converted_direct_stmts.append(stmt_str)
else:
self.converted_indirect_stmts.append(stmt_str)
self.indirect_stmts.append(stmt)
return | [
"Extract INDRA Modification Statements from BEL.\n\n Two SPARQL patterns are used for extracting Modifications from BEL:\n\n - q_phospho1 assumes that the subject is an AbundanceActivity, which\n increases/decreases a ModifiedProteinAbundance.\n\n Examples:\n\n kinaseActivity(proteinAbundance(HGNC:IKBKE))\n directlyIncreases\n proteinAbundance(HGNC:IRF3,proteinModification(P,S,385))\n\n phosphataseActivity(proteinAbundance(HGNC:DUSP4))\n directlyDecreases\n proteinAbundance(HGNC:MAPK1,proteinModification(P,T,185))\n\n - q_phospho2 assumes that the subject is a ProteinAbundance which\n increases/decreases a ModifiedProteinAbundance.\n\n Examples:\n\n proteinAbundance(HGNC:NGF) increases\n proteinAbundance(HGNC:NFKBIA,proteinModification(P,Y,42))\n\n proteinAbundance(HGNC:FGF1) decreases\n proteinAbundance(HGNC:RB1,proteinModification(P))\n ",
"\n SELECT ?enzName ?substrateName ?mod ?pos\n ?stmt ?enzyme ?substrate ?rel\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?object .\n ?subject a belvoc:AbundanceActivity .\n ?subject belvoc:hasChild ?enzyme .\n ?enzyme a belvoc:ProteinAbundance .\n ?enzyme belvoc:hasConcept ?enzName .\n ?object a belvoc:ModifiedProteinAbundance .\n ?object belvoc:hasModificationType ?mod .\n ?object belvoc:hasChild ?substrate .\n ?substrate belvoc:hasConcept ?substrateName .\n OPTIONAL { ?object belvoc:hasModificationPosition ?pos . }\n }\n ",
"\n SELECT ?enzName ?substrateName ?mod ?pos\n ?stmt ?enzyme ?substrate ?rel\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?enzyme .\n ?stmt belvoc:hasObject ?object .\n ?enzyme a belvoc:ProteinAbundance .\n ?enzyme belvoc:hasConcept ?enzName .\n ?object a belvoc:ModifiedProteinAbundance .\n ?object belvoc:hasModificationType ?mod .\n ?object belvoc:hasChild ?substrate .\n ?substrate belvoc:hasConcept ?substrateName .\n OPTIONAL { ?object belvoc:hasModificationPosition ?pos . }\n }\n "
]
|
Please provide a description of the function:def get_activating_mods(self):
q_mods = prefixes +
# Now make the PySB for the phosphorylation
res_mods = self.g.query(q_mods)
for stmt in res_mods:
evidence = self._get_evidence(stmt[5])
# Parse out the elements of the query
species = self._get_agent(stmt[0], stmt[6])
act_type = term_from_uri(stmt[1]).lower()
mod = term_from_uri(stmt[2])
mod_pos = term_from_uri(stmt[3])
mc = self._get_mod_condition(mod, mod_pos)
species.mods = [mc]
rel = term_from_uri(stmt[4])
if rel == 'DirectlyDecreases':
is_active = False
else:
is_active = True
stmt_str = strip_statement(stmt[5])
# Mark this as a converted statement
self.converted_direct_stmts.append(stmt_str)
st = ActiveForm(species, act_type, is_active, evidence)
self.statements.append(st) | [
"Extract INDRA ActiveForm Statements with a single mod from BEL.\n\n The SPARQL pattern used for extraction from BEL looks for a\n ModifiedProteinAbundance as subject and an Activiy of a\n ProteinAbundance as object.\n\n Examples:\n\n proteinAbundance(HGNC:INSR,proteinModification(P,Y))\n directlyIncreases\n kinaseActivity(proteinAbundance(HGNC:INSR))\n ",
"\n SELECT ?speciesName ?actType ?mod ?pos ?rel ?stmt ?species\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?object .\n ?object belvoc:hasActivityType ?actType .\n ?object belvoc:hasChild ?species .\n ?species a belvoc:ProteinAbundance .\n ?species belvoc:hasConcept ?speciesName .\n ?subject a belvoc:ModifiedProteinAbundance .\n ?subject belvoc:hasModificationType ?mod .\n ?subject belvoc:hasChild ?species .\n OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . }\n FILTER (?rel = belvoc:DirectlyIncreases ||\n ?rel = belvoc:DirectlyDecreases)\n }\n "
]
|
Please provide a description of the function:def get_complexes(self):
q_cmplx = prefixes +
# Run the query
res_cmplx = self.g.query(q_cmplx)
# Store the members of each complex in a dict of lists, keyed by the
# term for the complex
cmplx_dict = collections.defaultdict(list)
cmplx_ev = {}
for stmt in res_cmplx:
stmt_uri = stmt[3]
ev = self._get_evidence(stmt_uri)
for e in ev:
e.epistemics['direct'] = True
cmplx_name = term_from_uri(stmt[0])
cmplx_id = stmt_uri + '#' + cmplx_name
child = self._get_agent(stmt[1], stmt[2])
cmplx_dict[cmplx_id].append(child)
# This might be written multiple times but with the same
# evidence
cmplx_ev[cmplx_id] = ev
# Now iterate over the stored complex information and create binding
# statements
for cmplx_id, cmplx_list in cmplx_dict.items():
if len(cmplx_list) < 2:
msg = 'Complex %s has fewer than 2 members! Skipping.' % \
cmplx_id
logger.warning(msg)
else:
self.statements.append(Complex(cmplx_list,
evidence=cmplx_ev[cmplx_id])) | [
"Extract INDRA Complex Statements from BEL.\n\n The SPARQL query used to extract Complexes looks for ComplexAbundance\n terms and their constituents. This pattern is distinct from other\n patterns in this processor in that it queries for terms, not\n full statements.\n\n Examples:\n\n complexAbundance(proteinAbundance(HGNC:PPARG),\n proteinAbundance(HGNC:RXRA))\n decreases\n biologicalProcess(MESHPP:\"Insulin Resistance\")\n ",
"\n SELECT ?complexTerm ?childName ?child ?stmt\n WHERE {\n {\n {?stmt belvoc:hasSubject ?complexTerm}\n UNION\n {?stmt belvoc:hasObject ?complexTerm .}\n UNION\n {?stmt belvoc:hasSubject ?term .\n ?term belvoc:hasChild ?complexTerm .}\n UNION\n {?stmt belvoc:hasObject ?term .\n ?term belvoc:hasChild ?complexTerm .}\n }\n ?complexTerm a belvoc:Term .\n ?complexTerm a belvoc:ComplexAbundance .\n ?complexTerm belvoc:hasChild ?child .\n ?child belvoc:hasConcept ?childName .\n }\n "
]
|
Please provide a description of the function:def get_activating_subs(self):
q_mods = prefixes +
# Now make the PySB for the phosphorylation
res_mods = self.g.query(q_mods)
for stmt in res_mods:
evidence = self._get_evidence(stmt[4])
# Parse out the elements of the query
enz = self._get_agent(stmt[0], stmt[5])
sub_expr = term_from_uri(stmt[1])
act_type = term_from_uri(stmt[2]).lower()
# Parse the WT and substituted residues from the node label.
# Strangely, the RDF for substituted residue doesn't break the
# terms of the BEL expression down into their meaning, as happens
# for modified protein abundances. Instead, the substitution
# just comes back as a string, e.g., "sub(V,600,E)". This code
# parses the arguments back out using a regular expression.
match = re.match(r'sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr)
if match:
matches = match.groups()
wt_residue = matches[0]
position = matches[1]
sub_residue = matches[2]
else:
logger.warning("Could not parse substitution expression %s" %
sub_expr)
continue
mc = MutCondition(position, wt_residue, sub_residue)
enz.mutations = [mc]
rel = strip_statement(stmt[3])
if rel == 'DirectlyDecreases':
is_active = False
else:
is_active = True
stmt_str = strip_statement(stmt[4])
# Mark this as a converted statement
self.converted_direct_stmts.append(stmt_str)
st = ActiveForm(enz, act_type, is_active, evidence)
self.statements.append(st) | [
"Extract INDRA ActiveForm Statements based on a mutation from BEL.\n\n The SPARQL pattern used to extract ActiveForms due to mutations look\n for a ProteinAbundance as a subject which has a child encoding the\n amino acid substitution. The object of the statement is an\n ActivityType of the same ProteinAbundance, which is either increased\n or decreased.\n\n Examples:\n\n proteinAbundance(HGNC:NRAS,substitution(Q,61,K))\n directlyIncreases\n gtpBoundActivity(proteinAbundance(HGNC:NRAS))\n\n proteinAbundance(HGNC:TP53,substitution(F,134,I))\n directlyDecreases\n transcriptionalActivity(proteinAbundance(HGNC:TP53))\n ",
"\n SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?object .\n ?subject a belvoc:ProteinAbundance .\n ?subject belvoc:hasConcept ?enzyme_name .\n ?subject belvoc:hasChild ?sub_expr .\n ?sub_expr rdfs:label ?sub_label .\n ?object a belvoc:AbundanceActivity .\n ?object belvoc:hasActivityType ?act_type .\n ?object belvoc:hasChild ?enzyme .\n ?enzyme a belvoc:ProteinAbundance .\n ?enzyme belvoc:hasConcept ?enzyme_name .\n }\n "
]
|
Please provide a description of the function:def get_activation(self):
q_stmts = prefixes +
res_stmts = self.g.query(q_stmts)
for stmt in res_stmts:
evidence = self._get_evidence(stmt[5])
subj = self._get_agent(stmt[0], stmt[6])
subj_activity = stmt[1]
if subj_activity:
subj_activity = term_from_uri(stmt[1]).lower()
subj.activity = ActivityCondition(subj_activity, True)
rel = term_from_uri(stmt[2])
if rel == 'DirectlyDecreases':
is_activation = False
else:
is_activation = True
obj = self._get_agent(stmt[3], stmt[7])
obj_activity = term_from_uri(stmt[4]).lower()
stmt_str = strip_statement(stmt[5])
# Mark this as a converted statement
self.converted_direct_stmts.append(stmt_str)
# Distinguish the case when the activator is a GTPase
# (since this may involve unique and stereotyped mechanisms)
if subj_activity == 'gtpbound':
if not is_activation:
logger.warning('GtpActivation only handles positive '
'activation.')
continue
self.statements.append(
GtpActivation(subj, obj, obj_activity, evidence))
# If the object is a GTPase, and the subject *increases*
# its GtpBound activity, then the subject is a GEF
elif obj_activity == 'gtpbound' and rel == 'DirectlyIncreases':
self.statements.append(
Gef(subj, obj, evidence))
# If the object is a GTPase, and the subject *decreases*
# its GtpBound activity, then the subject is a GAP
elif obj_activity == 'gtpbound' and rel == 'DirectlyDecreases':
self.statements.append(
Gap(subj, obj, evidence))
# Otherwise, create a generic Activity->Activity statement
else:
if rel == 'DirectlyDecreases':
st = Inhibition(subj, obj, obj_activity, evidence)
else:
st = Activation(subj, obj, obj_activity, evidence)
self.statements.append(st) | [
"Extract INDRA Inhibition/Activation Statements from BEL.\n\n The SPARQL query used to extract Activation Statements looks for\n patterns in which the subject is is an ActivityType\n (of a ProtainAbundance) or an Abundance (of a small molecule).\n The object has to be the ActivityType (typically of a\n ProteinAbundance) which is either increased or decreased.\n\n Examples:\n\n abundance(CHEBI:gefitinib) directlyDecreases\n kinaseActivity(proteinAbundance(HGNC:EGFR))\n\n kinaseActivity(proteinAbundance(HGNC:MAP3K5))\n directlyIncreases kinaseActivity(proteinAbundance(HGNC:MAP2K7))\n\n This pattern covers the extraction of Gap/Gef and GtpActivation\n Statements, which are recognized by the object activty or the\n subject activity, respectively, being `gtpbound`.\n\n Examples:\n\n catalyticActivity(proteinAbundance(HGNC:RASA1))\n directlyDecreases\n gtpBoundActivity(proteinAbundance(PFH:\"RAS Family\"))\n\n catalyticActivity(proteinAbundance(HGNC:SOS1))\n directlyIncreases\n gtpBoundActivity(proteinAbundance(HGNC:HRAS))\n\n gtpBoundActivity(proteinAbundance(HGNC:HRAS))\n directlyIncreases\n catalyticActivity(proteinAbundance(HGNC:TIAM1))\n ",
"\n SELECT ?subjName ?subjActType ?rel ?objName ?objActType\n ?stmt ?subj ?obj\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subj .\n {?subj belvoc:hasActivityType ?subjActType .\n ?subj belvoc:hasChild ?subjProt .\n ?subjProt belvoc:hasConcept ?subjName .}\n UNION\n {?subj a belvoc:Abundance .\n ?subj belvoc:hasConcept ?subjName .}\n ?stmt belvoc:hasObject ?obj .\n ?obj belvoc:hasActivityType ?objActType .\n ?obj belvoc:hasChild ?objProt .\n ?objProt belvoc:hasConcept ?objName .\n FILTER (?rel = belvoc:DirectlyIncreases ||\n ?rel = belvoc:DirectlyDecreases)\n }\n "
]
|
Please provide a description of the function:def get_transcription(self):
q_tscript1 = prefixes +
q_tscript2 = prefixes +
q_tscript3 = prefixes +
for q_tscript in (q_tscript1, q_tscript2, q_tscript3):
res_tscript = self.g.query(q_tscript)
for stmt in res_tscript:
# Get modifications on the subject, if any
if q_tscript == q_tscript1:
tf = self._get_agent(stmt[0], stmt[3])
tf.activity = ActivityCondition('transcription', True)
elif q_tscript == q_tscript3:
mod = term_from_uri(stmt[6])
mod_pos = term_from_uri(stmt[7])
mc = self._get_mod_condition(mod, mod_pos)
if mc is None:
continue
tf = self._get_agent(stmt[0], stmt[3])
tf.mods = [mc]
else:
tf = self._get_agent(stmt[0], stmt[3])
# Parse out the elements of the query
evidence = self._get_evidence(stmt[2])
target = self._get_agent(stmt[1], stmt[4])
stmt_str = strip_statement(stmt[2])
# Get the relationship (increases/decreases, etc.)
rel = term_from_uri(stmt[5])
if rel == 'DirectlyIncreases' or rel == 'DirectlyDecreases':
is_direct = True
else:
is_direct = False
# Build the INDRA statement
stmt = None
if rel == 'DirectlyIncreases' or rel == 'Increases':
stmt = IncreaseAmount(tf, target, evidence)
elif rel == 'DirectlyDecreases' or rel == 'Decreases':
stmt = DecreaseAmount(tf, target, evidence)
# If we've matched a pattern, mark this as a converted statement
if stmt is not None:
if is_direct:
self.statements.append(stmt)
self.converted_direct_stmts.append(stmt_str)
else:
self.indirect_stmts.append(stmt)
self.converted_indirect_stmts.append(stmt_str) | [
"Extract Increase/DecreaseAmount INDRA Statements from BEL.\n\n Three distinct SPARQL patterns are used to extract amount\n regulations from BEL.\n\n - q_tscript1 searches for a subject which is a Transcription\n ActivityType of a ProteinAbundance and an object which is\n an RNAAbundance that is either increased or decreased.\n\n Examples:\n\n transcriptionalActivity(proteinAbundance(HGNC:FOXP2))\n directlyIncreases\n rnaAbundance(HGNC:SYK)\n\n transcriptionalActivity(proteinAbundance(HGNC:FOXP2))\n directlyDecreases\n rnaAbundance(HGNC:CALCRL)\n\n - q_tscript2 searches for a subject which is a ProteinAbundance\n and an object which is an RNAAbundance. Note that this pattern\n typically exists in an indirect form (i.e. increases/decreases).\n\n Example:\n\n proteinAbundance(HGNC:MTF1) directlyIncreases\n rnaAbundance(HGNC:LCN1)\n\n - q_tscript3 searches for a subject which is a\n ModifiedProteinAbundance, with an object which is an RNAAbundance.\n In the BEL large corpus, this pattern is found for\n subjects which are protein families or mouse/rat proteins, and\n the predicate in an indirect increase.\n\n Example:\n\n proteinAbundance(PFR:\"Akt Family\",proteinModification(P))\n increases\n rnaAbundance(RGD:Cald1)\n ",
"\n SELECT ?tfName ?targetName ?stmt ?tf ?target ?rel\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?target .\n ?subject a belvoc:AbundanceActivity .\n ?subject belvoc:hasActivityType belvoc:Transcription .\n ?subject belvoc:hasChild ?tf .\n ?tf a belvoc:ProteinAbundance .\n ?tf belvoc:hasConcept ?tfName .\n ?target a belvoc:RNAAbundance .\n ?target belvoc:hasConcept ?targetName .\n }\n ",
"\n SELECT ?tfName ?targetName ?stmt ?tf ?target ?rel\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?tf .\n ?stmt belvoc:hasObject ?target .\n ?tf a belvoc:ProteinAbundance .\n ?tf belvoc:hasConcept ?tfName .\n ?target a belvoc:RNAAbundance .\n ?target belvoc:hasConcept ?targetName .\n }\n ",
"\n SELECT ?tfName ?targetName ?stmt ?tf ?target ?rel ?mod ?pos\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?target .\n ?subject a belvoc:ModifiedProteinAbundance .\n ?subject belvoc:hasModificationType ?mod .\n ?subject belvoc:hasChild ?tf .\n ?tf belvoc:hasConcept ?tfName .\n ?target a belvoc:RNAAbundance .\n ?target belvoc:hasConcept ?targetName .\n OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . }\n }\n "
]
|
Please provide a description of the function:def get_conversions(self):
query = prefixes +
res = self.g.query(query)
# We need to collect all pieces of the same statement so that we can
# collect multiple reactants and products
stmt_map = collections.defaultdict(list)
for stmt in res:
stmt_map[stmt[-1]].append(stmt)
for stmts in stmt_map.values():
# First we get the shared part of the Statement
stmt = stmts[0]
subj = self._get_agent(stmt[1], stmt[0])
evidence = self._get_evidence(stmt[-1])
stmt_str = strip_statement(stmt[-1])
# Now we collect the participants
obj_from_map = {}
obj_to_map = {}
for stmt in stmts:
reactant_name = stmt[6]
product_name = stmt[4]
if reactant_name not in obj_from_map:
obj_from_map[reactant_name] = \
self._get_agent(stmt[6], stmt[5])
if product_name not in obj_to_map:
obj_to_map[product_name] = \
self._get_agent(stmt[4], stmt[3])
obj_from = list(obj_from_map.values())
obj_to = list(obj_to_map.values())
st = Conversion(subj, obj_from, obj_to, evidence=evidence)
# If we've matched a pattern, mark this as a converted statement
self.statements.append(st)
self.converted_direct_stmts.append(stmt_str) | [
"Extract Conversion INDRA Statements from BEL.\n\n\n The SPARQL query used to extract Conversions searches for\n a subject (controller) which is an AbundanceActivity\n which directlyIncreases a Reaction with a given list of\n Reactants and Products.\n\n Examples:\n\n catalyticActivity(proteinAbundance(HGNC:HMOX1))\n directlyIncreases\n reaction(reactants(abundance(CHEBI:heme)),\n products(abundance(SCHEM:Biliverdine),\n abundance(CHEBI:\"carbon monoxide\")))\n ",
"\n SELECT DISTINCT ?controller ?controllerName ?controllerActivity\n ?product ?productName ?reactant ?reactantName ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?rxn .\n ?subject a belvoc:AbundanceActivity .\n ?subject belvoc:hasActivityType ?controllerActivity .\n ?subject belvoc:hasChild ?controller .\n ?controller belvoc:hasConcept ?controllerName .\n ?rxn a belvoc:Reaction .\n ?rxn belvoc:hasChild ?reactants .\n ?reactants rdfs:label ?reactLabel .\n FILTER (regex(?reactLabel, \"^reactants.*\"))\n ?rxn belvoc:hasChild ?products .\n ?products rdfs:label ?prodLabel .\n FILTER (regex(?prodLabel, \"^products.*\"))\n ?reactants belvoc:hasChild ?reactant .\n ?products belvoc:hasChild ?product .\n ?reactant belvoc:hasConcept ?reactantName .\n ?product belvoc:hasConcept ?productName .\n }\n "
]
|
Please provide a description of the function:def get_all_direct_statements(self):
logger.info("Getting all direct statements...\n")
q_stmts = prefixes +
res_stmts = self.g.query(q_stmts)
self.all_direct_stmts = [strip_statement(stmt[0]) for stmt in res_stmts] | [
"Get all directlyIncreases/Decreases BEL statements.\n\n This method stores the results of the query in self.all_direct_stmts\n as a list of strings. The SPARQL query used to find direct BEL\n statements searches for all statements whose predicate is either\n DirectyIncreases or DirectlyDecreases.\n ",
"\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n {\n { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }\n UNION\n { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }\n }\n }\n "
]
|
Please provide a description of the function:def get_all_indirect_statements(self):
q_stmts = prefixes +
res_stmts = self.g.query(q_stmts)
self.all_indirect_stmts = [strip_statement(stmt[0]) for stmt in res_stmts] | [
"Get all indirect increases/decreases BEL statements.\n\n This method stores the results of the query in self.all_indirect_stmts\n as a list of strings. The SPARQL query used to find indirect BEL\n statements searches for all statements whose predicate is either\n Increases or Decreases.\n ",
"\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n {\n { ?stmt belvoc:hasRelationship belvoc:Increases . }\n UNION\n { ?stmt belvoc:hasRelationship belvoc:Decreases . }\n }\n }\n "
]
|
Please provide a description of the function:def get_degenerate_statements(self):
logger.info("Checking for 'degenerate' statements...\n")
# Get rules of type protein X -> activity Y
q_stmts = prefixes +
res_stmts = self.g.query(q_stmts)
logger.info("Protein -> Protein/Activity statements:")
logger.info("---------------------------------------")
for stmt in res_stmts:
stmt_str = strip_statement(stmt[0])
logger.info(stmt_str)
self.degenerate_stmts.append(stmt_str) | [
"Get all degenerate BEL statements.\n\n Stores the results of the query in self.degenerate_stmts.\n ",
"\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasSubject ?subj .\n ?stmt belvoc:hasObject ?obj .\n {\n { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }\n UNION\n { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }\n }\n {\n { ?subj a belvoc:ProteinAbundance . }\n UNION\n { ?subj a belvoc:ModifiedProteinAbundance . }\n }\n ?subj belvoc:hasConcept ?xName .\n {\n {\n ?obj a belvoc:ProteinAbundance .\n ?obj belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:ModifiedProteinAbundance .\n ?obj belvoc:hasChild ?proteinY .\n ?proteinY belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:AbundanceActivity .\n ?obj belvoc:hasChild ?objChild .\n ?objChild a belvoc:ProteinAbundance .\n ?objChild belvoc:hasConcept ?yName .\n }\n }\n FILTER (?xName != ?yName)\n }\n "
]
|
Please provide a description of the function:def print_statement_coverage(self):
if not self.all_direct_stmts:
self.get_all_direct_statements()
if not self.degenerate_stmts:
self.get_degenerate_statements()
if not self.all_indirect_stmts:
self.get_all_indirect_statements()
logger.info('')
logger.info("Total indirect statements: %d" %
len(self.all_indirect_stmts))
logger.info("Converted indirect statements: %d" %
len(self.converted_indirect_stmts))
logger.info(">> Unhandled indirect statements: %d" %
(len(self.all_indirect_stmts) -
len(self.converted_indirect_stmts)))
logger.info('')
logger.info("Total direct statements: %d" % len(self.all_direct_stmts))
logger.info("Converted direct statements: %d" %
len(self.converted_direct_stmts))
logger.info("Degenerate direct statements: %d" %
len(self.degenerate_stmts))
logger.info(">> Unhandled direct statements: %d" %
(len(self.all_direct_stmts) -
len(self.converted_direct_stmts) -
len(self.degenerate_stmts)))
logger.info('')
logger.info("--- Unhandled direct statements ---------")
for stmt in self.all_direct_stmts:
if not (stmt in self.converted_direct_stmts or
stmt in self.degenerate_stmts):
logger.info(stmt)
logger.info('')
logger.info("--- Unhandled indirect statements ---------")
for stmt in self.all_indirect_stmts:
if not (stmt in self.converted_indirect_stmts or
stmt in self.degenerate_stmts):
logger.info(stmt) | [
"Display how many of the direct statements have been converted.\n\n Also prints how many are considered 'degenerate' and not converted."
]
|
Please provide a description of the function:def print_statements(self):
logger.info('--- Direct INDRA statements ----------')
for i, stmt in enumerate(self.statements):
logger.info("%s: %s" % (i, stmt))
logger.info('--- Indirect INDRA statements ----------')
for i, stmt in enumerate(self.indirect_stmts):
logger.info("%s: %s" % (i, stmt)) | [
"Print all extracted INDRA Statements."
]
|
Please provide a description of the function:def process_directory_statements_sorted_by_pmid(directory_name):
s_dict = defaultdict(list)
mp = process_directory(directory_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | [
"Processes a directory filled with CSXML files, first normalizing the\n character encoding to utf-8, and then processing into INDRA statements\n sorted by pmid.\n\n Parameters\n ----------\n directory_name : str\n The name of a directory filled with csxml files to process\n\n Returns\n -------\n pmid_dict : dict\n A dictionary mapping pmids to a list of statements corresponding to\n that pmid\n "
]
|
Please provide a description of the function:def process_directory(directory_name, lazy=False):
# Parent Medscan processor containing extractions from all files
mp = MedscanProcessor()
mp.process_directory(directory_name, lazy)
return mp | [
"Processes a directory filled with CSXML files, first normalizing the\n character encodings to utf-8, and then processing into a list of INDRA\n statements.\n\n Parameters\n ----------\n directory_name : str\n The name of a directory filled with csxml files to process\n lazy : bool\n If True, the statements will not be generated immediately, but rather\n a generator will be formulated, and statements can be retrieved by\n using `iter_statements`. If False, the `statements` attribute will be\n populated immediately. Default is False.\n\n Returns\n -------\n mp : indra.sources.medscan.processor.MedscanProcessor\n A MedscanProcessor populated with INDRA statements extracted from the\n csxml files\n "
]
|
Please provide a description of the function:def process_file_sorted_by_pmid(file_name):
s_dict = defaultdict(list)
mp = process_file(file_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | [
"Processes a file and returns a dictionary mapping pmids to a list of\n statements corresponding to that pmid.\n\n Parameters\n ----------\n file_name : str\n A csxml file to process\n\n Returns\n -------\n s_dict : dict\n Dictionary mapping pmids to a list of statements corresponding to\n that pmid\n "
]
|
Please provide a description of the function:def process_file(filename, interval=None, lazy=False):
mp = MedscanProcessor()
mp.process_csxml_file(filename, interval, lazy)
return mp | [
"Process a CSXML file for its relevant information.\n\n Consider running the fix_csxml_character_encoding.py script in\n indra/sources/medscan to fix any encoding issues in the input file before\n processing.\n\n Attributes\n ----------\n filename : str\n The csxml file, containing Medscan XML, to process\n interval : (start, end) or None\n Select the interval of documents to read, starting with the\n `start`th document and ending before the `end`th document. If\n either is None, the value is considered undefined. If the value\n exceeds the bounds of available documents, it will simply be\n ignored.\n lazy : bool\n If True, the statements will not be generated immediately, but rather\n a generator will be formulated, and statements can be retrieved by\n using `iter_statements`. If False, the `statements` attribute will be\n populated immediately. Default is False.\n\n Returns\n -------\n mp : MedscanProcessor\n A MedscanProcessor object containing extracted statements\n "
]
|
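A hedged usage sketch for the Medscan entry points above; the file and directory names are hypothetical, and the functions are assumed to be exposed via indra.sources.medscan.

from indra.sources import medscan

# Eagerly parse a single (hypothetical) csxml file
mp = medscan.process_file('articles.csxml')
print(len(mp.statements))

# Lazily stream a directory and bucket statements by PMID
by_pmid = medscan.process_directory_statements_sorted_by_pmid('csxml_dir')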
Please provide a description of the function:def stmts_from_path(path, model, stmts):
path_stmts = []
for path_rule, sign in path:
for rule in model.rules:
if rule.name == path_rule:
stmt = stmt_from_rule(path_rule, model, stmts)
assert stmt is not None
path_stmts.append(stmt)
return path_stmts | [
"Return source Statements corresponding to a path in a model.\n\n Parameters\n ----------\n path : list[tuple[str, int]]\n A list of tuples where the first element of the tuple is the\n name of a rule, and the second is the associated polarity along\n a path.\n model : pysb.core.Model\n A PySB model which contains the rules along the path.\n stmts : list[indra.statements.Statement]\n A list of INDRA Statements from which the model was assembled.\n\n Returns\n -------\n path_stmts : list[indra.statements.Statement]\n The Statements from which the rules along the path were obtained.\n "
]
|
Please provide a description of the function:def extract_context(annotations, annot_manager):
def get_annot(annotations, key):
val = annotations.pop(key, None)
if val:
val_list = [v for v, tf in val.items() if tf]
if len(val_list) > 1:
logger.warning('More than one "%s" in annotations' % key)
elif not val_list:
return None
return val_list[0]
return None
bc = BioContext()
species = get_annot(annotations, 'Species')
if species:
name = annot_manager.get_mapping('Species', species)
bc.species = RefContext(name=name, db_refs={'TAXONOMY': species})
mappings = (('CellLine', 'cell_line', None),
('Disease', 'disease', None),
('Anatomy', 'organ', None),
('Cell', 'cell_type', None),
('CellStructure', 'location', 'MESH'))
for bel_name, indra_name, ns in mappings:
ann = get_annot(annotations, bel_name)
if ann:
ref = annot_manager.get_mapping(bel_name, ann)
if ref is None:
continue
if not ns:
db_ns, db_id = ref.split('_', 1)
else:
db_ns, db_id = ns, ref
setattr(bc, indra_name,
RefContext(name=ann, db_refs={db_ns: db_id}))
# Overwrite blank BioContext
if not bc:
bc = None
return bc | [
"Return a BioContext object extracted from the annotations.\n\n The entries that are extracted into the BioContext are popped from the\n annotations.\n\n Parameters\n ----------\n annotations : dict\n PyBEL annotations dict\n annot_manager : AnnotationManager\n An annotation manager to get name/db reference mappings for each ot the\n annotation types.\n\n Returns\n -------\n bc : BioContext\n An INDRA BioContext object\n ",
"Return a specific annotation given a key."
]
|
Please provide a description of the function:def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position(yticks_position)
ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.labelpad = label_padding
ax.yaxis.labelpad = label_padding
ax.xaxis.label.set_size(fontsize)
ax.yaxis.label.set_size(fontsize) | [
"Set standardized axis formatting for figure."
]
|
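A minimal matplotlib sketch of how format_axis is meant to be used; it assumes format_axis and the module-level fontsize it reads are both in scope (e.g. pasted into the same module), and the figure size is arbitrary.

import matplotlib.pyplot as plt

fontsize = 7  # format_axis reads this module-level name

fig, ax = plt.subplots(figsize=(2, 2))
ax.plot([0, 1, 2], [0, 1, 4])
ax.set_xlabel('Time')
ax.set_ylabel('Amount')
format_axis(ax, label_padding=2, tick_padding=0)
fig.savefig('example_plot.pdf')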
Please provide a description of the function:def tag_text(text, tag_info_list):
# Check to tags for overlap and if there is any, return the subsumed
# range. Return None if no overlap.
def overlap(t1, t2):
if range(max(t1[0], t2[0]), min(t1[1]-1, t2[1]-1)+1):
if t1[1] - t1[0] >= t2[1] - t2[0]:
return t2
else:
return t1
else:
return None
# Remove subsumed tags
for t1, t2 in list(itertools.combinations(tag_info_list, 2)):
subsumed_tag = overlap(t1, t2)
if subsumed_tag is not None:
# Delete the subsumed tag from the list
try:
tag_ix = tag_info_list.index(subsumed_tag)
del tag_info_list[tag_ix]
# Ignore case where tag has already been deleted
except ValueError:
pass
# Sort the indices by their start position
tag_info_list.sort(key=lambda x: x[0])
# Now, add the marker text for each occurrence of the strings
format_text = ''
start_pos = 0
for i, j, ag_text, tag_start, tag_close in tag_info_list:
# Add the text before this agent, if any
format_text += text[start_pos:i]
# Add wrapper for this entity
format_text += tag_start + ag_text + tag_close
# Now set the next start position
start_pos = j
# Add the last section of text
format_text += text[start_pos:]
return format_text | [
"Apply start/end tags to spans of the given text.\n\n\n Parameters\n ----------\n text : str\n Text to be tagged\n tag_info_list : list of tuples\n Each tuple refers to a span of the given text. Fields are `(start_ix,\n end_ix, substring, start_tag, close_tag)`, where substring, start_tag,\n and close_tag are strings. If any of the given spans of text overlap,\n the longest span is used.\n\n Returns\n -------\n str\n String where the specified substrings have been surrounded by the\n given start and close tags.\n "
]
|
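A self-contained call to tag_text showing the overlap handling; the sentence and spans are made up.

text = 'MEK1 phosphorylates ERK2 on T185.'
tags = [
    (0, 4, 'MEK1', '<b>', '</b>'),
    (20, 24, 'ERK2', '<i>', '</i>'),
    (0, 3, 'MEK', '<u>', '</u>'),   # subsumed by the MEK1 span, so dropped
]
print(tag_text(text, tags))
# <b>MEK1</b> phosphorylates <i>ERK2</i> on T185.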
Please provide a description of the function:def make_model(self):
stmts_formatted = []
stmt_rows = group_and_sort_statements(self.statements,
self.ev_totals if self.ev_totals else None)
for key, verb, stmts in stmt_rows:
# This will now be ordered by prevalence and entity pairs.
stmt_info_list = []
for stmt in stmts:
stmt_hash = stmt.get_hash(shallow=True)
ev_list = self._format_evidence_text(stmt)
english = self._format_stmt_text(stmt)
if self.ev_totals:
total_evidence = self.ev_totals.get(int(stmt_hash), '?')
if total_evidence == '?':
logger.warning('The hash %s was not found in the '
'evidence totals dict.' % stmt_hash)
evidence_count_str = '%s / %s' % (len(ev_list), total_evidence)
else:
evidence_count_str = str(len(ev_list))
stmt_info_list.append({
'hash': stmt_hash,
'english': english,
'evidence': ev_list,
'evidence_count': evidence_count_str})
short_name = make_string_from_sort_key(key, verb)
short_name_key = str(uuid.uuid4())
stmts_formatted.append((short_name, short_name_key, stmt_info_list))
metadata = {k.replace('_', ' ').title(): v
for k, v in self.metadata.items()}
if self.db_rest_url and not self.db_rest_url.endswith('statements'):
db_rest_url = self.db_rest_url + '/statements'
else:
db_rest_url = '.'
self.model = template.render(stmt_data=stmts_formatted,
metadata=metadata, title=self.title,
db_rest_url=db_rest_url)
return self.model | [
"Return the assembled HTML content as a string.\n\n Returns\n -------\n str\n The assembled HTML as a string.\n "
]
|
Please provide a description of the function:def append_warning(self, msg):
assert self.model is not None, "You must already have run make_model!"
addendum = ('\t<span style="color:red;">(CAUTION: %s occurred when '
'creating this page.)</span>' % msg)
self.model = self.model.replace(self.title, self.title + addendum)
return self.model | [
"Append a warning message to the model to expose issues."
]
|
Please provide a description of the function:def save_model(self, fname):
if self.model is None:
self.make_model()
with open(fname, 'wb') as fh:
fh.write(self.model.encode('utf-8')) | [
"Save the assembled HTML into a file.\n\n Parameters\n ----------\n fname : str\n The path to the file to save the HTML into.\n "
]
|
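A hedged sketch of how the three methods above fit together; the import path indra.assemblers.html and the existence of a prepared `statements` list of INDRA Statements are assumptions.

from indra.assemblers.html import HtmlAssembler

ha = HtmlAssembler(statements, title='Example statements')
html = ha.make_model()            # render the page to a string
ha.save_model('statements.html')  # write it out as UTF-8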
Please provide a description of the function:def _format_evidence_text(stmt):
def get_role(ag_ix):
if isinstance(stmt, Complex) or \
isinstance(stmt, SelfModification) or \
isinstance(stmt, ActiveForm) or isinstance(stmt, Conversion) or\
isinstance(stmt, Translocation):
return 'other'
else:
assert len(stmt.agent_list()) == 2, (len(stmt.agent_list()),
type(stmt))
return 'subject' if ag_ix == 0 else 'object'
ev_list = []
for ix, ev in enumerate(stmt.evidence):
# Expand the source api to include the sub-database
if ev.source_api == 'biopax' and \
'source_sub_id' in ev.annotations and \
ev.annotations['source_sub_id']:
source_api = '%s:%s' % (ev.source_api,
ev.annotations['source_sub_id'])
else:
source_api = ev.source_api
# Prepare the evidence text
if ev.text is None:
format_text = None
else:
indices = []
for ix, ag in enumerate(stmt.agent_list()):
if ag is None:
continue
# If the statement has been preassembled, it will have
# this entry in annotations
try:
ag_text = ev.annotations['agents']['raw_text'][ix]
if ag_text is None:
raise KeyError
# Otherwise we try to get the agent text from db_refs
except KeyError:
ag_text = ag.db_refs.get('TEXT')
if ag_text is None:
continue
role = get_role(ix)
# Get the tag with the correct badge
tag_start = '<span class="badge badge-%s">' % role
tag_close = '</span>'
# Build up a set of indices
indices += [(m.start(), m.start() + len(ag_text),
ag_text, tag_start, tag_close)
for m in re.finditer(re.escape(ag_text),
ev.text)]
format_text = tag_text(ev.text, indices)
ev_list.append({'source_api': source_api,
'pmid': ev.pmid,
'text_refs': ev.text_refs,
'text': format_text,
'source_hash': ev.source_hash })
return ev_list | [
"Returns evidence metadata with highlighted evidence text.\n\n Parameters\n ----------\n stmt : indra.Statement\n The Statement with Evidence to be formatted.\n\n Returns\n -------\n list of dicts\n List of dictionaries corresponding to each Evidence object in the\n Statement's evidence list. Each dictionary has keys 'source_api',\n 'pmid' and 'text', drawn from the corresponding fields in the\n Evidence objects. The text entry of the dict includes\n `<span>` tags identifying the agents referenced by the Statement.\n "
]
|
Please provide a description of the function:def process_pmc(pmc_id, offline=False, output_fname=default_output_fname):
xml_str = pmc_client.get_xml(pmc_id)
if xml_str is None:
return None
fname = pmc_id + '.nxml'
with open(fname, 'wb') as fh:
fh.write(xml_str.encode('utf-8'))
ids = id_lookup(pmc_id, 'pmcid')
pmid = ids.get('pmid')
rp = process_nxml_file(fname, citation=pmid, offline=offline,
output_fname=output_fname)
return rp | [
"Return a ReachProcessor by processing a paper with a given PMC id.\n\n Uses the PMC client to obtain the full text. If it's not available,\n None is returned.\n\n Parameters\n ----------\n pmc_id : str\n The ID of a PubmedCentral article. The string may start with PMC but\n passing just the ID also works.\n Examples: 3717945, PMC3717945\n https://www.ncbi.nlm.nih.gov/pmc/\n offline : Optional[bool]\n If set to True, the REACH system is ran offline. Otherwise (by default)\n the web service is called. Default: False\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n "
]
|
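A minimal usage sketch for the process_pmc function above; the PMC ID is the example given in its docstring and the offline flag is left at its default.

rp = process_pmc('PMC3717945', offline=False)
if rp is not None:
    # rp.statements holds the extracted INDRA Statements
    print('Extracted %d statements' % len(rp.statements))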
Please provide a description of the function:def process_pubmed_abstract(pubmed_id, offline=False,
output_fname=default_output_fname, **kwargs):
abs_txt = pubmed_client.get_abstract(pubmed_id)
if abs_txt is None:
return None
rp = process_text(abs_txt, citation=pubmed_id, offline=offline,
output_fname=output_fname, **kwargs)
if rp and rp.statements:
for st in rp.statements:
for ev in st.evidence:
ev.epistemics['section_type'] = 'abstract'
return rp | [
"Return a ReachProcessor by processing an abstract with a given Pubmed id.\n\n Uses the Pubmed client to get the abstract. If that fails, None is\n returned.\n\n Parameters\n ----------\n pubmed_id : str\n The ID of a Pubmed article. The string may start with PMID but\n passing just the ID also works.\n Examples: 27168024, PMID27168024\n https://www.ncbi.nlm.nih.gov/pubmed/\n offline : Optional[bool]\n If set to True, the REACH system is ran offline. Otherwise (by default)\n the web service is called. Default: False\n output_fname : Optional[str]\n The file to output the REACH JSON output to.\n Defaults to reach_output.json in current working directory.\n **kwargs : keyword arguments\n All other keyword arguments are passed directly to `process_text`.\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n "
]
|
Please provide a description of the function:def process_text(text, citation=None, offline=False,
output_fname=default_output_fname, timeout=None):
if offline:
if not try_offline:
logger.error('Offline reading is not available.')
return None
try:
api_ruler = reach_reader.get_api_ruler()
except ReachOfflineReadingError as e:
logger.error(e)
logger.error('Cannot read offline because the REACH ApiRuler '
'could not be instantiated.')
return None
try:
result_map = api_ruler.annotateText(text, 'fries')
except JavaException as e:
logger.error('Could not process text.')
logger.error(e)
return None
# REACH version < 1.3.3
json_str = result_map.get('resultJson')
if not json_str:
# REACH version >= 1.3.3
json_str = result_map.get('result')
if not isinstance(json_str, bytes):
json_str = json_str.encode('utf-8')
else:
data = {'text': text.encode('utf-8')}
try:
res = requests.post(reach_text_url, data, timeout=timeout)
except requests.exceptions.RequestException as e:
logger.error('Could not connect to REACH service:')
logger.error(e)
return None
# TODO: we could use res.json() here to get a dict
# directly
# This is a byte string
json_str = res.content
if not isinstance(json_str, bytes):
raise TypeError('{} is {} instead of {}'.format(json_str, json_str.__class__, bytes))
with open(output_fname, 'wb') as fh:
fh.write(json_str)
return process_json_str(json_str.decode('utf-8'), citation) | [
"Return a ReachProcessor by processing the given text.\n\n Parameters\n ----------\n text : str\n The text to be processed.\n citation : Optional[str]\n A PubMed ID passed to be used in the evidence for the extracted INDRA\n Statements. This is used when the text to be processed comes from\n a publication that is not otherwise identified. Default: None\n offline : Optional[bool]\n If set to True, the REACH system is ran offline. Otherwise (by default)\n the web service is called. Default: False\n output_fname : Optional[str]\n The file to output the REACH JSON output to.\n Defaults to reach_output.json in current working directory.\n timeout : Optional[float]\n This only applies when reading online (`offline=False`). Only wait for\n `timeout` seconds for the api to respond.\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n "
]
|
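A short sketch of calling the REACH process_text function above via the web service; the sentence and the citation PMID are illustrative.

rp = process_text('MEK1 phosphorylates ERK2.', citation='12345678',
                  offline=False, timeout=60)
if rp is not None:
    for stmt in rp.statements:
        print(stmt)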
Please provide a description of the function:def process_nxml_str(nxml_str, citation=None, offline=False,
output_fname=default_output_fname):
if offline:
if not try_offline:
logger.error('Offline reading is not available.')
return None
try:
api_ruler = reach_reader.get_api_ruler()
except ReachOfflineReadingError as e:
logger.error(e)
logger.error('Cannot read offline because the REACH ApiRuler '
'could not be instantiated.')
return None
try:
result_map = api_ruler.annotateNxml(nxml_str, 'fries')
except JavaException as e:
logger.error('Could not process NXML.')
logger.error(e)
return None
# REACH version < 1.3.3
json_str = result_map.get('resultJson')
if not json_str:
# REACH version >= 1.3.3
json_str = result_map.get('result')
if json_str is None:
logger.warning('No results retrieved')
return None
if isinstance(json_str, bytes):
json_str = json_str.decode('utf-8')
return process_json_str(json_str, citation)
else:
data = {'nxml': nxml_str}
try:
res = requests.post(reach_nxml_url, data)
except requests.exceptions.RequestException as e:
logger.error('Could not connect to REACH service:')
logger.error(e)
return None
if res.status_code != 200:
logger.error('Could not process NXML via REACH service. '
+ 'Status code: %d' % res.status_code)
return None
json_str = res.text
with open(output_fname, 'wb') as fh:
fh.write(json_str.encode('utf-8'))
return process_json_str(json_str, citation) | [
"Return a ReachProcessor by processing the given NXML string.\n\n NXML is the format used by PubmedCentral for papers in the open\n access subset.\n\n Parameters\n ----------\n nxml_str : str\n The NXML string to be processed.\n citation : Optional[str]\n A PubMed ID passed to be used in the evidence for the extracted INDRA\n Statements. Default: None\n offline : Optional[bool]\n If set to True, the REACH system is ran offline. Otherwise (by default)\n the web service is called. Default: False\n output_fname : Optional[str]\n The file to output the REACH JSON output to.\n Defaults to reach_output.json in current working directory.\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n "
]
|
Please provide a description of the function:def process_nxml_file(file_name, citation=None, offline=False,
output_fname=default_output_fname):
with open(file_name, 'rb') as f:
nxml_str = f.read().decode('utf-8')
return process_nxml_str(nxml_str, citation, offline, output_fname) | [
"Return a ReachProcessor by processing the given NXML file.\n\n NXML is the format used by PubmedCentral for papers in the open\n access subset.\n\n Parameters\n ----------\n file_name : str\n The name of the NXML file to be processed.\n citation : Optional[str]\n A PubMed ID passed to be used in the evidence for the extracted INDRA\n Statements. Default: None\n offline : Optional[bool]\n If set to True, the REACH system is ran offline. Otherwise (by default)\n the web service is called. Default: False\n output_fname : Optional[str]\n The file to output the REACH JSON output to.\n Defaults to reach_output.json in current working directory.\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n "
]
|
Please provide a description of the function:def process_json_file(file_name, citation=None):
try:
with open(file_name, 'rb') as fh:
json_str = fh.read().decode('utf-8')
return process_json_str(json_str, citation)
except IOError:
logger.error('Could not read file %s.' % file_name) | [
"Return a ReachProcessor by processing the given REACH json file.\n\n The output from the REACH parser is in this json format. This function is\n useful if the output is saved as a file and needs to be processed.\n For more information on the format, see: https://github.com/clulab/reach\n\n Parameters\n ----------\n file_name : str\n The name of the json file to be processed.\n citation : Optional[str]\n A PubMed ID passed to be used in the evidence for the extracted INDRA\n Statements. Default: None\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n "
]
|
Please provide a description of the function:def process_json_str(json_str, citation=None):
if not isinstance(json_str, basestring):
raise TypeError('{} is {} instead of {}'.format(json_str,
json_str.__class__,
basestring))
json_str = json_str.replace('frame-id', 'frame_id')
json_str = json_str.replace('argument-label', 'argument_label')
json_str = json_str.replace('object-meta', 'object_meta')
json_str = json_str.replace('doc-id', 'doc_id')
json_str = json_str.replace('is-hypothesis', 'is_hypothesis')
json_str = json_str.replace('is-negated', 'is_negated')
json_str = json_str.replace('is-direct', 'is_direct')
json_str = json_str.replace('found-by', 'found_by')
try:
json_dict = json.loads(json_str)
except ValueError:
logger.error('Could not decode JSON string.')
return None
rp = ReachProcessor(json_dict, citation)
rp.get_modifications()
rp.get_complexes()
rp.get_activation()
rp.get_translocation()
rp.get_regulate_amounts()
return rp | [
"Return a ReachProcessor by processing the given REACH json string.\n\n The output from the REACH parser is in this json format.\n For more information on the format, see: https://github.com/clulab/reach\n\n Parameters\n ----------\n json_str : str\n The json string to be processed.\n citation : Optional[str]\n A PubMed ID passed to be used in the evidence for the extracted INDRA\n Statements. Default: None\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n "
]
|
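Sketch of re-processing a previously saved REACH output with process_json_str; the file name matches the default output_fname used by the functions above and the PMID is illustrative.

with open('reach_output.json', 'rt') as fh:
    rp = process_json_str(fh.read(), citation='12345678')
if rp is not None:
    print('%d statements extracted' % len(rp.statements))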
Please provide a description of the function:def make_parser():
parser = ArgumentParser(
'wait_for_complete.py',
usage='%(prog)s [-h] queue_name [options]',
description=('Wait for a set of batch jobs to complete, and monitor '
'them as they run.'),
epilog=('Jobs can also be monitored, terminated, and otherwise '
'managed on the AWS website. However this tool will also tag '
'the instances, and should be run whenever a job is submitted '
'to AWS.')
)
parser.add_argument(
dest='queue_name',
help=('The name of the queue to watch and wait for completion. If no '
'jobs are specified, this will wait until all jobs in the queue '
'are completed (either SUCCEEDED or FAILED).')
)
parser.add_argument(
'--watch', '-w',
dest='job_list',
metavar='JOB_ID',
nargs='+',
help=('Specify particular jobs using their job ids, as reported by '
'the submit command. Many ids may be specified.')
)
parser.add_argument(
'--prefix', '-p',
dest='job_name_prefix',
help='Specify a prefix for the name of the jobs to watch and wait for.'
)
parser.add_argument(
'--interval', '-i',
dest='poll_interval',
default=10,
type=int,
help=('The time interval to wait between job status checks, in '
'seconds (default: %(default)d seconds).')
)
parser.add_argument(
'--timeout', '-T',
metavar='TIMEOUT',
type=int,
help=('If the logs are not updated for %(metavar)s seconds, '
'print a warning. If `--kill_on_log_timeout` flag is set, then '
'the offending jobs will be automatically terminated.')
)
parser.add_argument(
'--kill_on_timeout', '-K',
action='store_true',
help='If a log times out, terminate the offending job.'
)
parser.add_argument(
'--stash_log_method', '-l',
choices=['s3', 'local'],
metavar='METHOD',
help=('Select a method from: [%(choices)s] to store the job logs. '
'If no method is specified, the logs will not be '
'loaded off of AWS. If \'s3\' is specified, then '
'`job_name_prefix` must also be given, as this will indicate '
'where on s3 to store the logs.')
)
return parser | [
"Generate the parser for this script."
]
|
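Sketch of using the parser returned by make_parser; the queue name and job id are made up for illustration.

parser = make_parser()
args = parser.parse_args(['my_batch_queue', '--watch', 'job-0001',
                          '--interval', '30', '--prefix', 'run1',
                          '--stash_log_method', 's3'])
print(args.queue_name, args.poll_interval, args.stash_log_method)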
Please provide a description of the function:def id_lookup(paper_id, idtype):
if idtype not in ('pmid', 'pmcid', 'doi'):
raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
"or 'doi'." % idtype)
ids = {'doi': None, 'pmid': None, 'pmcid': None}
pmc_id_results = pmc_client.id_lookup(paper_id, idtype)
# Start with the results of the PMC lookup and then override with the
# provided ID
ids['pmid'] = pmc_id_results.get('pmid')
ids['pmcid'] = pmc_id_results.get('pmcid')
ids['doi'] = pmc_id_results.get('doi')
ids[idtype] = paper_id
# If we gave a DOI, then our work is done after looking for PMID and PMCID
if idtype == 'doi':
return ids
# If we gave a PMID or PMCID, we need to check to see if we got a DOI.
# If we got a DOI back, we're done.
elif ids.get('doi'):
return ids
# If we get here, then we've given PMID or PMCID and don't have a DOI yet.
# If we gave a PMCID and have neither a PMID nor a DOI, then we'll run
# into problems later on when we try to the reverse lookup using CrossRef.
# So we bail here and return what we have (PMCID only) with a warning.
if ids.get('pmcid') and ids.get('doi') is None and ids.get('pmid') is None:
logger.warning('%s: PMCID without PMID or DOI' % ids.get('pmcid'))
return ids
# To clarify the state of things at this point:
assert ids.get('pmid') is not None
assert ids.get('doi') is None
# As a last result, we try to get the DOI from CrossRef (which internally
# tries to get the DOI from Pubmed in the process of collecting the
# necessary metadata for the lookup):
ids['doi'] = crossref_client.doi_query(ids['pmid'])
# It may still be None, but at this point there's nothing we can do...
return ids | [
"Take an ID of type PMID, PMCID, or DOI and lookup the other IDs.\n\n If the DOI is not found in Pubmed, try to obtain the DOI by doing a\n reverse-lookup of the DOI in CrossRef using article metadata.\n\n Parameters\n ----------\n paper_id : str\n ID of the article.\n idtype : str\n Type of the ID: 'pmid', 'pmcid', or 'doi\n\n Returns\n -------\n ids : dict\n A dictionary with the following keys: pmid, pmcid and doi.\n "
]
|
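Sketch of using id_lookup as defined above; the PMCID is the example used earlier in the process_pmc docstring.

ids = id_lookup('PMC3717945', 'pmcid')
print(ids['pmid'], ids['pmcid'], ids['doi'])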
Please provide a description of the function:def get_full_text(paper_id, idtype, preferred_content_type='text/xml'):
if preferred_content_type not in \
('text/xml', 'text/plain', 'application/pdf'):
raise ValueError("preferred_content_type must be one of 'text/xml', "
"'text/plain', or 'application/pdf'.")
ids = id_lookup(paper_id, idtype)
pmcid = ids.get('pmcid')
pmid = ids.get('pmid')
doi = ids.get('doi')
# First try to find paper via PMC
if pmcid:
nxml = pmc_client.get_xml(pmcid)
if nxml:
return nxml, 'pmc_oa_xml'
# If we got here, it means we didn't find the full text in PMC, so we'll
# need either the DOI (for lookup in CrossRef) and/or the PMID (so we
# can fall back on the abstract. If by some strange turn we have neither,
# give up now.
if not doi and not pmid:
return (None, None)
# If it does not have PMC NXML then we attempt to obtain the full-text
# through the CrossRef Click-through API
if doi:
# Get publisher
publisher = crossref_client.get_publisher(doi)
# First check for whether this is Elsevier--if so, use the Elsevier
# client directly, because the Clickthrough API key seems unreliable.
# Return full XML.
if publisher == 'Elsevier BV':
logger.info('Elsevier: %s' % pmid)
#article = elsevier_client.get_article(doi, output='txt')
try:
article_xml = elsevier_client.download_article(doi)
except Exception as e:
logger.error("Error downloading Elsevier article: %s" % e)
article_xml = None
if article_xml is not None:
return (article_xml, 'elsevier_xml')
# FIXME FIXME FIXME
# Because we don't yet have a way to process non-Elsevier content
# obtained from CrossRef, which includes both XML of unknown format
# and PDFs, we just comment this section out for now
# end FIXME FIXME FIXME
# No full text links and not a publisher we support. We'll have to
# fall back to the abstract.
#elif pmid:
if pmid:
abstract = pubmed_client.get_abstract(pmid)
if abstract is None:
return (None, None)
else:
return abstract, 'abstract'
# We have a useless DOI and no PMID. Give up.
else:
return (None, None)
# We don't have a DOI but we're guaranteed to have a PMID at this point,
# so we fall back to the abstract:
else:
abstract = pubmed_client.get_abstract(pmid)
if abstract is None:
return (None, None)
else:
return abstract, 'abstract'
# We'll only get here if we've missed a combination of conditions
assert False | [
"Return the content and the content type of an article.\n\n This function retreives the content of an article by its PubMed ID,\n PubMed Central ID, or DOI. It prioritizes full text content when available\n and returns an abstract from PubMed as a fallback.\n\n Parameters\n ----------\n paper_id : string\n ID of the article.\n idtype : 'pmid', 'pmcid', or 'doi\n Type of the ID.\n preferred_content_type : Optional[st]r\n Preference for full-text format, if available. Can be one of\n 'text/xml', 'text/plain', 'application/pdf'. Default: 'text/xml'\n\n Returns\n -------\n content : str\n The content of the article.\n content_type : str\n The content type of the article\n ",
"\n # Check if there are any full text links\n links = crossref_client.get_fulltext_links(doi)\n if links:\n headers = {}\n # Set the Cross Ref Clickthrough API key in the header, if we've\n # got one\n cr_api_key = crossref_client.get_api_key()\n if cr_api_key is not None:\n headers['CR-Clickthrough-Client-Token'] = cr_api_key\n # Utility function to get particular links by content-type\n def lookup_content_type(link_list, content_type):\n content_list = [l.get('URL') for l in link_list\n if l.get('content-type') == content_type]\n return None if not content_list else content_list[0]\n # First check for what the user asked for\n if lookup_content_type(links, preferred_content_type):\n req = requests.get(lookup_content_type(links,\n preferred_content_type),\n headers=headers)\n if req.status_code == 200:\n req_content_type = req.headers['Content-Type']\n return req.text, req_content_type\n elif req.status_code == 400:\n logger.warning('Full text query returned 400 (Bad Request): '\n 'Perhaps missing CrossRef Clickthrough API '\n 'key?')\n return (None, None)\n # Check for XML first\n if lookup_content_type(links, 'text/xml'):\n req = requests.get(lookup_content_type(links, 'text/xml'),\n headers=headers)\n if req.status_code == 200:\n req_content_type = req.headers['Content-Type']\n return req.text, req_content_type\n elif req.status_code == 400:\n logger.warning('Full text query returned 400 (Bad Request):'\n 'Perhaps missing CrossRef Clickthrough API '\n 'key?')\n return (None, None)\n # Next, plain text\n elif lookup_content_type(links, 'text/plain'):\n req = requests.get(lookup_content_type(links, 'text/plain'),\n headers=headers)\n if req.status_code == 200:\n req_content_type = req.headers['Content-Type']\n return req.text, req_content_type\n elif req.status_code == 400:\n logger.warning('Full text query returned 400 (Bad Request):'\n 'Perhaps missing CrossRef Clickthrough API '\n 'key?')\n return (None, None)\n elif lookup_content_type(links, 'application/pdf'):\n pass\n # Wiley's links are often of content-type 'unspecified'.\n elif lookup_content_type(links, 'unspecified'):\n req = requests.get(lookup_content_type(links, 'unspecified'),\n headers=headers)\n if req.status_code == 200:\n req_content_type = req.headers['Content-Type']\n return 'foo', req_content_type\n elif req.status_code == 400:\n logger.warning('Full text query returned 400 (Bad Request):'\n 'Perhaps missing CrossRef Clickthrough API '\n 'key?')\n return (None, None)\n elif req.status_code == 401:\n logger.warning('Full text query returned 401 (Unauthorized)')\n return (None, None)\n elif req.status_code == 403:\n logger.warning('Full text query returned 403 (Forbidden)')\n return (None, None)\n else:\n raise Exception(\"Unknown content type(s): %s\" % links)\n elif publisher == 'American Society for Biochemistry & Molecular ' \\\n 'Biology (ASBMB)':\n url = crossref_client.get_url(doi)\n return get_asbmb_full_text(url)\n "
]
|
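Sketch of calling get_full_text and branching on the returned content type; the PMID is illustrative, and both content-type values come from the code above.

content, content_type = get_full_text('12345678', 'pmid')
if content_type == 'pmc_oa_xml':
    print('Got full-text NXML (%d characters)' % len(content))
elif content_type == 'abstract':
    print('Fell back to the PubMed abstract')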
Please provide a description of the function:def get_api_ruler(self):
if self.api_ruler is None:
try:
self.api_ruler = \
autoclass('org.clulab.reach.export.apis.ApiRuler')
except JavaException as e:
raise ReachOfflineReadingError(e)
return self.api_ruler | [
"Return the existing reader if it exists or launch a new one.\n\n Returns\n -------\n api_ruler : org.clulab.reach.apis.ApiRuler\n An instance of the REACH ApiRuler class (java object).\n "
]
|
Please provide a description of the function:def _download_biogrid_data(url):
res = requests.get(url)
if res.status_code != 200:
raise Exception('Unable to download Biogrid data: status code %s'
% res.status_code)
zip_bytes = BytesIO(res.content)
zip_file = ZipFile(zip_bytes)
zip_info_list = zip_file.infolist()
# There should be only one file in this zip archive
if len(zip_info_list) != 1:
raise Exception('There should be exactly one file in the BioGrid zip '
'archive: %s' % str(zip_info_list))
unzipped_bytes = zip_file.read(zip_info_list[0]) # Unzip the file
biogrid_str = StringIO(unzipped_bytes.decode('utf8')) # Make file-like obj
csv_reader = csv.reader(biogrid_str, delimiter='\t') # Get csv reader
next(csv_reader) # Skip the header
return csv_reader | [
"Downloads zipped, tab-separated Biogrid data in .tab2 format.\n\n Parameters:\n -----------\n url : str\n URL of the BioGrid zip file.\n\n Returns\n -------\n csv.reader\n A csv.reader object for iterating over the rows (header has already\n been skipped).\n "
]
|
Please provide a description of the function:def _make_agent(self, entrez_id, text_id):
hgnc_name, db_refs = self._make_db_refs(entrez_id, text_id)
if hgnc_name is not None:
name = hgnc_name
elif text_id is not None:
name = text_id
# Handle case where the name is None
else:
return None
return Agent(name, db_refs=db_refs) | [
"Make an Agent object, appropriately grounded.\n\n Parameters\n ----------\n entrez_id : str\n Entrez id number\n text_id : str\n A plain text systematic name, or None if not listed.\n\n Returns\n -------\n agent : indra.statements.Agent\n A grounded agent object.\n "
]
|
Please provide a description of the function:def _make_db_refs(self, entrez_id, text_id):
db_refs = {}
if text_id != '-' and text_id is not None:
db_refs['TEXT'] = text_id
hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id)
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
if hgnc_id is not None:
db_refs['HGNC'] = hgnc_id
up_id = hgnc_client.get_uniprot_id(hgnc_id)
if up_id is not None:
db_refs['UP'] = up_id
return (hgnc_name, db_refs) | [
"Looks up the HGNC ID and name, as well as the Uniprot ID.\n\n Parameters\n ----------\n entrez_id : str\n Entrez gene ID.\n text_id : str or None\n A plain text systematic name, or None if not listed in the\n Biogrid data.\n\n Returns\n -------\n hgnc_name : str\n Official HGNC symbol for the gene.\n db_refs : dict\n db_refs grounding dictionary, used when constructing the Agent\n object.\n "
]
|
Please provide a description of the function:def make_model(self, policies=None, initial_conditions=True,
reverse_effects=False):
self.processed_policies = self.process_policies(policies)
ppa = PysbPreassembler(self.statements)
ppa.replace_activities()
if reverse_effects:
ppa.add_reverse_effects()
self.statements = ppa.statements
# Set local policies for this make_model call that overwrite
# the global policies of the Kami assembler
if policies is not None:
global_policies = self.policies
if isinstance(policies, basestring):
local_policies = {'other': policies}
else:
local_policies = {'other': 'default'}
local_policies.update(policies)
self.policies = local_policies
self.model = {}
graphs = []
self.model['graphs'] = graphs
self.model['typing'] = []
# Action graph generated here
action_graph = {'id': 'action_graph',
'attrs': {'name': 'action_graph'}}
action_graph['graph'] = {'nodes': [], 'edges': []}
graphs.append(action_graph)
# Iterate over the statements to generate rules
self._assemble()
# Add initial conditions
#if initial_conditions:
# self.add_default_initial_conditions()
# If local policies were applied, revert to the global one
if policies is not None:
self.policies = global_policies
return self.model | [
"Assemble the Kami model from the collected INDRA Statements.\n\n This method assembles a Kami model from the set of INDRA Statements.\n The assembled model is both returned and set as the assembler's\n model argument.\n\n Parameters\n ----------\n policies : Optional[Union[str, dict]]\n A string or dictionary of policies, as defined in\n :py:class:`indra.assemblers.KamiAssembler`. This set of policies\n locally supersedes the default setting in the assembler. This\n is useful when this function is called multiple times with\n different policies.\n initial_conditions : Optional[bool]\n If True, default initial conditions are generated for the\n agents in the model.\n\n Returns\n -------\n model : dict\n The assembled Kami model.\n "
]
|
Please provide a description of the function:def _assemble(self):
for stmt in self.statements:
if _is_whitelisted(stmt):
self._dispatch(stmt, 'assemble', self.model) | [
"Calls the appropriate assemble method based on policies."
]
|
Please provide a description of the function:def _dispatch(self, stmt, stage, *args):
class_name = stmt.__class__.__name__
policy = self.processed_policies[stmt.uuid]
func_name = '%s_%s_%s' % (class_name.lower(), stage, policy)
func = globals().get(func_name)
if func is None:
# The specific policy is not implemented for the
# given statement type.
# We try to apply a default policy next.
func_name = '%s_%s_default' % (class_name.lower(), stage)
func = globals().get(func_name)
if func is None:
# The given statement type doesn't have a default
# policy.
#raise UnknownPolicyError('%s function %s not defined' %
# (stage, func_name))
logger.warning('%s function %s not defined' %
(stage, func_name))
return
return func(stmt, *args) | [
"Construct and call an assembly function.\n\n This function constructs the name of the assembly function based on\n the type of statement, the corresponding policy and the stage\n of assembly. It then calls that function to perform the assembly\n task."
]
|
Please provide a description of the function:def add_agent(self, agent):
agent_id = self.add_node(agent.name)
self.add_typing(agent_id, 'agent')
# Handle bound conditions
for bc in agent.bound_conditions:
# Here we make the assumption that the binding site
# is simply named after the binding partner
if bc.is_bound:
test_type = 'is_bnd'
else:
test_type = 'is_free'
bound_name = bc.agent.name
agent_bs = get_binding_site_name(bc.agent)
test_name = '%s_bound_to_%s_test' % (agent_id, bound_name)
agent_bs_id = self.add_node(agent_bs)
test_id = self.add_node(test_name)
self.add_edge(agent_bs_id, agent_id)
self.add_edge(agent_bs_id, test_id)
self.add_typing(agent_bs_id, 'locus')
self.add_typing(test_id, test_type)
for mod in agent.mods:
mod_site_str = abbrevs[mod.mod_type]
if mod.residue is not None:
mod_site_str = mod.residue
mod_pos_str = mod.position if mod.position is not None else ''
mod_site = ('%s%s' % (mod_site_str, mod_pos_str))
site_states = states[mod.mod_type]
if mod.is_modified:
val = site_states[1]
else:
val = site_states[0]
mod_site_id = self.add_node(mod_site, {'val': val})
self.add_edge(mod_site_id, agent_id)
self.add_typing(mod_site_id, 'state')
return agent_id | [
"Add an INDRA Agent and its conditions to the Nugget."
]
|
Please provide a description of the function:def add_node(self, name_base, attrs=None):
if name_base not in self.counters:
node_id = name_base
else:
node_id = '%s_%d' % (name_base, self.counters[name_base])
node = {'id': node_id}
if attrs:
node['attrs'] = attrs
self.nodes.append(node)
self.counters[name_base] += 1
return node_id | [
"Add a node with a given base name to the Nugget and return ID."
]
|
Please provide a description of the function:def get_nugget_dict(self):
nugget_dict = \
{'id': self.id,
'graph': {
'nodes': self.nodes,
'edges': self.edges
},
'attrs': {
'name': self.name,
'rate': self.rate
}
}
return nugget_dict | [
"Return the Nugget as a dictionary."
]
|
Please provide a description of the function:def process_text(text, pmid=None, python2_path=None):
# Try to locate python2 in one of the directories of the PATH environment
# variable if it is not provided
if python2_path is None:
for path in os.environ["PATH"].split(os.pathsep):
proposed_python2_path = os.path.join(path, 'python2.7')
if os.path.isfile(proposed_python2_path):
python2_path = proposed_python2_path
print('Found python 2 interpreter at', python2_path)
break
if python2_path is None:
raise Exception('Could not find python2 in the directories ' +
'listed in the PATH environment variable. ' +
'Need python2 to run TEES.')
# Run TEES
a1_text, a2_text, sentence_segmentations = run_on_text(text,
python2_path)
# Run the TEES processor
tp = TEESProcessor(a1_text, a2_text, sentence_segmentations, pmid)
return tp | [
"Processes the specified plain text with TEES and converts output to\n supported INDRA statements. Check for the TEES installation is the\n TEES_PATH environment variable, and configuration file; if not found,\n checks candidate paths in tees_candidate_paths. Raises an exception if\n TEES cannot be found in any of these places.\n\n Parameters\n ----------\n text : str\n Plain text to process with TEES\n pmid : str\n The PMID from which the paper comes from, to be stored in the Evidence\n object of statements. Set to None if this is unspecified.\n python2_path : str\n TEES is only compatible with python 2. This processor invokes this\n external python 2 interpreter so that the processor can be run in\n either python 2 or python 3. If None, searches for an executible named\n python2 in the PATH environment variable.\n\n Returns\n -------\n tp : TEESProcessor\n A TEESProcessor object which contains a list of INDRA statements\n extracted from TEES extractions\n "
]
|
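Sketch of running the TEES process_text function above; it assumes a local TEES installation and a python2.7 interpreter on the PATH, as the function itself requires, and the sentence is illustrative.

tp = process_text('BRAF phosphorylates MEK1.', pmid=None)
if tp is not None:
    print(tp.statements)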
Please provide a description of the function:def run_on_text(text, python2_path):
tees_path = get_config('TEES_PATH')
if tees_path is None:
# If the TEES directory is not specified, see if any of the candidate paths
# exist and contain all of the files expected for a TEES installation.
for cpath in tees_candidate_paths:
cpath = os.path.expanduser(cpath)
if os.path.isdir(cpath):
# Check to see if it has all of the expected files and
# directories
has_expected_files = True
for f in tees_installation_files:
fpath = os.path.join(cpath, f)
present = os.path.isfile(fpath)
has_expected_files = has_expected_files and present
has_expected_dirs = True
for d in tees_installation_dirs:
dpath = os.path.join(cpath, d)
present = os.path.isdir(dpath)
has_expected_dirs = has_expected_dirs and present
if has_expected_files and has_expected_dirs:
# We found a directory with all of the files and
# directories we expected in a TEES installation - let's
# assume it's a TEES installation
tees_path = cpath
print('Found TEES installation at ' + cpath)
break
# Make sure we either were given or found a TEES directory and it exists
if tees_path is None or not os.path.isdir(tees_path):
raise Exception('Could not find a TEES installation. Set TEES_PATH or '
'install TEES in one of the candidate paths.')
# Make sure the classify.py script exists within this directory
classify_path = 'classify.py'
# if not os.path.isfile(classify_path):
# raise Exception('classify.py does not exist in provided TEES path.')
# Create a temporary directory to tag the shared-task files
tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor')
pwd = os.path.abspath(os.getcwd())
try:
# Write text to a file in the temporary directory
text_path = os.path.join(tmp_dir, 'text.txt')
# Had some trouble with non-ascii characters. A possible TODO item in
# the future is to look into resolving this, for now just ignoring
# non-latin-1 characters
with codecs.open(text_path, 'w', encoding='latin-1', errors='ignore') \
as f:
f.write(text)
# Run TEES
output_path = os.path.join(tmp_dir, 'output')
model_path = os.path.join(tees_path, 'tees_data/models/GE11-test/')
command = [python2_path, classify_path, '-m', model_path,
'-i', text_path,
'-o', output_path]
try:
pwd = os.path.abspath(os.getcwd())
os.chdir(tees_path) # Change to TEES directory
# print('cwd is:', os.getcwd())
# out = subprocess.check_output(command, stderr=subprocess.STDOUT)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=tees_path)
p.wait()
(so, se) = p.communicate()
print(so)
print(se)
os.chdir(pwd) # Change back to previous directory
# print('cwd is:', os.getcwd())
# print(out.decode('utf-8'))
except BaseException as e:
# If there's an error, print it out and then propagate the
# exception
os.chdir(pwd) # Change back to previous directory
# print (e.output.decode('utf-8'))
raise e
except BaseException as e:
# If there was an exception, delete the temporary directory and
# pass on the exception
shutil.rmtree(tmp_dir)
raise e
# Return the temporary directory with the TEES output
output_tuple = extract_output(tmp_dir)
shutil.rmtree(tmp_dir)
return output_tuple | [
"Runs TEES on the given text in a temporary directory and returns a\n temporary directory with TEES output.\n \n The caller should delete this directory when done with it. This function\n runs TEES and produces TEES output files but does not process TEES output\n into INDRA statements.\n\n Parameters\n ----------\n text : str\n Text from which to extract relationships\n python2_path : str\n The path to the python 2 interpreter\n\n Returns\n -------\n output_dir : str\n Temporary directory with TEES output. The caller should delete this\n directgory when done with it.\n "
]
|
Please provide a description of the function:def extract_output(output_dir):
# Locate the file of sentences segmented by the TEES system, described
# in a compressed xml document
sentences_glob = os.path.join(output_dir, '*-preprocessed.xml.gz')
sentences_filename_candidates = glob.glob(sentences_glob)
# Make sure there is exactly one such file
if len(sentences_filename_candidates) != 1:
m = 'Looking for exactly one file matching %s but found %d matches'
raise Exception(m % (
sentences_glob, len(sentences_filename_candidates)))
# Read in the sentence segmentation XML
sentence_segmentation_filename = sentences_filename_candidates[0]
with gzip.GzipFile(sentences_filename_candidates[0], 'r') as f:
sentence_segmentations = f.read().decode('utf-8')
# Create a temporary directory to which to extract the a1 and a2 files from
# the tarball
tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor')
try:
# Make sure the tarfile with the extracted events is in shared task
# format is in the output directory
tarfile_glob = os.path.join(output_dir, '*-events.tar.gz')
candidate_tarfiles = glob.glob(tarfile_glob)
if len(candidate_tarfiles) != 1:
raise Exception('Expected exactly one match for glob %s' %
tarfile_glob)
# Decide what tar files to extract
# (We're not blindly extracting all files because of the security
# warning in the documentation for TarFile.extractall
# In particular, we want to make sure that the filename doesn't
# try to specify a relative or absolute path other than the current
# directory by making sure the filename starts with an alphanumeric
# character.
# We're also only interested in files with the .a1 or .a2 extension
tar_file = tarfile.open(candidate_tarfiles[0])
a1_file = None
a2_file = None
extract_these = []
for m in tar_file.getmembers():
if re.match(r'[a-zA-Z0-9].*\.a[12]', m.name):
extract_these.append(m)
if m.name.endswith('.a1'):
a1_file = m.name
elif m.name.endswith('.a2'):
a2_file = m.name
else:
assert(False)
# There should be exactly two files that match these criteria
if len(extract_these) != 2 or a1_file is None or a2_file is None:
raise Exception('We thought there would be one .a1 and one .a2' +
' file in the tarball, but we got %d files total' %
len(extract_these))
# Extract the files that we decided to extract
tar_file.extractall(path=tmp_dir, members=extract_these)
# Read the text of the a1 (entities) file
with codecs.open(os.path.join(tmp_dir, a1_file), 'r',
encoding='utf-8') as f:
a1_text = f.read()
# Read the text of the a2 (events) file
with codecs.open(os.path.join(tmp_dir, a2_file), 'r',
encoding='utf-8') as f:
a2_text = f.read()
# Now that we're done, remove the temporary directory
shutil.rmtree(tmp_dir)
# Return the extracted text
return a1_text, a2_text, sentence_segmentations
except BaseException as e:
# If there was an exception, delete the temporary directory and
# pass on the exception
print('Removing temporary directory: ' + tmp_dir)
shutil.rmtree(tmp_dir)
raise e | [
"Extract the text of the a1, a2, and sentence segmentation files from the\n TEES output directory. These files are located within a compressed archive.\n\n Parameters\n ----------\n output_dir : str\n Directory containing the output of the TEES system\n\n Returns\n -------\n a1_text : str\n The text of the TEES a1 file (specifying the entities)\n a2_text : str\n The text of the TEES a2 file (specifying the event graph)\n sentence_segmentations : str\n The text of the XML file specifying the sentence segmentation\n "
]
|
Please provide a description of the function:def _list_to_seq(lst):
ml = autoclass('scala.collection.mutable.MutableList')()
for element in lst:
ml.appendElem(element)
return ml | [
"Return a scala.collection.Seq from a Python list."
]
|
Please provide a description of the function:def process_text(self, text, format='json'):
if self.eidos_reader is None:
self.initialize_reader()
default_arg = lambda x: autoclass('scala.Some')(x)
today = datetime.date.today().strftime("%Y-%m-%d")
fname = 'default_file_name'
annot_doc = self.eidos_reader.extractFromText(
text,
True, # keep text
False, # CAG-relevant only
default_arg(today), # doc creation time
default_arg(fname) # file name
)
if format == 'json':
mentions = annot_doc.odinMentions()
ser = autoclass(eidos_package +
'.serialization.json.WMJSONSerializer')
mentions_json = ser.toJsonStr(mentions)
elif format == 'json_ld':
# We need to get a Scala Seq of annot docs here
ml = _list_to_seq([annot_doc])
# We currently do not need to instantiate the adjective grounder
# if we want to reinstate it, we would need to do the following
# ag = EidosAdjectiveGrounder.fromConfig(
# EidosSystem.defaultConfig.getConfig("adjectiveGrounder"))
# We now create a JSON-LD corpus
jc = autoclass(eidos_package + '.serialization.json.JLDCorpus')
corpus = jc(ml)
# Finally, serialize the corpus into JSON string
mentions_json = corpus.toJsonStr()
json_dict = json.loads(mentions_json)
return json_dict | [
"Return a mentions JSON object given text.\n\n Parameters\n ----------\n text : str\n Text to be processed.\n format : str\n The format of the output to produce, one of \"json\" or \"json_ld\".\n Default: \"json\"\n\n Returns\n -------\n json_dict : dict\n A JSON object of mentions extracted from text.\n "
]
|
Please provide a description of the function:def process_text(text, out_format='json_ld', save_json='eidos_output.json',
webservice=None):
if not webservice:
if eidos_reader is None:
logger.error('Eidos reader is not available.')
return None
json_dict = eidos_reader.process_text(text, out_format)
else:
res = requests.post('%s/process_text' % webservice,
json={'text': text})
json_dict = res.json()
if save_json:
with open(save_json, 'wt') as fh:
json.dump(json_dict, fh, indent=2)
return process_json(json_dict) | [
"Return an EidosProcessor by processing the given text.\n\n This constructs a reader object via Java and extracts mentions\n from the text. It then serializes the mentions into JSON and\n processes the result with process_json.\n\n Parameters\n ----------\n text : str\n The text to be processed.\n out_format : Optional[str]\n The type of Eidos output to read into and process. Currently only\n 'json-ld' is supported which is also the default value used.\n save_json : Optional[str]\n The name of a file in which to dump the JSON output of Eidos.\n webservice : Optional[str]\n An Eidos reader web service URL to send the request to.\n If None, the reading is assumed to be done with the Eidos JAR rather\n than via a web service. Default: None\n\n Returns\n -------\n ep : EidosProcessor\n An EidosProcessor containing the extracted INDRA Statements in its\n statements attribute.\n "
]
|
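Sketch of reading a sentence with the Eidos process_text function above through a web service; the localhost URL is an assumption, and save_json=None skips writing the raw JSON-LD.

ep = process_text('Heavy rainfall caused displacement.',
                  out_format='json_ld', save_json=None,
                  webservice='http://localhost:9000')
if ep is not None:
    print('%d statements' % len(ep.statements))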
Please provide a description of the function:def process_json_file(file_name):
try:
with open(file_name, 'rb') as fh:
json_str = fh.read().decode('utf-8')
return process_json_str(json_str)
except IOError:
logger.exception('Could not read file %s.' % file_name) | [
"Return an EidosProcessor by processing the given Eidos JSON-LD file.\n\n This function is useful if the output from Eidos is saved as a file and\n needs to be processed.\n\n Parameters\n ----------\n file_name : str\n The name of the JSON-LD file to be processed.\n\n Returns\n -------\n ep : EidosProcessor\n A EidosProcessor containing the extracted INDRA Statements\n in its statements attribute.\n "
]
|
Please provide a description of the function:def process_json(json_dict):
ep = EidosProcessor(json_dict)
ep.extract_causal_relations()
ep.extract_correlations()
ep.extract_events()
return ep | [
"Return an EidosProcessor by processing a Eidos JSON-LD dict.\n\n Parameters\n ----------\n json_dict : dict\n The JSON-LD dict to be processed.\n\n Returns\n -------\n ep : EidosProcessor\n A EidosProcessor containing the extracted INDRA Statements\n in its statements attribute.\n "
]
|
Please provide a description of the function:def get_drug_inhibition_stmts(drug):
chebi_id = drug.db_refs.get('CHEBI')
mesh_id = drug.db_refs.get('MESH')
if chebi_id:
drug_chembl_id = chebi_client.get_chembl_id(chebi_id)
elif mesh_id:
drug_chembl_id = get_chembl_id(mesh_id)
else:
logger.error('Drug missing ChEBI or MESH grounding.')
return None
logger.info('Drug: %s' % (drug_chembl_id))
query_dict = {'query': 'activity',
'params': {'molecule_chembl_id': drug_chembl_id,
'limit': 10000}
}
res = send_query(query_dict)
activities = res['activities']
targ_act_dict = activities_by_target(activities)
target_chembl_ids = [x for x in targ_act_dict]
protein_targets = get_protein_targets_only(target_chembl_ids)
filtered_targ_act_dict = {t: targ_act_dict[t]
for t in [x for x in protein_targets]}
stmts = []
for target_chembl_id in filtered_targ_act_dict:
target_activity_ids = filtered_targ_act_dict[target_chembl_id]
target_activites = [x for x in activities
if x['activity_id'] in target_activity_ids]
target_upids = []
targ_comp = protein_targets[target_chembl_id]['target_components']
for t_c in targ_comp:
target_upids.append(t_c['accession'])
evidence = []
for assay in target_activites:
ev = get_evidence(assay)
if not ev:
continue
evidence.append(ev)
if len(evidence) > 0:
for target_upid in target_upids:
agent_name = uniprot_client.get_gene_name(target_upid)
target_agent = Agent(agent_name, db_refs={'UP': target_upid})
st = Inhibition(drug, target_agent, evidence=evidence)
stmts.append(st)
return stmts | [
"Query ChEMBL for kinetics data given drug as Agent get back statements\n\n Parameters\n ----------\n drug : Agent\n Agent representing drug with MESH or CHEBI grounding\n\n Returns\n -------\n stmts : list of INDRA statements\n INDRA statements generated by querying ChEMBL for all kinetics data of\n a drug interacting with protein targets\n "
]
|
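Sketch of querying ChEMBL for a drug's inhibition statements with the function above; the Agent import follows indra.statements as used elsewhere in this code, and the drug name and CHEBI grounding are illustrative.

from indra.statements import Agent

drug = Agent('VEMURAFENIB', db_refs={'CHEBI': 'CHEBI:63637'})
stmts = get_drug_inhibition_stmts(drug)
if stmts:
    print('%d Inhibition statements' % len(stmts))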
Please provide a description of the function:def send_query(query_dict):
query = query_dict['query']
params = query_dict['params']
url = 'https://www.ebi.ac.uk/chembl/api/data/' + query + '.json'
r = requests.get(url, params=params)
r.raise_for_status()
js = r.json()
return js | [
"Query ChEMBL API\n\n Parameters\n ----------\n query_dict : dict\n 'query' : string of the endpoint to query\n 'params' : dict of params for the query\n\n Returns\n -------\n js : dict\n dict parsed from json that is unique to the submitted query\n "
]
|
Please provide a description of the function:def query_target(target_chembl_id):
query_dict = {'query': 'target',
'params': {'target_chembl_id': target_chembl_id,
'limit': 1}}
res = send_query(query_dict)
target = res['targets'][0]
return target | [
"Query ChEMBL API target by id\n\n Parameters\n ----------\n target_chembl_id : str\n\n Returns\n -------\n target : dict\n dict parsed from json that is unique for the target\n "
]
|
Please provide a description of the function:def activities_by_target(activities):
targ_act_dict = defaultdict(lambda: [])
for activity in activities:
target_chembl_id = activity['target_chembl_id']
activity_id = activity['activity_id']
targ_act_dict[target_chembl_id].append(activity_id)
for target_chembl_id in targ_act_dict:
targ_act_dict[target_chembl_id] = \
list(set(targ_act_dict[target_chembl_id]))
return targ_act_dict | [
"Get back lists of activities in a dict keyed by ChEMBL target id\n\n Parameters\n ----------\n activities : list\n response from a query returning activities for a drug\n\n Returns\n -------\n targ_act_dict : dict\n dictionary keyed to ChEMBL target ids with lists of activity ids\n "
]
|
Please provide a description of the function:def get_protein_targets_only(target_chembl_ids):
protein_targets = {}
for target_chembl_id in target_chembl_ids:
target = query_target(target_chembl_id)
if 'SINGLE PROTEIN' in target['target_type']:
protein_targets[target_chembl_id] = target
return protein_targets | [
"Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets\n\n Parameters\n ----------\n target_chembl_ids : list\n list of chembl_ids as strings\n\n Returns\n -------\n protein_targets : dict\n dictionary keyed to ChEMBL target ids with lists of activity ids\n "
]
|
Please provide a description of the function:def get_evidence(assay):
kin = get_kinetics(assay)
source_id = assay.get('assay_chembl_id')
if not kin:
return None
annotations = {'kinetics': kin}
chembl_doc_id = str(assay.get('document_chembl_id'))
pmid = get_pmid(chembl_doc_id)
ev = Evidence(source_api='chembl', pmid=pmid, source_id=source_id,
annotations=annotations)
return ev | [
"Given an activity, return an INDRA Evidence object.\n\n Parameters\n ----------\n assay : dict\n an activity from the activities list returned by a query to the API\n\n Returns\n -------\n ev : :py:class:`Evidence`\n an :py:class:`Evidence` object containing the kinetics of the\n "
]
|
Please provide a description of the function:def get_kinetics(assay):
try:
val = float(assay.get('standard_value'))
except (TypeError, ValueError):
logger.warning('Invalid assay value: %s' % assay.get('standard_value'))
return None
unit = assay.get('standard_units')
if unit == 'nM':
unit_sym = 1e-9 * units.mol / units.liter
elif unit == 'uM':
unit_sym = 1e-6 * units.mol / units.liter
else:
logger.warning('Unhandled unit: %s' % unit)
return None
param_type = assay.get('standard_type')
if param_type not in ['IC50', 'EC50', 'INH', 'Potency', 'Kd']:
logger.warning('Unhandled parameter type: %s' % param_type)
logger.info(str(assay))
return None
kin = {param_type: val * unit_sym}
return kin | [
"Given an activity, return its kinetics values.\n\n Parameters\n ----------\n assay : dict\n an activity from the activities list returned by a query to the API\n\n Returns\n -------\n kin : dict\n dictionary of values with units keyed to value types 'IC50', 'EC50',\n 'INH', 'Potency', 'Kd'\n "
]
|
Please provide a description of the function:def get_pmid(doc_id):
url_pmid = 'https://www.ebi.ac.uk/chembl/api/data/document.json'
params = {'document_chembl_id': doc_id}
res = requests.get(url_pmid, params=params)
js = res.json()
pmid = str(js['documents'][0]['pubmed_id'])
return pmid | [
"Get PMID from document_chembl_id\n\n Parameters\n ----------\n doc_id : str\n\n Returns\n -------\n pmid : str\n "
]
|
Please provide a description of the function:def get_target_chemblid(target_upid):
url = 'https://www.ebi.ac.uk/chembl/api/data/target.json'
params = {'target_components__accession': target_upid}
r = requests.get(url, params=params)
r.raise_for_status()
js = r.json()
target_chemblid = js['targets'][0]['target_chembl_id']
return target_chemblid | [
"Get ChEMBL ID from UniProt upid\n\n Parameters\n ----------\n target_upid : str\n\n Returns\n -------\n target_chembl_id : str\n "
]
|
Please provide a description of the function:def get_mesh_id(nlm_mesh):
url_nlm2mesh = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
params = {'db': 'mesh', 'term': nlm_mesh, 'retmode': 'JSON'}
r = requests.get(url_nlm2mesh, params=params)
res = r.json()
mesh_id = res['esearchresult']['idlist'][0]
return mesh_id | [
"Get MESH ID from NLM MESH\n\n Parameters\n ----------\n nlm_mesh : str\n\n Returns\n -------\n mesh_id : str\n "
]
|
Please provide a description of the function:def get_pcid(mesh_id):
url_mesh2pcid = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi'
params = {'dbfrom': 'mesh', 'id': mesh_id,
'db': 'pccompound', 'retmode': 'JSON'}
r = requests.get(url_mesh2pcid, params=params)
res = r.json()
pcid = res['linksets'][0]['linksetdbs'][0]['links'][0]
return pcid | [
"Get PC ID from MESH ID\n\n Parameters\n ----------\n mesh : str\n\n Returns\n -------\n pcid : str\n "
]
|
Please provide a description of the function:def get_chembl_id(nlm_mesh):
mesh_id = get_mesh_id(nlm_mesh)
pcid = get_pcid(mesh_id)
url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \
'cid/%s/synonyms/JSON' % pcid
r = requests.get(url_mesh2pcid)
res = r.json()
synonyms = res['InformationList']['Information'][0]['Synonym']
chembl_id = [syn for syn in synonyms
if 'CHEMBL' in syn and 'SCHEMBL' not in syn][0]
return chembl_id | [
"Get ChEMBL ID from NLM MESH\n\n Parameters\n ----------\n nlm_mesh : str\n\n Returns\n -------\n chembl_id : str\n "
]
|
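Sketch chaining the three lookups above to go from an NLM MESH term to PubChem and ChEMBL identifiers; 'vemurafenib' is an illustrative query term.

mesh_id = get_mesh_id('vemurafenib')
pcid = get_pcid(mesh_id)
chembl_id = get_chembl_id('vemurafenib')
print(mesh_id, pcid, chembl_id)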
Please provide a description of the function:def get_sentences(self, root_element, block_tags):
sentences = []
for element in root_element:
if not self.any_ends_with(block_tags, element.tag):
# tag not in block_tags
if element.text is not None and not re.match(r'^\s*$',
element.text):
sentences.extend(self.sentence_tokenize(element.text))
sentences.extend(self.get_sentences(element, block_tags))
f = open('sentence_debug.txt', 'w')
for s in sentences:
f.write(s.lower() + '\n')
f.close()
return sentences | [
"Returns a list of plain-text sentences by iterating through\n XML tags except for those listed in block_tags."
]
|
Please provide a description of the function:def any_ends_with(self, string_list, pattern):
try:
s_base = basestring
except NameError:
s_base = str
is_string = isinstance(pattern, s_base)
if not is_string:
return False
for s in string_list:
if pattern.endswith(s):
return True
return False | [
"Returns true iff one of the strings in string_list ends in\n pattern."
]
|
Please provide a description of the function:def get_tag_names(self):
root = etree.fromstring(self.xml_full_text.encode('utf-8'))
return self.get_children_tag_names(root) | [
"Returns the set of tag names present in the XML."
]
|
Please provide a description of the function:def get_children_tag_names(self, xml_element):
tags = set()
tags.add(self.remove_namespace_from_tag(xml_element.tag))
for element in xml_element.iter(tag=etree.Element):
if element != xml_element:
new_tags = self.get_children_tag_names(element)
if new_tags is not None:
tags.update(new_tags)
return tags | [
"Returns all tag names of xml element and its children."
]
|
Please provide a description of the function:def string_matches_sans_whitespace(self, str1, str2_fuzzy_whitespace):
str2_fuzzy_whitespace = re.sub(r'\s+', r'\\s*', str2_fuzzy_whitespace)
return re.search(str2_fuzzy_whitespace, str1) is not None | [
"Check if two strings match, modulo their whitespace."
]
|
Please provide a description of the function:def sentence_matches(self, sentence_text):
has_upstream = False
has_downstream = False
has_verb = False
# Get the first word of the action type and assume this is the verb
# (Ex. get depends for depends on)
actiontype_words = word_tokenize(self.mention.actiontype)
actiontype_verb_stemmed = stem(actiontype_words[0])
words = word_tokenize(sentence_text)
if self.string_matches_sans_whitespace(sentence_text.lower(),
self.mention.upstream.lower()):
has_upstream = True
if self.string_matches_sans_whitespace(sentence_text.lower(),
self.mention.downstream.lower()):
has_downstream = True
for word in words:
if actiontype_verb_stemmed == stem(word):
has_verb = True
return has_upstream and has_downstream and has_verb | [
"Returns true iff the sentence contains this mention's upstream\n and downstream participants, and if one of the stemmed verbs in\n the sentence is the same as the stemmed action type."
]
|
Please provide a description of the function:def get_identifiers_url(db_name, db_id):
identifiers_url = 'http://identifiers.org/'
bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
if db_name == 'UP':
url = identifiers_url + 'uniprot/%s' % db_id
elif db_name == 'HGNC':
url = identifiers_url + 'hgnc/HGNC:%s' % db_id
elif db_name == 'IP':
url = identifiers_url + 'interpro/%s' % db_id
elif db_name == 'IPR':
url = identifiers_url + 'interpro/%s' % db_id
elif db_name == 'CHEBI':
url = identifiers_url + 'chebi/%s' % db_id
elif db_name == 'NCIT':
url = identifiers_url + 'ncit/%s' % db_id
elif db_name == 'GO':
if db_id.startswith('GO:'):
url = identifiers_url + 'go/%s' % db_id
else:
url = identifiers_url + 'go/GO:%s' % db_id
elif db_name in ('PUBCHEM', 'PCID'): # Assuming PCID = PubChem compound ID
if db_id.startswith('PUBCHEM:'):
db_id = db_id[8:]
elif db_id.startswith('PCID:'):
db_id = db_id[5:]
url = identifiers_url + 'pubchem.compound/%s' % db_id
elif db_name == 'PF':
url = identifiers_url + 'pfam/%s' % db_id
elif db_name == 'MIRBASEM':
url = identifiers_url + 'mirbase.mature/%s' % db_id
elif db_name == 'MIRBASE':
url = identifiers_url + 'mirbase/%s' % db_id
elif db_name == 'MESH':
url = identifiers_url + 'mesh/%s' % db_id
elif db_name == 'EGID':
url = identifiers_url + 'ncbigene/%s' % db_id
elif db_name == 'HMDB':
url = identifiers_url + 'hmdb/%s' % db_id
elif db_name == 'LINCS':
if db_id.startswith('LSM-'): # Lincs Small Molecule ID
url = identifiers_url + 'lincs.smallmolecule/%s' % db_id
elif db_id.startswith('LCL-'): # Lincs Cell Line ID
url = identifiers_url + 'lincs.cell/%s' % db_id
else: # Assume LINCS Protein
url = identifiers_url + 'lincs.protein/%s' % db_id
elif db_name == 'HMS-LINCS':
url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
# Special cases with no identifiers entry
elif db_name == 'SCHEM':
url = bel_scai_url + 'selventa-legacy-chemicals/' + \
'selventa-legacy-chemicals-20150601.belns'
elif db_name == 'SCOMP':
url = bel_scai_url + 'selventa-named-complexes/' + \
'selventa-named-complexes-20150601.belns'
elif db_name == 'SFAM':
url = bel_scai_url + 'selventa-protein-families/' + \
'selventa-protein-families-20150601.belns'
elif db_name == 'FPLX':
url = 'http://identifiers.org/fplx/%s' % db_id
elif db_name == 'LNCRNADB':
if db_id.startswith('ENSG'):
url = 'http://www.lncrnadb.org/search/?q=%s' % db_id
else: # Assmuing HGNC symbol
url = 'http://www.lncrnadb.org/%s/' % db_id
elif db_name == 'NXPFA':
url = 'https://www.nextprot.org/term/FA-%s' % db_id
elif db_name in ('UN', 'WDI', 'FAO'):
url = 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/%s' % \
db_id
elif db_name == 'HUME':
url = ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies/'
'hume_ontology/%s' % db_id)
elif db_name == 'CWMS':
url = 'http://trips.ihmc.us/%s' % db_id
elif db_name == 'SIGNOR': # Assuming db_id == Primary ID
url = 'https://signor.uniroma2.it/relation_result.php?id=%s' % db_id
elif db_name == 'SOFIA':
url = 'http://cs.cmu.edu/sofia/%s' % db_id
elif db_name == 'CHEMBL':
if not db_id.startswith('CHEMBL'):
db_id = 'CHEMBL%s' % db_id
url = identifiers_url + 'chembl.compound/%s' % db_id
elif db_name == 'NONCODE':
url = 'http://www.noncode.org/show_gene.php?id=NONHSAG%s' % db_id
elif db_name == 'TEXT':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url | [
"Return an identifiers.org URL for a given database name and ID.\n\n Parameters\n ----------\n db_name : str\n An internal database name: HGNC, UP, CHEBI, etc.\n db_id : str\n An identifier in the given database.\n\n Returns\n -------\n url : str\n An identifiers.org URL corresponding to the given database name and ID.\n "
]
|
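A few calls whose outputs follow directly from the branches above; MAP2K1's HGNC ID, MAPK1's UniProt ID and a GO term are used for illustration.

print(get_identifiers_url('HGNC', '6840'))
# http://identifiers.org/hgnc/HGNC:6840
print(get_identifiers_url('UP', 'P28482'))
# http://identifiers.org/uniprot/P28482
print(get_identifiers_url('GO', '0006915'))
# http://identifiers.org/go/GO:0006915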
Please provide a description of the function:def dump_statements(stmts, fname, protocol=4):
logger.info('Dumping %d statements into %s...' % (len(stmts), fname))
with open(fname, 'wb') as fh:
pickle.dump(stmts, fh, protocol=protocol) | [
"Dump a list of statements into a pickle file.\n\n Parameters\n ----------\n fname : str\n The name of the pickle file to dump statements into.\n protocol : Optional[int]\n The pickle protocol to use (use 2 for Python 2 compatibility).\n Default: 4\n "
]
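A short usage sketch, assuming these helpers live in indra.tools.assemble_corpus (the module path is not shown in this excerpt):
from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
# Write the statements as a pickle; pass protocol=2 if the file needs to
# be readable from Python 2.
ac.dump_statements(stmts, 'stmts.pkl', protocol=4)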
|
Please provide a description of the function:def load_statements(fname, as_dict=False):
logger.info('Loading %s...' % fname)
with open(fname, 'rb') as fh:
# Encoding argument not available in pickle for Python 2
if sys.version_info[0] < 3:
stmts = pickle.load(fh)
# Encoding argument specified here to enable compatibility with
# pickle files created with Python 2
else:
stmts = pickle.load(fh, encoding='latin1')
if isinstance(stmts, dict):
if as_dict:
return stmts
st = []
for pmid, st_list in stmts.items():
st += st_list
stmts = st
logger.info('Loaded %d statements' % len(stmts))
return stmts | [
"Load statements from a pickle file.\n\n Parameters\n ----------\n fname : str\n The name of the pickle file to load statements from.\n as_dict : Optional[bool]\n If True and the pickle file contains a dictionary of statements, it\n is returned as a dictionary. If False, the statements are always\n returned in a list. Default: False\n\n Returns\n -------\n stmts : list\n A list or dict of statements that were loaded.\n "
]
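The corresponding loading step, under the same module-path assumption:
from indra.tools import assemble_corpus as ac  # assumed module path

# A pickled list is returned as a list; a pickled dict keyed by PMID is
# flattened into a single list unless as_dict=True is given.
stmts = ac.load_statements('stmts.pkl')
stmts_by_pmid = ac.load_statements('stmts_by_pmid.pkl', as_dict=True)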
|
Please provide a description of the function:def map_grounding(stmts_in, **kwargs):
from indra.preassembler.grounding_mapper import GroundingMapper
from indra.preassembler.grounding_mapper import gm as grounding_map
from indra.preassembler.grounding_mapper import \
default_agent_map as agent_map
logger.info('Mapping grounding on %d statements...' % len(stmts_in))
do_rename = kwargs.get('do_rename')
gm = kwargs.get('grounding_map', grounding_map)
if do_rename is None:
do_rename = True
gm = GroundingMapper(gm, agent_map, use_deft=kwargs.get('use_deft', True))
stmts_out = gm.map_agents(stmts_in, do_rename=do_rename)
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Map grounding using the GroundingMapper.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to map.\n do_rename : Optional[bool]\n If True, Agents are renamed based on their mapped grounding.\n grounding_map : Optional[dict]\n A user supplied grounding map which maps a string to a\n dictionary of database IDs (in the format used by Agents'\n db_refs).\n use_deft : Optional[bool]\n If True, Deft will be attempted to be used for acronym disambiguation.\n Default: True\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of mapped statements.\n "
]
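A usage sketch with the keyword arguments documented above; the module path is an assumption, and the input can be any list of INDRA Statements, for example reader output:
from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

stmts = [Phosphorylation(Agent('MEK'), Agent('ERK'))]
# Re-ground agents, rename them from the mapped grounding, and skip
# Deft-based acronym disambiguation.
mapped_stmts = ac.map_grounding(stmts, do_rename=True, use_deft=False,
                                save='grounded_stmts.pkl')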
|
Please provide a description of the function:def merge_groundings(stmts_in):
def surface_grounding(stmt):
# Find the "best" grounding for a given concept and its evidences
# and surface that
for idx, concept in enumerate(stmt.agent_list()):
if concept is None:
continue
aggregate_groundings = {}
for ev in stmt.evidence:
if 'agents' in ev.annotations:
groundings = ev.annotations['agents']['raw_grounding'][idx]
for ns, value in groundings.items():
if ns not in aggregate_groundings:
aggregate_groundings[ns] = []
if isinstance(value, list):
aggregate_groundings[ns] += value
else:
aggregate_groundings[ns].append(value)
best_groundings = get_best_groundings(aggregate_groundings)
concept.db_refs = best_groundings
def get_best_groundings(aggregate_groundings):
best_groundings = {}
for ns, values in aggregate_groundings.items():
# There are 3 possibilities here
# 1. All the entries in the list are scored in which case we
# get unique entries and sort them by score
if all([isinstance(v, (tuple, list)) for v in values]):
best_groundings[ns] = []
for unique_value in {v[0] for v in values}:
scores = [v[1] for v in values if v[0] == unique_value]
best_groundings[ns].append((unique_value, max(scores)))
best_groundings[ns] = \
sorted(best_groundings[ns], key=lambda x: x[1],
reverse=True)
# 2. All the entries in the list are unscored in which case we
# get the highest frequency entry
elif all([not isinstance(v, (tuple, list)) for v in values]):
best_groundings[ns] = max(set(values), key=values.count)
# 3. There is a mixture, which can happen when some entries were
# mapped with scores and others had no scores to begin with.
# In this case, we again pick the highest frequency non-scored
# entry assuming that the unmapped version is more reliable.
else:
unscored_vals = [v for v in values
if not isinstance(v, (tuple, list))]
best_groundings[ns] = max(set(unscored_vals),
key=unscored_vals.count)
return best_groundings
stmts_out = []
for stmt in stmts_in:
if not isinstance(stmt, (Complex, Conversion)):
surface_grounding(stmt)
stmts_out.append(stmt)
return stmts_out | [
"Gather and merge original grounding information from evidences.\n\n Each Statement's evidences are traversed to find original grounding\n information. These groundings are then merged into an overall consensus\n grounding dict with as much detail as possible.\n\n The current implementation is only applicable to Statements whose\n concept/agent roles are fixed. Complexes, Associations and Conversions\n cannot be handled correctly.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of INDRA Statements whose groundings should be merged. These\n Statements are meant to have been preassembled and potentially have\n multiple pieces of evidence.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n The list of Statements now with groundings merged at the Statement\n level.\n "
]
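A minimal sketch of the expected input shape, built directly from the annotation structure the code above reads (ev.annotations['agents']['raw_grounding']); the module path is again an assumption:
from indra.statements import Agent, Evidence, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

ev = Evidence(annotations={'agents': {'raw_grounding': [
    {'TEXT': 'MEK', 'FPLX': 'MEK'},
    {'TEXT': 'ERK', 'FPLX': 'ERK'}]}})
stmt = Phosphorylation(Agent('MEK'), Agent('ERK'), evidence=[ev])
# The consensus grounding gathered across evidences is written back onto
# each agent's db_refs.
merged = ac.merge_groundings([stmt])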
|
Please provide a description of the function:def merge_deltas(stmts_in):
stmts_out = []
for stmt in stmts_in:
# This operation is only applicable to Influences
if not isinstance(stmt, Influence):
stmts_out.append(stmt)
continue
# At this point this is guaranteed to be an Influence
deltas = {}
for role in ('subj', 'obj'):
for info in ('polarity', 'adjectives'):
key = (role, info)
deltas[key] = []
for ev in stmt.evidence:
entry = ev.annotations.get('%s_%s' % key)
deltas[key].append(entry if entry else None)
# POLARITY
# For polarity we need to work in pairs
polarity_pairs = list(zip(deltas[('subj', 'polarity')],
deltas[('obj', 'polarity')]))
# If we have some fully defined pairs, we take the most common one
both_pols = [pair for pair in polarity_pairs if pair[0] is not None and
pair[1] is not None]
if both_pols:
subj_pol, obj_pol = max(set(both_pols), key=both_pols.count)
stmt.subj.delta['polarity'] = subj_pol
stmt.obj.delta['polarity'] = obj_pol
# Otherwise we prefer the case when at least one entry of the
# pair is given
else:
one_pol = [pair for pair in polarity_pairs if pair[0] is not None or
pair[1] is not None]
if one_pol:
subj_pol, obj_pol = max(set(one_pol), key=one_pol.count)
stmt.subj.delta['polarity'] = subj_pol
stmt.obj.delta['polarity'] = obj_pol
# ADJECTIVES
for attr, role in ((stmt.subj.delta, 'subj'), (stmt.obj.delta, 'obj')):
all_adjectives = []
for adj in deltas[(role, 'adjectives')]:
if isinstance(adj, list):
all_adjectives += adj
elif adj is not None:
all_adjectives.append(adj)
attr['adjectives'] = all_adjectives
stmts_out.append(stmt)
return stmts_out | [
"Gather and merge original Influence delta information from evidence.\n\n\n This function is only applicable to Influence Statements that have\n subj and obj deltas. All other statement types are passed through unchanged.\n Polarities and adjectives for subjects and objects respectivey are\n collected and merged by travesrsing all evidences of a Statement.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of INDRA Statements whose influence deltas should be merged.\n These Statements are meant to have been preassembled and potentially\n have multiple pieces of evidence.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n The list of Statements now with deltas merged at the Statement\n level.\n "
]
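A call-level sketch only: constructing Influence statements with subj/obj deltas by hand is version-dependent, so influence_stmts below is a placeholder for statements produced by a reader such as Eidos whose evidences carry 'subj_polarity'/'obj_polarity' and 'subj_adjectives'/'obj_adjectives' annotations (the keys read by the code above).
from indra.tools import assemble_corpus as ac  # assumed module path

# influence_stmts: placeholder, not constructed here.
merged = ac.merge_deltas(influence_stmts)
for stmt in merged:
    print(stmt.subj.delta['polarity'], stmt.obj.delta['polarity'])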
|
Please provide a description of the function:def map_sequence(stmts_in, **kwargs):
from indra.preassembler.sitemapper import SiteMapper, default_site_map
logger.info('Mapping sites on %d statements...' % len(stmts_in))
kwarg_list = ['do_methionine_offset', 'do_orthology_mapping',
'do_isoform_mapping']
sm = SiteMapper(default_site_map,
use_cache=kwargs.pop('use_cache', False),
**_filter(kwargs, kwarg_list))
valid, mapped = sm.map_sites(stmts_in)
correctly_mapped_stmts = []
for ms in mapped:
correctly_mapped = all([mm.has_mapping() for mm in ms.mapped_mods])
if correctly_mapped:
correctly_mapped_stmts.append(ms.mapped_stmt)
stmts_out = valid + correctly_mapped_stmts
logger.info('%d statements with valid sites' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
del sm
return stmts_out | [
"Map sequences using the SiteMapper.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to map.\n do_methionine_offset : boolean\n Whether to check for off-by-one errors in site position (possibly)\n attributable to site numbering from mature proteins after\n cleavage of the initial methionine. If True, checks the reference\n sequence for a known modification at 1 site position greater\n than the given one; if there exists such a site, creates the\n mapping. Default is True.\n do_orthology_mapping : boolean\n Whether to check sequence positions for known modification sites\n in mouse or rat sequences (based on PhosphoSitePlus data). If a\n mouse/rat site is found that is linked to a site in the human\n reference sequence, a mapping is created. Default is True.\n do_isoform_mapping : boolean\n Whether to check sequence positions for known modifications\n in other human isoforms of the protein (based on PhosphoSitePlus\n data). If a site is found that is linked to a site in the human\n reference sequence, a mapping is created. Default is True.\n use_cache : boolean\n If True, a cache will be created/used from the laction specified by\n SITEMAPPER_CACHE_PATH, defined in your INDRA config or the environment.\n If False, no cache is used. For more details on the cache, see the\n SiteMapper class definition.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of mapped statements.\n "
]
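A usage sketch (assumed module path). T183 on human MAPK1 is a commonly reported position that does not match the human reference sequence, which is the kind of site the mapper tries to correct:
from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

stmts = [Phosphorylation(Agent('MAP2K1', db_refs={'HGNC': '6840'}),
                         Agent('MAPK1', db_refs={'HGNC': '6871'}),
                         'T', '183')]
# Statements with valid sites pass through; invalid sites that can be
# mapped are returned with corrected positions.
mapped_stmts = ac.map_sequence(stmts, do_methionine_offset=True,
                               use_cache=False)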
|
Please provide a description of the function:def run_preassembly(stmts_in, **kwargs):
dump_pkl_unique = kwargs.get('save_unique')
belief_scorer = kwargs.get('belief_scorer')
use_hierarchies = kwargs['hierarchies'] if 'hierarchies' in kwargs else \
hierarchies
be = BeliefEngine(scorer=belief_scorer)
    # Use the user-supplied hierarchies if given (see the 'hierarchies' kwarg)
    pa = Preassembler(use_hierarchies, stmts_in)
run_preassembly_duplicate(pa, be, save=dump_pkl_unique)
dump_pkl = kwargs.get('save')
return_toplevel = kwargs.get('return_toplevel', True)
poolsize = kwargs.get('poolsize', None)
size_cutoff = kwargs.get('size_cutoff', 100)
options = {'save': dump_pkl, 'return_toplevel': return_toplevel,
'poolsize': poolsize, 'size_cutoff': size_cutoff,
'flatten_evidence': kwargs.get('flatten_evidence', False),
'flatten_evidence_collect_from':
kwargs.get('flatten_evidence_collect_from', 'supported_by')
}
stmts_out = run_preassembly_related(pa, be, **options)
return stmts_out | [
"Run preassembly on a list of statements.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to preassemble.\n return_toplevel : Optional[bool]\n If True, only the top-level statements are returned. If False,\n all statements are returned irrespective of level of specificity.\n Default: True\n poolsize : Optional[int]\n The number of worker processes to use to parallelize the\n comparisons performed by the function. If None (default), no\n parallelization is performed. NOTE: Parallelization is only\n available on Python 3.4 and above.\n size_cutoff : Optional[int]\n Groups with size_cutoff or more statements are sent to worker\n processes, while smaller groups are compared in the parent process.\n Default value is 100. Not relevant when parallelization is not\n used.\n belief_scorer : Optional[indra.belief.BeliefScorer]\n Instance of BeliefScorer class to use in calculating Statement\n probabilities. If None is provided (default), then the default\n scorer is used.\n hierarchies : Optional[dict]\n Dict of hierarchy managers to use for preassembly\n flatten_evidence : Optional[bool]\n If True, evidences are collected and flattened via supports/supported_by\n links. Default: False\n flatten_evidence_collect_from : Optional[str]\n String indicating whether to collect and flatten evidence from the\n `supports` attribute of each statement or the `supported_by` attribute.\n If not set, defaults to 'supported_by'.\n Only relevant when flatten_evidence is True.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n save_unique : Optional[str]\n The name of a pickle file to save the unique statements into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of preassembled top-level statements.\n "
]
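An end-to-end usage sketch under the same module-path assumption; the two statements below are related at different levels of detail, which is what preassembly is meant to organize:
from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1')),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')]
# Deduplicate, combine related statements, compute beliefs, and return
# only the top-level statements with flattened evidence.
top_level = ac.run_preassembly(stmts, return_toplevel=True,
                               flatten_evidence=True,
                               save='preassembled.pkl')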
|
Please provide a description of the function:def run_preassembly_duplicate(preassembler, beliefengine, **kwargs):
logger.info('Combining duplicates on %d statements...' %
len(preassembler.stmts))
dump_pkl = kwargs.get('save')
stmts_out = preassembler.combine_duplicates()
beliefengine.set_prior_probs(stmts_out)
logger.info('%d unique statements' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Run deduplication stage of preassembly on a list of statements.\n\n Parameters\n ----------\n preassembler : indra.preassembler.Preassembler\n A Preassembler instance\n beliefengine : indra.belief.BeliefEngine\n A BeliefEngine instance.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of unique statements.\n "
]
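When the two preassembly stages are driven manually, the Preassembler and BeliefEngine are constructed first; a sketch assuming the import paths below (the hierarchy_manager path is not shown in this excerpt):
from indra.belief import BeliefEngine
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies  # assumed path
from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1')),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
pa = Preassembler(hierarchies, stmts)
be = BeliefEngine()
unique_stmts = ac.run_preassembly_duplicate(pa, be, save='unique.pkl')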
|
Please provide a description of the function:def run_preassembly_related(preassembler, beliefengine, **kwargs):
logger.info('Combining related on %d statements...' %
len(preassembler.unique_stmts))
return_toplevel = kwargs.get('return_toplevel', True)
poolsize = kwargs.get('poolsize', None)
size_cutoff = kwargs.get('size_cutoff', 100)
stmts_out = preassembler.combine_related(return_toplevel=False,
poolsize=poolsize,
size_cutoff=size_cutoff)
# Calculate beliefs
beliefengine.set_hierarchy_probs(stmts_out)
# Flatten evidence if needed
do_flatten_evidence = kwargs.get('flatten_evidence', False)
if do_flatten_evidence:
flatten_evidences_collect_from = \
kwargs.get('flatten_evidence_collect_from', 'supported_by')
stmts_out = flatten_evidence(stmts_out, flatten_evidences_collect_from)
# Filter to top if needed
stmts_top = filter_top_level(stmts_out)
if return_toplevel:
stmts_out = stmts_top
logger.info('%d top-level statements' % len(stmts_out))
else:
logger.info('%d statements out of which %d are top-level' %
(len(stmts_out), len(stmts_top)))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Run related stage of preassembly on a list of statements.\n\n Parameters\n ----------\n preassembler : indra.preassembler.Preassembler\n A Preassembler instance which already has a set of unique statements\n internally.\n beliefengine : indra.belief.BeliefEngine\n A BeliefEngine instance.\n return_toplevel : Optional[bool]\n If True, only the top-level statements are returned. If False,\n all statements are returned irrespective of level of specificity.\n Default: True\n poolsize : Optional[int]\n The number of worker processes to use to parallelize the\n comparisons performed by the function. If None (default), no\n parallelization is performed. NOTE: Parallelization is only\n available on Python 3.4 and above.\n size_cutoff : Optional[int]\n Groups with size_cutoff or more statements are sent to worker\n processes, while smaller groups are compared in the parent process.\n Default value is 100. Not relevant when parallelization is not\n used.\n flatten_evidence : Optional[bool]\n If True, evidences are collected and flattened via supports/supported_by\n links. Default: False\n flatten_evidence_collect_from : Optional[str]\n String indicating whether to collect and flatten evidence from the\n `supports` attribute of each statement or the `supported_by` attribute.\n If not set, defaults to 'supported_by'.\n Only relevant when flatten_evidence is True.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of preassembled top-level statements.\n "
]
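The related stage continues from the deduplication stage, which must run first so that pa.unique_stmts is populated; same assumptions as the previous sketch:
from indra.belief import BeliefEngine
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies  # assumed path
from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1')),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')]
pa = Preassembler(hierarchies, stmts)
be = BeliefEngine()
ac.run_preassembly_duplicate(pa, be)  # populates pa.unique_stmts
top_level = ac.run_preassembly_related(pa, be, return_toplevel=True,
                                       flatten_evidence=True)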
|
Please provide a description of the function:def filter_by_type(stmts_in, stmt_type, **kwargs):
invert = kwargs.get('invert', False)
logger.info('Filtering %d statements for type %s%s...' %
(len(stmts_in), 'not ' if invert else '',
stmt_type.__name__))
if not invert:
stmts_out = [st for st in stmts_in if isinstance(st, stmt_type)]
else:
stmts_out = [st for st in stmts_in if not isinstance(st, stmt_type)]
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to a given statement type.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n stmt_type : indra.statements.Statement\n The class of the statement type to filter for.\n Example: indra.statements.Modification\n invert : Optional[bool]\n If True, the statements that are not of the given type\n are returned. Default: False\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
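A usage sketch (assumed module path as above):
from indra.statements import Activation, Agent, Phosphorylation
from indra.tools import assemble_corpus as ac  # assumed module path

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1')),
         Activation(Agent('MAPK1'), Agent('ELK1'))]
phos_only = ac.filter_by_type(stmts, Phosphorylation)
# invert=True keeps everything that is not a Phosphorylation.
non_phos = ac.filter_by_type(stmts, Phosphorylation, invert=True)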
|
Please provide a description of the function:def _remove_bound_conditions(agent, keep_criterion):
new_bc = []
for ind in range(len(agent.bound_conditions)):
if keep_criterion(agent.bound_conditions[ind].agent):
new_bc.append(agent.bound_conditions[ind])
agent.bound_conditions = new_bc | [
"Removes bound conditions of agent such that keep_criterion is False.\n\n Parameters\n ----------\n agent: Agent\n The agent whose bound conditions we evaluate\n keep_criterion: function\n Evaluates removal_criterion(a) for each agent a in a bound condition\n and if it evaluates to False, removes a from agent's bound_conditions\n "
]
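A small sketch of the helper in use; the import of the private function assumes the same module path as above:
from indra.statements import Agent, BoundCondition
from indra.tools.assemble_corpus import _remove_bound_conditions  # assumed path

raf = Agent('RAF1', db_refs={'HGNC': '9829'})
unknown = Agent('XYZ')
mek = Agent('MAP2K1', bound_conditions=[BoundCondition(raf),
                                        BoundCondition(unknown)])
# Keep only bound conditions whose agent carries an HGNC grounding.
_remove_bound_conditions(mek, lambda a: 'HGNC' in a.db_refs)
# mek.bound_conditions now holds only the RAF1 condition.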
|