Columns per record: repo | path | func_name | language | code (docstring inline) | sha | url | partition

repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: download_article | language: python

def download_article(id_val, id_type='doi', on_retry=False):
    """Low level function to get an XML article for a particular id.

    Parameters
    ----------
    id_val : str
        The value of the id.
    id_type : str
        The type of id, such as pmid (a.k.a. pubmed_id), doi, or eid.
    on_retry : bool
        This function has a recursive retry feature, and this is the only
        time this parameter should be used.

    Returns
    -------
    content : str or None
        If found, the content string is returned, otherwise None is
        returned.
    """
    if id_type == 'pmid':
        id_type = 'pubmed_id'
    url = '%s/%s' % (elsevier_article_url_fmt % id_type, id_val)
    params = {'httpAccept': 'text/xml'}
    res = requests.get(url, params, headers=ELSEVIER_KEYS)
    if res.status_code == 404:
        logger.info("Resource for %s not available on elsevier." % url)
        return None
    elif res.status_code == 429:
        if not on_retry:
            logger.warning("Broke the speed limit. Waiting half a second then "
                           "trying again...")
            sleep(0.5)
            return download_article(id_val, id_type, True)
        else:
            logger.error("Still breaking speed limit after waiting.")
            logger.error("Elsevier response: %s" % res.text)
            return None
    elif res.status_code != 200:
        logger.error('Could not download article %s: status code %d' %
                     (url, res.status_code))
        logger.error('Elsevier response: %s' % res.text)
        return None
    else:
        content_str = res.content.decode('utf-8')
        if content_str.startswith('<service-error>'):
            logger.error('Got a service error with 200 status: %s'
                         % content_str)
            return None
        # Return the XML content as a unicode string, assuming UTF-8 encoding
        return content_str

"""Low level function to get an XML article for a particular id.
Parameters
----------
id_val : str
The value of the id.
id_type : str
The type of id, such as pmid (a.k.a. pubmed_id), doi, or eid.
on_retry : bool
This function has a recursive retry feature, and this is the only time
this parameter should be used.
Returns
-------
content : str or None
If found, the content string is returned, otherwise, None is returned.
"""
if id_type == 'pmid':
id_type = 'pubmed_id'
url = '%s/%s' % (elsevier_article_url_fmt % id_type, id_val)
params = {'httpAccept': 'text/xml'}
res = requests.get(url, params, headers=ELSEVIER_KEYS)
if res.status_code == 404:
logger.info("Resource for %s not available on elsevier." % url)
return None
elif res.status_code == 429:
if not on_retry:
logger.warning("Broke the speed limit. Waiting half a second then "
"trying again...")
sleep(0.5)
return download_article(id_val, id_type, True)
else:
logger.error("Still breaking speed limit after waiting.")
logger.error("Elsevier response: %s" % res.text)
return None
elif res.status_code != 200:
logger.error('Could not download article %s: status code %d' %
(url, res.status_code))
logger.error('Elsevier response: %s' % res.text)
return None
else:
content_str = res.content.decode('utf-8')
if content_str.startswith('<service-error>'):
logger.error('Got a service error with 200 status: %s'
% content_str)
return None
# Return the XML content as a unicode string, assuming UTF-8 encoding
return content_str | [
"def",
"download_article",
"(",
"id_val",
",",
"id_type",
"=",
"'doi'",
",",
"on_retry",
"=",
"False",
")",
":",
"if",
"id_type",
"==",
"'pmid'",
":",
"id_type",
"=",
"'pubmed_id'",
"url",
"=",
"'%s/%s'",
"%",
"(",
"elsevier_article_url_fmt",
"%",
"id_type",
",",
"id_val",
")",
"params",
"=",
"{",
"'httpAccept'",
":",
"'text/xml'",
"}",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
",",
"headers",
"=",
"ELSEVIER_KEYS",
")",
"if",
"res",
".",
"status_code",
"==",
"404",
":",
"logger",
".",
"info",
"(",
"\"Resource for %s not available on elsevier.\"",
"%",
"url",
")",
"return",
"None",
"elif",
"res",
".",
"status_code",
"==",
"429",
":",
"if",
"not",
"on_retry",
":",
"logger",
".",
"warning",
"(",
"\"Broke the speed limit. Waiting half a second then \"",
"\"trying again...\"",
")",
"sleep",
"(",
"0.5",
")",
"return",
"download_article",
"(",
"id_val",
",",
"id_type",
",",
"True",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"Still breaking speed limit after waiting.\"",
")",
"logger",
".",
"error",
"(",
"\"Elsevier response: %s\"",
"%",
"res",
".",
"text",
")",
"return",
"None",
"elif",
"res",
".",
"status_code",
"!=",
"200",
":",
"logger",
".",
"error",
"(",
"'Could not download article %s: status code %d'",
"%",
"(",
"url",
",",
"res",
".",
"status_code",
")",
")",
"logger",
".",
"error",
"(",
"'Elsevier response: %s'",
"%",
"res",
".",
"text",
")",
"return",
"None",
"else",
":",
"content_str",
"=",
"res",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"content_str",
".",
"startswith",
"(",
"'<service-error>'",
")",
":",
"logger",
".",
"error",
"(",
"'Got a service error with 200 status: %s'",
"%",
"content_str",
")",
"return",
"None",
"# Return the XML content as a unicode string, assuming UTF-8 encoding",
"return",
"content_str"
]
| Low level function to get an XML article for a particular id.
Parameters
----------
id_val : str
The value of the id.
id_type : str
The type of id, such as pmid (a.k.a. pubmed_id), doi, or eid.
on_retry : bool
This function has a recursive retry feature, and this is the only time
this parameter should be used.
Returns
-------
content : str or None
If found, the content string is returned, otherwise, None is returned. | [
"Low",
"level",
"function",
"to",
"get",
"an",
"XML",
"article",
"for",
"a",
"particular",
"id",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L110-L158 | partition: train

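A minimal usage sketch for download_article (the DOI below is a placeholder, and Elsevier API keys are assumed to be configured for the client):

from indra.literature.elsevier_client import download_article

# Placeholder DOI; returns the article XML as a str, or None on failure
xml = download_article('10.1016/j.example.2016.01.001', id_type='doi')
if xml is not None:
    print(xml[:200])
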
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: download_article_from_ids | language: python

def download_article_from_ids(**id_dict):
    """Download an article in XML format from Elsevier matching the set of
    ids.

    Parameters
    ----------
    <id_type> : str
        You can enter any combination of eid, doi, pmid, and/or pii. Ids
        will be checked in that order, until either content has been found
        or all ids have been checked.

    Returns
    -------
    content : str or None
        If found, the content is returned as a string, otherwise None is
        returned.
    """
    valid_id_types = ['eid', 'doi', 'pmid', 'pii']
    assert all([k in valid_id_types for k in id_dict.keys()]),\
        ("One of these id keys is invalid: %s Valid keys are: %s."
         % (list(id_dict.keys()), valid_id_types))
    if 'doi' in id_dict.keys() and id_dict['doi'].lower().startswith('doi:'):
        id_dict['doi'] = id_dict['doi'][4:]
    content = None
    for id_type in valid_id_types:
        if id_type in id_dict.keys():
            content = download_article(id_dict[id_type], id_type)
            if content is not None:
                break
    else:
        logger.error("Could not download article with any of the ids: %s."
                     % str(id_dict))
    return content

"""Download an article in XML format from Elsevier matching the set of ids.
Parameters
----------
<id_type> : str
You can enter any combination of eid, doi, pmid, and/or pii. Ids will be
checked in that order, until either content has been found or all ids
have been checked.
Returns
-------
content : str or None
If found, the content is returned as a string, otherwise None is
returned.
"""
valid_id_types = ['eid', 'doi', 'pmid', 'pii']
assert all([k in valid_id_types for k in id_dict.keys()]),\
("One of these id keys is invalid: %s Valid keys are: %s."
% (list(id_dict.keys()), valid_id_types))
if 'doi' in id_dict.keys() and id_dict['doi'].lower().startswith('doi:'):
id_dict['doi'] = id_dict['doi'][4:]
content = None
for id_type in valid_id_types:
if id_type in id_dict.keys():
content = download_article(id_dict[id_type], id_type)
if content is not None:
break
else:
logger.error("Could not download article with any of the ids: %s."
% str(id_dict))
return content | [
"def",
"download_article_from_ids",
"(",
"*",
"*",
"id_dict",
")",
":",
"valid_id_types",
"=",
"[",
"'eid'",
",",
"'doi'",
",",
"'pmid'",
",",
"'pii'",
"]",
"assert",
"all",
"(",
"[",
"k",
"in",
"valid_id_types",
"for",
"k",
"in",
"id_dict",
".",
"keys",
"(",
")",
"]",
")",
",",
"(",
"\"One of these id keys is invalid: %s Valid keys are: %s.\"",
"%",
"(",
"list",
"(",
"id_dict",
".",
"keys",
"(",
")",
")",
",",
"valid_id_types",
")",
")",
"if",
"'doi'",
"in",
"id_dict",
".",
"keys",
"(",
")",
"and",
"id_dict",
"[",
"'doi'",
"]",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'doi:'",
")",
":",
"id_dict",
"[",
"'doi'",
"]",
"=",
"id_dict",
"[",
"'doi'",
"]",
"[",
"4",
":",
"]",
"content",
"=",
"None",
"for",
"id_type",
"in",
"valid_id_types",
":",
"if",
"id_type",
"in",
"id_dict",
".",
"keys",
"(",
")",
":",
"content",
"=",
"download_article",
"(",
"id_dict",
"[",
"id_type",
"]",
",",
"id_type",
")",
"if",
"content",
"is",
"not",
"None",
":",
"break",
"else",
":",
"logger",
".",
"error",
"(",
"\"Could not download article with any of the ids: %s.\"",
"%",
"str",
"(",
"id_dict",
")",
")",
"return",
"content"
]
| Download an article in XML format from Elsevier matching the set of ids.
Parameters
----------
<id_type> : str
You can enter any combination of eid, doi, pmid, and/or pii. Ids will be
checked in that order, until either content has been found or all ids
have been checked.
Returns
-------
content : str or None
If found, the content is returned as a string, otherwise None is
returned. | [
"Download",
"an",
"article",
"in",
"XML",
"format",
"from",
"Elsevier",
"matching",
"the",
"set",
"of",
"ids",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L161-L192 | partition: train

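A usage sketch with placeholder identifiers; ids are tried in the order eid, doi, pmid, pii:

from indra.literature.elsevier_client import download_article_from_ids

xml = download_article_from_ids(doi='10.1016/j.example.2016.01.001',
                                pmid='12345678')
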
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: get_abstract | language: python

def get_abstract(doi):
    """Get the abstract text of an article from Elsevier given a doi."""
    xml_string = download_article(doi)
    if xml_string is None:
        return None
    assert isinstance(xml_string, str)
    xml_tree = ET.XML(xml_string.encode('utf-8'), parser=UTB())
    if xml_tree is None:
        return None
    coredata = xml_tree.find('article:coredata', elsevier_ns)
    abstract = coredata.find('dc:description', elsevier_ns)
    abs_text = abstract.text
    return abs_text

"""Get the abstract text of an article from Elsevier given a doi."""
xml_string = download_article(doi)
if xml_string is None:
return None
assert isinstance(xml_string, str)
xml_tree = ET.XML(xml_string.encode('utf-8'), parser=UTB())
if xml_tree is None:
return None
coredata = xml_tree.find('article:coredata', elsevier_ns)
abstract = coredata.find('dc:description', elsevier_ns)
abs_text = abstract.text
return abs_text | [
"def",
"get_abstract",
"(",
"doi",
")",
":",
"xml_string",
"=",
"download_article",
"(",
"doi",
")",
"if",
"xml_string",
"is",
"None",
":",
"return",
"None",
"assert",
"isinstance",
"(",
"xml_string",
",",
"str",
")",
"xml_tree",
"=",
"ET",
".",
"XML",
"(",
"xml_string",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"parser",
"=",
"UTB",
"(",
")",
")",
"if",
"xml_tree",
"is",
"None",
":",
"return",
"None",
"coredata",
"=",
"xml_tree",
".",
"find",
"(",
"'article:coredata'",
",",
"elsevier_ns",
")",
"abstract",
"=",
"coredata",
".",
"find",
"(",
"'dc:description'",
",",
"elsevier_ns",
")",
"abs_text",
"=",
"abstract",
".",
"text",
"return",
"abs_text"
]
| Get the abstract text of an article from Elsevier given a doi. | [
"Get",
"the",
"abstract",
"text",
"of",
"an",
"article",
"from",
"Elsevier",
"given",
"a",
"doi",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L195-L207 | partition: train

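A one-line usage sketch for get_abstract (placeholder DOI):

from indra.literature.elsevier_client import get_abstract

abstract = get_abstract('10.1016/j.example.2016.01.001')  # str or None
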
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: get_article | language: python

def get_article(doi, output_format='txt'):
    """Get the full body of an article from Elsevier.

    Parameters
    ----------
    doi : str
        The doi for the desired article.
    output_format : 'txt' or 'xml'
        The desired format for the output. Selecting 'txt' (default) strips
        all xml tags and joins the pieces of text in the main text, while
        'xml' simply takes the tag containing the body of the article and
        returns it as is. In the latter case, downstream code needs to be
        able to interpret Elsevier's XML format.

    Returns
    -------
    content : str
        Either text content or xml, as described above, for the given doi.
    """
    xml_string = download_article(doi)
    if output_format == 'txt' and xml_string is not None:
        text = extract_text(xml_string)
        return text
    return xml_string

"""Get the full body of an article from Elsevier.
Parameters
----------
doi : str
The doi for the desired article.
output_format : 'txt' or 'xml'
The desired format for the output. Selecting 'txt' (default) strips all
xml tags and joins the pieces of text in the main text, while 'xml'
simply takes the tag containing the body of the article and returns it
as is . In the latter case, downstream code needs to be able to
interpret Elsever's XML format.
Returns
-------
content : str
Either text content or xml, as described above, for the given doi.
"""
xml_string = download_article(doi)
if output_format == 'txt' and xml_string is not None:
text = extract_text(xml_string)
return text
return xml_string | [
"def",
"get_article",
"(",
"doi",
",",
"output_format",
"=",
"'txt'",
")",
":",
"xml_string",
"=",
"download_article",
"(",
"doi",
")",
"if",
"output_format",
"==",
"'txt'",
"and",
"xml_string",
"is",
"not",
"None",
":",
"text",
"=",
"extract_text",
"(",
"xml_string",
")",
"return",
"text",
"return",
"xml_string"
]
| Get the full body of an article from Elsevier.
Parameters
----------
doi : str
The doi for the desired article.
output_format : 'txt' or 'xml'
The desired format for the output. Selecting 'txt' (default) strips all
xml tags and joins the pieces of text in the main text, while 'xml'
simply takes the tag containing the body of the article and returns it
as is . In the latter case, downstream code needs to be able to
interpret Elsever's XML format.
Returns
-------
content : str
Either text content or xml, as described above, for the given doi. | [
"Get",
"the",
"full",
"body",
"of",
"an",
"article",
"from",
"Elsevier",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L210-L233 | partition: train

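A usage sketch for get_article showing both output formats (placeholder DOI):

from indra.literature.elsevier_client import get_article

text = get_article('10.1016/j.example.2016.01.001')                    # text
xml = get_article('10.1016/j.example.2016.01.001', output_format='xml')  # XML
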
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: extract_paragraphs | language: python

def extract_paragraphs(xml_string):
    """Get paragraphs from the body of the given Elsevier xml."""
    assert isinstance(xml_string, str)
    xml_tree = ET.XML(xml_string.encode('utf-8'), parser=UTB())
    full_text = xml_tree.find('article:originalText', elsevier_ns)
    if full_text is None:
        logger.info('Could not find full text element article:originalText')
        return None
    article_body = _get_article_body(full_text)
    if article_body:
        return article_body
    raw_text = _get_raw_text(full_text)
    if raw_text:
        return [raw_text]
    return None

"""Get paragraphs from the body of the given Elsevier xml."""
assert isinstance(xml_string, str)
xml_tree = ET.XML(xml_string.encode('utf-8'), parser=UTB())
full_text = xml_tree.find('article:originalText', elsevier_ns)
if full_text is None:
logger.info('Could not find full text element article:originalText')
return None
article_body = _get_article_body(full_text)
if article_body:
return article_body
raw_text = _get_raw_text(full_text)
if raw_text:
return [raw_text]
return None | [
"def",
"extract_paragraphs",
"(",
"xml_string",
")",
":",
"assert",
"isinstance",
"(",
"xml_string",
",",
"str",
")",
"xml_tree",
"=",
"ET",
".",
"XML",
"(",
"xml_string",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"parser",
"=",
"UTB",
"(",
")",
")",
"full_text",
"=",
"xml_tree",
".",
"find",
"(",
"'article:originalText'",
",",
"elsevier_ns",
")",
"if",
"full_text",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'Could not find full text element article:originalText'",
")",
"return",
"None",
"article_body",
"=",
"_get_article_body",
"(",
"full_text",
")",
"if",
"article_body",
":",
"return",
"article_body",
"raw_text",
"=",
"_get_raw_text",
"(",
"full_text",
")",
"if",
"raw_text",
":",
"return",
"[",
"raw_text",
"]",
"return",
"None"
]
| Get paragraphs from the body of the given Elsevier xml. | [
"Get",
"paragraphs",
"from",
"the",
"body",
"of",
"the",
"given",
"Elsevier",
"xml",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L245-L259 | partition: train

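A sketch chaining download and paragraph extraction (placeholder DOI):

from indra.literature.elsevier_client import download_article, \
    extract_paragraphs

xml = download_article('10.1016/j.example.2016.01.001', id_type='doi')
if xml is not None:
    paragraphs = extract_paragraphs(xml)  # list of str, or None
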
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: get_dois | language: python

def get_dois(query_str, count=100):
    """Search ScienceDirect through the API for articles.

    See http://api.elsevier.com/content/search/fields/scidir for
    constructing a query string to pass here. Example: 'abstract(BRAF) AND
    all("colorectal cancer")'
    """
    url = '%s/%s' % (elsevier_search_url, query_str)
    params = {'query': query_str,
              'count': count,
              'httpAccept': 'application/xml',
              'sort': '-coverdate',
              'field': 'doi'}
    res = requests.get(url, params)
    if not res.status_code == 200:
        return None
    tree = ET.XML(res.content, parser=UTB())
    doi_tags = tree.findall('atom:entry/prism:doi', elsevier_ns)
    dois = [dt.text for dt in doi_tags]
    return dois

"""Search ScienceDirect through the API for articles.
See http://api.elsevier.com/content/search/fields/scidir for constructing a
query string to pass here. Example: 'abstract(BRAF) AND all("colorectal
cancer")'
"""
url = '%s/%s' % (elsevier_search_url, query_str)
params = {'query': query_str,
'count': count,
'httpAccept': 'application/xml',
'sort': '-coverdate',
'field': 'doi'}
res = requests.get(url, params)
if not res.status_code == 200:
return None
tree = ET.XML(res.content, parser=UTB())
doi_tags = tree.findall('atom:entry/prism:doi', elsevier_ns)
dois = [dt.text for dt in doi_tags]
return dois | [
"def",
"get_dois",
"(",
"query_str",
",",
"count",
"=",
"100",
")",
":",
"url",
"=",
"'%s/%s'",
"%",
"(",
"elsevier_search_url",
",",
"query_str",
")",
"params",
"=",
"{",
"'query'",
":",
"query_str",
",",
"'count'",
":",
"count",
",",
"'httpAccept'",
":",
"'application/xml'",
",",
"'sort'",
":",
"'-coverdate'",
",",
"'field'",
":",
"'doi'",
"}",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
")",
"if",
"not",
"res",
".",
"status_code",
"==",
"200",
":",
"return",
"None",
"tree",
"=",
"ET",
".",
"XML",
"(",
"res",
".",
"content",
",",
"parser",
"=",
"UTB",
"(",
")",
")",
"doi_tags",
"=",
"tree",
".",
"findall",
"(",
"'atom:entry/prism:doi'",
",",
"elsevier_ns",
")",
"dois",
"=",
"[",
"dt",
".",
"text",
"for",
"dt",
"in",
"doi_tags",
"]",
"return",
"dois"
]
| Search ScienceDirect through the API for articles.
See http://api.elsevier.com/content/search/fields/scidir for constructing a
query string to pass here. Example: 'abstract(BRAF) AND all("colorectal
cancer")' | [
"Search",
"ScienceDirect",
"through",
"the",
"API",
"for",
"articles",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L264-L283 | partition: train

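A usage sketch for get_dois, reusing the query example from the docstring:

from indra.literature.elsevier_client import get_dois

dois = get_dois('abstract(BRAF) AND all("colorectal cancer")', count=10)
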
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: get_piis | language: python

def get_piis(query_str):
    """Search ScienceDirect through the API for articles and return PIIs.

    Note that ScienceDirect has a limitation in which a maximum of 6,000
    PIIs can be retrieved for a given search and therefore this call is
    internally broken up into multiple queries by a range of years and the
    results are combined.

    Parameters
    ----------
    query_str : str
        The query string to search with

    Returns
    -------
    piis : list[str]
        The list of PIIs identifying the papers returned by the search
    """
    dates = range(1960, datetime.datetime.now().year)
    all_piis = flatten([get_piis_for_date(query_str, date) for date in dates])
    return all_piis

"""Search ScienceDirect through the API for articles and return PIIs.
Note that ScienceDirect has a limitation in which a maximum of 6,000
PIIs can be retrieved for a given search and therefore this call is
internally broken up into multiple queries by a range of years and the
results are combined.
Parameters
----------
query_str : str
The query string to search with
Returns
-------
piis : list[str]
The list of PIIs identifying the papers returned by the search
"""
dates = range(1960, datetime.datetime.now().year)
all_piis = flatten([get_piis_for_date(query_str, date) for date in dates])
return all_piis | [
"def",
"get_piis",
"(",
"query_str",
")",
":",
"dates",
"=",
"range",
"(",
"1960",
",",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"year",
")",
"all_piis",
"=",
"flatten",
"(",
"[",
"get_piis_for_date",
"(",
"query_str",
",",
"date",
")",
"for",
"date",
"in",
"dates",
"]",
")",
"return",
"all_piis"
]
| Search ScienceDirect through the API for articles and return PIIs.
Note that ScienceDirect has a limitation in which a maximum of 6,000
PIIs can be retrieved for a given search and therefore this call is
internally broken up into multiple queries by a range of years and the
results are combined.
Parameters
----------
query_str : str
The query string to search with
Returns
-------
piis : list[str]
The list of PIIs identifying the papers returned by the search | [
"Search",
"ScienceDirect",
"through",
"the",
"API",
"for",
"articles",
"and",
"return",
"PIIs",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L286-L306 | partition: train

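A usage sketch for get_piis; this fans out into one ScienceDirect query per year since 1960 and concatenates the resulting PIIs:

from indra.literature.elsevier_client import get_piis

piis = get_piis('abstract(BRAF) AND all("colorectal cancer")')
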
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: get_piis_for_date | language: python

def get_piis_for_date(query_str, date):
    """Search ScienceDirect with a query string constrained to a given year.

    Parameters
    ----------
    query_str : str
        The query string to search with
    date : int or str
        The year to constrain the search to

    Returns
    -------
    piis : list[str]
        The list of PIIs identifying the papers returned by the search
    """
    count = 200
    params = {'query': query_str,
              'count': count,
              'start': 0,
              'sort': '-coverdate',
              'date': date,
              'field': 'pii'}
    all_piis = []
    while True:
        res = requests.get(elsevier_search_url, params, headers=ELSEVIER_KEYS)
        if not res.status_code == 200:
            logger.info('Got status code: %d' % res.status_code)
            break
        res_json = res.json()
        entries = res_json['search-results']['entry']
        logger.info(res_json['search-results']['opensearch:totalResults'])
        if entries == [{'@_fa': 'true', 'error': 'Result set was empty'}]:
            logger.info('Search result was empty')
            return []
        piis = [entry['pii'] for entry in entries]
        all_piis += piis
        # Get next batch
        links = res_json['search-results'].get('link', [])
        cont = False
        for link in links:
            if link.get('@ref') == 'next':
                logger.info('Found link to next batch of results.')
                params['start'] += count
                cont = True
                break
        if not cont:
            break
    return all_piis

"""Search ScienceDirect with a query string constrained to a given year.
Parameters
----------
query_str : str
The query string to search with
date : str
The year to constrain the search to
Returns
-------
piis : list[str]
The list of PIIs identifying the papers returned by the search
"""
count = 200
params = {'query': query_str,
'count': count,
'start': 0,
'sort': '-coverdate',
'date': date,
'field': 'pii'}
all_piis = []
while True:
res = requests.get(elsevier_search_url, params, headers=ELSEVIER_KEYS)
if not res.status_code == 200:
logger.info('Got status code: %d' % res.status_code)
break
res_json = res.json()
entries = res_json['search-results']['entry']
logger.info(res_json['search-results']['opensearch:totalResults'])
if entries == [{'@_fa': 'true', 'error': 'Result set was empty'}]:
logger.info('Search result was empty')
return []
piis = [entry['pii'] for entry in entries]
all_piis += piis
# Get next batch
links = res_json['search-results'].get('link', [])
cont = False
for link in links:
if link.get('@ref') == 'next':
logger.info('Found link to next batch of results.')
params['start'] += count
cont = True
break
if not cont:
break
return all_piis | [
"def",
"get_piis_for_date",
"(",
"query_str",
",",
"date",
")",
":",
"count",
"=",
"200",
"params",
"=",
"{",
"'query'",
":",
"query_str",
",",
"'count'",
":",
"count",
",",
"'start'",
":",
"0",
",",
"'sort'",
":",
"'-coverdate'",
",",
"'date'",
":",
"date",
",",
"'field'",
":",
"'pii'",
"}",
"all_piis",
"=",
"[",
"]",
"while",
"True",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"elsevier_search_url",
",",
"params",
",",
"headers",
"=",
"ELSEVIER_KEYS",
")",
"if",
"not",
"res",
".",
"status_code",
"==",
"200",
":",
"logger",
".",
"info",
"(",
"'Got status code: %d'",
"%",
"res",
".",
"status_code",
")",
"break",
"res_json",
"=",
"res",
".",
"json",
"(",
")",
"entries",
"=",
"res_json",
"[",
"'search-results'",
"]",
"[",
"'entry'",
"]",
"logger",
".",
"info",
"(",
"res_json",
"[",
"'search-results'",
"]",
"[",
"'opensearch:totalResults'",
"]",
")",
"if",
"entries",
"==",
"[",
"{",
"'@_fa'",
":",
"'true'",
",",
"'error'",
":",
"'Result set was empty'",
"}",
"]",
":",
"logger",
".",
"info",
"(",
"'Search result was empty'",
")",
"return",
"[",
"]",
"piis",
"=",
"[",
"entry",
"[",
"'pii'",
"]",
"for",
"entry",
"in",
"entries",
"]",
"all_piis",
"+=",
"piis",
"# Get next batch",
"links",
"=",
"res_json",
"[",
"'search-results'",
"]",
".",
"get",
"(",
"'link'",
",",
"[",
"]",
")",
"cont",
"=",
"False",
"for",
"link",
"in",
"links",
":",
"if",
"link",
".",
"get",
"(",
"'@ref'",
")",
"==",
"'next'",
":",
"logger",
".",
"info",
"(",
"'Found link to next batch of results.'",
")",
"params",
"[",
"'start'",
"]",
"+=",
"count",
"cont",
"=",
"True",
"break",
"if",
"not",
"cont",
":",
"break",
"return",
"all_piis"
]
| Search ScienceDirect with a query string constrained to a given year.
Parameters
----------
query_str : str
The query string to search with
date : str
The year to constrain the search to
Returns
-------
piis : list[str]
The list of PIIs identifying the papers returned by the search | [
"Search",
"ScienceDirect",
"with",
"a",
"query",
"string",
"constrained",
"to",
"a",
"given",
"year",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L311-L358 | partition: train

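A direct-call sketch for a single year; get_piis above is the usual entry point:

from indra.literature.elsevier_client import get_piis_for_date

piis = get_piis_for_date('abstract(BRAF) AND all("colorectal cancer")', 2015)
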
repo: sorgerlab/indra | path: indra/literature/elsevier_client.py | func_name: download_from_search | language: python

def download_from_search(query_str, folder, do_extract_text=True,
                         max_results=None):
    """Save raw text files based on a search for papers on ScienceDirect.

    This performs a search to get PIIs, downloads the XML corresponding to
    the PII, extracts the raw text and then saves the text into a file
    in the designated folder.

    Parameters
    ----------
    query_str : str
        The query string to search with
    folder : str
        The local path to an existing folder in which the text files
        will be dumped
    do_extract_text : bool
        Choose whether to extract text from the xml, or simply save the raw
        xml files. Default is True, so text is extracted.
    max_results : int or None
        Default is None. If specified, limit the number of results to the
        given maximum.
    """
    piis = get_piis(query_str)
    for pii in piis[:max_results]:
        if os.path.exists(os.path.join(folder, '%s.txt' % pii)):
            continue
        logger.info('Downloading %s' % pii)
        xml = download_article(pii, 'pii')
        sleep(1)
        if do_extract_text:
            txt = extract_text(xml)
            if not txt:
                continue
            with open(os.path.join(folder, '%s.txt' % pii), 'wb') as fh:
                fh.write(txt.encode('utf-8'))
        else:
            with open(os.path.join(folder, '%s.xml' % pii), 'wb') as fh:
                fh.write(xml.encode('utf-8'))
    return

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L361-L400 | partition: train

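A usage sketch for download_from_search; the folder must already exist, and each downloaded text is saved as <pii>.txt:

from indra.literature.elsevier_client import download_from_search

download_from_search('abstract(BRAF) AND all("colorectal cancer")',
                     'elsevier_texts', max_results=5)
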
repo: sorgerlab/indra | path: indra/sources/cwms/rdf_processor.py | func_name: CWMSRDFProcessor.extract_statement_from_query_result | language: python

def extract_statement_from_query_result(self, res):
    """Adds a statement based on one element of an rdflib SPARQL query.

    Parameters
    ----------
    res : rdflib.query.ResultRow
        Element of rdflib SPARQL query result
    """
    agent_start, agent_end, affected_start, affected_end = res
    # Convert from rdflib literals to python integers so we can use
    # them to index strings
    agent_start = int(agent_start)
    agent_end = int(agent_end)
    affected_start = int(affected_start)
    affected_end = int(affected_end)
    # Find the text corresponding to these indices
    agent = self.text[agent_start:agent_end]
    affected = self.text[affected_start:affected_end]
    # Strip off surrounding whitespace
    agent = agent.lstrip().rstrip()
    affected = affected.lstrip().rstrip()
    # Make an Agent object for both the subject and the object
    subj = Agent(agent, db_refs={'TEXT': agent})
    obj = Agent(affected, db_refs={'TEXT': affected})
    statement = Influence(subj=subj, obj=obj)
    # Add the statement to the list of statements
    self.statements.append(statement)

"""Adds a statement based on one element of a rdflib SPARQL query.
Parameters
----------
res: rdflib.query.ResultRow
Element of rdflib SPARQL query result
"""
agent_start, agent_end, affected_start, affected_end = res
# Convert from rdflib literals to python integers so we can use
# them to index strings
agent_start = int(agent_start)
agent_end = int(agent_end)
affected_start = int(affected_start)
affected_end = int(affected_end)
# Find the text corresponding to these indices
agent = self.text[agent_start:agent_end]
affected = self.text[affected_start:affected_end]
# Strip off surrounding whitespace
agent = agent.lstrip().rstrip()
affected = affected.lstrip().rstrip()
# Make an Agent object for both the subject and the object
subj = Agent(agent, db_refs={'TEXT': agent})
obj = Agent(affected, db_refs={'TEXT': affected})
statement = Influence(subj=subj, obj=obj)
# Add the statement to the list of statements
self.statements.append(statement) | [
"def",
"extract_statement_from_query_result",
"(",
"self",
",",
"res",
")",
":",
"agent_start",
",",
"agent_end",
",",
"affected_start",
",",
"affected_end",
"=",
"res",
"# Convert from rdflib literals to python integers so we can use",
"# them to index strings",
"agent_start",
"=",
"int",
"(",
"agent_start",
")",
"agent_end",
"=",
"int",
"(",
"agent_end",
")",
"affected_start",
"=",
"int",
"(",
"affected_start",
")",
"affected_end",
"=",
"int",
"(",
"affected_end",
")",
"# Find the text corresponding to these indices",
"agent",
"=",
"self",
".",
"text",
"[",
"agent_start",
":",
"agent_end",
"]",
"affected",
"=",
"self",
".",
"text",
"[",
"affected_start",
":",
"affected_end",
"]",
"# Strip off surrounding whitespace",
"agent",
"=",
"agent",
".",
"lstrip",
"(",
")",
".",
"rstrip",
"(",
")",
"affected",
"=",
"affected",
".",
"lstrip",
"(",
")",
".",
"rstrip",
"(",
")",
"# Make an Agent object for both the subject and the object",
"subj",
"=",
"Agent",
"(",
"agent",
",",
"db_refs",
"=",
"{",
"'TEXT'",
":",
"agent",
"}",
")",
"obj",
"=",
"Agent",
"(",
"affected",
",",
"db_refs",
"=",
"{",
"'TEXT'",
":",
"affected",
"}",
")",
"statement",
"=",
"Influence",
"(",
"subj",
"=",
"subj",
",",
"obj",
"=",
"obj",
")",
"# Add the statement to the list of statements",
"self",
".",
"statements",
".",
"append",
"(",
"statement",
")"
]
| Adds a statement based on one element of a rdflib SPARQL query.
Parameters
----------
res: rdflib.query.ResultRow
Element of rdflib SPARQL query result | [
"Adds",
"a",
"statement",
"based",
"on",
"one",
"element",
"of",
"a",
"rdflib",
"SPARQL",
"query",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/cwms/rdf_processor.py#L45-L77 | partition: train

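The heart of this method is mapping character offsets from the SPARQL result back into the source text; a self-contained illustration with made-up offsets:

text = "Rainfall increases crop yields."
agent_start, agent_end = 0, 8           # hypothetical query-result offsets
affected_start, affected_end = 19, 30
agent = text[agent_start:agent_end].strip()            # 'Rainfall'
affected = text[affected_start:affected_end].strip()   # 'crop yields'
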
repo: sorgerlab/indra | path: indra/sources/cwms/rdf_processor.py | func_name: CWMSRDFProcessor.extract_statements | language: python

def extract_statements(self):
    """Extracts INDRA statements from the RDF graph via SPARQL queries."""
    # Look for events that have an AGENT and an AFFECTED, and get the
    # start and ending text indices for each.
    query = prefixes + """
        SELECT
            ?agent_start
            ?agent_end
            ?affected_start
            ?affected_end
        WHERE {
            ?rel role:AGENT ?agent .
            ?rel role:AFFECTED ?affected .
            ?agent lf:start ?agent_start .
            ?agent lf:end ?agent_end .
            ?affected lf:start ?affected_start .
            ?affected lf:end ?affected_end .
            }
        """
    results = self.graph.query(query)
    for res in results:
        # Make a statement for each query match
        self.extract_statement_from_query_result(res)

    # Look for events that have an AGENT and a RESULT, and get the start
    # and ending text indices for each.
    query = query.replace('role:AFFECTED', 'role:RESULT')
    results = self.graph.query(query)
    for res in results:
        # Make a statement for each query match
        self.extract_statement_from_query_result(res)

"""Extracts INDRA statements from the RDF graph via SPARQL queries.
"""
# Look for events that have an AGENT and an AFFECTED, and get the
# start and ending text indices for each.
query = prefixes + """
SELECT
?agent_start
?agent_end
?affected_start
?affected_end
WHERE {
?rel role:AGENT ?agent .
?rel role:AFFECTED ?affected .
?agent lf:start ?agent_start .
?agent lf:end ?agent_end .
?affected lf:start ?affected_start .
?affected lf:end ?affected_end .
}
"""
results = self.graph.query(query)
for res in results:
# Make a statement for each query match
self.extract_statement_from_query_result(res)
# Look for events that have an AGENT and a RESULT, and get the start
# and ending text indices for each.
query = query.replace('role:AFFECTED', 'role:RESULT')
results = self.graph.query(query)
for res in results:
# Make a statement for each query match
self.extract_statement_from_query_result(res) | [
"def",
"extract_statements",
"(",
"self",
")",
":",
"# Look for events that have an AGENT and an AFFECTED, and get the",
"# start and ending text indices for each.",
"query",
"=",
"prefixes",
"+",
"\"\"\"\n SELECT\n ?agent_start\n ?agent_end\n ?affected_start\n ?affected_end\n WHERE {\n ?rel role:AGENT ?agent .\n ?rel role:AFFECTED ?affected .\n ?agent lf:start ?agent_start .\n ?agent lf:end ?agent_end .\n ?affected lf:start ?affected_start .\n ?affected lf:end ?affected_end .\n }\n \"\"\"",
"results",
"=",
"self",
".",
"graph",
".",
"query",
"(",
"query",
")",
"for",
"res",
"in",
"results",
":",
"# Make a statement for each query match",
"self",
".",
"extract_statement_from_query_result",
"(",
"res",
")",
"# Look for events that have an AGENT and a RESULT, and get the start",
"# and ending text indices for each.",
"query",
"=",
"query",
".",
"replace",
"(",
"'role:AFFECTED'",
",",
"'role:RESULT'",
")",
"results",
"=",
"self",
".",
"graph",
".",
"query",
"(",
"query",
")",
"for",
"res",
"in",
"results",
":",
"# Make a statement for each query match",
"self",
".",
"extract_statement_from_query_result",
"(",
"res",
")"
]
| Extracts INDRA statements from the RDF graph via SPARQL queries. | [
"Extracts",
"INDRA",
"statements",
"from",
"the",
"RDF",
"graph",
"via",
"SPARQL",
"queries",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/cwms/rdf_processor.py#L79-L111 | partition: train

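A minimal driving sketch; the constructor arguments shown here are assumptions (the constructor is not included in this excerpt), and extraction populates processor.statements with Influence statements:

from indra.sources.cwms.rdf_processor import CWMSRDFProcessor

text = "Rainfall increases crop yields."  # source sentence
rdf_filename = "cwms_output.rdf"          # hypothetical CWMS RDF dump
# Constructor signature assumed from context; see the module for details.
processor = CWMSRDFProcessor(text, rdf_filename)
processor.extract_statements()
print(processor.statements)
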
repo: sorgerlab/indra | path: indra/sources/signor/processor.py | func_name: SignorProcessor._recursively_lookup_complex | language: python

def _recursively_lookup_complex(self, complex_id):
    """Looks up the constituents of a complex. If any constituent is
    itself a complex, recursively expands until all constituents are
    not complexes."""
    assert complex_id in self.complex_map
    expanded_agent_strings = []
    expand_these_next = [complex_id]
    while len(expand_these_next) > 0:
        # Pop next element
        c = expand_these_next[0]
        expand_these_next = expand_these_next[1:]
        # If a complex, add expanding it to the end of the queue
        # If an agent string, add it to the agent string list immediately
        assert c in self.complex_map
        for s in self.complex_map[c]:
            if s in self.complex_map:
                expand_these_next.append(s)
            else:
                expanded_agent_strings.append(s)
    return expanded_agent_strings

"""Looks up the constitutents of a complex. If any constituent is
itself a complex, recursively expands until all constituents are
not complexes."""
assert complex_id in self.complex_map
expanded_agent_strings = []
expand_these_next = [complex_id]
while len(expand_these_next) > 0:
# Pop next element
c = expand_these_next[0]
expand_these_next = expand_these_next[1:]
# If a complex, add expanding it to the end of the queue
# If an agent string, add it to the agent string list immediately
assert c in self.complex_map
for s in self.complex_map[c]:
if s in self.complex_map:
expand_these_next.append(s)
else:
expanded_agent_strings.append(s)
return expanded_agent_strings | [
"def",
"_recursively_lookup_complex",
"(",
"self",
",",
"complex_id",
")",
":",
"assert",
"complex_id",
"in",
"self",
".",
"complex_map",
"expanded_agent_strings",
"=",
"[",
"]",
"expand_these_next",
"=",
"[",
"complex_id",
"]",
"while",
"len",
"(",
"expand_these_next",
")",
">",
"0",
":",
"# Pop next element",
"c",
"=",
"expand_these_next",
"[",
"0",
"]",
"expand_these_next",
"=",
"expand_these_next",
"[",
"1",
":",
"]",
"# If a complex, add expanding it to the end of the queue",
"# If an agent string, add it to the agent string list immediately",
"assert",
"c",
"in",
"self",
".",
"complex_map",
"for",
"s",
"in",
"self",
".",
"complex_map",
"[",
"c",
"]",
":",
"if",
"s",
"in",
"self",
".",
"complex_map",
":",
"expand_these_next",
".",
"append",
"(",
"s",
")",
"else",
":",
"expanded_agent_strings",
".",
"append",
"(",
"s",
")",
"return",
"expanded_agent_strings"
]
| Looks up the constitutents of a complex. If any constituent is
itself a complex, recursively expands until all constituents are
not complexes. | [
"Looks",
"up",
"the",
"constitutents",
"of",
"a",
"complex",
".",
"If",
"any",
"constituent",
"is",
"itself",
"a",
"complex",
"recursively",
"expands",
"until",
"all",
"constituents",
"are",
"not",
"complexes",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/signor/processor.py#L223-L244 | partition: train

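The breadth-first expansion can be illustrated standalone with a toy complex map (all names made up):

complex_map = {
    'C1': ['C2', 'P1'],  # complex C1 contains complex C2 and protein P1
    'C2': ['P2', 'P3'],
}

def expand(complex_id):
    expanded, queue = [], [complex_id]
    while queue:
        c, queue = queue[0], queue[1:]
        for s in complex_map[c]:
            (queue if s in complex_map else expanded).append(s)
    return expanded

print(expand('C1'))  # ['P1', 'P2', 'P3']
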
repo: sorgerlab/indra | path: indra/sources/signor/processor.py | func_name: SignorProcessor._get_complex_agents | language: python

def _get_complex_agents(self, complex_id):
    """Returns a list of agents corresponding to each of the constituents
    in a SIGNOR complex."""
    agents = []
    components = self._recursively_lookup_complex(complex_id)
    for c in components:
        db_refs = {}
        name = uniprot_client.get_gene_name(c)
        if name is None:
            db_refs['SIGNOR'] = c
        else:
            db_refs['UP'] = c
            hgnc_id = hgnc_client.get_hgnc_id(name)
            if hgnc_id:
                db_refs['HGNC'] = hgnc_id
        famplex_key = ('SIGNOR', c)
        if famplex_key in famplex_map:
            db_refs['FPLX'] = famplex_map[famplex_key]
            if not name:
                # Set agent name to the FamPlex name if the UniProt name
                # is not available
                name = db_refs['FPLX']
        elif not name:
            # We have neither a UniProt nor a FamPlex grounding
            logger.info('Have neither a Uniprot nor Famplex grounding '
                        'for ' + c)
        if not name:
            # Set the agent name to the SIGNOR name if neither the
            # UniProt nor FamPlex names are available
            name = db_refs['SIGNOR']
        assert name is not None
        agents.append(Agent(name, db_refs=db_refs))
    return agents

"""Returns a list of agents corresponding to each of the constituents
in a SIGNOR complex."""
agents = []
components = self._recursively_lookup_complex(complex_id)
for c in components:
db_refs = {}
name = uniprot_client.get_gene_name(c)
if name is None:
db_refs['SIGNOR'] = c
else:
db_refs['UP'] = c
hgnc_id = hgnc_client.get_hgnc_id(name)
if hgnc_id:
db_refs['HGNC'] = hgnc_id
famplex_key = ('SIGNOR', c)
if famplex_key in famplex_map:
db_refs['FPLX'] = famplex_map[famplex_key]
if not name:
name = db_refs['FPLX'] # Set agent name to Famplex name if
# the Uniprot name is not available
elif not name:
# We neither have a Uniprot nor Famplex grounding
logger.info('Have neither a Uniprot nor Famplex grounding ' + \
'for ' + c)
if not name:
name = db_refs['SIGNOR'] # Set the agent name to the
# Signor name if neither the
# Uniprot nor Famplex names are
# available
assert(name is not None)
agents.append(Agent(name, db_refs=db_refs))
return agents | [
"def",
"_get_complex_agents",
"(",
"self",
",",
"complex_id",
")",
":",
"agents",
"=",
"[",
"]",
"components",
"=",
"self",
".",
"_recursively_lookup_complex",
"(",
"complex_id",
")",
"for",
"c",
"in",
"components",
":",
"db_refs",
"=",
"{",
"}",
"name",
"=",
"uniprot_client",
".",
"get_gene_name",
"(",
"c",
")",
"if",
"name",
"is",
"None",
":",
"db_refs",
"[",
"'SIGNOR'",
"]",
"=",
"c",
"else",
":",
"db_refs",
"[",
"'UP'",
"]",
"=",
"c",
"hgnc_id",
"=",
"hgnc_client",
".",
"get_hgnc_id",
"(",
"name",
")",
"if",
"hgnc_id",
":",
"db_refs",
"[",
"'HGNC'",
"]",
"=",
"hgnc_id",
"famplex_key",
"=",
"(",
"'SIGNOR'",
",",
"c",
")",
"if",
"famplex_key",
"in",
"famplex_map",
":",
"db_refs",
"[",
"'FPLX'",
"]",
"=",
"famplex_map",
"[",
"famplex_key",
"]",
"if",
"not",
"name",
":",
"name",
"=",
"db_refs",
"[",
"'FPLX'",
"]",
"# Set agent name to Famplex name if",
"# the Uniprot name is not available",
"elif",
"not",
"name",
":",
"# We neither have a Uniprot nor Famplex grounding",
"logger",
".",
"info",
"(",
"'Have neither a Uniprot nor Famplex grounding '",
"+",
"'for '",
"+",
"c",
")",
"if",
"not",
"name",
":",
"name",
"=",
"db_refs",
"[",
"'SIGNOR'",
"]",
"# Set the agent name to the",
"# Signor name if neither the",
"# Uniprot nor Famplex names are",
"# available",
"assert",
"(",
"name",
"is",
"not",
"None",
")",
"agents",
".",
"append",
"(",
"Agent",
"(",
"name",
",",
"db_refs",
"=",
"db_refs",
")",
")",
"return",
"agents"
]
| Returns a list of agents corresponding to each of the constituents
in a SIGNOR complex. | [
"Returns",
"a",
"list",
"of",
"agents",
"corresponding",
"to",
"each",
"of",
"the",
"constituents",
"in",
"a",
"SIGNOR",
"complex",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/signor/processor.py#L246-L280 | partition: train

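Illustrative output shapes (BRAF is shown with its real UniProt and HGNC identifiers; the SIGNOR fallback id is made up):

from indra.statements import Agent

grounded = Agent('BRAF', db_refs={'UP': 'P15056', 'HGNC': '1097'})
fallback = Agent('SIGNOR-C1', db_refs={'SIGNOR': 'SIGNOR-C1'})
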
repo: sorgerlab/indra | path: indra/statements/io.py | func_name: stmts_from_json | language: python

def stmts_from_json(json_in, on_missing_support='handle'):
    """Get a list of Statements from Statement jsons.

    In the case of pre-assembled Statements which have `supports` and
    `supported_by` lists, the uuids will be replaced with references to
    Statement objects from the json, where possible. The method of handling
    missing support is controlled by the `on_missing_support` keyword
    argument.

    Parameters
    ----------
    json_in : iterable[dict]
        A json list containing json dict representations of INDRA
        Statements, as produced by the `to_json` methods of subclasses of
        Statement, or equivalently by `stmts_to_json`.
    on_missing_support : Optional[str]
        Handles the behavior when a uuid reference in the `supports` or
        `supported_by` attribute cannot be resolved. This happens because
        uuids can only be linked to Statements contained in the `json_in`
        list, and some may be missing if only some of all the Statements
        from pre-assembly are contained in the list.

        Options:

        - *'handle'* : (default) convert unresolved uuids into `Unresolved`
          Statement objects.
        - *'ignore'* : Simply omit any uuids that cannot be linked to any
          Statements in the list.
        - *'error'* : Raise an error upon hitting an un-linkable uuid.

    Returns
    -------
    stmts : list[:py:class:`Statement`]
        A list of INDRA Statements.
    """
    stmts = []
    uuid_dict = {}
    for json_stmt in json_in:
        try:
            st = Statement._from_json(json_stmt)
        except Exception as e:
            logger.warning("Error creating statement: %s" % e)
            continue
        stmts.append(st)
        uuid_dict[st.uuid] = st
    for st in stmts:
        _promote_support(st.supports, uuid_dict, on_missing_support)
        _promote_support(st.supported_by, uuid_dict, on_missing_support)
    return stmts

"""Get a list of Statements from Statement jsons.
In the case of pre-assembled Statements which have `supports` and
`supported_by` lists, the uuids will be replaced with references to
Statement objects from the json, where possible. The method of handling
missing support is controled by the `on_missing_support` key-word argument.
Parameters
----------
json_in : iterable[dict]
A json list containing json dict representations of INDRA Statements,
as produced by the `to_json` methods of subclasses of Statement, or
equivalently by `stmts_to_json`.
on_missing_support : Optional[str]
Handles the behavior when a uuid reference in `supports` or
`supported_by` attribute cannot be resolved. This happens because uuids
can only be linked to Statements contained in the `json_in` list, and
some may be missing if only some of all the Statements from pre-
assembly are contained in the list.
Options:
- *'handle'* : (default) convert unresolved uuids into `Unresolved`
Statement objects.
- *'ignore'* : Simply omit any uuids that cannot be linked to any
Statements in the list.
- *'error'* : Raise an error upon hitting an un-linkable uuid.
Returns
-------
stmts : list[:py:class:`Statement`]
A list of INDRA Statements.
"""
stmts = []
uuid_dict = {}
for json_stmt in json_in:
try:
st = Statement._from_json(json_stmt)
except Exception as e:
logger.warning("Error creating statement: %s" % e)
continue
stmts.append(st)
uuid_dict[st.uuid] = st
for st in stmts:
_promote_support(st.supports, uuid_dict, on_missing_support)
_promote_support(st.supported_by, uuid_dict, on_missing_support)
return stmts | [
"def",
"stmts_from_json",
"(",
"json_in",
",",
"on_missing_support",
"=",
"'handle'",
")",
":",
"stmts",
"=",
"[",
"]",
"uuid_dict",
"=",
"{",
"}",
"for",
"json_stmt",
"in",
"json_in",
":",
"try",
":",
"st",
"=",
"Statement",
".",
"_from_json",
"(",
"json_stmt",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"\"Error creating statement: %s\"",
"%",
"e",
")",
"continue",
"stmts",
".",
"append",
"(",
"st",
")",
"uuid_dict",
"[",
"st",
".",
"uuid",
"]",
"=",
"st",
"for",
"st",
"in",
"stmts",
":",
"_promote_support",
"(",
"st",
".",
"supports",
",",
"uuid_dict",
",",
"on_missing_support",
")",
"_promote_support",
"(",
"st",
".",
"supported_by",
",",
"uuid_dict",
",",
"on_missing_support",
")",
"return",
"stmts"
]
| Get a list of Statements from Statement jsons.
In the case of pre-assembled Statements which have `supports` and
`supported_by` lists, the uuids will be replaced with references to
Statement objects from the json, where possible. The method of handling
missing support is controled by the `on_missing_support` key-word argument.
Parameters
----------
json_in : iterable[dict]
A json list containing json dict representations of INDRA Statements,
as produced by the `to_json` methods of subclasses of Statement, or
equivalently by `stmts_to_json`.
on_missing_support : Optional[str]
Handles the behavior when a uuid reference in `supports` or
`supported_by` attribute cannot be resolved. This happens because uuids
can only be linked to Statements contained in the `json_in` list, and
some may be missing if only some of all the Statements from pre-
assembly are contained in the list.
Options:
- *'handle'* : (default) convert unresolved uuids into `Unresolved`
Statement objects.
- *'ignore'* : Simply omit any uuids that cannot be linked to any
Statements in the list.
- *'error'* : Raise an error upon hitting an un-linkable uuid.
Returns
-------
stmts : list[:py:class:`Statement`]
A list of INDRA Statements. | [
"Get",
"a",
"list",
"of",
"Statements",
"from",
"Statement",
"jsons",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/io.py#L16-L64 | partition: train

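A round-trip sketch using the serialization helpers from this module (assuming they are re-exported from indra.statements, as is the package convention):

from indra.statements import Activation, Agent, stmts_to_json, \
    stmts_from_json

stmts = [Activation(Agent('MAP2K1'), Agent('MAPK1'))]
stmts2 = stmts_from_json(stmts_to_json(stmts))
assert stmts2[0].equals(stmts[0])
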
repo: sorgerlab/indra | path: indra/statements/io.py | func_name: stmts_to_json_file | language: python

def stmts_to_json_file(stmts, fname):
    """Serialize a list of INDRA Statements into a JSON file.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        The list of INDRA Statements to serialize into the JSON file.
    fname : str
        Path to the JSON file to serialize Statements into.
    """
    with open(fname, 'w') as fh:
        json.dump(stmts_to_json(stmts), fh, indent=1)

"""Serialize a list of INDRA Statements into a JSON file.
Parameters
----------
stmts : list[indra.statement.Statements]
The list of INDRA Statements to serialize into the JSON file.
fname : str
Path to the JSON file to serialize Statements into.
"""
with open(fname, 'w') as fh:
json.dump(stmts_to_json(stmts), fh, indent=1) | [
"def",
"stmts_to_json_file",
"(",
"stmts",
",",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"fh",
":",
"json",
".",
"dump",
"(",
"stmts_to_json",
"(",
"stmts",
")",
",",
"fh",
",",
"indent",
"=",
"1",
")"
]
| Serialize a list of INDRA Statements into a JSON file.
Parameters
----------
stmts : list[indra.statement.Statements]
The list of INDRA Statements to serialize into the JSON file.
fname : str
Path to the JSON file to serialize Statements into. | [
"Serialize",
"a",
"list",
"of",
"INDRA",
"Statements",
"into",
"a",
"JSON",
"file",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/io.py#L84-L95 | partition: train

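A file-output sketch (same import assumption as above):

from indra.statements import Phosphorylation, Agent, stmts_to_json_file

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
stmts_to_json_file(stmts, 'stmts.json')  # writes a JSON list with indent=1
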
repo: sorgerlab/indra | path: indra/statements/io.py | func_name: stmts_to_json | language: python

def stmts_to_json(stmts_in, use_sbo=False):
    """Return the JSON-serialized form of one or more INDRA Statements.

    Parameters
    ----------
    stmts_in : Statement or list[Statement]
        A Statement or list of Statement objects to serialize into JSON.
    use_sbo : Optional[bool]
        If True, SBO annotations are added to each applicable element of
        the JSON. Default: False

    Returns
    -------
    json_dict : dict or list[dict]
        JSON-serialized INDRA Statements: a single dict if a single
        Statement was given, otherwise a list of dicts.
    """
    if not isinstance(stmts_in, list):
        json_dict = stmts_in.to_json(use_sbo=use_sbo)
        return json_dict
    else:
        json_dict = [st.to_json(use_sbo=use_sbo) for st in stmts_in]
        return json_dict

"""Return the JSON-serialized form of one or more INDRA Statements.
Parameters
----------
stmts_in : Statement or list[Statement]
A Statement or list of Statement objects to serialize into JSON.
use_sbo : Optional[bool]
If True, SBO annotations are added to each applicable element of the
JSON. Default: False
Returns
-------
json_dict : dict
JSON-serialized INDRA Statements.
"""
if not isinstance(stmts_in, list):
json_dict = stmts_in.to_json(use_sbo=use_sbo)
return json_dict
else:
json_dict = [st.to_json(use_sbo=use_sbo) for st in stmts_in]
return json_dict | [
"def",
"stmts_to_json",
"(",
"stmts_in",
",",
"use_sbo",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"stmts_in",
",",
"list",
")",
":",
"json_dict",
"=",
"stmts_in",
".",
"to_json",
"(",
"use_sbo",
"=",
"use_sbo",
")",
"return",
"json_dict",
"else",
":",
"json_dict",
"=",
"[",
"st",
".",
"to_json",
"(",
"use_sbo",
"=",
"use_sbo",
")",
"for",
"st",
"in",
"stmts_in",
"]",
"return",
"json_dict"
]
| Return the JSON-serialized form of one or more INDRA Statements.
Parameters
----------
stmts_in : Statement or list[Statement]
A Statement or list of Statement objects to serialize into JSON.
use_sbo : Optional[bool]
If True, SBO annotations are added to each applicable element of the
JSON. Default: False
Returns
-------
json_dict : dict
JSON-serialized INDRA Statements. | [
"Return",
"the",
"JSON",
"-",
"serialized",
"form",
"of",
"one",
"or",
"more",
"INDRA",
"Statements",
"."
]
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/io.py#L98-L119 | partition: train

repo: sorgerlab/indra | path: indra/statements/io.py | func_name: _promote_support | language: python

def _promote_support(sup_list, uuid_dict, on_missing='handle'):
    """Promote the list of support-related uuids to Statements, if possible."""
    valid_handling_choices = ['handle', 'error', 'ignore']
    if on_missing not in valid_handling_choices:
        raise InputError('Invalid option for `on_missing_support`: \'%s\'\n'
                         'Choices are: %s.'
                         % (on_missing, str(valid_handling_choices)))
    for idx, uuid in enumerate(sup_list):
        if uuid in uuid_dict.keys():
            sup_list[idx] = uuid_dict[uuid]
        elif on_missing == 'handle':
            sup_list[idx] = Unresolved(uuid)
        elif on_missing == 'ignore':
            # Caveat: removing from sup_list while enumerating it can skip
            # the following element when multiple uuids are unresolved.
            sup_list.remove(uuid)
        elif on_missing == 'error':
            raise UnresolvedUuidError("Uuid %s not found in stmt jsons."
                                      % uuid)
    return

"""Promote the list of support-related uuids to Statements, if possible."""
valid_handling_choices = ['handle', 'error', 'ignore']
if on_missing not in valid_handling_choices:
raise InputError('Invalid option for `on_missing_support`: \'%s\'\n'
'Choices are: %s.'
% (on_missing, str(valid_handling_choices)))
for idx, uuid in enumerate(sup_list):
if uuid in uuid_dict.keys():
sup_list[idx] = uuid_dict[uuid]
elif on_missing == 'handle':
sup_list[idx] = Unresolved(uuid)
elif on_missing == 'ignore':
sup_list.remove(uuid)
elif on_missing == 'error':
raise UnresolvedUuidError("Uuid %s not found in stmt jsons."
% uuid)
return | [
"def",
"_promote_support",
"(",
"sup_list",
",",
"uuid_dict",
",",
"on_missing",
"=",
"'handle'",
")",
":",
"valid_handling_choices",
"=",
"[",
"'handle'",
",",
"'error'",
",",
"'ignore'",
"]",
"if",
"on_missing",
"not",
"in",
"valid_handling_choices",
":",
"raise",
"InputError",
"(",
"'Invalid option for `on_missing_support`: \\'%s\\'\\n'",
"'Choices are: %s.'",
"%",
"(",
"on_missing",
",",
"str",
"(",
"valid_handling_choices",
")",
")",
")",
"for",
"idx",
",",
"uuid",
"in",
"enumerate",
"(",
"sup_list",
")",
":",
"if",
"uuid",
"in",
"uuid_dict",
".",
"keys",
"(",
")",
":",
"sup_list",
"[",
"idx",
"]",
"=",
"uuid_dict",
"[",
"uuid",
"]",
"elif",
"on_missing",
"==",
"'handle'",
":",
"sup_list",
"[",
"idx",
"]",
"=",
"Unresolved",
"(",
"uuid",
")",
"elif",
"on_missing",
"==",
"'ignore'",
":",
"sup_list",
".",
"remove",
"(",
"uuid",
")",
"elif",
"on_missing",
"==",
"'error'",
":",
"raise",
"UnresolvedUuidError",
"(",
"\"Uuid %s not found in stmt jsons.\"",
"%",
"uuid",
")",
"return"
]
| Promote the list of support-related uuids to Statements, if possible. | [
"Promote",
"the",
"list",
"of",
"support",
"-",
"related",
"uuids",
"to",
"Statements",
"if",
"possible",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/io.py#L122-L139 | train |
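The uuid-promotion logic can be exercised standalone; FakeStatement and FakeUnresolved below are made-up stand-ins so the sketch runs without INDRA:

class FakeStatement(object):
    def __init__(self, uuid):
        self.uuid = uuid

class FakeUnresolved(FakeStatement):
    pass

def promote(sup_list, uuid_dict, on_missing='handle'):
    # Mirrors _promote_support: resolve known uuids, wrap or drop the rest.
    resolved = []
    for uuid in sup_list:
        if uuid in uuid_dict:
            resolved.append(uuid_dict[uuid])
        elif on_missing == 'handle':
            resolved.append(FakeUnresolved(uuid))
        elif on_missing == 'error':
            raise ValueError('Uuid %s not found' % uuid)
        # on_missing == 'ignore' simply drops the uuid
    sup_list[:] = resolved

sups = ['u1', 'u2']
promote(sups, {'u1': FakeStatement('u1')})
assert isinstance(sups[0], FakeStatement)
assert isinstance(sups[1], FakeUnresolved)  # 'u2' was not resolvable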
sorgerlab/indra | indra/statements/io.py | draw_stmt_graph | def draw_stmt_graph(stmts):
"""Render the attributes of a list of Statements as directed graphs.
The layout works well for a single Statement or a few Statements at a time.
This function displays the plot of the graph using plt.show().
Parameters
----------
stmts : list[indra.statements.Statement]
A list of one or more INDRA Statements whose attribute graph should
be drawn.
"""
import networkx
try:
import matplotlib.pyplot as plt
except Exception:
logger.error('Could not import matplotlib, not drawing graph.')
return
    try: # Check that pygraphviz, which networkx needs for graphviz layout, is available.
import pygraphviz
except Exception:
logger.error('Could not import pygraphviz, not drawing graph.')
return
import numpy
g = networkx.compose_all([stmt.to_graph() for stmt in stmts])
plt.figure()
plt.ion()
g.graph['graph'] = {'rankdir': 'LR'}
pos = networkx.drawing.nx_agraph.graphviz_layout(g, prog='dot')
g = g.to_undirected()
# Draw nodes
options = {
'marker': 'o',
's': 200,
'c': [0.85, 0.85, 1],
'facecolor': '0.5',
'lw': 0,
}
ax = plt.gca()
nodelist = list(g)
xy = numpy.asarray([pos[v] for v in nodelist])
node_collection = ax.scatter(xy[:, 0], xy[:, 1], **options)
node_collection.set_zorder(2)
# Draw edges
networkx.draw_networkx_edges(g, pos, arrows=False, edge_color='0.5')
# Draw labels
edge_labels = {(e[0], e[1]): e[2].get('label') for e in g.edges(data=True)}
networkx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels)
node_labels = {n[0]: n[1].get('label') for n in g.nodes(data=True)}
for key, label in node_labels.items():
if len(label) > 25:
parts = label.split(' ')
parts.insert(int(len(parts)/2), '\n')
label = ' '.join(parts)
node_labels[key] = label
networkx.draw_networkx_labels(g, pos, labels=node_labels)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show() | python | def draw_stmt_graph(stmts):
"""Render the attributes of a list of Statements as directed graphs.
The layout works well for a single Statement or a few Statements at a time.
This function displays the plot of the graph using plt.show().
Parameters
----------
stmts : list[indra.statements.Statement]
A list of one or more INDRA Statements whose attribute graph should
be drawn.
"""
import networkx
try:
import matplotlib.pyplot as plt
except Exception:
logger.error('Could not import matplotlib, not drawing graph.')
return
    try: # Check that pygraphviz, which networkx needs for graphviz layout, is available.
import pygraphviz
except Exception:
logger.error('Could not import pygraphviz, not drawing graph.')
return
import numpy
g = networkx.compose_all([stmt.to_graph() for stmt in stmts])
plt.figure()
plt.ion()
g.graph['graph'] = {'rankdir': 'LR'}
pos = networkx.drawing.nx_agraph.graphviz_layout(g, prog='dot')
g = g.to_undirected()
# Draw nodes
options = {
'marker': 'o',
's': 200,
'c': [0.85, 0.85, 1],
'facecolor': '0.5',
'lw': 0,
}
ax = plt.gca()
nodelist = list(g)
xy = numpy.asarray([pos[v] for v in nodelist])
node_collection = ax.scatter(xy[:, 0], xy[:, 1], **options)
node_collection.set_zorder(2)
# Draw edges
networkx.draw_networkx_edges(g, pos, arrows=False, edge_color='0.5')
# Draw labels
edge_labels = {(e[0], e[1]): e[2].get('label') for e in g.edges(data=True)}
networkx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels)
node_labels = {n[0]: n[1].get('label') for n in g.nodes(data=True)}
for key, label in node_labels.items():
if len(label) > 25:
parts = label.split(' ')
parts.insert(int(len(parts)/2), '\n')
label = ' '.join(parts)
node_labels[key] = label
networkx.draw_networkx_labels(g, pos, labels=node_labels)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show() | [
"def",
"draw_stmt_graph",
"(",
"stmts",
")",
":",
"import",
"networkx",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"'Could not import matplotlib, not drawing graph.'",
")",
"return",
"try",
":",
"# This checks whether networkx has this package to work with.",
"import",
"pygraphviz",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"'Could not import pygraphviz, not drawing graph.'",
")",
"return",
"import",
"numpy",
"g",
"=",
"networkx",
".",
"compose_all",
"(",
"[",
"stmt",
".",
"to_graph",
"(",
")",
"for",
"stmt",
"in",
"stmts",
"]",
")",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"ion",
"(",
")",
"g",
".",
"graph",
"[",
"'graph'",
"]",
"=",
"{",
"'rankdir'",
":",
"'LR'",
"}",
"pos",
"=",
"networkx",
".",
"drawing",
".",
"nx_agraph",
".",
"graphviz_layout",
"(",
"g",
",",
"prog",
"=",
"'dot'",
")",
"g",
"=",
"g",
".",
"to_undirected",
"(",
")",
"# Draw nodes",
"options",
"=",
"{",
"'marker'",
":",
"'o'",
",",
"'s'",
":",
"200",
",",
"'c'",
":",
"[",
"0.85",
",",
"0.85",
",",
"1",
"]",
",",
"'facecolor'",
":",
"'0.5'",
",",
"'lw'",
":",
"0",
",",
"}",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"nodelist",
"=",
"list",
"(",
"g",
")",
"xy",
"=",
"numpy",
".",
"asarray",
"(",
"[",
"pos",
"[",
"v",
"]",
"for",
"v",
"in",
"nodelist",
"]",
")",
"node_collection",
"=",
"ax",
".",
"scatter",
"(",
"xy",
"[",
":",
",",
"0",
"]",
",",
"xy",
"[",
":",
",",
"1",
"]",
",",
"*",
"*",
"options",
")",
"node_collection",
".",
"set_zorder",
"(",
"2",
")",
"# Draw edges",
"networkx",
".",
"draw_networkx_edges",
"(",
"g",
",",
"pos",
",",
"arrows",
"=",
"False",
",",
"edge_color",
"=",
"'0.5'",
")",
"# Draw labels",
"edge_labels",
"=",
"{",
"(",
"e",
"[",
"0",
"]",
",",
"e",
"[",
"1",
"]",
")",
":",
"e",
"[",
"2",
"]",
".",
"get",
"(",
"'label'",
")",
"for",
"e",
"in",
"g",
".",
"edges",
"(",
"data",
"=",
"True",
")",
"}",
"networkx",
".",
"draw_networkx_edge_labels",
"(",
"g",
",",
"pos",
",",
"edge_labels",
"=",
"edge_labels",
")",
"node_labels",
"=",
"{",
"n",
"[",
"0",
"]",
":",
"n",
"[",
"1",
"]",
".",
"get",
"(",
"'label'",
")",
"for",
"n",
"in",
"g",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
"}",
"for",
"key",
",",
"label",
"in",
"node_labels",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"label",
")",
">",
"25",
":",
"parts",
"=",
"label",
".",
"split",
"(",
"' '",
")",
"parts",
".",
"insert",
"(",
"int",
"(",
"len",
"(",
"parts",
")",
"/",
"2",
")",
",",
"'\\n'",
")",
"label",
"=",
"' '",
".",
"join",
"(",
"parts",
")",
"node_labels",
"[",
"key",
"]",
"=",
"label",
"networkx",
".",
"draw_networkx_labels",
"(",
"g",
",",
"pos",
",",
"labels",
"=",
"node_labels",
")",
"ax",
".",
"get_xaxis",
"(",
")",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"get_yaxis",
"(",
")",
".",
"set_visible",
"(",
"False",
")",
"plt",
".",
"show",
"(",
")"
]
| Render the attributes of a list of Statements as directed graphs.
The layout works well for a single Statement or a few Statements at a time.
This function displays the plot of the graph using plt.show().
Parameters
----------
stmts : list[indra.statements.Statement]
A list of one or more INDRA Statements whose attribute graph should
be drawn. | [
"Render",
"the",
"attributes",
"of",
"a",
"list",
"of",
"Statements",
"as",
"directed",
"graphs",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/io.py#L142-L201 | train |
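A usage sketch for draw_stmt_graph (assumes matplotlib and pygraphviz are installed; the statement is made up):

from indra.statements import Agent, Activation
from indra.statements.io import draw_stmt_graph

# Hypothetical statement: BRAF activates MAP2K1; this pops up a
# matplotlib window showing the Statement's attribute graph.
draw_stmt_graph([Activation(Agent('BRAF'), Agent('MAP2K1'))])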
sorgerlab/indra | indra/sources/sparser/processor.py | _fix_json_agents | def _fix_json_agents(ag_obj):
"""Fix the json representation of an agent."""
if isinstance(ag_obj, str):
logger.info("Fixing string agent: %s." % ag_obj)
ret = {'name': ag_obj, 'db_refs': {'TEXT': ag_obj}}
elif isinstance(ag_obj, list):
# Recursive for complexes and similar.
ret = [_fix_json_agents(ag) for ag in ag_obj]
elif isinstance(ag_obj, dict) and 'TEXT' in ag_obj.keys():
ret = deepcopy(ag_obj)
text = ret.pop('TEXT')
ret['db_refs']['TEXT'] = text
else:
ret = ag_obj
return ret | python | def _fix_json_agents(ag_obj):
"""Fix the json representation of an agent."""
if isinstance(ag_obj, str):
logger.info("Fixing string agent: %s." % ag_obj)
ret = {'name': ag_obj, 'db_refs': {'TEXT': ag_obj}}
elif isinstance(ag_obj, list):
# Recursive for complexes and similar.
ret = [_fix_json_agents(ag) for ag in ag_obj]
elif isinstance(ag_obj, dict) and 'TEXT' in ag_obj.keys():
ret = deepcopy(ag_obj)
text = ret.pop('TEXT')
ret['db_refs']['TEXT'] = text
else:
ret = ag_obj
return ret | [
"def",
"_fix_json_agents",
"(",
"ag_obj",
")",
":",
"if",
"isinstance",
"(",
"ag_obj",
",",
"str",
")",
":",
"logger",
".",
"info",
"(",
"\"Fixing string agent: %s.\"",
"%",
"ag_obj",
")",
"ret",
"=",
"{",
"'name'",
":",
"ag_obj",
",",
"'db_refs'",
":",
"{",
"'TEXT'",
":",
"ag_obj",
"}",
"}",
"elif",
"isinstance",
"(",
"ag_obj",
",",
"list",
")",
":",
"# Recursive for complexes and similar.",
"ret",
"=",
"[",
"_fix_json_agents",
"(",
"ag",
")",
"for",
"ag",
"in",
"ag_obj",
"]",
"elif",
"isinstance",
"(",
"ag_obj",
",",
"dict",
")",
"and",
"'TEXT'",
"in",
"ag_obj",
".",
"keys",
"(",
")",
":",
"ret",
"=",
"deepcopy",
"(",
"ag_obj",
")",
"text",
"=",
"ret",
".",
"pop",
"(",
"'TEXT'",
")",
"ret",
"[",
"'db_refs'",
"]",
"[",
"'TEXT'",
"]",
"=",
"text",
"else",
":",
"ret",
"=",
"ag_obj",
"return",
"ret"
]
| Fix the json representation of an agent. | [
"Fix",
"the",
"json",
"representation",
"of",
"an",
"agent",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/processor.py#L23-L37 | train |
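The three input shapes handled by _fix_json_agents can be checked directly (a sketch; the agent values are made up):

from indra.sources.sparser.processor import _fix_json_agents

# A bare string becomes an agent dict with a TEXT db_refs entry.
assert _fix_json_agents('BRAF') == \
    {'name': 'BRAF', 'db_refs': {'TEXT': 'BRAF'}}
# A top-level TEXT key is moved under db_refs.
assert _fix_json_agents({'name': 'BRAF', 'db_refs': {}, 'TEXT': 'B-Raf'}) == \
    {'name': 'BRAF', 'db_refs': {'TEXT': 'B-Raf'}}
# Lists (e.g. Complex members) are fixed element by element.
assert _fix_json_agents(['BRAF', 'RAF1'])[1]['db_refs'] == {'TEXT': 'RAF1'}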
sorgerlab/indra | indra/sources/sparser/processor.py | SparserJSONProcessor.set_statements_pmid | def set_statements_pmid(self, pmid):
"""Set the evidence PMID of Statements that have been extracted.
Parameters
----------
pmid : str or None
The PMID to be used in the Evidence objects of the Statements
that were extracted by the processor.
"""
# Replace PMID value in JSON dict first
for stmt in self.json_stmts:
evs = stmt.get('evidence', [])
for ev in evs:
ev['pmid'] = pmid
# Replace PMID value in extracted Statements next
for stmt in self.statements:
for ev in stmt.evidence:
ev.pmid = pmid | python | def set_statements_pmid(self, pmid):
"""Set the evidence PMID of Statements that have been extracted.
Parameters
----------
pmid : str or None
The PMID to be used in the Evidence objects of the Statements
that were extracted by the processor.
"""
# Replace PMID value in JSON dict first
for stmt in self.json_stmts:
evs = stmt.get('evidence', [])
for ev in evs:
ev['pmid'] = pmid
# Replace PMID value in extracted Statements next
for stmt in self.statements:
for ev in stmt.evidence:
ev.pmid = pmid | [
"def",
"set_statements_pmid",
"(",
"self",
",",
"pmid",
")",
":",
"# Replace PMID value in JSON dict first",
"for",
"stmt",
"in",
"self",
".",
"json_stmts",
":",
"evs",
"=",
"stmt",
".",
"get",
"(",
"'evidence'",
",",
"[",
"]",
")",
"for",
"ev",
"in",
"evs",
":",
"ev",
"[",
"'pmid'",
"]",
"=",
"pmid",
"# Replace PMID value in extracted Statements next",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"for",
"ev",
"in",
"stmt",
".",
"evidence",
":",
"ev",
".",
"pmid",
"=",
"pmid"
]
| Set the evidence PMID of Statements that have been extracted.
Parameters
----------
pmid : str or None
The PMID to be used in the Evidence objects of the Statements
that were extracted by the processor. | [
"Set",
"the",
"evidence",
"PMID",
"of",
"Statements",
"that",
"have",
"been",
"extracted",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/processor.py#L155-L172 | train |
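A usage sketch for set_statements_pmid (requires a local Sparser installation for process_text; the text and PMID are made up):

from indra.sources import sparser

sp = sparser.process_text('MEK1 phosphorylates ERK2.')
if sp is not None and sp.statements:
    sp.set_statements_pmid('12345')  # stamp all evidence with this PMID
    print(sp.statements[0].evidence[0].pmid)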
sorgerlab/indra | indra/sources/trips/analyze_ekbs.py | get_args | def get_args(node):
"""Return the arguments of a node in the event graph."""
arg_roles = {}
args = node.findall('arg') + \
[node.find('arg1'), node.find('arg2'), node.find('arg3')]
for arg in args:
if arg is not None:
id = arg.attrib.get('id')
if id is not None:
arg_roles[arg.attrib['role']] = (arg.attrib['id'], arg)
# Now look at possible inevent links
if node.find('features') is not None:
inevents = node.findall('features/inevent')
for inevent in inevents:
if 'id' in inevent.attrib:
arg_roles['inevent'] = (inevent.attrib['id'], inevent)
ptms = node.findall('features/ptm') + node.findall('features/no-ptm')
for ptm in ptms:
            if 'id' in ptm.attrib:
                arg_roles['ptm'] = (ptm.attrib['id'], ptm)
# And also look for assoc-with links
aw = node.find('assoc-with')
if aw is not None:
aw_id = aw.attrib['id']
arg_roles['assoc-with'] = (aw_id, aw)
return arg_roles | python | def get_args(node):
"""Return the arguments of a node in the event graph."""
arg_roles = {}
args = node.findall('arg') + \
[node.find('arg1'), node.find('arg2'), node.find('arg3')]
for arg in args:
if arg is not None:
id = arg.attrib.get('id')
if id is not None:
arg_roles[arg.attrib['role']] = (arg.attrib['id'], arg)
# Now look at possible inevent links
if node.find('features') is not None:
inevents = node.findall('features/inevent')
for inevent in inevents:
if 'id' in inevent.attrib:
arg_roles['inevent'] = (inevent.attrib['id'], inevent)
ptms = node.findall('features/ptm') + node.findall('features/no-ptm')
for ptm in ptms:
            if 'id' in ptm.attrib:
                arg_roles['ptm'] = (ptm.attrib['id'], ptm)
# And also look for assoc-with links
aw = node.find('assoc-with')
if aw is not None:
aw_id = aw.attrib['id']
arg_roles['assoc-with'] = (aw_id, aw)
return arg_roles | [
"def",
"get_args",
"(",
"node",
")",
":",
"arg_roles",
"=",
"{",
"}",
"args",
"=",
"node",
".",
"findall",
"(",
"'arg'",
")",
"+",
"[",
"node",
".",
"find",
"(",
"'arg1'",
")",
",",
"node",
".",
"find",
"(",
"'arg2'",
")",
",",
"node",
".",
"find",
"(",
"'arg3'",
")",
"]",
"for",
"arg",
"in",
"args",
":",
"if",
"arg",
"is",
"not",
"None",
":",
"id",
"=",
"arg",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"id",
"is",
"not",
"None",
":",
"arg_roles",
"[",
"arg",
".",
"attrib",
"[",
"'role'",
"]",
"]",
"=",
"(",
"arg",
".",
"attrib",
"[",
"'id'",
"]",
",",
"arg",
")",
"# Now look at possible inevent links",
"if",
"node",
".",
"find",
"(",
"'features'",
")",
"is",
"not",
"None",
":",
"inevents",
"=",
"node",
".",
"findall",
"(",
"'features/inevent'",
")",
"for",
"inevent",
"in",
"inevents",
":",
"if",
"'id'",
"in",
"inevent",
".",
"attrib",
":",
"arg_roles",
"[",
"'inevent'",
"]",
"=",
"(",
"inevent",
".",
"attrib",
"[",
"'id'",
"]",
",",
"inevent",
")",
"ptms",
"=",
"node",
".",
"findall",
"(",
"'features/ptm'",
")",
"+",
"node",
".",
"findall",
"(",
"'features/no-ptm'",
")",
"for",
"ptm",
"in",
"ptms",
":",
"if",
"'id'",
"in",
"inevent",
".",
"attrib",
":",
"arg_roles",
"[",
"'ptm'",
"]",
"=",
"(",
"inevent",
".",
"attrib",
"[",
"'id'",
"]",
",",
"ptm",
")",
"# And also look for assoc-with links",
"aw",
"=",
"node",
".",
"find",
"(",
"'assoc-with'",
")",
"if",
"aw",
"is",
"not",
"None",
":",
"aw_id",
"=",
"aw",
".",
"attrib",
"[",
"'id'",
"]",
"arg_roles",
"[",
"'assoc-with'",
"]",
"=",
"(",
"aw_id",
",",
"aw",
")",
"return",
"arg_roles"
]
| Return the arguments of a node in the event graph. | [
"Return",
"the",
"arguments",
"of",
"a",
"node",
"in",
"the",
"event",
"graph",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L20-L47 | train |
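A toy EKB fragment shows what get_args extracts; the XML below is made up but mimics the EKB layout assumed above:

import xml.etree.ElementTree as ET

from indra.sources.trips.analyze_ekbs import get_args

ekb = '''
<EVENT id="V1">
  <arg1 id="V2" role=":AGENT"/>
  <arg2 id="V3" role=":AFFECTED"/>
  <assoc-with id="V4"/>
</EVENT>
'''
args = get_args(ET.fromstring(ekb))
# Keys are roles, values are (id, element) pairs.
print(sorted(args))  # [':AFFECTED', ':AGENT', 'assoc-with']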
sorgerlab/indra | indra/sources/trips/analyze_ekbs.py | type_match | def type_match(a, b):
"""Return True of the types of a and b are compatible, False otherwise."""
# If the types are the same, return True
if a['type'] == b['type']:
return True
# Otherwise, look at some special cases
eq_groups = [
{'ONT::GENE-PROTEIN', 'ONT::GENE', 'ONT::PROTEIN'},
{'ONT::PHARMACOLOGIC-SUBSTANCE', 'ONT::CHEMICAL'}
]
for eq_group in eq_groups:
if a['type'] in eq_group and b['type'] in eq_group:
return True
return False | python | def type_match(a, b):
"""Return True of the types of a and b are compatible, False otherwise."""
# If the types are the same, return True
if a['type'] == b['type']:
return True
# Otherwise, look at some special cases
eq_groups = [
{'ONT::GENE-PROTEIN', 'ONT::GENE', 'ONT::PROTEIN'},
{'ONT::PHARMACOLOGIC-SUBSTANCE', 'ONT::CHEMICAL'}
]
for eq_group in eq_groups:
if a['type'] in eq_group and b['type'] in eq_group:
return True
return False | [
"def",
"type_match",
"(",
"a",
",",
"b",
")",
":",
"# If the types are the same, return True",
"if",
"a",
"[",
"'type'",
"]",
"==",
"b",
"[",
"'type'",
"]",
":",
"return",
"True",
"# Otherwise, look at some special cases",
"eq_groups",
"=",
"[",
"{",
"'ONT::GENE-PROTEIN'",
",",
"'ONT::GENE'",
",",
"'ONT::PROTEIN'",
"}",
",",
"{",
"'ONT::PHARMACOLOGIC-SUBSTANCE'",
",",
"'ONT::CHEMICAL'",
"}",
"]",
"for",
"eq_group",
"in",
"eq_groups",
":",
"if",
"a",
"[",
"'type'",
"]",
"in",
"eq_group",
"and",
"b",
"[",
"'type'",
"]",
"in",
"eq_group",
":",
"return",
"True",
"return",
"False"
]
| Return True if the types of a and b are compatible, False otherwise. | [
"Return",
"True",
"of",
"the",
"types",
"of",
"a",
"and",
"b",
"are",
"compatible",
"False",
"otherwise",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L58-L71 | train |
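A quick illustration of the equivalence groups (made-up type dicts):

from indra.sources.trips.analyze_ekbs import type_match

a = {'type': 'ONT::GENE'}
b = {'type': 'ONT::PROTEIN'}
c = {'type': 'ONT::CHEMICAL'}
assert type_match(a, b)      # both in the gene/protein group
assert not type_match(a, c)  # different groups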
sorgerlab/indra | indra/sources/trips/analyze_ekbs.py | add_graph | def add_graph(patterns, G):
"""Add a graph to a set of unique patterns."""
if not patterns:
patterns.append([G])
return
for i, graphs in enumerate(patterns):
if networkx.is_isomorphic(graphs[0], G, node_match=type_match,
edge_match=type_match):
patterns[i].append(G)
return
patterns.append([G]) | python | def add_graph(patterns, G):
"""Add a graph to a set of unique patterns."""
if not patterns:
patterns.append([G])
return
for i, graphs in enumerate(patterns):
if networkx.is_isomorphic(graphs[0], G, node_match=type_match,
edge_match=type_match):
patterns[i].append(G)
return
patterns.append([G]) | [
"def",
"add_graph",
"(",
"patterns",
",",
"G",
")",
":",
"if",
"not",
"patterns",
":",
"patterns",
".",
"append",
"(",
"[",
"G",
"]",
")",
"return",
"for",
"i",
",",
"graphs",
"in",
"enumerate",
"(",
"patterns",
")",
":",
"if",
"networkx",
".",
"is_isomorphic",
"(",
"graphs",
"[",
"0",
"]",
",",
"G",
",",
"node_match",
"=",
"type_match",
",",
"edge_match",
"=",
"type_match",
")",
":",
"patterns",
"[",
"i",
"]",
".",
"append",
"(",
"G",
")",
"return",
"patterns",
".",
"append",
"(",
"[",
"G",
"]",
")"
]
| Add a graph to a set of unique patterns. | [
"Add",
"a",
"graph",
"to",
"a",
"set",
"of",
"unique",
"patterns",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L74-L84 | train |
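A sketch of how isomorphic event graphs get pooled into one pattern (node and edge attributes follow the conventions used above):

import networkx

from indra.sources.trips.analyze_ekbs import add_graph

def two_node_graph(n1, n2, t1, t2):
    g = networkx.DiGraph()
    g.add_node(n1, type=t1)
    g.add_node(n2, type=t2)
    g.add_edge(n1, n2, type=':AGENT')
    return g

patterns = []
add_graph(patterns, two_node_graph('V1', 'V2',
                                   'ONT::PHOSPHORYLATION', 'ONT::GENE'))
# Same shape, compatible types -> grouped with the first pattern.
add_graph(patterns, two_node_graph('V3', 'V4',
                                   'ONT::PHOSPHORYLATION', 'ONT::PROTEIN'))
print(len(patterns), len(patterns[0]))  # 1 2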
sorgerlab/indra | indra/sources/trips/analyze_ekbs.py | draw | def draw(graph, fname):
"""Draw a graph and save it into a file"""
ag = networkx.nx_agraph.to_agraph(graph)
ag.draw(fname, prog='dot') | python | def draw(graph, fname):
"""Draw a graph and save it into a file"""
ag = networkx.nx_agraph.to_agraph(graph)
ag.draw(fname, prog='dot') | [
"def",
"draw",
"(",
"graph",
",",
"fname",
")",
":",
"ag",
"=",
"networkx",
".",
"nx_agraph",
".",
"to_agraph",
"(",
"graph",
")",
"ag",
".",
"draw",
"(",
"fname",
",",
"prog",
"=",
"'dot'",
")"
]
| Draw a graph and save it into a file | [
"Draw",
"a",
"graph",
"and",
"save",
"it",
"into",
"a",
"file"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L87-L90 | train |
sorgerlab/indra | indra/sources/trips/analyze_ekbs.py | build_event_graph | def build_event_graph(graph, tree, node):
"""Return a DiGraph of a specific event structure, built recursively"""
# If we have already added this node then let's return
if node_key(node) in graph:
return
type = get_type(node)
text = get_text(node)
label = '%s (%s)' % (type, text)
graph.add_node(node_key(node), type=type, label=label, text=text)
args = get_args(node)
for arg_role, (arg_id, arg_tag) in args.items():
arg = get_node_by_id(tree, arg_id)
if arg is None:
arg = arg_tag
build_event_graph(graph, tree, arg)
graph.add_edge(node_key(node), node_key(arg), type=arg_role,
label=arg_role) | python | def build_event_graph(graph, tree, node):
"""Return a DiGraph of a specific event structure, built recursively"""
# If we have already added this node then let's return
if node_key(node) in graph:
return
type = get_type(node)
text = get_text(node)
label = '%s (%s)' % (type, text)
graph.add_node(node_key(node), type=type, label=label, text=text)
args = get_args(node)
for arg_role, (arg_id, arg_tag) in args.items():
arg = get_node_by_id(tree, arg_id)
if arg is None:
arg = arg_tag
build_event_graph(graph, tree, arg)
graph.add_edge(node_key(node), node_key(arg), type=arg_role,
label=arg_role) | [
"def",
"build_event_graph",
"(",
"graph",
",",
"tree",
",",
"node",
")",
":",
"# If we have already added this node then let's return",
"if",
"node_key",
"(",
"node",
")",
"in",
"graph",
":",
"return",
"type",
"=",
"get_type",
"(",
"node",
")",
"text",
"=",
"get_text",
"(",
"node",
")",
"label",
"=",
"'%s (%s)'",
"%",
"(",
"type",
",",
"text",
")",
"graph",
".",
"add_node",
"(",
"node_key",
"(",
"node",
")",
",",
"type",
"=",
"type",
",",
"label",
"=",
"label",
",",
"text",
"=",
"text",
")",
"args",
"=",
"get_args",
"(",
"node",
")",
"for",
"arg_role",
",",
"(",
"arg_id",
",",
"arg_tag",
")",
"in",
"args",
".",
"items",
"(",
")",
":",
"arg",
"=",
"get_node_by_id",
"(",
"tree",
",",
"arg_id",
")",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"arg_tag",
"build_event_graph",
"(",
"graph",
",",
"tree",
",",
"arg",
")",
"graph",
".",
"add_edge",
"(",
"node_key",
"(",
"node",
")",
",",
"node_key",
"(",
"arg",
")",
",",
"type",
"=",
"arg_role",
",",
"label",
"=",
"arg_role",
")"
]
| Recursively build up a DiGraph of a specific event structure | [
"Return",
"a",
"DiGraph",
"of",
"a",
"specific",
"event",
"structure",
"built",
"recursively"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L107-L123 | train |
sorgerlab/indra | indra/sources/trips/analyze_ekbs.py | get_extracted_events | def get_extracted_events(fnames):
"""Get a full list of all extracted event IDs from a list of EKB files"""
event_list = []
for fn in fnames:
tp = trips.process_xml_file(fn)
ed = tp.extracted_events
for k, v in ed.items():
event_list += v
return event_list | python | def get_extracted_events(fnames):
"""Get a full list of all extracted event IDs from a list of EKB files"""
event_list = []
for fn in fnames:
tp = trips.process_xml_file(fn)
ed = tp.extracted_events
for k, v in ed.items():
event_list += v
return event_list | [
"def",
"get_extracted_events",
"(",
"fnames",
")",
":",
"event_list",
"=",
"[",
"]",
"for",
"fn",
"in",
"fnames",
":",
"tp",
"=",
"trips",
".",
"process_xml_file",
"(",
"fn",
")",
"ed",
"=",
"tp",
".",
"extracted_events",
"for",
"k",
",",
"v",
"in",
"ed",
".",
"items",
"(",
")",
":",
"event_list",
"+=",
"v",
"return",
"event_list"
]
| Get a full list of all extracted event IDs from a list of EKB files | [
"Get",
"a",
"full",
"list",
"of",
"all",
"extracted",
"event",
"IDs",
"from",
"a",
"list",
"of",
"EKB",
"files"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L126-L134 | train |
sorgerlab/indra | indra/sources/trips/analyze_ekbs.py | check_event_coverage | def check_event_coverage(patterns, event_list):
"""Calculate the ratio of patterns that were extracted."""
proportions = []
for pattern_list in patterns:
proportion = 0
for pattern in pattern_list:
for node in pattern.nodes():
if node in event_list:
proportion += 1.0 / len(pattern_list)
break
proportions.append(proportion)
return proportions | python | def check_event_coverage(patterns, event_list):
"""Calculate the ratio of patterns that were extracted."""
proportions = []
for pattern_list in patterns:
proportion = 0
for pattern in pattern_list:
for node in pattern.nodes():
if node in event_list:
proportion += 1.0 / len(pattern_list)
break
proportions.append(proportion)
return proportions | [
"def",
"check_event_coverage",
"(",
"patterns",
",",
"event_list",
")",
":",
"proportions",
"=",
"[",
"]",
"for",
"pattern_list",
"in",
"patterns",
":",
"proportion",
"=",
"0",
"for",
"pattern",
"in",
"pattern_list",
":",
"for",
"node",
"in",
"pattern",
".",
"nodes",
"(",
")",
":",
"if",
"node",
"in",
"event_list",
":",
"proportion",
"+=",
"1.0",
"/",
"len",
"(",
"pattern_list",
")",
"break",
"proportions",
".",
"append",
"(",
"proportion",
")",
"return",
"proportions"
]
| Calculate the proportion of each pattern group that was extracted. | [
"Calculate",
"the",
"ratio",
"of",
"patterns",
"that",
"were",
"extracted",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L137-L148 | train |
sorgerlab/indra | indra/preassembler/ontology_mapper.py | OntologyMapper.map_statements | def map_statements(self):
"""Run the ontology mapping on the statements."""
for stmt in self.statements:
for agent in stmt.agent_list():
if agent is None:
continue
all_mappings = []
for db_name, db_id in agent.db_refs.items():
if isinstance(db_id, list):
db_id = db_id[0][0]
mappings = self._map_id(db_name, db_id)
all_mappings += mappings
for map_db_name, map_db_id, score, orig_db_name in all_mappings:
if map_db_name in agent.db_refs:
continue
if self.scored:
# If the original one is a scored grounding,
# we take that score and multiply it with the mapping
# score. Otherwise we assume the original score is 1.
try:
orig_score = agent.db_refs[orig_db_name][0][1]
except Exception:
orig_score = 1.0
agent.db_refs[map_db_name] = \
[(map_db_id, score * orig_score)]
else:
if map_db_name in ('UN', 'HUME'):
agent.db_refs[map_db_name] = [(map_db_id, 1.0)]
else:
agent.db_refs[map_db_name] = map_db_id | python | def map_statements(self):
"""Run the ontology mapping on the statements."""
for stmt in self.statements:
for agent in stmt.agent_list():
if agent is None:
continue
all_mappings = []
for db_name, db_id in agent.db_refs.items():
if isinstance(db_id, list):
db_id = db_id[0][0]
mappings = self._map_id(db_name, db_id)
all_mappings += mappings
for map_db_name, map_db_id, score, orig_db_name in all_mappings:
if map_db_name in agent.db_refs:
continue
if self.scored:
# If the original one is a scored grounding,
# we take that score and multiply it with the mapping
# score. Otherwise we assume the original score is 1.
try:
orig_score = agent.db_refs[orig_db_name][0][1]
except Exception:
orig_score = 1.0
agent.db_refs[map_db_name] = \
[(map_db_id, score * orig_score)]
else:
if map_db_name in ('UN', 'HUME'):
agent.db_refs[map_db_name] = [(map_db_id, 1.0)]
else:
agent.db_refs[map_db_name] = map_db_id | [
"def",
"map_statements",
"(",
"self",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"for",
"agent",
"in",
"stmt",
".",
"agent_list",
"(",
")",
":",
"if",
"agent",
"is",
"None",
":",
"continue",
"all_mappings",
"=",
"[",
"]",
"for",
"db_name",
",",
"db_id",
"in",
"agent",
".",
"db_refs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"db_id",
",",
"list",
")",
":",
"db_id",
"=",
"db_id",
"[",
"0",
"]",
"[",
"0",
"]",
"mappings",
"=",
"self",
".",
"_map_id",
"(",
"db_name",
",",
"db_id",
")",
"all_mappings",
"+=",
"mappings",
"for",
"map_db_name",
",",
"map_db_id",
",",
"score",
",",
"orig_db_name",
"in",
"all_mappings",
":",
"if",
"map_db_name",
"in",
"agent",
".",
"db_refs",
":",
"continue",
"if",
"self",
".",
"scored",
":",
"# If the original one is a scored grounding,",
"# we take that score and multiply it with the mapping",
"# score. Otherwise we assume the original score is 1.",
"try",
":",
"orig_score",
"=",
"agent",
".",
"db_refs",
"[",
"orig_db_name",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
"except",
"Exception",
":",
"orig_score",
"=",
"1.0",
"agent",
".",
"db_refs",
"[",
"map_db_name",
"]",
"=",
"[",
"(",
"map_db_id",
",",
"score",
"*",
"orig_score",
")",
"]",
"else",
":",
"if",
"map_db_name",
"in",
"(",
"'UN'",
",",
"'HUME'",
")",
":",
"agent",
".",
"db_refs",
"[",
"map_db_name",
"]",
"=",
"[",
"(",
"map_db_id",
",",
"1.0",
")",
"]",
"else",
":",
"agent",
".",
"db_refs",
"[",
"map_db_name",
"]",
"=",
"map_db_id"
]
| Run the ontology mapping on the statements. | [
"Run",
"the",
"ontology",
"mapping",
"on",
"the",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/ontology_mapper.py#L45-L74 | train |
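The score-combination rule used above (mapping score times the original grounding score, with unscored originals counting as 1.0), shown in isolation as a standalone sketch rather than the INDRA API:

def combined_score(mapping_score, orig_grounding):
    # orig_grounding is either a scored list like [('id', 0.5)] or a bare id.
    try:
        orig_score = orig_grounding[0][1]
    except Exception:
        orig_score = 1.0
    return mapping_score * orig_score

assert combined_score(0.5, [('flood', 0.5)]) == 0.25
assert combined_score(0.5, 'flood') == 0.5  # unscored original counts as 1.0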
sorgerlab/indra | indra/preassembler/grounding_mapper.py | load_grounding_map | def load_grounding_map(grounding_map_path, ignore_path=None,
lineterminator='\r\n'):
"""Return a grounding map dictionary loaded from a csv file.
In the file pointed to by grounding_map_path, the number of name_space ID
pairs can vary per row and commas are
    used to pad out entries containing fewer than the maximum number of
    name spaces appearing in the file. Lines should be terminated with
    \r\n, i.e. both a carriage return and a newline, by default.
Optionally, one can specify another csv file (pointed to by ignore_path)
containing agent texts that are degenerate and should be filtered out.
Parameters
----------
grounding_map_path : str
Path to csv file containing grounding map information. Rows of the file
should be of the form <agent_text>,<name_space_1>,<ID_1>,...
<name_space_n>,<ID_n>
ignore_path : Optional[str]
Path to csv file containing terms that should be filtered out during
        the grounding mapping process. The file should be of the form
<agent_text>,,..., where the number of commas that
appear is the same as in the csv file at grounding_map_path.
Default: None
lineterminator : Optional[str]
Line terminator used in input csv file. Default: \r\n
Returns
-------
g_map : dict
The grounding map constructed from the given files.
"""
g_map = {}
map_rows = read_unicode_csv(grounding_map_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
                                lineterminator=lineterminator)
if ignore_path and os.path.exists(ignore_path):
ignore_rows = read_unicode_csv(ignore_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
lineterminator=lineterminator)
else:
ignore_rows = []
csv_rows = chain(map_rows, ignore_rows)
for row in csv_rows:
key = row[0]
db_refs = {'TEXT': key}
keys = [entry for entry in row[1::2] if entry != '']
values = [entry for entry in row[2::2] if entry != '']
if len(keys) != len(values):
logger.info('ERROR: Mismatched keys and values in row %s' %
str(row))
continue
else:
db_refs.update(dict(zip(keys, values)))
if len(db_refs.keys()) > 1:
g_map[key] = db_refs
else:
g_map[key] = None
return g_map | python | def load_grounding_map(grounding_map_path, ignore_path=None,
lineterminator='\r\n'):
"""Return a grounding map dictionary loaded from a csv file.
In the file pointed to by grounding_map_path, the number of name_space ID
pairs can vary per row and commas are
    used to pad out entries containing fewer than the maximum number of
    name spaces appearing in the file. Lines should be terminated with
    \r\n, i.e. both a carriage return and a newline, by default.
Optionally, one can specify another csv file (pointed to by ignore_path)
containing agent texts that are degenerate and should be filtered out.
Parameters
----------
grounding_map_path : str
Path to csv file containing grounding map information. Rows of the file
should be of the form <agent_text>,<name_space_1>,<ID_1>,...
<name_space_n>,<ID_n>
ignore_path : Optional[str]
Path to csv file containing terms that should be filtered out during
        the grounding mapping process. The file should be of the form
<agent_text>,,..., where the number of commas that
appear is the same as in the csv file at grounding_map_path.
Default: None
lineterminator : Optional[str]
Line terminator used in input csv file. Default: \r\n
Returns
-------
g_map : dict
The grounding map constructed from the given files.
"""
g_map = {}
map_rows = read_unicode_csv(grounding_map_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
                                lineterminator=lineterminator)
if ignore_path and os.path.exists(ignore_path):
ignore_rows = read_unicode_csv(ignore_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
lineterminator=lineterminator)
else:
ignore_rows = []
csv_rows = chain(map_rows, ignore_rows)
for row in csv_rows:
key = row[0]
db_refs = {'TEXT': key}
keys = [entry for entry in row[1::2] if entry != '']
values = [entry for entry in row[2::2] if entry != '']
if len(keys) != len(values):
logger.info('ERROR: Mismatched keys and values in row %s' %
str(row))
continue
else:
db_refs.update(dict(zip(keys, values)))
if len(db_refs.keys()) > 1:
g_map[key] = db_refs
else:
g_map[key] = None
return g_map | [
"def",
"load_grounding_map",
"(",
"grounding_map_path",
",",
"ignore_path",
"=",
"None",
",",
"lineterminator",
"=",
"'\\r\\n'",
")",
":",
"g_map",
"=",
"{",
"}",
"map_rows",
"=",
"read_unicode_csv",
"(",
"grounding_map_path",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
",",
"lineterminator",
"=",
"'\\r\\n'",
")",
"if",
"ignore_path",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"ignore_path",
")",
":",
"ignore_rows",
"=",
"read_unicode_csv",
"(",
"ignore_path",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
",",
"lineterminator",
"=",
"lineterminator",
")",
"else",
":",
"ignore_rows",
"=",
"[",
"]",
"csv_rows",
"=",
"chain",
"(",
"map_rows",
",",
"ignore_rows",
")",
"for",
"row",
"in",
"csv_rows",
":",
"key",
"=",
"row",
"[",
"0",
"]",
"db_refs",
"=",
"{",
"'TEXT'",
":",
"key",
"}",
"keys",
"=",
"[",
"entry",
"for",
"entry",
"in",
"row",
"[",
"1",
":",
":",
"2",
"]",
"if",
"entry",
"!=",
"''",
"]",
"values",
"=",
"[",
"entry",
"for",
"entry",
"in",
"row",
"[",
"2",
":",
":",
"2",
"]",
"if",
"entry",
"!=",
"''",
"]",
"if",
"len",
"(",
"keys",
")",
"!=",
"len",
"(",
"values",
")",
":",
"logger",
".",
"info",
"(",
"'ERROR: Mismatched keys and values in row %s'",
"%",
"str",
"(",
"row",
")",
")",
"continue",
"else",
":",
"db_refs",
".",
"update",
"(",
"dict",
"(",
"zip",
"(",
"keys",
",",
"values",
")",
")",
")",
"if",
"len",
"(",
"db_refs",
".",
"keys",
"(",
")",
")",
">",
"1",
":",
"g_map",
"[",
"key",
"]",
"=",
"db_refs",
"else",
":",
"g_map",
"[",
"key",
"]",
"=",
"None",
"return",
"g_map"
]
| Return a grounding map dictionary loaded from a csv file.
In the file pointed to by grounding_map_path, the number of name_space ID
pairs can vary per row and commas are
used to pad out entries containing fewer than the maximum number of
name spaces appearing in the file. Lines should be terminated with
\r\n, i.e. both a carriage return and a newline, by default.
Optionally, one can specify another csv file (pointed to by ignore_path)
containing agent texts that are degenerate and should be filtered out.
Parameters
----------
grounding_map_path : str
Path to csv file containing grounding map information. Rows of the file
should be of the form <agent_text>,<name_space_1>,<ID_1>,...
<name_space_n>,<ID_n>
ignore_path : Optional[str]
Path to csv file containing terms that should be filtered out during
    the grounding mapping process. The file should be of the form
<agent_text>,,..., where the number of commas that
appear is the same as in the csv file at grounding_map_path.
Default: None
lineterminator : Optional[str]
Line terminator used in input csv file. Default: \r\n
Returns
-------
g_map : dict
The grounding map constructed from the given files. | [
"Return",
"a",
"grounding",
"map",
"dictionary",
"loaded",
"from",
"a",
"csv",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L360-L421 | train |
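A round-trip sketch for load_grounding_map with a tiny, made-up csv (note the \r\n line endings):

import os
import tempfile

from indra.preassembler.grounding_mapper import load_grounding_map

rows = ['ERK,FPLX,ERK', 'XYZ,,']  # one grounded row, one TEXT-only row
fd, path = tempfile.mkstemp(suffix='.csv')
with os.fdopen(fd, 'w', newline='') as fh:
    fh.write('\r\n'.join(rows) + '\r\n')
g_map = load_grounding_map(path)
print(g_map['ERK'])  # {'TEXT': 'ERK', 'FPLX': 'ERK'}
print(g_map['XYZ'])  # None: rows with no grounding map to None
os.unlink(path)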
sorgerlab/indra | indra/preassembler/grounding_mapper.py | all_agents | def all_agents(stmts):
"""Return a list of all of the agents from a list of statements.
Only agents that are not None and have a TEXT entry are returned.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
agents : list of :py:class:`indra.statements.Agent`
List of agents that appear in the input list of indra statements.
"""
agents = []
for stmt in stmts:
for agent in stmt.agent_list():
# Agents don't always have a TEXT db_refs entry (for instance
# in the case of Statements from databases) so we check for this.
if agent is not None and agent.db_refs.get('TEXT') is not None:
agents.append(agent)
return agents | python | def all_agents(stmts):
"""Return a list of all of the agents from a list of statements.
Only agents that are not None and have a TEXT entry are returned.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
agents : list of :py:class:`indra.statements.Agent`
List of agents that appear in the input list of indra statements.
"""
agents = []
for stmt in stmts:
for agent in stmt.agent_list():
# Agents don't always have a TEXT db_refs entry (for instance
# in the case of Statements from databases) so we check for this.
if agent is not None and agent.db_refs.get('TEXT') is not None:
agents.append(agent)
return agents | [
"def",
"all_agents",
"(",
"stmts",
")",
":",
"agents",
"=",
"[",
"]",
"for",
"stmt",
"in",
"stmts",
":",
"for",
"agent",
"in",
"stmt",
".",
"agent_list",
"(",
")",
":",
"# Agents don't always have a TEXT db_refs entry (for instance",
"# in the case of Statements from databases) so we check for this.",
"if",
"agent",
"is",
"not",
"None",
"and",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'TEXT'",
")",
"is",
"not",
"None",
":",
"agents",
".",
"append",
"(",
"agent",
")",
"return",
"agents"
]
| Return a list of all of the agents from a list of statements.
Only agents that are not None and have a TEXT entry are returned.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
agents : list of :py:class:`indra.statements.Agent`
List of agents that appear in the input list of indra statements. | [
"Return",
"a",
"list",
"of",
"all",
"of",
"the",
"agents",
"from",
"a",
"list",
"of",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L426-L447 | train |
sorgerlab/indra | indra/preassembler/grounding_mapper.py | get_sentences_for_agent | def get_sentences_for_agent(text, stmts, max_sentences=None):
"""Returns evidence sentences with a given agent text from a list of statements
Parameters
----------
text : str
An agent text
stmts : list of :py:class:`indra.statements.Statement`
INDRA Statements to search in for evidence statements.
    max_sentences : Optional[int]
        Cap on the number of evidence sentences to return. Default: None
    Returns
    -------
    sentences : list of tuple
        (pmid, sentence) pairs taken from the evidence of statements
        containing the given agent text.
"""
sentences = []
for stmt in stmts:
for agent in stmt.agent_list():
if agent is not None and agent.db_refs.get('TEXT') == text:
sentences.append((stmt.evidence[0].pmid,
stmt.evidence[0].text))
if max_sentences is not None and \
len(sentences) >= max_sentences:
return sentences
return sentences | python | def get_sentences_for_agent(text, stmts, max_sentences=None):
"""Returns evidence sentences with a given agent text from a list of statements
Parameters
----------
text : str
An agent text
stmts : list of :py:class:`indra.statements.Statement`
INDRA Statements to search in for evidence statements.
    max_sentences : Optional[int]
        Cap on the number of evidence sentences to return. Default: None
    Returns
    -------
    sentences : list of tuple
        (pmid, sentence) pairs taken from the evidence of statements
        containing the given agent text.
"""
sentences = []
for stmt in stmts:
for agent in stmt.agent_list():
if agent is not None and agent.db_refs.get('TEXT') == text:
sentences.append((stmt.evidence[0].pmid,
stmt.evidence[0].text))
if max_sentences is not None and \
len(sentences) >= max_sentences:
return sentences
return sentences | [
"def",
"get_sentences_for_agent",
"(",
"text",
",",
"stmts",
",",
"max_sentences",
"=",
"None",
")",
":",
"sentences",
"=",
"[",
"]",
"for",
"stmt",
"in",
"stmts",
":",
"for",
"agent",
"in",
"stmt",
".",
"agent_list",
"(",
")",
":",
"if",
"agent",
"is",
"not",
"None",
"and",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'TEXT'",
")",
"==",
"text",
":",
"sentences",
".",
"append",
"(",
"(",
"stmt",
".",
"evidence",
"[",
"0",
"]",
".",
"pmid",
",",
"stmt",
".",
"evidence",
"[",
"0",
"]",
".",
"text",
")",
")",
"if",
"max_sentences",
"is",
"not",
"None",
"and",
"len",
"(",
"sentences",
")",
">=",
"max_sentences",
":",
"return",
"sentences",
"return",
"sentences"
]
| Returns evidence sentences with a given agent text from a list of statements
Parameters
----------
text : str
An agent text
stmts : list of :py:class:`indra.statements.Statement`
INDRA Statements to search in for evidence statements.
max_sentences : Optional[int]
    Cap on the number of evidence sentences to return. Default: None
Returns
-------
sentences : list of tuple
    (pmid, sentence) pairs taken from the evidence of statements
    containing the given agent text. | [
"Returns",
"evidence",
"sentences",
"with",
"a",
"given",
"agent",
"text",
"from",
"a",
"list",
"of",
"statements"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L467-L496 | train |
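A self-contained sketch for get_sentences_for_agent (the statement and PMID are made up):

from indra.statements import Agent, Evidence, Phosphorylation
from indra.preassembler.grounding_mapper import get_sentences_for_agent

stmt = Phosphorylation(
    Agent('MAP2K1', db_refs={'TEXT': 'MEK'}),
    Agent('MAPK1', db_refs={'TEXT': 'ERK'}),
    evidence=[Evidence(pmid='12345', text='MEK phosphorylates ERK.')])
print(get_sentences_for_agent('ERK', [stmt]))
# [('12345', 'MEK phosphorylates ERK.')]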
sorgerlab/indra | indra/preassembler/grounding_mapper.py | agent_texts_with_grounding | def agent_texts_with_grounding(stmts):
"""Return agent text groundings in a list of statements with their counts
Parameters
----------
stmts: list of :py:class:`indra.statements.Statement`
Returns
-------
list of tuple
List of tuples of the form
(text: str, ((name_space: str, ID: str, count: int)...),
total_count: int)
Where the counts within the tuple of groundings give the number of
times an agent with the given agent_text appears grounded with the
particular name space and ID. The total_count gives the total number
        of times an agent with the given text appears in the list of statements.
"""
allag = all_agents(stmts)
# Convert PFAM-DEF lists into tuples so that they are hashable and can
# be tabulated with a Counter
for ag in allag:
pfam_def = ag.db_refs.get('PFAM-DEF')
if pfam_def is not None:
ag.db_refs['PFAM-DEF'] = tuple(pfam_def)
refs = [tuple(ag.db_refs.items()) for ag in allag]
refs_counter = Counter(refs)
refs_counter_dict = [(dict(entry[0]), entry[1])
for entry in refs_counter.items()]
# First, sort by text so that we can do a groupby
refs_counter_dict.sort(key=lambda x: x[0].get('TEXT'))
# Then group by text
grouped_by_text = []
for k, g in groupby(refs_counter_dict, key=lambda x: x[0].get('TEXT')):
# Total occurrences of this agent text
total = 0
entry = [k]
db_ref_list = []
for db_refs, count in g:
# Check if TEXT is our only key, indicating no grounding
if list(db_refs.keys()) == ['TEXT']:
db_ref_list.append((None, None, count))
# Add any other db_refs (not TEXT)
for db, db_id in db_refs.items():
if db == 'TEXT':
continue
else:
db_ref_list.append((db, db_id, count))
total += count
# Sort the db_ref_list by the occurrences of each grounding
entry.append(tuple(sorted(db_ref_list, key=lambda x: x[2],
reverse=True)))
# Now add the total frequency to the entry
entry.append(total)
# And add the entry to the overall list
grouped_by_text.append(tuple(entry))
# Sort the list by the total number of occurrences of each unique key
grouped_by_text.sort(key=lambda x: x[2], reverse=True)
return grouped_by_text | python | def agent_texts_with_grounding(stmts):
"""Return agent text groundings in a list of statements with their counts
Parameters
----------
stmts: list of :py:class:`indra.statements.Statement`
Returns
-------
list of tuple
List of tuples of the form
(text: str, ((name_space: str, ID: str, count: int)...),
total_count: int)
Where the counts within the tuple of groundings give the number of
times an agent with the given agent_text appears grounded with the
particular name space and ID. The total_count gives the total number
        of times an agent with the given text appears in the list of statements.
"""
allag = all_agents(stmts)
# Convert PFAM-DEF lists into tuples so that they are hashable and can
# be tabulated with a Counter
for ag in allag:
pfam_def = ag.db_refs.get('PFAM-DEF')
if pfam_def is not None:
ag.db_refs['PFAM-DEF'] = tuple(pfam_def)
refs = [tuple(ag.db_refs.items()) for ag in allag]
refs_counter = Counter(refs)
refs_counter_dict = [(dict(entry[0]), entry[1])
for entry in refs_counter.items()]
# First, sort by text so that we can do a groupby
refs_counter_dict.sort(key=lambda x: x[0].get('TEXT'))
# Then group by text
grouped_by_text = []
for k, g in groupby(refs_counter_dict, key=lambda x: x[0].get('TEXT')):
# Total occurrences of this agent text
total = 0
entry = [k]
db_ref_list = []
for db_refs, count in g:
# Check if TEXT is our only key, indicating no grounding
if list(db_refs.keys()) == ['TEXT']:
db_ref_list.append((None, None, count))
# Add any other db_refs (not TEXT)
for db, db_id in db_refs.items():
if db == 'TEXT':
continue
else:
db_ref_list.append((db, db_id, count))
total += count
# Sort the db_ref_list by the occurrences of each grounding
entry.append(tuple(sorted(db_ref_list, key=lambda x: x[2],
reverse=True)))
# Now add the total frequency to the entry
entry.append(total)
# And add the entry to the overall list
grouped_by_text.append(tuple(entry))
# Sort the list by the total number of occurrences of each unique key
grouped_by_text.sort(key=lambda x: x[2], reverse=True)
return grouped_by_text | [
"def",
"agent_texts_with_grounding",
"(",
"stmts",
")",
":",
"allag",
"=",
"all_agents",
"(",
"stmts",
")",
"# Convert PFAM-DEF lists into tuples so that they are hashable and can",
"# be tabulated with a Counter",
"for",
"ag",
"in",
"allag",
":",
"pfam_def",
"=",
"ag",
".",
"db_refs",
".",
"get",
"(",
"'PFAM-DEF'",
")",
"if",
"pfam_def",
"is",
"not",
"None",
":",
"ag",
".",
"db_refs",
"[",
"'PFAM-DEF'",
"]",
"=",
"tuple",
"(",
"pfam_def",
")",
"refs",
"=",
"[",
"tuple",
"(",
"ag",
".",
"db_refs",
".",
"items",
"(",
")",
")",
"for",
"ag",
"in",
"allag",
"]",
"refs_counter",
"=",
"Counter",
"(",
"refs",
")",
"refs_counter_dict",
"=",
"[",
"(",
"dict",
"(",
"entry",
"[",
"0",
"]",
")",
",",
"entry",
"[",
"1",
"]",
")",
"for",
"entry",
"in",
"refs_counter",
".",
"items",
"(",
")",
"]",
"# First, sort by text so that we can do a groupby",
"refs_counter_dict",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
".",
"get",
"(",
"'TEXT'",
")",
")",
"# Then group by text",
"grouped_by_text",
"=",
"[",
"]",
"for",
"k",
",",
"g",
"in",
"groupby",
"(",
"refs_counter_dict",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
".",
"get",
"(",
"'TEXT'",
")",
")",
":",
"# Total occurrences of this agent text",
"total",
"=",
"0",
"entry",
"=",
"[",
"k",
"]",
"db_ref_list",
"=",
"[",
"]",
"for",
"db_refs",
",",
"count",
"in",
"g",
":",
"# Check if TEXT is our only key, indicating no grounding",
"if",
"list",
"(",
"db_refs",
".",
"keys",
"(",
")",
")",
"==",
"[",
"'TEXT'",
"]",
":",
"db_ref_list",
".",
"append",
"(",
"(",
"None",
",",
"None",
",",
"count",
")",
")",
"# Add any other db_refs (not TEXT)",
"for",
"db",
",",
"db_id",
"in",
"db_refs",
".",
"items",
"(",
")",
":",
"if",
"db",
"==",
"'TEXT'",
":",
"continue",
"else",
":",
"db_ref_list",
".",
"append",
"(",
"(",
"db",
",",
"db_id",
",",
"count",
")",
")",
"total",
"+=",
"count",
"# Sort the db_ref_list by the occurrences of each grounding",
"entry",
".",
"append",
"(",
"tuple",
"(",
"sorted",
"(",
"db_ref_list",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
",",
"reverse",
"=",
"True",
")",
")",
")",
"# Now add the total frequency to the entry",
"entry",
".",
"append",
"(",
"total",
")",
"# And add the entry to the overall list",
"grouped_by_text",
".",
"append",
"(",
"tuple",
"(",
"entry",
")",
")",
"# Sort the list by the total number of occurrences of each unique key",
"grouped_by_text",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"grouped_by_text"
]
| Return agent text groundings in a list of statements with their counts
Parameters
----------
stmts: list of :py:class:`indra.statements.Statement`
Returns
-------
list of tuple
List of tuples of the form
(text: str, ((name_space: str, ID: str, count: int)...),
total_count: int)
Where the counts within the tuple of groundings give the number of
times an agent with the given agent_text appears grounded with the
particular name space and ID. The total_count gives the total number
    of times an agent with the given text appears in the list of statements. | [
"Return",
"agent",
"text",
"groundings",
"in",
"a",
"list",
"of",
"statements",
"with",
"their",
"counts"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L499-L559 | train |
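A sketch showing the output shape of agent_texts_with_grounding (the agents and groundings are made up):

from indra.statements import Agent, Phosphorylation
from indra.preassembler.grounding_mapper import agent_texts_with_grounding

erk = Agent('MAPK1', db_refs={'TEXT': 'ERK', 'HGNC': '6871'})
mek = Agent('MAP2K1', db_refs={'TEXT': 'MEK'})  # TEXT-only, i.e. ungrounded
stmts = [Phosphorylation(mek, erk), Phosphorylation(None, erk)]
for text, groundings, total in agent_texts_with_grounding(stmts):
    print(text, groundings, total)
# ERK (('HGNC', '6871', 2),) 2
# MEK ((None, None, 1),) 1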
sorgerlab/indra | indra/preassembler/grounding_mapper.py | ungrounded_texts | def ungrounded_texts(stmts):
"""Return a list of all ungrounded entities ordered by number of mentions
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
ungroundc : list of tuple
list of tuples of the form (text: str, count: int) sorted in descending
order by count.
"""
ungrounded = [ag.db_refs['TEXT']
for s in stmts
for ag in s.agent_list()
if ag is not None and list(ag.db_refs.keys()) == ['TEXT']]
ungroundc = Counter(ungrounded)
ungroundc = ungroundc.items()
ungroundc = sorted(ungroundc, key=lambda x: x[1], reverse=True)
return ungroundc | python | def ungrounded_texts(stmts):
"""Return a list of all ungrounded entities ordered by number of mentions
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
ungroundc : list of tuple
list of tuples of the form (text: str, count: int) sorted in descending
order by count.
"""
ungrounded = [ag.db_refs['TEXT']
for s in stmts
for ag in s.agent_list()
if ag is not None and list(ag.db_refs.keys()) == ['TEXT']]
ungroundc = Counter(ungrounded)
ungroundc = ungroundc.items()
ungroundc = sorted(ungroundc, key=lambda x: x[1], reverse=True)
return ungroundc | [
"def",
"ungrounded_texts",
"(",
"stmts",
")",
":",
"ungrounded",
"=",
"[",
"ag",
".",
"db_refs",
"[",
"'TEXT'",
"]",
"for",
"s",
"in",
"stmts",
"for",
"ag",
"in",
"s",
".",
"agent_list",
"(",
")",
"if",
"ag",
"is",
"not",
"None",
"and",
"list",
"(",
"ag",
".",
"db_refs",
".",
"keys",
"(",
")",
")",
"==",
"[",
"'TEXT'",
"]",
"]",
"ungroundc",
"=",
"Counter",
"(",
"ungrounded",
")",
"ungroundc",
"=",
"ungroundc",
".",
"items",
"(",
")",
"ungroundc",
"=",
"sorted",
"(",
"ungroundc",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"ungroundc"
]
| Return a list of all ungrounded entities ordered by number of mentions
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
ungroundc : list of tuple
list of tuples of the form (text: str, count: int) sorted in descending
order by count. | [
"Return",
"a",
"list",
"of",
"all",
"ungrounded",
"entities",
"ordered",
"by",
"number",
"of",
"mentions"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L563-L583 | train |
sorgerlab/indra | indra/preassembler/grounding_mapper.py | get_agents_with_name | def get_agents_with_name(name, stmts):
"""Return all agents within a list of statements with a particular name."""
return [ag for stmt in stmts for ag in stmt.agent_list()
if ag is not None and ag.name == name] | python | def get_agents_with_name(name, stmts):
"""Return all agents within a list of statements with a particular name."""
return [ag for stmt in stmts for ag in stmt.agent_list()
if ag is not None and ag.name == name] | [
"def",
"get_agents_with_name",
"(",
"name",
",",
"stmts",
")",
":",
"return",
"[",
"ag",
"for",
"stmt",
"in",
"stmts",
"for",
"ag",
"in",
"stmt",
".",
"agent_list",
"(",
")",
"if",
"ag",
"is",
"not",
"None",
"and",
"ag",
".",
"name",
"==",
"name",
"]"
]
| Return all agents within a list of statements with a particular name. | [
"Return",
"all",
"agents",
"within",
"a",
"list",
"of",
"statements",
"with",
"a",
"particular",
"name",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L586-L589 | train |
sorgerlab/indra | indra/preassembler/grounding_mapper.py | save_base_map | def save_base_map(filename, grouped_by_text):
"""Dump a list of agents along with groundings and counts into a csv file
Parameters
----------
filename : str
Filepath for output file
grouped_by_text : list of tuple
List of tuples of the form output by agent_texts_with_grounding
"""
rows = []
for group in grouped_by_text:
text_string = group[0]
for db, db_id, count in group[1]:
if db == 'UP':
name = uniprot_client.get_mnemonic(db_id)
else:
name = ''
row = [text_string, db, db_id, count, name]
rows.append(row)
write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | python | def save_base_map(filename, grouped_by_text):
"""Dump a list of agents along with groundings and counts into a csv file
Parameters
----------
filename : str
Filepath for output file
grouped_by_text : list of tuple
List of tuples of the form output by agent_texts_with_grounding
"""
rows = []
for group in grouped_by_text:
text_string = group[0]
for db, db_id, count in group[1]:
if db == 'UP':
name = uniprot_client.get_mnemonic(db_id)
else:
name = ''
row = [text_string, db, db_id, count, name]
rows.append(row)
write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | [
"def",
"save_base_map",
"(",
"filename",
",",
"grouped_by_text",
")",
":",
"rows",
"=",
"[",
"]",
"for",
"group",
"in",
"grouped_by_text",
":",
"text_string",
"=",
"group",
"[",
"0",
"]",
"for",
"db",
",",
"db_id",
",",
"count",
"in",
"group",
"[",
"1",
"]",
":",
"if",
"db",
"==",
"'UP'",
":",
"name",
"=",
"uniprot_client",
".",
"get_mnemonic",
"(",
"db_id",
")",
"else",
":",
"name",
"=",
"''",
"row",
"=",
"[",
"text_string",
",",
"db",
",",
"db_id",
",",
"count",
",",
"name",
"]",
"rows",
".",
"append",
"(",
"row",
")",
"write_unicode_csv",
"(",
"filename",
",",
"rows",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
",",
"lineterminator",
"=",
"'\\r\\n'",
")"
]
| Dump a list of agents along with groundings and counts into a csv file
Parameters
----------
filename : str
Filepath for output file
grouped_by_text : list of tuple
List of tuples of the form output by agent_texts_with_grounding | [
"Dump",
"a",
"list",
"of",
"agents",
"along",
"with",
"groundings",
"and",
"counts",
"into",
"a",
"csv",
"file"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L592-L614 | train |
sorgerlab/indra | indra/preassembler/grounding_mapper.py | protein_map_from_twg | def protein_map_from_twg(twg):
"""Build map of entity texts to validate protein grounding.
Looks at the grounding of the entity texts extracted from the statements
    and finds cases where an entity text is grounded to a human protein
    whose gene name is an exact match to the entity text. Returns a dict that
can be used to update/expand the grounding map.
Parameters
----------
twg : list of tuple
list of tuples of the form output by agent_texts_with_grounding
Returns
-------
protein_map : dict
dict keyed on agent text with associated values
{'TEXT': agent_text, 'UP': uniprot_id}. Entries are for agent texts
        where the grounding map was able to find a human protein grounded to
this agent_text in Uniprot.
"""
protein_map = {}
unmatched = 0
matched = 0
logger.info('Building grounding map for human proteins')
for agent_text, grounding_list, _ in twg:
# If 'UP' (Uniprot) not one of the grounding entries for this text,
# then we skip it.
if 'UP' not in [entry[0] for entry in grounding_list]:
continue
# Otherwise, collect all the Uniprot IDs for this protein.
uniprot_ids = [entry[1] for entry in grounding_list
if entry[0] == 'UP']
# For each Uniprot ID, look up the species
for uniprot_id in uniprot_ids:
# If it's not a human protein, skip it
mnemonic = uniprot_client.get_mnemonic(uniprot_id)
if mnemonic is None or not mnemonic.endswith('_HUMAN'):
continue
# Otherwise, look up the gene name in HGNC and match against the
# agent text
gene_name = uniprot_client.get_gene_name(uniprot_id)
if gene_name is None:
unmatched += 1
continue
if agent_text.upper() == gene_name.upper():
matched += 1
protein_map[agent_text] = {'TEXT': agent_text,
'UP': uniprot_id}
else:
unmatched += 1
logger.info('Exact matches for %d proteins' % matched)
logger.info('No match (or no gene name) for %d proteins' % unmatched)
return protein_map | python | def protein_map_from_twg(twg):
"""Build map of entity texts to validate protein grounding.
Looks at the grounding of the entity texts extracted from the statements
and finds proteins where there is grounding to a human protein that maps to
an HGNC name that is an exact match to the entity text. Returns a dict that
can be used to update/expand the grounding map.
Parameters
----------
twg : list of tuple
list of tuples of the form output by agent_texts_with_grounding
Returns
-------
protein_map : dict
dict keyed on agent text with associated values
{'TEXT': agent_text, 'UP': uniprot_id}. Entries are for agent texts
        where the grounding map was able to find a human protein grounded to
this agent_text in Uniprot.
"""
protein_map = {}
unmatched = 0
matched = 0
logger.info('Building grounding map for human proteins')
for agent_text, grounding_list, _ in twg:
# If 'UP' (Uniprot) not one of the grounding entries for this text,
# then we skip it.
if 'UP' not in [entry[0] for entry in grounding_list]:
continue
# Otherwise, collect all the Uniprot IDs for this protein.
uniprot_ids = [entry[1] for entry in grounding_list
if entry[0] == 'UP']
# For each Uniprot ID, look up the species
for uniprot_id in uniprot_ids:
# If it's not a human protein, skip it
mnemonic = uniprot_client.get_mnemonic(uniprot_id)
if mnemonic is None or not mnemonic.endswith('_HUMAN'):
continue
# Otherwise, look up the gene name in HGNC and match against the
# agent text
gene_name = uniprot_client.get_gene_name(uniprot_id)
if gene_name is None:
unmatched += 1
continue
if agent_text.upper() == gene_name.upper():
matched += 1
protein_map[agent_text] = {'TEXT': agent_text,
'UP': uniprot_id}
else:
unmatched += 1
logger.info('Exact matches for %d proteins' % matched)
logger.info('No match (or no gene name) for %d proteins' % unmatched)
return protein_map | [
"def",
"protein_map_from_twg",
"(",
"twg",
")",
":",
"protein_map",
"=",
"{",
"}",
"unmatched",
"=",
"0",
"matched",
"=",
"0",
"logger",
".",
"info",
"(",
"'Building grounding map for human proteins'",
")",
"for",
"agent_text",
",",
"grounding_list",
",",
"_",
"in",
"twg",
":",
"# If 'UP' (Uniprot) not one of the grounding entries for this text,",
"# then we skip it.",
"if",
"'UP'",
"not",
"in",
"[",
"entry",
"[",
"0",
"]",
"for",
"entry",
"in",
"grounding_list",
"]",
":",
"continue",
"# Otherwise, collect all the Uniprot IDs for this protein.",
"uniprot_ids",
"=",
"[",
"entry",
"[",
"1",
"]",
"for",
"entry",
"in",
"grounding_list",
"if",
"entry",
"[",
"0",
"]",
"==",
"'UP'",
"]",
"# For each Uniprot ID, look up the species",
"for",
"uniprot_id",
"in",
"uniprot_ids",
":",
"# If it's not a human protein, skip it",
"mnemonic",
"=",
"uniprot_client",
".",
"get_mnemonic",
"(",
"uniprot_id",
")",
"if",
"mnemonic",
"is",
"None",
"or",
"not",
"mnemonic",
".",
"endswith",
"(",
"'_HUMAN'",
")",
":",
"continue",
"# Otherwise, look up the gene name in HGNC and match against the",
"# agent text",
"gene_name",
"=",
"uniprot_client",
".",
"get_gene_name",
"(",
"uniprot_id",
")",
"if",
"gene_name",
"is",
"None",
":",
"unmatched",
"+=",
"1",
"continue",
"if",
"agent_text",
".",
"upper",
"(",
")",
"==",
"gene_name",
".",
"upper",
"(",
")",
":",
"matched",
"+=",
"1",
"protein_map",
"[",
"agent_text",
"]",
"=",
"{",
"'TEXT'",
":",
"agent_text",
",",
"'UP'",
":",
"uniprot_id",
"}",
"else",
":",
"unmatched",
"+=",
"1",
"logger",
".",
"info",
"(",
"'Exact matches for %d proteins'",
"%",
"matched",
")",
"logger",
".",
"info",
"(",
"'No match (or no gene name) for %d proteins'",
"%",
"unmatched",
")",
"return",
"protein_map"
]
| Build map of entity texts to validate protein grounding.
Looks at the grounding of the entity texts extracted from the statements
and finds proteins where there is grounding to a human protein that maps to
an HGNC name that is an exact match to the entity text. Returns a dict that
can be used to update/expand the grounding map.
Parameters
----------
twg : list of tuple
list of tuples of the form output by agent_texts_with_grounding
Returns
-------
protein_map : dict
dict keyed on agent text with associated values
{'TEXT': agent_text, 'UP': uniprot_id}. Entries are for agent texts
    where the grounding map was able to find a human protein grounded to
this agent_text in Uniprot. | [
"Build",
"map",
"of",
"entity",
"texts",
"to",
"validate",
"protein",
"grounding",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L617-L671 | train |
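A hedged usage sketch for protein_map_from_twg; the input again comes from agent_texts_with_grounding, and the example entry is illustrative only.

from indra.preassembler.grounding_mapper import (
    agent_texts_with_grounding, protein_map_from_twg)

twg = agent_texts_with_grounding(stmts)  # stmts: list of Statements
protein_map = protein_map_from_twg(twg)
# A matched entry looks like {'MAPK1': {'TEXT': 'MAPK1', 'UP': 'P28482'}}
# and can be merged into the grounding map given to GroundingMapper.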
sorgerlab/indra | indra/preassembler/grounding_mapper.py | save_sentences | def save_sentences(twg, stmts, filename, agent_limit=300):
"""Write evidence sentences for stmts with ungrounded agents to csv file.
Parameters
----------
twg: list of tuple
list of tuples of ungrounded agent_texts with counts of the
number of times they are mentioned in the list of statements.
Should be sorted in descending order by the counts.
        This is of the form output by the function ungrounded_texts.
stmts: list of :py:class:`indra.statements.Statement`
filename : str
Path to output file
agent_limit : Optional[int]
Number of agents to include in output file. Takes the top agents
by count.
"""
sentences = []
unmapped_texts = [t[0] for t in twg]
counter = 0
logger.info('Getting sentences for top %d unmapped agent texts.' %
agent_limit)
for text in unmapped_texts:
agent_sentences = get_sentences_for_agent(text, stmts)
sentences += map(lambda tup: (text,) + tup, agent_sentences)
counter += 1
if counter >= agent_limit:
break
# Write sentences to CSV file
write_unicode_csv(filename, sentences, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | python | def save_sentences(twg, stmts, filename, agent_limit=300):
"""Write evidence sentences for stmts with ungrounded agents to csv file.
Parameters
----------
twg: list of tuple
list of tuples of ungrounded agent_texts with counts of the
number of times they are mentioned in the list of statements.
Should be sorted in descending order by the counts.
        This is of the form output by the function ungrounded_texts.
stmts: list of :py:class:`indra.statements.Statement`
filename : str
Path to output file
agent_limit : Optional[int]
Number of agents to include in output file. Takes the top agents
by count.
"""
sentences = []
unmapped_texts = [t[0] for t in twg]
counter = 0
logger.info('Getting sentences for top %d unmapped agent texts.' %
agent_limit)
for text in unmapped_texts:
agent_sentences = get_sentences_for_agent(text, stmts)
sentences += map(lambda tup: (text,) + tup, agent_sentences)
counter += 1
if counter >= agent_limit:
break
# Write sentences to CSV file
write_unicode_csv(filename, sentences, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | [
"def",
"save_sentences",
"(",
"twg",
",",
"stmts",
",",
"filename",
",",
"agent_limit",
"=",
"300",
")",
":",
"sentences",
"=",
"[",
"]",
"unmapped_texts",
"=",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"twg",
"]",
"counter",
"=",
"0",
"logger",
".",
"info",
"(",
"'Getting sentences for top %d unmapped agent texts.'",
"%",
"agent_limit",
")",
"for",
"text",
"in",
"unmapped_texts",
":",
"agent_sentences",
"=",
"get_sentences_for_agent",
"(",
"text",
",",
"stmts",
")",
"sentences",
"+=",
"map",
"(",
"lambda",
"tup",
":",
"(",
"text",
",",
")",
"+",
"tup",
",",
"agent_sentences",
")",
"counter",
"+=",
"1",
"if",
"counter",
">=",
"agent_limit",
":",
"break",
"# Write sentences to CSV file",
"write_unicode_csv",
"(",
"filename",
",",
"sentences",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
",",
"lineterminator",
"=",
"'\\r\\n'",
")"
]
| Write evidence sentences for stmts with ungrounded agents to csv file.
Parameters
----------
twg: list of tuple
list of tuples of ungrounded agent_texts with counts of the
number of times they are mentioned in the list of statements.
Should be sorted in descending order by the counts.
    This is of the form output by the function ungrounded_texts.
stmts: list of :py:class:`indra.statements.Statement`
filename : str
Path to output file
agent_limit : Optional[int]
Number of agents to include in output file. Takes the top agents
by count. | [
"Write",
"evidence",
"sentences",
"for",
"stmts",
"with",
"ungrounded",
"agents",
"to",
"csv",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L674-L707 | train |
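A sketch pairing save_sentences with ungrounded_texts, the function its docstring points to; stmts is hypothetical.

from indra.preassembler.grounding_mapper import (
    ungrounded_texts, save_sentences)

twg = ungrounded_texts(stmts)  # [(agent_text, count), ...], sorted by count
save_sentences(twg, stmts, 'ungrounded_sentences.csv', agent_limit=100)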
sorgerlab/indra | indra/preassembler/grounding_mapper.py | _get_text_for_grounding | def _get_text_for_grounding(stmt, agent_text):
"""Get text context for Deft disambiguation
If the INDRA database is available, attempts to get the fulltext from
which the statement was extracted. If the fulltext is not available, the
abstract is returned. If the indra database is not available, uses the
pubmed client to get the abstract. If no abstract can be found, falls back
on returning the evidence text for the statement.
Parameters
----------
stmt : py:class:`indra.statements.Statement`
Statement with agent we seek to disambiguate.
agent_text : str
Agent text that needs to be disambiguated
Returns
-------
text : str
        Text for Deft disambiguation
"""
text = None
# First we will try to get content from the DB
try:
from indra_db.util.content_scripts \
import get_text_content_from_text_refs
from indra.literature.deft_tools import universal_extract_text
refs = stmt.evidence[0].text_refs
# Prioritize the pmid attribute if given
if stmt.evidence[0].pmid:
refs['PMID'] = stmt.evidence[0].pmid
logger.info('Obtaining text for disambiguation with refs: %s' %
refs)
content = get_text_content_from_text_refs(refs)
text = universal_extract_text(content, contains=agent_text)
if text:
return text
except Exception as e:
logger.info('Could not get text for disambiguation from DB.')
# If that doesn't work, we try PubMed next
if text is None:
from indra.literature import pubmed_client
pmid = stmt.evidence[0].pmid
if pmid:
logger.info('Obtaining abstract for disambiguation for PMID%s' %
pmid)
text = pubmed_client.get_abstract(pmid)
if text:
return text
# Finally, falling back on the evidence sentence
if text is None:
logger.info('Falling back on sentence-based disambiguation')
text = stmt.evidence[0].text
return text
return None | python | def _get_text_for_grounding(stmt, agent_text):
"""Get text context for Deft disambiguation
If the INDRA database is available, attempts to get the fulltext from
which the statement was extracted. If the fulltext is not available, the
abstract is returned. If the indra database is not available, uses the
pubmed client to get the abstract. If no abstract can be found, falls back
on returning the evidence text for the statement.
Parameters
----------
stmt : py:class:`indra.statements.Statement`
Statement with agent we seek to disambiguate.
agent_text : str
Agent text that needs to be disambiguated
Returns
-------
text : str
        Text for Deft disambiguation
"""
text = None
# First we will try to get content from the DB
try:
from indra_db.util.content_scripts \
import get_text_content_from_text_refs
from indra.literature.deft_tools import universal_extract_text
refs = stmt.evidence[0].text_refs
# Prioritize the pmid attribute if given
if stmt.evidence[0].pmid:
refs['PMID'] = stmt.evidence[0].pmid
logger.info('Obtaining text for disambiguation with refs: %s' %
refs)
content = get_text_content_from_text_refs(refs)
text = universal_extract_text(content, contains=agent_text)
if text:
return text
except Exception as e:
logger.info('Could not get text for disambiguation from DB.')
# If that doesn't work, we try PubMed next
if text is None:
from indra.literature import pubmed_client
pmid = stmt.evidence[0].pmid
if pmid:
logger.info('Obtaining abstract for disambiguation for PMID%s' %
pmid)
text = pubmed_client.get_abstract(pmid)
if text:
return text
# Finally, falling back on the evidence sentence
if text is None:
logger.info('Falling back on sentence-based disambiguation')
text = stmt.evidence[0].text
return text
return None | [
"def",
"_get_text_for_grounding",
"(",
"stmt",
",",
"agent_text",
")",
":",
"text",
"=",
"None",
"# First we will try to get content from the DB",
"try",
":",
"from",
"indra_db",
".",
"util",
".",
"content_scripts",
"import",
"get_text_content_from_text_refs",
"from",
"indra",
".",
"literature",
".",
"deft_tools",
"import",
"universal_extract_text",
"refs",
"=",
"stmt",
".",
"evidence",
"[",
"0",
"]",
".",
"text_refs",
"# Prioritize the pmid attribute if given",
"if",
"stmt",
".",
"evidence",
"[",
"0",
"]",
".",
"pmid",
":",
"refs",
"[",
"'PMID'",
"]",
"=",
"stmt",
".",
"evidence",
"[",
"0",
"]",
".",
"pmid",
"logger",
".",
"info",
"(",
"'Obtaining text for disambiguation with refs: %s'",
"%",
"refs",
")",
"content",
"=",
"get_text_content_from_text_refs",
"(",
"refs",
")",
"text",
"=",
"universal_extract_text",
"(",
"content",
",",
"contains",
"=",
"agent_text",
")",
"if",
"text",
":",
"return",
"text",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"info",
"(",
"'Could not get text for disambiguation from DB.'",
")",
"# If that doesn't work, we try PubMed next",
"if",
"text",
"is",
"None",
":",
"from",
"indra",
".",
"literature",
"import",
"pubmed_client",
"pmid",
"=",
"stmt",
".",
"evidence",
"[",
"0",
"]",
".",
"pmid",
"if",
"pmid",
":",
"logger",
".",
"info",
"(",
"'Obtaining abstract for disambiguation for PMID%s'",
"%",
"pmid",
")",
"text",
"=",
"pubmed_client",
".",
"get_abstract",
"(",
"pmid",
")",
"if",
"text",
":",
"return",
"text",
"# Finally, falling back on the evidence sentence",
"if",
"text",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'Falling back on sentence-based disambiguation'",
")",
"text",
"=",
"stmt",
".",
"evidence",
"[",
"0",
"]",
".",
"text",
"return",
"text",
"return",
"None"
]
| Get text context for Deft disambiguation
If the INDRA database is available, attempts to get the fulltext from
which the statement was extracted. If the fulltext is not available, the
abstract is returned. If the indra database is not available, uses the
pubmed client to get the abstract. If no abstract can be found, falls back
on returning the evidence text for the statement.
Parameters
----------
stmt : py:class:`indra.statements.Statement`
Statement with agent we seek to disambiguate.
agent_text : str
Agent text that needs to be disambiguated
Returns
-------
text : str
    Text for Deft disambiguation
"Get",
"text",
"context",
"for",
"Deft",
"disambiguation"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L743-L798 | train |
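_get_text_for_grounding is a three-tier fallback: full text from the database, then the PubMed abstract, then the evidence sentence. The same control-flow pattern in isolation, with hypothetical fetcher callables:

def first_available_text(fetchers, *args):
    # Try each fetcher in order; return the first non-empty result.
    for fetch in fetchers:
        try:
            text = fetch(*args)
        except Exception:
            continue
        if text:
            return text
    return None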
sorgerlab/indra | indra/preassembler/grounding_mapper.py | GroundingMapper.update_agent_db_refs | def update_agent_db_refs(self, agent, agent_text, do_rename=True):
"""Update db_refs of agent using the grounding map
If the grounding map is missing one of the HGNC symbol or Uniprot ID,
attempts to reconstruct one from the other.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The agent whose db_refs will be updated
agent_text : str
The agent_text to find a grounding for in the grounding map
dictionary. Typically this will be agent.db_refs['TEXT'] but
there may be situations where a different value should be used.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Raises
------
ValueError
            If the grounding map contains an HGNC symbol for
agent_text but no HGNC ID can be found for it.
ValueError
If the grounding map contains both an HGNC symbol and a
Uniprot ID, but the HGNC symbol and the gene name associated with
the gene in Uniprot do not match or if there is no associated gene
name in Uniprot.
"""
map_db_refs = deepcopy(self.gm.get(agent_text))
self.standardize_agent_db_refs(agent, map_db_refs, do_rename) | python | def update_agent_db_refs(self, agent, agent_text, do_rename=True):
"""Update db_refs of agent using the grounding map
If the grounding map is missing one of the HGNC symbol or Uniprot ID,
attempts to reconstruct one from the other.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The agent whose db_refs will be updated
agent_text : str
The agent_text to find a grounding for in the grounding map
dictionary. Typically this will be agent.db_refs['TEXT'] but
there may be situations where a different value should be used.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Raises
------
ValueError
            If the grounding map contains an HGNC symbol for
agent_text but no HGNC ID can be found for it.
ValueError
If the grounding map contains both an HGNC symbol and a
Uniprot ID, but the HGNC symbol and the gene name associated with
the gene in Uniprot do not match or if there is no associated gene
name in Uniprot.
"""
map_db_refs = deepcopy(self.gm.get(agent_text))
self.standardize_agent_db_refs(agent, map_db_refs, do_rename) | [
"def",
"update_agent_db_refs",
"(",
"self",
",",
"agent",
",",
"agent_text",
",",
"do_rename",
"=",
"True",
")",
":",
"map_db_refs",
"=",
"deepcopy",
"(",
"self",
".",
"gm",
".",
"get",
"(",
"agent_text",
")",
")",
"self",
".",
"standardize_agent_db_refs",
"(",
"agent",
",",
"map_db_refs",
",",
"do_rename",
")"
]
| Update db_refs of agent using the grounding map
If the grounding map is missing one of the HGNC symbol or Uniprot ID,
attempts to reconstruct one from the other.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The agent whose db_refs will be updated
agent_text : str
The agent_text to find a grounding for in the grounding map
dictionary. Typically this will be agent.db_refs['TEXT'] but
there may be situations where a different value should be used.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Raises
------
ValueError
        If the grounding map contains an HGNC symbol for
agent_text but no HGNC ID can be found for it.
ValueError
If the grounding map contains both an HGNC symbol and a
Uniprot ID, but the HGNC symbol and the gene name associated with
the gene in Uniprot do not match or if there is no associated gene
name in Uniprot. | [
"Update",
"db_refs",
"of",
"agent",
"using",
"the",
"grounding",
"map"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L51-L83 | train |
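A minimal sketch of update_agent_db_refs, assuming the grounding map dict is the first argument to the GroundingMapper constructor; the map entry and agent are illustrative.

from indra.statements import Agent
from indra.preassembler.grounding_mapper import GroundingMapper

mapper = GroundingMapper({'MEK': {'FPLX': 'MEK'}})
agent = Agent('MEK', db_refs={'TEXT': 'MEK'})
mapper.update_agent_db_refs(agent, 'MEK')
# agent.db_refs now carries the FPLX grounding; with do_rename=True
# (the default) agent.name is set from it.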
sorgerlab/indra | indra/preassembler/grounding_mapper.py | GroundingMapper.map_agents_for_stmt | def map_agents_for_stmt(self, stmt, do_rename=True):
"""Return a new Statement whose agents have been grounding mapped.
Parameters
----------
stmt : :py:class:`indra.statements.Statement`
The Statement whose agents need mapping.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmt : :py:class:`indra.statements.Statement`
The mapped Statement.
"""
mapped_stmt = deepcopy(stmt)
# Iterate over the agents
# Update agents directly participating in the statement
agent_list = mapped_stmt.agent_list()
for idx, agent in enumerate(agent_list):
if agent is None:
continue
agent_txt = agent.db_refs.get('TEXT')
if agent_txt is None:
continue
new_agent, maps_to_none = self.map_agent(agent, do_rename)
# Check if a deft model exists for agent text
if self.use_deft and agent_txt in deft_disambiguators:
try:
run_deft_disambiguation(mapped_stmt, agent_list, idx,
new_agent, agent_txt)
except Exception as e:
logger.error('There was an error during Deft'
' disambiguation.')
logger.error(e)
if maps_to_none:
# Skip the entire statement if the agent maps to None in the
# grounding map
return None
# If the old agent had bound conditions, but the new agent does
# not, copy the bound conditions over
if new_agent is not None and len(new_agent.bound_conditions) == 0:
new_agent.bound_conditions = agent.bound_conditions
agent_list[idx] = new_agent
mapped_stmt.set_agent_list(agent_list)
# Update agents in the bound conditions
for agent in agent_list:
if agent is not None:
for bc in agent.bound_conditions:
bc.agent, maps_to_none = self.map_agent(bc.agent,
do_rename)
if maps_to_none:
# Skip the entire statement if the agent maps to None
# in the grounding map
return None
return mapped_stmt | python | def map_agents_for_stmt(self, stmt, do_rename=True):
"""Return a new Statement whose agents have been grounding mapped.
Parameters
----------
stmt : :py:class:`indra.statements.Statement`
The Statement whose agents need mapping.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmt : :py:class:`indra.statements.Statement`
The mapped Statement.
"""
mapped_stmt = deepcopy(stmt)
# Iterate over the agents
# Update agents directly participating in the statement
agent_list = mapped_stmt.agent_list()
for idx, agent in enumerate(agent_list):
if agent is None:
continue
agent_txt = agent.db_refs.get('TEXT')
if agent_txt is None:
continue
new_agent, maps_to_none = self.map_agent(agent, do_rename)
# Check if a deft model exists for agent text
if self.use_deft and agent_txt in deft_disambiguators:
try:
run_deft_disambiguation(mapped_stmt, agent_list, idx,
new_agent, agent_txt)
except Exception as e:
logger.error('There was an error during Deft'
' disambiguation.')
logger.error(e)
if maps_to_none:
# Skip the entire statement if the agent maps to None in the
# grounding map
return None
# If the old agent had bound conditions, but the new agent does
# not, copy the bound conditions over
if new_agent is not None and len(new_agent.bound_conditions) == 0:
new_agent.bound_conditions = agent.bound_conditions
agent_list[idx] = new_agent
mapped_stmt.set_agent_list(agent_list)
# Update agents in the bound conditions
for agent in agent_list:
if agent is not None:
for bc in agent.bound_conditions:
bc.agent, maps_to_none = self.map_agent(bc.agent,
do_rename)
if maps_to_none:
# Skip the entire statement if the agent maps to None
# in the grounding map
return None
return mapped_stmt | [
"def",
"map_agents_for_stmt",
"(",
"self",
",",
"stmt",
",",
"do_rename",
"=",
"True",
")",
":",
"mapped_stmt",
"=",
"deepcopy",
"(",
"stmt",
")",
"# Iterate over the agents",
"# Update agents directly participating in the statement",
"agent_list",
"=",
"mapped_stmt",
".",
"agent_list",
"(",
")",
"for",
"idx",
",",
"agent",
"in",
"enumerate",
"(",
"agent_list",
")",
":",
"if",
"agent",
"is",
"None",
":",
"continue",
"agent_txt",
"=",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'TEXT'",
")",
"if",
"agent_txt",
"is",
"None",
":",
"continue",
"new_agent",
",",
"maps_to_none",
"=",
"self",
".",
"map_agent",
"(",
"agent",
",",
"do_rename",
")",
"# Check if a deft model exists for agent text",
"if",
"self",
".",
"use_deft",
"and",
"agent_txt",
"in",
"deft_disambiguators",
":",
"try",
":",
"run_deft_disambiguation",
"(",
"mapped_stmt",
",",
"agent_list",
",",
"idx",
",",
"new_agent",
",",
"agent_txt",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'There was an error during Deft'",
"' disambiguation.'",
")",
"logger",
".",
"error",
"(",
"e",
")",
"if",
"maps_to_none",
":",
"# Skip the entire statement if the agent maps to None in the",
"# grounding map",
"return",
"None",
"# If the old agent had bound conditions, but the new agent does",
"# not, copy the bound conditions over",
"if",
"new_agent",
"is",
"not",
"None",
"and",
"len",
"(",
"new_agent",
".",
"bound_conditions",
")",
"==",
"0",
":",
"new_agent",
".",
"bound_conditions",
"=",
"agent",
".",
"bound_conditions",
"agent_list",
"[",
"idx",
"]",
"=",
"new_agent",
"mapped_stmt",
".",
"set_agent_list",
"(",
"agent_list",
")",
"# Update agents in the bound conditions",
"for",
"agent",
"in",
"agent_list",
":",
"if",
"agent",
"is",
"not",
"None",
":",
"for",
"bc",
"in",
"agent",
".",
"bound_conditions",
":",
"bc",
".",
"agent",
",",
"maps_to_none",
"=",
"self",
".",
"map_agent",
"(",
"bc",
".",
"agent",
",",
"do_rename",
")",
"if",
"maps_to_none",
":",
"# Skip the entire statement if the agent maps to None",
"# in the grounding map",
"return",
"None",
"return",
"mapped_stmt"
]
| Return a new Statement whose agents have been grounding mapped.
Parameters
----------
stmt : :py:class:`indra.statements.Statement`
The Statement whose agents need mapping.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmt : :py:class:`indra.statements.Statement`
The mapped Statement. | [
"Return",
"a",
"new",
"Statement",
"whose",
"agents",
"have",
"been",
"grounding",
"mapped",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L150-L217 | train |
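A hedged usage sketch for map_agents_for_stmt; the statement and map entry are hypothetical.

from indra.statements import Agent, Phosphorylation
from indra.preassembler.grounding_mapper import GroundingMapper

mapper = GroundingMapper({'ERK': {'FPLX': 'ERK'}})
stmt = Phosphorylation(None, Agent('ERK', db_refs={'TEXT': 'ERK'}))
mapped = mapper.map_agents_for_stmt(stmt)
# mapped is a deep copy with grounded agents; stmt itself is unchanged.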
sorgerlab/indra | indra/preassembler/grounding_mapper.py | GroundingMapper.map_agent | def map_agent(self, agent, do_rename):
"""Return the given Agent with its grounding mapped.
This function grounds a single agent. It returns the new Agent object
(which might be a different object if we load a new agent state
from json) or the same object otherwise.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The Agent to map.
do_rename: bool
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot.
Returns
-------
grounded_agent : :py:class:`indra.statements.Agent`
The grounded Agent.
maps_to_none : bool
True if the Agent is in the grounding map and maps to None.
"""
agent_text = agent.db_refs.get('TEXT')
mapped_to_agent_json = self.agent_map.get(agent_text)
if mapped_to_agent_json:
mapped_to_agent = \
Agent._from_json(mapped_to_agent_json['agent'])
return mapped_to_agent, False
# Look this string up in the grounding map
# If not in the map, leave agent alone and continue
if agent_text in self.gm.keys():
map_db_refs = self.gm[agent_text]
else:
return agent, False
# If it's in the map but it maps to None, then filter out
# this statement by skipping it
if map_db_refs is None:
# Increase counter if this statement has not already
# been skipped via another agent
logger.debug("Skipping %s" % agent_text)
return None, True
# If it has a value that's not None, map it and add it
else:
# Otherwise, update the agent's db_refs field
self.update_agent_db_refs(agent, agent_text, do_rename)
return agent, False | python | def map_agent(self, agent, do_rename):
"""Return the given Agent with its grounding mapped.
This function grounds a single agent. It returns the new Agent object
(which might be a different object if we load a new agent state
from json) or the same object otherwise.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The Agent to map.
do_rename: bool
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot.
Returns
-------
grounded_agent : :py:class:`indra.statements.Agent`
The grounded Agent.
maps_to_none : bool
True if the Agent is in the grounding map and maps to None.
"""
agent_text = agent.db_refs.get('TEXT')
mapped_to_agent_json = self.agent_map.get(agent_text)
if mapped_to_agent_json:
mapped_to_agent = \
Agent._from_json(mapped_to_agent_json['agent'])
return mapped_to_agent, False
# Look this string up in the grounding map
# If not in the map, leave agent alone and continue
if agent_text in self.gm.keys():
map_db_refs = self.gm[agent_text]
else:
return agent, False
# If it's in the map but it maps to None, then filter out
# this statement by skipping it
if map_db_refs is None:
# Increase counter if this statement has not already
# been skipped via another agent
logger.debug("Skipping %s" % agent_text)
return None, True
# If it has a value that's not None, map it and add it
else:
# Otherwise, update the agent's db_refs field
self.update_agent_db_refs(agent, agent_text, do_rename)
return agent, False | [
"def",
"map_agent",
"(",
"self",
",",
"agent",
",",
"do_rename",
")",
":",
"agent_text",
"=",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'TEXT'",
")",
"mapped_to_agent_json",
"=",
"self",
".",
"agent_map",
".",
"get",
"(",
"agent_text",
")",
"if",
"mapped_to_agent_json",
":",
"mapped_to_agent",
"=",
"Agent",
".",
"_from_json",
"(",
"mapped_to_agent_json",
"[",
"'agent'",
"]",
")",
"return",
"mapped_to_agent",
",",
"False",
"# Look this string up in the grounding map",
"# If not in the map, leave agent alone and continue",
"if",
"agent_text",
"in",
"self",
".",
"gm",
".",
"keys",
"(",
")",
":",
"map_db_refs",
"=",
"self",
".",
"gm",
"[",
"agent_text",
"]",
"else",
":",
"return",
"agent",
",",
"False",
"# If it's in the map but it maps to None, then filter out",
"# this statement by skipping it",
"if",
"map_db_refs",
"is",
"None",
":",
"# Increase counter if this statement has not already",
"# been skipped via another agent",
"logger",
".",
"debug",
"(",
"\"Skipping %s\"",
"%",
"agent_text",
")",
"return",
"None",
",",
"True",
"# If it has a value that's not None, map it and add it",
"else",
":",
"# Otherwise, update the agent's db_refs field",
"self",
".",
"update_agent_db_refs",
"(",
"agent",
",",
"agent_text",
",",
"do_rename",
")",
"return",
"agent",
",",
"False"
]
| Return the given Agent with its grounding mapped.
This function grounds a single agent. It returns the new Agent object
(which might be a different object if we load a new agent state
from json) or the same object otherwise.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The Agent to map.
do_rename: bool
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot.
Returns
-------
grounded_agent : :py:class:`indra.statements.Agent`
The grounded Agent.
maps_to_none : bool
True if the Agent is in the grounding map and maps to None. | [
"Return",
"the",
"given",
"Agent",
"with",
"its",
"grounding",
"mapped",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L219-L268 | train |
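A sketch of the maps_to_none path in map_agent, using an illustrative entry that maps to None:

from indra.statements import Agent
from indra.preassembler.grounding_mapper import GroundingMapper

mapper = GroundingMapper({'actin': None})
agent = Agent('actin', db_refs={'TEXT': 'actin'})
new_agent, maps_to_none = mapper.map_agent(agent, do_rename=True)
# new_agent is None and maps_to_none is True, signaling that any
# statement containing this agent should be filtered out.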
sorgerlab/indra | indra/preassembler/grounding_mapper.py | GroundingMapper.map_agents | def map_agents(self, stmts, do_rename=True):
"""Return a new list of statements whose agents have been mapped
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
The statements whose agents need mapping
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A list of statements given by mapping the agents from each
statement in the input list
"""
# Make a copy of the stmts
mapped_stmts = []
num_skipped = 0
# Iterate over the statements
for stmt in stmts:
mapped_stmt = self.map_agents_for_stmt(stmt, do_rename)
# Check if we should skip the statement
if mapped_stmt is not None:
mapped_stmts.append(mapped_stmt)
else:
num_skipped += 1
logger.info('%s statements filtered out' % num_skipped)
return mapped_stmts | python | def map_agents(self, stmts, do_rename=True):
"""Return a new list of statements whose agents have been mapped
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
The statements whose agents need mapping
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A list of statements given by mapping the agents from each
statement in the input list
"""
# Make a copy of the stmts
mapped_stmts = []
num_skipped = 0
# Iterate over the statements
for stmt in stmts:
mapped_stmt = self.map_agents_for_stmt(stmt, do_rename)
# Check if we should skip the statement
if mapped_stmt is not None:
mapped_stmts.append(mapped_stmt)
else:
num_skipped += 1
logger.info('%s statements filtered out' % num_skipped)
return mapped_stmts | [
"def",
"map_agents",
"(",
"self",
",",
"stmts",
",",
"do_rename",
"=",
"True",
")",
":",
"# Make a copy of the stmts",
"mapped_stmts",
"=",
"[",
"]",
"num_skipped",
"=",
"0",
"# Iterate over the statements",
"for",
"stmt",
"in",
"stmts",
":",
"mapped_stmt",
"=",
"self",
".",
"map_agents_for_stmt",
"(",
"stmt",
",",
"do_rename",
")",
"# Check if we should skip the statement",
"if",
"mapped_stmt",
"is",
"not",
"None",
":",
"mapped_stmts",
".",
"append",
"(",
"mapped_stmt",
")",
"else",
":",
"num_skipped",
"+=",
"1",
"logger",
".",
"info",
"(",
"'%s statements filtered out'",
"%",
"num_skipped",
")",
"return",
"mapped_stmts"
]
| Return a new list of statements whose agents have been mapped
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
The statements whose agents need mapping
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A list of statements given by mapping the agents from each
statement in the input list | [
"Return",
"a",
"new",
"list",
"of",
"statements",
"whose",
"agents",
"have",
"been",
"mapped"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L270-L301 | train |
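Batch mapping with map_agents in a sketch; the statements and map are hypothetical.

from indra.statements import Agent, Complex
from indra.preassembler.grounding_mapper import GroundingMapper

stmts = [Complex([Agent('ERK', db_refs={'TEXT': 'ERK'}),
                  Agent('junk', db_refs={'TEXT': 'junk'})])]
mapper = GroundingMapper({'ERK': {'FPLX': 'ERK'}, 'junk': None})
mapped = mapper.map_agents(stmts)
# 'junk' maps to None, so the whole Complex is filtered out and
# mapped is an empty list here.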
sorgerlab/indra | indra/preassembler/grounding_mapper.py | GroundingMapper.rename_agents | def rename_agents(self, stmts):
"""Return a list of mapped statements with updated agent names.
Creates a new list of statements without modifying the original list.
The agents in a statement should be renamed if the grounding map has
updated their db_refs. If an agent contains a FamPlex grounding, the
FamPlex ID is used as a name. Otherwise if it contains a Uniprot ID,
an attempt is made to find the associated HGNC gene name. If one can
be found it is used as the agent name and the associated HGNC ID is
        added as an entry to the db_refs. If neither a FamPlex ID nor an HGNC name
can be found, falls back to the original name.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
List of statements whose Agents need their names updated.
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A new list of Statements with updated Agent names
"""
# Make a copy of the stmts
mapped_stmts = deepcopy(stmts)
# Iterate over the statements
for _, stmt in enumerate(mapped_stmts):
# Iterate over the agents
for agent in stmt.agent_list():
if agent is None:
continue
# If there's a FamPlex ID, prefer that for the name
if agent.db_refs.get('FPLX'):
agent.name = agent.db_refs.get('FPLX')
# Take a HGNC name from Uniprot next
elif agent.db_refs.get('UP'):
# Try for the gene name
gene_name = uniprot_client.get_gene_name(
agent.db_refs.get('UP'),
web_fallback=False)
if gene_name:
agent.name = gene_name
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
agent.db_refs['HGNC'] = hgnc_id
# Take the text string
#if agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
# If this fails, then we continue with no change
# Fall back to the text string
#elif agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
return mapped_stmts | python | def rename_agents(self, stmts):
"""Return a list of mapped statements with updated agent names.
Creates a new list of statements without modifying the original list.
The agents in a statement should be renamed if the grounding map has
updated their db_refs. If an agent contains a FamPlex grounding, the
FamPlex ID is used as a name. Otherwise if it contains a Uniprot ID,
an attempt is made to find the associated HGNC gene name. If one can
be found it is used as the agent name and the associated HGNC ID is
        added as an entry to the db_refs. If neither a FamPlex ID nor an HGNC name
can be found, falls back to the original name.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
List of statements whose Agents need their names updated.
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A new list of Statements with updated Agent names
"""
# Make a copy of the stmts
mapped_stmts = deepcopy(stmts)
# Iterate over the statements
for _, stmt in enumerate(mapped_stmts):
# Iterate over the agents
for agent in stmt.agent_list():
if agent is None:
continue
# If there's a FamPlex ID, prefer that for the name
if agent.db_refs.get('FPLX'):
agent.name = agent.db_refs.get('FPLX')
# Take a HGNC name from Uniprot next
elif agent.db_refs.get('UP'):
# Try for the gene name
gene_name = uniprot_client.get_gene_name(
agent.db_refs.get('UP'),
web_fallback=False)
if gene_name:
agent.name = gene_name
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
agent.db_refs['HGNC'] = hgnc_id
# Take the text string
#if agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
# If this fails, then we continue with no change
# Fall back to the text string
#elif agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
return mapped_stmts | [
"def",
"rename_agents",
"(",
"self",
",",
"stmts",
")",
":",
"# Make a copy of the stmts",
"mapped_stmts",
"=",
"deepcopy",
"(",
"stmts",
")",
"# Iterate over the statements",
"for",
"_",
",",
"stmt",
"in",
"enumerate",
"(",
"mapped_stmts",
")",
":",
"# Iterate over the agents",
"for",
"agent",
"in",
"stmt",
".",
"agent_list",
"(",
")",
":",
"if",
"agent",
"is",
"None",
":",
"continue",
"# If there's a FamPlex ID, prefer that for the name",
"if",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'FPLX'",
")",
":",
"agent",
".",
"name",
"=",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'FPLX'",
")",
"# Take a HGNC name from Uniprot next",
"elif",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'UP'",
")",
":",
"# Try for the gene name",
"gene_name",
"=",
"uniprot_client",
".",
"get_gene_name",
"(",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'UP'",
")",
",",
"web_fallback",
"=",
"False",
")",
"if",
"gene_name",
":",
"agent",
".",
"name",
"=",
"gene_name",
"hgnc_id",
"=",
"hgnc_client",
".",
"get_hgnc_id",
"(",
"gene_name",
")",
"if",
"hgnc_id",
":",
"agent",
".",
"db_refs",
"[",
"'HGNC'",
"]",
"=",
"hgnc_id",
"# Take the text string",
"#if agent.db_refs.get('TEXT'):",
"# agent.name = agent.db_refs.get('TEXT')",
"# If this fails, then we continue with no change",
"# Fall back to the text string",
"#elif agent.db_refs.get('TEXT'):",
"# agent.name = agent.db_refs.get('TEXT')",
"return",
"mapped_stmts"
]
| Return a list of mapped statements with updated agent names.
Creates a new list of statements without modifying the original list.
The agents in a statement should be renamed if the grounding map has
updated their db_refs. If an agent contains a FamPlex grounding, the
FamPlex ID is used as a name. Otherwise if it contains a Uniprot ID,
an attempt is made to find the associated HGNC gene name. If one can
be found it is used as the agent name and the associated HGNC ID is
added as an entry to the db_refs. If neither a FamPlex ID nor an HGNC name
can be found, falls back to the original name.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
List of statements whose Agents need their names updated.
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A new list of Statements with updated Agent names | [
"Return",
"a",
"list",
"of",
"mapped",
"statements",
"with",
"updated",
"agent",
"names",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L303-L355 | train |
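A sketch of rename_agents; the UniProt IDs are the human TP53 and MDM2 entries, used only for illustration.

from indra.statements import Agent, Activation
from indra.preassembler.grounding_mapper import GroundingMapper

mapper = GroundingMapper({})
stmt = Activation(Agent('p53', db_refs={'UP': 'P04637'}),
                  Agent('mdm2', db_refs={'UP': 'Q00987'}))
renamed = mapper.rename_agents([stmt])
# Agents are renamed to their HGNC gene names (TP53, MDM2) and the
# matching HGNC IDs are added to their db_refs.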
sorgerlab/indra | indra/sources/hprd/processor.py | HprdProcessor.get_complexes | def get_complexes(self, cplx_df):
"""Generate Complex Statements from the HPRD protein complexes data.
Parameters
----------
cplx_df : pandas.DataFrame
DataFrame loaded from the PROTEIN_COMPLEXES.txt file.
"""
# Group the agents for the complex
logger.info('Processing complexes...')
for cplx_id, this_cplx in cplx_df.groupby('CPLX_ID'):
agents = []
for hprd_id in this_cplx.HPRD_ID:
ag = self._make_agent(hprd_id)
if ag is not None:
agents.append(ag)
# Make sure we got some agents!
if not agents:
continue
# Get evidence info from first member of complex
row0 = this_cplx.iloc[0]
isoform_id = '%s_1' % row0.HPRD_ID
ev_list = self._get_evidence(row0.HPRD_ID, isoform_id, row0.PMIDS,
row0.EVIDENCE, 'interactions')
stmt = Complex(agents, evidence=ev_list)
self.statements.append(stmt) | python | def get_complexes(self, cplx_df):
"""Generate Complex Statements from the HPRD protein complexes data.
Parameters
----------
cplx_df : pandas.DataFrame
DataFrame loaded from the PROTEIN_COMPLEXES.txt file.
"""
# Group the agents for the complex
logger.info('Processing complexes...')
for cplx_id, this_cplx in cplx_df.groupby('CPLX_ID'):
agents = []
for hprd_id in this_cplx.HPRD_ID:
ag = self._make_agent(hprd_id)
if ag is not None:
agents.append(ag)
# Make sure we got some agents!
if not agents:
continue
# Get evidence info from first member of complex
row0 = this_cplx.iloc[0]
isoform_id = '%s_1' % row0.HPRD_ID
ev_list = self._get_evidence(row0.HPRD_ID, isoform_id, row0.PMIDS,
row0.EVIDENCE, 'interactions')
stmt = Complex(agents, evidence=ev_list)
self.statements.append(stmt) | [
"def",
"get_complexes",
"(",
"self",
",",
"cplx_df",
")",
":",
"# Group the agents for the complex",
"logger",
".",
"info",
"(",
"'Processing complexes...'",
")",
"for",
"cplx_id",
",",
"this_cplx",
"in",
"cplx_df",
".",
"groupby",
"(",
"'CPLX_ID'",
")",
":",
"agents",
"=",
"[",
"]",
"for",
"hprd_id",
"in",
"this_cplx",
".",
"HPRD_ID",
":",
"ag",
"=",
"self",
".",
"_make_agent",
"(",
"hprd_id",
")",
"if",
"ag",
"is",
"not",
"None",
":",
"agents",
".",
"append",
"(",
"ag",
")",
"# Make sure we got some agents!",
"if",
"not",
"agents",
":",
"continue",
"# Get evidence info from first member of complex",
"row0",
"=",
"this_cplx",
".",
"iloc",
"[",
"0",
"]",
"isoform_id",
"=",
"'%s_1'",
"%",
"row0",
".",
"HPRD_ID",
"ev_list",
"=",
"self",
".",
"_get_evidence",
"(",
"row0",
".",
"HPRD_ID",
",",
"isoform_id",
",",
"row0",
".",
"PMIDS",
",",
"row0",
".",
"EVIDENCE",
",",
"'interactions'",
")",
"stmt",
"=",
"Complex",
"(",
"agents",
",",
"evidence",
"=",
"ev_list",
")",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")"
]
| Generate Complex Statements from the HPRD protein complexes data.
Parameters
----------
cplx_df : pandas.DataFrame
DataFrame loaded from the PROTEIN_COMPLEXES.txt file. | [
"Generate",
"Complex",
"Statements",
"from",
"the",
"HPRD",
"protein",
"complexes",
"data",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hprd/processor.py#L151-L176 | train |
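The per-complex grouping idiom from get_complexes in isolation, with a hypothetical two-complex DataFrame (HprdProcessor itself is normally built from the HPRD flat files):

import pandas as pd

df = pd.DataFrame({'CPLX_ID': [1, 1, 2],
                   'HPRD_ID': ['00001', '00002', '00003']})
for cplx_id, members in df.groupby('CPLX_ID'):
    print(cplx_id, list(members.HPRD_ID))
# 1 ['00001', '00002']
# 2 ['00003']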
sorgerlab/indra | indra/sources/hprd/processor.py | HprdProcessor.get_ptms | def get_ptms(self, ptm_df):
"""Generate Modification statements from the HPRD PTM data.
Parameters
----------
ptm_df : pandas.DataFrame
DataFrame loaded from the POST_TRANSLATIONAL_MODIFICATIONS.txt file.
"""
logger.info('Processing PTMs...')
# Iterate over the rows of the dataframe
for ix, row in ptm_df.iterrows():
# Check the modification type; if we can't make an INDRA statement
# for it, then skip it
ptm_class = _ptm_map[row['MOD_TYPE']]
if ptm_class is None:
continue
# Use the Refseq protein ID for the substrate to make sure that
# we get the right Uniprot ID for the isoform
sub_ag = self._make_agent(row['HPRD_ID'],
refseq_id=row['REFSEQ_PROTEIN'])
# If we couldn't get the substrate, skip the statement
if sub_ag is None:
continue
enz_id = _nan_to_none(row['ENZ_HPRD_ID'])
enz_ag = self._make_agent(enz_id)
res = _nan_to_none(row['RESIDUE'])
pos = _nan_to_none(row['POSITION'])
if pos is not None and ';' in pos:
pos, dash = pos.split(';')
assert dash == '-'
# As a fallback for later site mapping, we also get the protein
# sequence information in case there was a problem with the
# RefSeq->Uniprot mapping
assert res
assert pos
motif_dict = self._get_seq_motif(row['REFSEQ_PROTEIN'], res, pos)
# Get evidence
ev_list = self._get_evidence(
row['HPRD_ID'], row['HPRD_ISOFORM'], row['PMIDS'],
row['EVIDENCE'], 'ptms', motif_dict)
stmt = ptm_class(enz_ag, sub_ag, res, pos, evidence=ev_list)
self.statements.append(stmt) | python | def get_ptms(self, ptm_df):
"""Generate Modification statements from the HPRD PTM data.
Parameters
----------
ptm_df : pandas.DataFrame
DataFrame loaded from the POST_TRANSLATIONAL_MODIFICATIONS.txt file.
"""
logger.info('Processing PTMs...')
# Iterate over the rows of the dataframe
for ix, row in ptm_df.iterrows():
# Check the modification type; if we can't make an INDRA statement
# for it, then skip it
ptm_class = _ptm_map[row['MOD_TYPE']]
if ptm_class is None:
continue
# Use the Refseq protein ID for the substrate to make sure that
# we get the right Uniprot ID for the isoform
sub_ag = self._make_agent(row['HPRD_ID'],
refseq_id=row['REFSEQ_PROTEIN'])
# If we couldn't get the substrate, skip the statement
if sub_ag is None:
continue
enz_id = _nan_to_none(row['ENZ_HPRD_ID'])
enz_ag = self._make_agent(enz_id)
res = _nan_to_none(row['RESIDUE'])
pos = _nan_to_none(row['POSITION'])
if pos is not None and ';' in pos:
pos, dash = pos.split(';')
assert dash == '-'
# As a fallback for later site mapping, we also get the protein
# sequence information in case there was a problem with the
# RefSeq->Uniprot mapping
assert res
assert pos
motif_dict = self._get_seq_motif(row['REFSEQ_PROTEIN'], res, pos)
# Get evidence
ev_list = self._get_evidence(
row['HPRD_ID'], row['HPRD_ISOFORM'], row['PMIDS'],
row['EVIDENCE'], 'ptms', motif_dict)
stmt = ptm_class(enz_ag, sub_ag, res, pos, evidence=ev_list)
self.statements.append(stmt) | [
"def",
"get_ptms",
"(",
"self",
",",
"ptm_df",
")",
":",
"logger",
".",
"info",
"(",
"'Processing PTMs...'",
")",
"# Iterate over the rows of the dataframe",
"for",
"ix",
",",
"row",
"in",
"ptm_df",
".",
"iterrows",
"(",
")",
":",
"# Check the modification type; if we can't make an INDRA statement",
"# for it, then skip it",
"ptm_class",
"=",
"_ptm_map",
"[",
"row",
"[",
"'MOD_TYPE'",
"]",
"]",
"if",
"ptm_class",
"is",
"None",
":",
"continue",
"# Use the Refseq protein ID for the substrate to make sure that",
"# we get the right Uniprot ID for the isoform",
"sub_ag",
"=",
"self",
".",
"_make_agent",
"(",
"row",
"[",
"'HPRD_ID'",
"]",
",",
"refseq_id",
"=",
"row",
"[",
"'REFSEQ_PROTEIN'",
"]",
")",
"# If we couldn't get the substrate, skip the statement",
"if",
"sub_ag",
"is",
"None",
":",
"continue",
"enz_id",
"=",
"_nan_to_none",
"(",
"row",
"[",
"'ENZ_HPRD_ID'",
"]",
")",
"enz_ag",
"=",
"self",
".",
"_make_agent",
"(",
"enz_id",
")",
"res",
"=",
"_nan_to_none",
"(",
"row",
"[",
"'RESIDUE'",
"]",
")",
"pos",
"=",
"_nan_to_none",
"(",
"row",
"[",
"'POSITION'",
"]",
")",
"if",
"pos",
"is",
"not",
"None",
"and",
"';'",
"in",
"pos",
":",
"pos",
",",
"dash",
"=",
"pos",
".",
"split",
"(",
"';'",
")",
"assert",
"dash",
"==",
"'-'",
"# As a fallback for later site mapping, we also get the protein",
"# sequence information in case there was a problem with the",
"# RefSeq->Uniprot mapping",
"assert",
"res",
"assert",
"pos",
"motif_dict",
"=",
"self",
".",
"_get_seq_motif",
"(",
"row",
"[",
"'REFSEQ_PROTEIN'",
"]",
",",
"res",
",",
"pos",
")",
"# Get evidence",
"ev_list",
"=",
"self",
".",
"_get_evidence",
"(",
"row",
"[",
"'HPRD_ID'",
"]",
",",
"row",
"[",
"'HPRD_ISOFORM'",
"]",
",",
"row",
"[",
"'PMIDS'",
"]",
",",
"row",
"[",
"'EVIDENCE'",
"]",
",",
"'ptms'",
",",
"motif_dict",
")",
"stmt",
"=",
"ptm_class",
"(",
"enz_ag",
",",
"sub_ag",
",",
"res",
",",
"pos",
",",
"evidence",
"=",
"ev_list",
")",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")"
]
| Generate Modification statements from the HPRD PTM data.
Parameters
----------
ptm_df : pandas.DataFrame
DataFrame loaded from the POST_TRANSLATIONAL_MODIFICATIONS.txt file. | [
"Generate",
"Modification",
"statements",
"from",
"the",
"HPRD",
"PTM",
"data",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hprd/processor.py#L178-L220 | train |
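The POSITION field can carry a trailing ';-' marker, which get_ptms strips; the parsing step in isolation, with a hypothetical value:

pos = '185;-'
if pos is not None and ';' in pos:
    pos, dash = pos.split(';')
    assert dash == '-'
print(pos)  # prints '185'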
sorgerlab/indra | indra/sources/hprd/processor.py | HprdProcessor.get_ppis | def get_ppis(self, ppi_df):
"""Generate Complex Statements from the HPRD PPI data.
Parameters
----------
ppi_df : pandas.DataFrame
DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt
file.
"""
logger.info('Processing PPIs...')
for ix, row in ppi_df.iterrows():
agA = self._make_agent(row['HPRD_ID_A'])
agB = self._make_agent(row['HPRD_ID_B'])
            # If we don't get valid agents for both, skip this PPI
if agA is None or agB is None:
continue
isoform_id = '%s_1' % row['HPRD_ID_A']
ev_list = self._get_evidence(
row['HPRD_ID_A'], isoform_id, row['PMIDS'],
row['EVIDENCE'], 'interactions')
stmt = Complex([agA, agB], evidence=ev_list)
self.statements.append(stmt) | python | def get_ppis(self, ppi_df):
"""Generate Complex Statements from the HPRD PPI data.
Parameters
----------
ppi_df : pandas.DataFrame
DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt
file.
"""
logger.info('Processing PPIs...')
for ix, row in ppi_df.iterrows():
agA = self._make_agent(row['HPRD_ID_A'])
agB = self._make_agent(row['HPRD_ID_B'])
            # If we don't get valid agents for both, skip this PPI
if agA is None or agB is None:
continue
isoform_id = '%s_1' % row['HPRD_ID_A']
ev_list = self._get_evidence(
row['HPRD_ID_A'], isoform_id, row['PMIDS'],
row['EVIDENCE'], 'interactions')
stmt = Complex([agA, agB], evidence=ev_list)
self.statements.append(stmt) | [
"def",
"get_ppis",
"(",
"self",
",",
"ppi_df",
")",
":",
"logger",
".",
"info",
"(",
"'Processing PPIs...'",
")",
"for",
"ix",
",",
"row",
"in",
"ppi_df",
".",
"iterrows",
"(",
")",
":",
"agA",
"=",
"self",
".",
"_make_agent",
"(",
"row",
"[",
"'HPRD_ID_A'",
"]",
")",
"agB",
"=",
"self",
".",
"_make_agent",
"(",
"row",
"[",
"'HPRD_ID_B'",
"]",
")",
"# If don't get valid agents for both, skip this PPI",
"if",
"agA",
"is",
"None",
"or",
"agB",
"is",
"None",
":",
"continue",
"isoform_id",
"=",
"'%s_1'",
"%",
"row",
"[",
"'HPRD_ID_A'",
"]",
"ev_list",
"=",
"self",
".",
"_get_evidence",
"(",
"row",
"[",
"'HPRD_ID_A'",
"]",
",",
"isoform_id",
",",
"row",
"[",
"'PMIDS'",
"]",
",",
"row",
"[",
"'EVIDENCE'",
"]",
",",
"'interactions'",
")",
"stmt",
"=",
"Complex",
"(",
"[",
"agA",
",",
"agB",
"]",
",",
"evidence",
"=",
"ev_list",
")",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")"
]
| Generate Complex Statements from the HPRD PPI data.
Parameters
----------
ppi_df : pandas.DataFrame
DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt
file. | [
"Generate",
"Complex",
"Statements",
"from",
"the",
"HPRD",
"PPI",
"data",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hprd/processor.py#L222-L243 | train |
sorgerlab/indra | indra/sources/isi/processor.py | _build_verb_statement_mapping | def _build_verb_statement_mapping():
"""Build the mapping between ISI verb strings and INDRA statement classes.
Looks up the INDRA statement class name, if any, in a resource file,
and resolves this class name to a class.
Returns
-------
verb_to_statement_type : dict
        Dictionary mapping verb name to an INDRA statement class
"""
path_this = os.path.dirname(os.path.abspath(__file__))
map_path = os.path.join(path_this, 'isi_verb_to_indra_statement_type.tsv')
with open(map_path, 'r') as f:
first_line = True
verb_to_statement_type = {}
for line in f:
if not first_line:
line = line[:-1]
tokens = line.split('\t')
if len(tokens) == 2 and len(tokens[1]) > 0:
verb = tokens[0]
s_type = tokens[1]
try:
statement_class = getattr(ist, s_type)
verb_to_statement_type[verb] = statement_class
except Exception:
pass
else:
first_line = False
return verb_to_statement_type | python | def _build_verb_statement_mapping():
"""Build the mapping between ISI verb strings and INDRA statement classes.
Looks up the INDRA statement class name, if any, in a resource file,
and resolves this class name to a class.
Returns
-------
verb_to_statement_type : dict
        Dictionary mapping verb name to an INDRA statement class
"""
path_this = os.path.dirname(os.path.abspath(__file__))
map_path = os.path.join(path_this, 'isi_verb_to_indra_statement_type.tsv')
with open(map_path, 'r') as f:
first_line = True
verb_to_statement_type = {}
for line in f:
if not first_line:
line = line[:-1]
tokens = line.split('\t')
if len(tokens) == 2 and len(tokens[1]) > 0:
verb = tokens[0]
s_type = tokens[1]
try:
statement_class = getattr(ist, s_type)
verb_to_statement_type[verb] = statement_class
except Exception:
pass
else:
first_line = False
return verb_to_statement_type | [
"def",
"_build_verb_statement_mapping",
"(",
")",
":",
"path_this",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"map_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_this",
",",
"'isi_verb_to_indra_statement_type.tsv'",
")",
"with",
"open",
"(",
"map_path",
",",
"'r'",
")",
"as",
"f",
":",
"first_line",
"=",
"True",
"verb_to_statement_type",
"=",
"{",
"}",
"for",
"line",
"in",
"f",
":",
"if",
"not",
"first_line",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"tokens",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"tokens",
")",
"==",
"2",
"and",
"len",
"(",
"tokens",
"[",
"1",
"]",
")",
">",
"0",
":",
"verb",
"=",
"tokens",
"[",
"0",
"]",
"s_type",
"=",
"tokens",
"[",
"1",
"]",
"try",
":",
"statement_class",
"=",
"getattr",
"(",
"ist",
",",
"s_type",
")",
"verb_to_statement_type",
"[",
"verb",
"]",
"=",
"statement_class",
"except",
"Exception",
":",
"pass",
"else",
":",
"first_line",
"=",
"False",
"return",
"verb_to_statement_type"
]
| Build the mapping between ISI verb strings and INDRA statement classes.
Looks up the INDRA statement class name, if any, in a resource file,
and resolves this class name to a class.
Returns
-------
verb_to_statement_type : dict
    Dictionary mapping verb name to an INDRA statement class
"Build",
"the",
"mapping",
"between",
"ISI",
"verb",
"strings",
"and",
"INDRA",
"statement",
"classes",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/processor.py#L167-L198 | train |
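The resource file read by _build_verb_statement_mapping is a two-column, tab-separated table whose header row is skipped via the first_line flag; verbs with an empty second column are ignored. A hypothetical excerpt (the header names are assumptions; the real contents live in isi_verb_to_indra_statement_type.tsv):

verb	indra_statement_type
phosphorylate	Phosphorylation
bind	Complex
activate	Activation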
sorgerlab/indra | indra/sources/isi/processor.py | IsiProcessor.get_statements | def get_statements(self):
"""Process reader output to produce INDRA Statements."""
for k, v in self.reader_output.items():
for interaction in v['interactions']:
self._process_interaction(k, interaction, v['text'], self.pmid,
self.extra_annotations) | python | def get_statements(self):
"""Process reader output to produce INDRA Statements."""
for k, v in self.reader_output.items():
for interaction in v['interactions']:
self._process_interaction(k, interaction, v['text'], self.pmid,
self.extra_annotations) | [
"def",
"get_statements",
"(",
"self",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"reader_output",
".",
"items",
"(",
")",
":",
"for",
"interaction",
"in",
"v",
"[",
"'interactions'",
"]",
":",
"self",
".",
"_process_interaction",
"(",
"k",
",",
"interaction",
",",
"v",
"[",
"'text'",
"]",
",",
"self",
".",
"pmid",
",",
"self",
".",
"extra_annotations",
")"
]
| Process reader output to produce INDRA Statements. | [
"Process",
"reader",
"output",
"to",
"produce",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/processor.py#L38-L43 | train |
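The loop in get_statements() implies a particular shape for self.reader_output. The dictionary below is an assumed example of that shape, inferred from the accessors in the code (interaction[0] is the verb, interaction[-2] and interaction[-1] the subject and object) rather than taken from real ISI output.

reader_output = {
    '1': {'text': 'MEK phosphorylates ERK.',
          'interactions': [['phosphorylates', None, 'MEK', 'ERK']]},
}

for source_id, entry in reader_output.items():
    for interaction in entry['interactions']:
        # these are the arguments _process_interaction would receive
        print(source_id, interaction, entry['text'])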
sorgerlab/indra | indra/sources/isi/processor.py | IsiProcessor._process_interaction | def _process_interaction(self, source_id, interaction, text, pmid,
extra_annotations):
"""Process an interaction JSON tuple from the ISI output, and adds up
to one statement to the list of extracted statements.
Parameters
----------
source_id : str
the JSON key corresponding to the sentence in the ISI output
        interaction : list
            The JSON list with subject/verb/object information
            about the event in the ISI output
text : str
the text of the sentence
pmid : str
the PMID of the article from which the information was extracted
extra_annotations : dict
Additional annotations to add to the statement's evidence,
potentially containing metadata about the source. Annotations
with the key "interaction" will be overridden by the JSON
interaction tuple from the ISI output
"""
verb = interaction[0].lower()
subj = interaction[-2]
obj = interaction[-1]
# Make ungrounded agent objects for the subject and object
# Grounding will happen after all statements are extracted in __init__
subj = self._make_agent(subj)
obj = self._make_agent(obj)
# Make an evidence object
annotations = deepcopy(extra_annotations)
if 'interaction' in extra_annotations:
logger.warning("'interaction' key of extra_annotations ignored" +
" since this is reserved for storing the raw ISI " +
"input.")
annotations['source_id'] = source_id
annotations['interaction'] = interaction
ev = ist.Evidence(source_api='isi',
pmid=pmid,
text=text.rstrip(),
annotations=annotations)
        # For binding time interactions, it is said that a catalyst might be
# specified. We don't use this for now, but extract in case we want
# to in the future
        catalyst_specified = False
if len(interaction) == 4:
catalyst = interaction[1]
if catalyst is not None:
                catalyst_specified = True
self.verbs.add(verb)
statement = None
if verb in verb_to_statement_type:
statement_class = verb_to_statement_type[verb]
if statement_class == ist.Complex:
statement = ist.Complex([subj, obj], evidence=ev)
else:
statement = statement_class(subj, obj, evidence=ev)
if statement is not None:
# For Complex statements, the ISI reader produces two events:
# binds(A, B) and binds(B, A)
# We want only one Complex statement for each sentence, so check
# to see if we already have a Complex for this source_id with the
# same members
already_have = False
if type(statement) == ist.Complex:
for old_s in self.statements:
old_id = statement.evidence[0].source_id
new_id = old_s.evidence[0].source_id
if type(old_s) == ist.Complex and old_id == new_id:
old_statement_members = \
[m.db_refs['TEXT'] for m in old_s.members]
old_statement_members = sorted(old_statement_members)
new_statement_members = [m.db_refs['TEXT']
for m in statement.members]
new_statement_members = sorted(new_statement_members)
if old_statement_members == new_statement_members:
already_have = True
break
if not already_have:
self.statements.append(statement) | python | def _process_interaction(self, source_id, interaction, text, pmid,
extra_annotations):
"""Process an interaction JSON tuple from the ISI output, and adds up
to one statement to the list of extracted statements.
Parameters
----------
source_id : str
the JSON key corresponding to the sentence in the ISI output
        interaction : list
            The JSON list with subject/verb/object information
            about the event in the ISI output
text : str
the text of the sentence
pmid : str
the PMID of the article from which the information was extracted
extra_annotations : dict
Additional annotations to add to the statement's evidence,
potentially containing metadata about the source. Annotations
with the key "interaction" will be overridden by the JSON
interaction tuple from the ISI output
"""
verb = interaction[0].lower()
subj = interaction[-2]
obj = interaction[-1]
# Make ungrounded agent objects for the subject and object
# Grounding will happen after all statements are extracted in __init__
subj = self._make_agent(subj)
obj = self._make_agent(obj)
# Make an evidence object
annotations = deepcopy(extra_annotations)
if 'interaction' in extra_annotations:
logger.warning("'interaction' key of extra_annotations ignored" +
" since this is reserved for storing the raw ISI " +
"input.")
annotations['source_id'] = source_id
annotations['interaction'] = interaction
ev = ist.Evidence(source_api='isi',
pmid=pmid,
text=text.rstrip(),
annotations=annotations)
        # For binding time interactions, it is said that a catalyst might be
# specified. We don't use this for now, but extract in case we want
# to in the future
        catalyst_specified = False
if len(interaction) == 4:
catalyst = interaction[1]
if catalyst is not None:
                catalyst_specified = True
self.verbs.add(verb)
statement = None
if verb in verb_to_statement_type:
statement_class = verb_to_statement_type[verb]
if statement_class == ist.Complex:
statement = ist.Complex([subj, obj], evidence=ev)
else:
statement = statement_class(subj, obj, evidence=ev)
if statement is not None:
# For Complex statements, the ISI reader produces two events:
# binds(A, B) and binds(B, A)
# We want only one Complex statement for each sentence, so check
# to see if we already have a Complex for this source_id with the
# same members
already_have = False
if type(statement) == ist.Complex:
for old_s in self.statements:
old_id = statement.evidence[0].source_id
new_id = old_s.evidence[0].source_id
if type(old_s) == ist.Complex and old_id == new_id:
old_statement_members = \
[m.db_refs['TEXT'] for m in old_s.members]
old_statement_members = sorted(old_statement_members)
new_statement_members = [m.db_refs['TEXT']
for m in statement.members]
new_statement_members = sorted(new_statement_members)
if old_statement_members == new_statement_members:
already_have = True
break
if not already_have:
self.statements.append(statement) | [
"def",
"_process_interaction",
"(",
"self",
",",
"source_id",
",",
"interaction",
",",
"text",
",",
"pmid",
",",
"extra_annotations",
")",
":",
"verb",
"=",
"interaction",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"subj",
"=",
"interaction",
"[",
"-",
"2",
"]",
"obj",
"=",
"interaction",
"[",
"-",
"1",
"]",
"# Make ungrounded agent objects for the subject and object",
"# Grounding will happen after all statements are extracted in __init__",
"subj",
"=",
"self",
".",
"_make_agent",
"(",
"subj",
")",
"obj",
"=",
"self",
".",
"_make_agent",
"(",
"obj",
")",
"# Make an evidence object",
"annotations",
"=",
"deepcopy",
"(",
"extra_annotations",
")",
"if",
"'interaction'",
"in",
"extra_annotations",
":",
"logger",
".",
"warning",
"(",
"\"'interaction' key of extra_annotations ignored\"",
"+",
"\" since this is reserved for storing the raw ISI \"",
"+",
"\"input.\"",
")",
"annotations",
"[",
"'source_id'",
"]",
"=",
"source_id",
"annotations",
"[",
"'interaction'",
"]",
"=",
"interaction",
"ev",
"=",
"ist",
".",
"Evidence",
"(",
"source_api",
"=",
"'isi'",
",",
"pmid",
"=",
"pmid",
",",
"text",
"=",
"text",
".",
"rstrip",
"(",
")",
",",
"annotations",
"=",
"annotations",
")",
"# For binding time interactions, it is said that a catayst might be",
"# specified. We don't use this for now, but extract in case we want",
"# to in the future",
"cataylst_specified",
"=",
"False",
"if",
"len",
"(",
"interaction",
")",
"==",
"4",
":",
"catalyst",
"=",
"interaction",
"[",
"1",
"]",
"if",
"catalyst",
"is",
"not",
"None",
":",
"cataylst_specified",
"=",
"True",
"self",
".",
"verbs",
".",
"add",
"(",
"verb",
")",
"statement",
"=",
"None",
"if",
"verb",
"in",
"verb_to_statement_type",
":",
"statement_class",
"=",
"verb_to_statement_type",
"[",
"verb",
"]",
"if",
"statement_class",
"==",
"ist",
".",
"Complex",
":",
"statement",
"=",
"ist",
".",
"Complex",
"(",
"[",
"subj",
",",
"obj",
"]",
",",
"evidence",
"=",
"ev",
")",
"else",
":",
"statement",
"=",
"statement_class",
"(",
"subj",
",",
"obj",
",",
"evidence",
"=",
"ev",
")",
"if",
"statement",
"is",
"not",
"None",
":",
"# For Complex statements, the ISI reader produces two events:",
"# binds(A, B) and binds(B, A)",
"# We want only one Complex statement for each sentence, so check",
"# to see if we already have a Complex for this source_id with the",
"# same members",
"already_have",
"=",
"False",
"if",
"type",
"(",
"statement",
")",
"==",
"ist",
".",
"Complex",
":",
"for",
"old_s",
"in",
"self",
".",
"statements",
":",
"old_id",
"=",
"statement",
".",
"evidence",
"[",
"0",
"]",
".",
"source_id",
"new_id",
"=",
"old_s",
".",
"evidence",
"[",
"0",
"]",
".",
"source_id",
"if",
"type",
"(",
"old_s",
")",
"==",
"ist",
".",
"Complex",
"and",
"old_id",
"==",
"new_id",
":",
"old_statement_members",
"=",
"[",
"m",
".",
"db_refs",
"[",
"'TEXT'",
"]",
"for",
"m",
"in",
"old_s",
".",
"members",
"]",
"old_statement_members",
"=",
"sorted",
"(",
"old_statement_members",
")",
"new_statement_members",
"=",
"[",
"m",
".",
"db_refs",
"[",
"'TEXT'",
"]",
"for",
"m",
"in",
"statement",
".",
"members",
"]",
"new_statement_members",
"=",
"sorted",
"(",
"new_statement_members",
")",
"if",
"old_statement_members",
"==",
"new_statement_members",
":",
"already_have",
"=",
"True",
"break",
"if",
"not",
"already_have",
":",
"self",
".",
"statements",
".",
"append",
"(",
"statement",
")"
]
| Process an interaction JSON tuple from the ISI output, and add up
to one statement to the list of extracted statements.
Parameters
----------
source_id : str
the JSON key corresponding to the sentence in the ISI output
interaction : list
    The JSON list with subject/verb/object information
    about the event in the ISI output
text : str
the text of the sentence
pmid : str
the PMID of the article from which the information was extracted
extra_annotations : dict
Additional annotations to add to the statement's evidence,
potentially containing metadata about the source. Annotations
with the key "interaction" will be overridden by the JSON
interaction tuple from the ISI output | [
"Process",
"an",
"interaction",
"JSON",
"tuple",
"from",
"the",
"ISI",
"output",
"and",
"adds",
"up",
"to",
"one",
"statement",
"to",
"the",
"list",
"of",
"extracted",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/processor.py#L59-L146 | train |
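The symmetric-Complex deduplication at the end of _process_interaction can be shown with a standalone sketch: binds(A, B) and binds(B, A) from the same sentence should collapse to a single entry. The sentence ids and gene names below are hypothetical.

seen = set()
events = [('sent1', ['EGF', 'EGFR']),
          ('sent1', ['EGFR', 'EGF']),   # same members, reversed order
          ('sent2', ['EGF', 'EGFR'])]   # same members, different sentence
unique = []
for source_id, members in events:
    key = (source_id, tuple(sorted(members)))  # order-insensitive identity
    if key not in seen:
        seen.add(key)
        unique.append((source_id, members))
print(unique)  # [('sent1', ['EGF', 'EGFR']), ('sent2', ['EGF', 'EGFR'])]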
sorgerlab/indra | indra/sources/geneways/actionmention_parser.py | GenewaysActionMention.make_annotation | def make_annotation(self):
"""Returns a dictionary with all properties of the action mention."""
annotation = dict()
# Put all properties of the action object into the annotation
for item in dir(self):
if len(item) > 0 and item[0] != '_' and \
not inspect.ismethod(getattr(self, item)):
annotation[item] = getattr(self, item)
return annotation | python | def make_annotation(self):
"""Returns a dictionary with all properties of the action mention."""
annotation = dict()
# Put all properties of the action object into the annotation
for item in dir(self):
if len(item) > 0 and item[0] != '_' and \
not inspect.ismethod(getattr(self, item)):
annotation[item] = getattr(self, item)
return annotation | [
"def",
"make_annotation",
"(",
"self",
")",
":",
"annotation",
"=",
"dict",
"(",
")",
"# Put all properties of the action object into the annotation",
"for",
"item",
"in",
"dir",
"(",
"self",
")",
":",
"if",
"len",
"(",
"item",
")",
">",
"0",
"and",
"item",
"[",
"0",
"]",
"!=",
"'_'",
"and",
"not",
"inspect",
".",
"ismethod",
"(",
"getattr",
"(",
"self",
",",
"item",
")",
")",
":",
"annotation",
"[",
"item",
"]",
"=",
"getattr",
"(",
"self",
",",
"item",
")",
"return",
"annotation"
]
| Returns a dictionary with all properties of the action mention. | [
"Returns",
"a",
"dictionary",
"with",
"all",
"properties",
"of",
"the",
"action",
"mention",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/actionmention_parser.py#L31-L41 | train |
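make_annotation() is a generic attribute dump built on dir() and inspect.ismethod. A self-contained analogue with a made-up class, mirroring the same filter (public, non-method attributes only):

import inspect

class Mention:
    def __init__(self):
        self.verb = 'bind'
        self.score = 0.9

    def make_annotation(self):
        # keep attributes that are public and not bound methods
        return {name: getattr(self, name) for name in dir(self)
                if name and not name.startswith('_')
                and not inspect.ismethod(getattr(self, name))}

print(Mention().make_annotation())  # {'score': 0.9, 'verb': 'bind'}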
sorgerlab/indra | indra/sources/biopax/processor.py | _match_to_array | def _match_to_array(m):
""" Returns an array consisting of the elements obtained from a pattern
search cast into their appropriate classes. """
return [_cast_biopax_element(m.get(i)) for i in range(m.varSize())] | python | def _match_to_array(m):
""" Returns an array consisting of the elements obtained from a pattern
search cast into their appropriate classes. """
return [_cast_biopax_element(m.get(i)) for i in range(m.varSize())] | [
"def",
"_match_to_array",
"(",
"m",
")",
":",
"return",
"[",
"_cast_biopax_element",
"(",
"m",
".",
"get",
"(",
"i",
")",
")",
"for",
"i",
"in",
"range",
"(",
"m",
".",
"varSize",
"(",
")",
")",
"]"
]
| Returns an array consisting of the elements obtained from a pattern
search cast into their appropriate classes. | [
"Returns",
"an",
"array",
"consisting",
"of",
"the",
"elements",
"obtained",
"from",
"a",
"pattern",
"search",
"cast",
"into",
"their",
"appropriate",
"classes",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1374-L1377 | train |
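A sketch of what _match_to_array consumes: the paxtools Match object exposes get(i) and varSize(), faked here with a plain Python class, with identity in place of _cast_biopax_element. The element names are hypothetical.

class FakeMatch:
    def __init__(self, elements):
        self._elements = elements
    def get(self, i):
        return self._elements[i]
    def varSize(self):
        return len(self._elements)

m = FakeMatch(['controller PE', 'Control', 'Conversion'])
print([m.get(i) for i in range(m.varSize())])
# ['controller PE', 'Control', 'Conversion']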
sorgerlab/indra | indra/sources/biopax/processor.py | _is_complex | def _is_complex(pe):
"""Return True if the physical entity is a complex"""
val = isinstance(pe, _bp('Complex')) or \
isinstance(pe, _bpimpl('Complex'))
return val | python | def _is_complex(pe):
"""Return True if the physical entity is a complex"""
val = isinstance(pe, _bp('Complex')) or \
isinstance(pe, _bpimpl('Complex'))
return val | [
"def",
"_is_complex",
"(",
"pe",
")",
":",
"val",
"=",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'Complex'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'Complex'",
")",
")",
"return",
"val"
]
| Return True if the physical entity is a complex | [
"Return",
"True",
"if",
"the",
"physical",
"entity",
"is",
"a",
"complex"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1379-L1383 | train |
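All of the _is_* predicates in this module (_is_complex above, and _is_protein, _is_rna, _is_small_molecule, _is_physical_entity, _is_reference, _is_entity, _is_catalysis below) share one shape: accept either the wrapped Java interface class or its *Impl counterpart, which are unrelated types on the Python side. A pure-Python analogue with stand-in classes:

class Complex: pass        # stands in for _bp('Complex'), the interface
class ComplexImpl: pass    # stands in for _bpimpl('Complex'); deliberately
                           # not a subclass, hence the two-way check

def is_complex(pe):
    # the tuple form condenses the chained `or` of the original
    return isinstance(pe, (Complex, ComplexImpl))

print(is_complex(ComplexImpl()), is_complex(object()))  # True False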
sorgerlab/indra | indra/sources/biopax/processor.py | _is_protein | def _is_protein(pe):
"""Return True if the element is a protein"""
val = isinstance(pe, _bp('Protein')) or \
isinstance(pe, _bpimpl('Protein')) or \
isinstance(pe, _bp('ProteinReference')) or \
isinstance(pe, _bpimpl('ProteinReference'))
return val | python | def _is_protein(pe):
"""Return True if the element is a protein"""
val = isinstance(pe, _bp('Protein')) or \
isinstance(pe, _bpimpl('Protein')) or \
isinstance(pe, _bp('ProteinReference')) or \
isinstance(pe, _bpimpl('ProteinReference'))
return val | [
"def",
"_is_protein",
"(",
"pe",
")",
":",
"val",
"=",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'Protein'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'Protein'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'ProteinReference'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'ProteinReference'",
")",
")",
"return",
"val"
]
| Return True if the element is a protein | [
"Return",
"True",
"if",
"the",
"element",
"is",
"a",
"protein"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1385-L1391 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | _is_rna | def _is_rna(pe):
"""Return True if the element is an RNA"""
val = isinstance(pe, _bp('Rna')) or isinstance(pe, _bpimpl('Rna'))
return val | python | def _is_rna(pe):
"""Return True if the element is an RNA"""
val = isinstance(pe, _bp('Rna')) or isinstance(pe, _bpimpl('Rna'))
return val | [
"def",
"_is_rna",
"(",
"pe",
")",
":",
"val",
"=",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'Rna'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'Rna'",
")",
")",
"return",
"val"
]
| Return True if the element is an RNA | [
"Return",
"True",
"if",
"the",
"element",
"is",
"an",
"RNA"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1393-L1396 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | _is_small_molecule | def _is_small_molecule(pe):
"""Return True if the element is a small molecule"""
val = isinstance(pe, _bp('SmallMolecule')) or \
isinstance(pe, _bpimpl('SmallMolecule')) or \
isinstance(pe, _bp('SmallMoleculeReference')) or \
isinstance(pe, _bpimpl('SmallMoleculeReference'))
return val | python | def _is_small_molecule(pe):
"""Return True if the element is a small molecule"""
val = isinstance(pe, _bp('SmallMolecule')) or \
isinstance(pe, _bpimpl('SmallMolecule')) or \
isinstance(pe, _bp('SmallMoleculeReference')) or \
isinstance(pe, _bpimpl('SmallMoleculeReference'))
return val | [
"def",
"_is_small_molecule",
"(",
"pe",
")",
":",
"val",
"=",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'SmallMolecule'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'SmallMolecule'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'SmallMoleculeReference'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'SmallMoleculeReference'",
")",
")",
"return",
"val"
]
| Return True if the element is a small molecule | [
"Return",
"True",
"if",
"the",
"element",
"is",
"a",
"small",
"molecule"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1398-L1404 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | _is_physical_entity | def _is_physical_entity(pe):
"""Return True if the element is a physical entity"""
val = isinstance(pe, _bp('PhysicalEntity')) or \
isinstance(pe, _bpimpl('PhysicalEntity'))
return val | python | def _is_physical_entity(pe):
"""Return True if the element is a physical entity"""
val = isinstance(pe, _bp('PhysicalEntity')) or \
isinstance(pe, _bpimpl('PhysicalEntity'))
return val | [
"def",
"_is_physical_entity",
"(",
"pe",
")",
":",
"val",
"=",
"isinstance",
"(",
"pe",
",",
"_bp",
"(",
"'PhysicalEntity'",
")",
")",
"or",
"isinstance",
"(",
"pe",
",",
"_bpimpl",
"(",
"'PhysicalEntity'",
")",
")",
"return",
"val"
]
| Return True if the element is a physical entity | [
"Return",
"True",
"if",
"the",
"element",
"is",
"a",
"physical",
"entity"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1406-L1410 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | _is_modification_or_activity | def _is_modification_or_activity(feature):
"""Return True if the feature is a modification"""
if not (isinstance(feature, _bp('ModificationFeature')) or \
isinstance(feature, _bpimpl('ModificationFeature'))):
return None
mf_type = feature.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
for term in mf_type_terms:
if term in ('residue modification, active',
'residue modification, inactive',
'active', 'inactive'):
return 'activity'
return 'modification' | python | def _is_modification_or_activity(feature):
"""Return True if the feature is a modification"""
if not (isinstance(feature, _bp('ModificationFeature')) or \
isinstance(feature, _bpimpl('ModificationFeature'))):
return None
mf_type = feature.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
for term in mf_type_terms:
if term in ('residue modification, active',
'residue modification, inactive',
'active', 'inactive'):
return 'activity'
return 'modification' | [
"def",
"_is_modification_or_activity",
"(",
"feature",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"feature",
",",
"_bp",
"(",
"'ModificationFeature'",
")",
")",
"or",
"isinstance",
"(",
"feature",
",",
"_bpimpl",
"(",
"'ModificationFeature'",
")",
")",
")",
":",
"return",
"None",
"mf_type",
"=",
"feature",
".",
"getModificationType",
"(",
")",
"if",
"mf_type",
"is",
"None",
":",
"return",
"None",
"mf_type_terms",
"=",
"mf_type",
".",
"getTerm",
"(",
")",
".",
"toArray",
"(",
")",
"for",
"term",
"in",
"mf_type_terms",
":",
"if",
"term",
"in",
"(",
"'residue modification, active'",
",",
"'residue modification, inactive'",
",",
"'active'",
",",
"'inactive'",
")",
":",
"return",
"'activity'",
"return",
"'modification'"
]
| Return 'activity' or 'modification' depending on the feature type | [
"Return",
"'activity'",
"or",
"'modification'",
"depending",
"on",
"the",
"feature",
"type"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1418-L1432 | train |
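The activity-versus-modification decision reduces to membership in a small controlled vocabulary. A standalone sketch with plain term lists in place of the BioPAX ModificationFeature API:

ACTIVITY_TERMS = {'residue modification, active',
                  'residue modification, inactive',
                  'active', 'inactive'}

def classify(terms):
    # any activity-flavored term wins; everything else is a modification
    return ('activity' if any(t in ACTIVITY_TERMS for t in terms)
            else 'modification')

print(classify(['phosphorylated residue']))        # modification
print(classify(['residue modification, active']))  # activity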
sorgerlab/indra | indra/sources/biopax/processor.py | _is_reference | def _is_reference(bpe):
"""Return True if the element is an entity reference."""
if isinstance(bpe, _bp('ProteinReference')) or \
isinstance(bpe, _bpimpl('ProteinReference')) or \
isinstance(bpe, _bp('SmallMoleculeReference')) or \
isinstance(bpe, _bpimpl('SmallMoleculeReference')) or \
isinstance(bpe, _bp('RnaReference')) or \
isinstance(bpe, _bpimpl('RnaReference')) or \
isinstance(bpe, _bp('EntityReference')) or \
isinstance(bpe, _bpimpl('EntityReference')):
return True
else:
return False | python | def _is_reference(bpe):
"""Return True if the element is an entity reference."""
if isinstance(bpe, _bp('ProteinReference')) or \
isinstance(bpe, _bpimpl('ProteinReference')) or \
isinstance(bpe, _bp('SmallMoleculeReference')) or \
isinstance(bpe, _bpimpl('SmallMoleculeReference')) or \
isinstance(bpe, _bp('RnaReference')) or \
isinstance(bpe, _bpimpl('RnaReference')) or \
isinstance(bpe, _bp('EntityReference')) or \
isinstance(bpe, _bpimpl('EntityReference')):
return True
else:
return False | [
"def",
"_is_reference",
"(",
"bpe",
")",
":",
"if",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'ProteinReference'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'ProteinReference'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'SmallMoleculeReference'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'SmallMoleculeReference'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'RnaReference'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'RnaReference'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'EntityReference'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'EntityReference'",
")",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
]
| Return True if the element is an entity reference. | [
"Return",
"True",
"if",
"the",
"element",
"is",
"an",
"entity",
"reference",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1434-L1446 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | _is_entity | def _is_entity(bpe):
"""Return True if the element is a physical entity."""
if isinstance(bpe, _bp('Protein')) or \
isinstance(bpe, _bpimpl('Protein')) or \
isinstance(bpe, _bp('SmallMolecule')) or \
isinstance(bpe, _bpimpl('SmallMolecule')) or \
isinstance(bpe, _bp('Complex')) or \
isinstance(bpe, _bpimpl('Complex')) or \
isinstance(bpe, _bp('Rna')) or \
isinstance(bpe, _bpimpl('Rna')) or \
isinstance(bpe, _bp('RnaRegion')) or \
isinstance(bpe, _bpimpl('RnaRegion')) or \
isinstance(bpe, _bp('DnaRegion')) or \
isinstance(bpe, _bpimpl('DnaRegion')) or \
isinstance(bpe, _bp('PhysicalEntity')) or \
isinstance(bpe, _bpimpl('PhysicalEntity')):
return True
else:
return False | python | def _is_entity(bpe):
"""Return True if the element is a physical entity."""
if isinstance(bpe, _bp('Protein')) or \
isinstance(bpe, _bpimpl('Protein')) or \
isinstance(bpe, _bp('SmallMolecule')) or \
isinstance(bpe, _bpimpl('SmallMolecule')) or \
isinstance(bpe, _bp('Complex')) or \
isinstance(bpe, _bpimpl('Complex')) or \
isinstance(bpe, _bp('Rna')) or \
isinstance(bpe, _bpimpl('Rna')) or \
isinstance(bpe, _bp('RnaRegion')) or \
isinstance(bpe, _bpimpl('RnaRegion')) or \
isinstance(bpe, _bp('DnaRegion')) or \
isinstance(bpe, _bpimpl('DnaRegion')) or \
isinstance(bpe, _bp('PhysicalEntity')) or \
isinstance(bpe, _bpimpl('PhysicalEntity')):
return True
else:
return False | [
"def",
"_is_entity",
"(",
"bpe",
")",
":",
"if",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'Protein'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'Protein'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'SmallMolecule'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'SmallMolecule'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'Complex'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'Complex'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'Rna'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'Rna'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'RnaRegion'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'RnaRegion'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'DnaRegion'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'DnaRegion'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'PhysicalEntity'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'PhysicalEntity'",
")",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
]
| Return True if the element is a physical entity. | [
"Return",
"True",
"if",
"the",
"element",
"is",
"a",
"physical",
"entity",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1448-L1466 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | _is_catalysis | def _is_catalysis(bpe):
"""Return True if the element is Catalysis."""
if isinstance(bpe, _bp('Catalysis')) or \
isinstance(bpe, _bpimpl('Catalysis')):
return True
else:
return False | python | def _is_catalysis(bpe):
"""Return True if the element is Catalysis."""
if isinstance(bpe, _bp('Catalysis')) or \
isinstance(bpe, _bpimpl('Catalysis')):
return True
else:
return False | [
"def",
"_is_catalysis",
"(",
"bpe",
")",
":",
"if",
"isinstance",
"(",
"bpe",
",",
"_bp",
"(",
"'Catalysis'",
")",
")",
"or",
"isinstance",
"(",
"bpe",
",",
"_bpimpl",
"(",
"'Catalysis'",
")",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
]
| Return True if the element is Catalysis. | [
"Return",
"True",
"if",
"the",
"element",
"is",
"Catalysis",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1468-L1474 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.print_statements | def print_statements(self):
"""Print all INDRA Statements collected by the processors."""
for i, stmt in enumerate(self.statements):
print("%s: %s" % (i, stmt)) | python | def print_statements(self):
"""Print all INDRA Statements collected by the processors."""
for i, stmt in enumerate(self.statements):
print("%s: %s" % (i, stmt)) | [
"def",
"print_statements",
"(",
"self",
")",
":",
"for",
"i",
",",
"stmt",
"in",
"enumerate",
"(",
"self",
".",
"statements",
")",
":",
"print",
"(",
"\"%s: %s\"",
"%",
"(",
"i",
",",
"stmt",
")",
")"
]
| Print all INDRA Statements collected by the processors. | [
"Print",
"all",
"INDRA",
"Statements",
"collected",
"by",
"the",
"processors",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L53-L56 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.save_model | def save_model(self, file_name=None):
"""Save the BioPAX model object in an OWL file.
Parameters
----------
file_name : Optional[str]
The name of the OWL file to save the model in.
"""
if file_name is None:
logger.error('Missing file name')
return
pcc.model_to_owl(self.model, file_name) | python | def save_model(self, file_name=None):
"""Save the BioPAX model object in an OWL file.
Parameters
----------
file_name : Optional[str]
The name of the OWL file to save the model in.
"""
if file_name is None:
logger.error('Missing file name')
return
pcc.model_to_owl(self.model, file_name) | [
"def",
"save_model",
"(",
"self",
",",
"file_name",
"=",
"None",
")",
":",
"if",
"file_name",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'Missing file name'",
")",
"return",
"pcc",
".",
"model_to_owl",
"(",
"self",
".",
"model",
",",
"file_name",
")"
]
| Save the BioPAX model object in an OWL file.
Parameters
----------
file_name : Optional[str]
The name of the OWL file to save the model in. | [
"Save",
"the",
"BioPAX",
"model",
"object",
"in",
"an",
"OWL",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L58-L69 | train |
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.eliminate_exact_duplicates | def eliminate_exact_duplicates(self):
"""Eliminate Statements that were extracted multiple times.
Due to the way the patterns are implemented, they can sometimes yield
the same Statement information multiple times, in which case,
we end up with redundant Statements that aren't from independent
underlying entries. To avoid this, here, we filter out such
duplicates.
"""
# Here we use the deep hash of each Statement, and by making a dict,
# we effectively keep only one Statement with a given deep hash
self.statements = list({stmt.get_hash(shallow=False, refresh=True): stmt
for stmt in self.statements}.values()) | python | def eliminate_exact_duplicates(self):
"""Eliminate Statements that were extracted multiple times.
Due to the way the patterns are implemented, they can sometimes yield
the same Statement information multiple times, in which case,
we end up with redundant Statements that aren't from independent
underlying entries. To avoid this, here, we filter out such
duplicates.
"""
# Here we use the deep hash of each Statement, and by making a dict,
# we effectively keep only one Statement with a given deep hash
self.statements = list({stmt.get_hash(shallow=False, refresh=True): stmt
for stmt in self.statements}.values()) | [
"def",
"eliminate_exact_duplicates",
"(",
"self",
")",
":",
"# Here we use the deep hash of each Statement, and by making a dict,",
"# we effectively keep only one Statement with a given deep hash",
"self",
".",
"statements",
"=",
"list",
"(",
"{",
"stmt",
".",
"get_hash",
"(",
"shallow",
"=",
"False",
",",
"refresh",
"=",
"True",
")",
":",
"stmt",
"for",
"stmt",
"in",
"self",
".",
"statements",
"}",
".",
"values",
"(",
")",
")"
]
| Eliminate Statements that were extracted multiple times.
Due to the way the patterns are implemented, they can sometimes yield
the same Statement information multiple times, in which case,
we end up with redundant Statements that aren't from independent
underlying entries. To avoid this, here, we filter out such
duplicates. | [
"Eliminate",
"Statements",
"that",
"were",
"extracted",
"multiple",
"times",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L71-L83 | train |
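The dedup-by-dict idiom in eliminate_exact_duplicates() works for any hashable content key; here strings and hashlib stand in for Statements and their deep hashes.

import hashlib

def content_hash(s):
    return hashlib.md5(s.encode('utf-8')).hexdigest()

stmts = ['MEK phosphorylates ERK',
         'MEK phosphorylates ERK',   # exact duplicate, dropped
         'EGF binds EGFR']
unique = list({content_hash(s): s for s in stmts}.values())
print(unique)  # ['MEK phosphorylates ERK', 'EGF binds EGFR']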
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.get_complexes | def get_complexes(self):
"""Extract INDRA Complex Statements from the BioPAX model.
This method searches for org.biopax.paxtools.model.level3.Complex
objects which represent molecular complexes. It doesn't reuse
BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith
query since that retrieves pairs of complex members rather than
the full complex.
"""
for obj in self.model.getObjects().toArray():
bpe = _cast_biopax_element(obj)
if not _is_complex(bpe):
continue
ev = self._get_evidence(bpe)
members = self._get_complex_members(bpe)
if members is not None:
if len(members) > 10:
logger.debug('Skipping complex with more than 10 members.')
continue
complexes = _get_combinations(members)
for c in complexes:
self.statements.append(decode_obj(Complex(c, ev),
encoding='utf-8')) | python | def get_complexes(self):
"""Extract INDRA Complex Statements from the BioPAX model.
This method searches for org.biopax.paxtools.model.level3.Complex
objects which represent molecular complexes. It doesn't reuse
BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith
query since that retrieves pairs of complex members rather than
the full complex.
"""
for obj in self.model.getObjects().toArray():
bpe = _cast_biopax_element(obj)
if not _is_complex(bpe):
continue
ev = self._get_evidence(bpe)
members = self._get_complex_members(bpe)
if members is not None:
if len(members) > 10:
logger.debug('Skipping complex with more than 10 members.')
continue
complexes = _get_combinations(members)
for c in complexes:
self.statements.append(decode_obj(Complex(c, ev),
encoding='utf-8')) | [
"def",
"get_complexes",
"(",
"self",
")",
":",
"for",
"obj",
"in",
"self",
".",
"model",
".",
"getObjects",
"(",
")",
".",
"toArray",
"(",
")",
":",
"bpe",
"=",
"_cast_biopax_element",
"(",
"obj",
")",
"if",
"not",
"_is_complex",
"(",
"bpe",
")",
":",
"continue",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"bpe",
")",
"members",
"=",
"self",
".",
"_get_complex_members",
"(",
"bpe",
")",
"if",
"members",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"members",
")",
">",
"10",
":",
"logger",
".",
"debug",
"(",
"'Skipping complex with more than 10 members.'",
")",
"continue",
"complexes",
"=",
"_get_combinations",
"(",
"members",
")",
"for",
"c",
"in",
"complexes",
":",
"self",
".",
"statements",
".",
"append",
"(",
"decode_obj",
"(",
"Complex",
"(",
"c",
",",
"ev",
")",
",",
"encoding",
"=",
"'utf-8'",
")",
")"
]
| Extract INDRA Complex Statements from the BioPAX model.
This method searches for org.biopax.paxtools.model.level3.Complex
objects which represent molecular complexes. It doesn't reuse
BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith
query since that retrieves pairs of complex members rather than
the full complex. | [
"Extract",
"INDRA",
"Complex",
"Statements",
"from",
"the",
"BioPAX",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L86-L109 | train |
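The call to _get_combinations is assumed here to expand complex members that may resolve to several agents (e.g. protein families) into all concrete member combinations; itertools.product gives the same effect on nested lists. The gene names are hypothetical.

import itertools

members = [['RAF1', 'BRAF'],  # a family slot with two alternatives
           ['MAP2K1']]        # a concrete member
for combo in itertools.product(*members):
    print(list(combo))
# ['RAF1', 'MAP2K1']
# ['BRAF', 'MAP2K1']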
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.get_modifications | def get_modifications(self):
"""Extract INDRA Modification Statements from the BioPAX model.
To extract Modifications, this method reuses the structure of
BioPAX Pattern's
    org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the type of state change
occurring (phosphorylation, deubiquitination, etc.).
"""
for modtype, modclass in modtype_to_modclass.items():
# TODO: we could possibly try to also extract generic
# modifications here
if modtype == 'modification':
continue
stmts = self._get_generic_modification(modclass)
self.statements += stmts | python | def get_modifications(self):
"""Extract INDRA Modification Statements from the BioPAX model.
To extract Modifications, this method reuses the structure of
BioPAX Pattern's
    org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the type of state change
occurring (phosphorylation, deubiquitination, etc.).
"""
for modtype, modclass in modtype_to_modclass.items():
# TODO: we could possibly try to also extract generic
# modifications here
if modtype == 'modification':
continue
stmts = self._get_generic_modification(modclass)
self.statements += stmts | [
"def",
"get_modifications",
"(",
"self",
")",
":",
"for",
"modtype",
",",
"modclass",
"in",
"modtype_to_modclass",
".",
"items",
"(",
")",
":",
"# TODO: we could possibly try to also extract generic",
"# modifications here",
"if",
"modtype",
"==",
"'modification'",
":",
"continue",
"stmts",
"=",
"self",
".",
"_get_generic_modification",
"(",
"modclass",
")",
"self",
".",
"statements",
"+=",
"stmts"
]
| Extract INDRA Modification Statements from the BioPAX model.
To extract Modifications, this method reuses the structure of
BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the type of state change
occurring (phosphorylation, deubiquitination, etc.). | [
"Extract",
"INDRA",
"Modification",
"Statements",
"from",
"the",
"BioPAX",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L111-L126 | train |
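get_modifications() drives its extraction from a modtype_to_modclass lookup and skips the generic 'modification' parent type. A sketch of that loop, with strings standing in for the INDRA Modification subclasses (the exact mapping contents are assumed):

modtype_to_modclass = {'phosphorylation': 'Phosphorylation',
                       'ubiquitination': 'Ubiquitination',
                       'modification': 'Modification'}  # generic parent

for modtype, modclass in modtype_to_modclass.items():
    if modtype == 'modification':
        continue  # the generic type is skipped, as in the loop above
    print(modtype, '->', modclass)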
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.get_activity_modification | def get_activity_modification(self):
"""Extract INDRA ActiveForm statements from the BioPAX model.
This method extracts ActiveForm Statements that are due to
protein modifications. This method reuses the structure of
BioPAX Pattern's
    org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of a
modification occurring (phosphorylation, deubiquitination, etc.)
and the gain or loss of activity due to the modification state
change.
"""
mod_filter = 'residue modification, active'
for is_active in [True, False]:
p = self._construct_modification_pattern()
rel = mcct.GAIN if is_active else mcct.LOSS
p.add(mcc(rel, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
reaction = r[p.indexOf('Conversion')]
activity = 'activity'
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
mod_shared = _get_mod_intersection(mod_in, mod_out)
gained_mods = _get_mod_difference(mod_out, mod_in)
# Here we get the evidence for the BiochemicalReaction
ev = self._get_evidence(reaction)
agents = self._get_agents_from_entity(output_spe)
for agent in _listify(agents):
static_mods = _get_mod_difference(agent.mods,
gained_mods)
# NOTE: with the ActiveForm representation we cannot
# separate static_mods and gained_mods. We assume here
# that the static_mods are inconsequential and therefore
# are not mentioned as an Agent condition, following
# don't care don't write semantics. Therefore only the
# gained_mods are listed in the ActiveForm as Agent
# conditions.
if gained_mods:
agent.mods = gained_mods
stmt = ActiveForm(agent, activity, is_active,
evidence=ev)
self.statements.append(decode_obj(stmt,
encoding='utf-8')) | python | def get_activity_modification(self):
"""Extract INDRA ActiveForm statements from the BioPAX model.
This method extracts ActiveForm Statements that are due to
protein modifications. This method reuses the structure of
BioPAX Pattern's
    org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of a
modification occurring (phosphorylation, deubiquitination, etc.)
and the gain or loss of activity due to the modification state
change.
"""
mod_filter = 'residue modification, active'
for is_active in [True, False]:
p = self._construct_modification_pattern()
rel = mcct.GAIN if is_active else mcct.LOSS
p.add(mcc(rel, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
reaction = r[p.indexOf('Conversion')]
activity = 'activity'
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
mod_shared = _get_mod_intersection(mod_in, mod_out)
gained_mods = _get_mod_difference(mod_out, mod_in)
# Here we get the evidence for the BiochemicalReaction
ev = self._get_evidence(reaction)
agents = self._get_agents_from_entity(output_spe)
for agent in _listify(agents):
static_mods = _get_mod_difference(agent.mods,
gained_mods)
# NOTE: with the ActiveForm representation we cannot
# separate static_mods and gained_mods. We assume here
# that the static_mods are inconsequential and therefore
# are not mentioned as an Agent condition, following
# don't care don't write semantics. Therefore only the
# gained_mods are listed in the ActiveForm as Agent
# conditions.
if gained_mods:
agent.mods = gained_mods
stmt = ActiveForm(agent, activity, is_active,
evidence=ev)
self.statements.append(decode_obj(stmt,
encoding='utf-8')) | [
"def",
"get_activity_modification",
"(",
"self",
")",
":",
"mod_filter",
"=",
"'residue modification, active'",
"for",
"is_active",
"in",
"[",
"True",
",",
"False",
"]",
":",
"p",
"=",
"self",
".",
"_construct_modification_pattern",
"(",
")",
"rel",
"=",
"mcct",
".",
"GAIN",
"if",
"is_active",
"else",
"mcct",
".",
"LOSS",
"p",
".",
"add",
"(",
"mcc",
"(",
"rel",
",",
"mod_filter",
")",
",",
"\"input simple PE\"",
",",
"\"output simple PE\"",
")",
"s",
"=",
"_bpp",
"(",
"'Searcher'",
")",
"res",
"=",
"s",
".",
"searchPlain",
"(",
"self",
".",
"model",
",",
"p",
")",
"res_array",
"=",
"[",
"_match_to_array",
"(",
"m",
")",
"for",
"m",
"in",
"res",
".",
"toArray",
"(",
")",
"]",
"for",
"r",
"in",
"res_array",
":",
"reaction",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'Conversion'",
")",
"]",
"activity",
"=",
"'activity'",
"input_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'input simple PE'",
")",
"]",
"output_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'output simple PE'",
")",
"]",
"# Get the modifications",
"mod_in",
"=",
"BiopaxProcessor",
".",
"_get_entity_mods",
"(",
"input_spe",
")",
"mod_out",
"=",
"BiopaxProcessor",
".",
"_get_entity_mods",
"(",
"output_spe",
")",
"mod_shared",
"=",
"_get_mod_intersection",
"(",
"mod_in",
",",
"mod_out",
")",
"gained_mods",
"=",
"_get_mod_difference",
"(",
"mod_out",
",",
"mod_in",
")",
"# Here we get the evidence for the BiochemicalReaction",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"reaction",
")",
"agents",
"=",
"self",
".",
"_get_agents_from_entity",
"(",
"output_spe",
")",
"for",
"agent",
"in",
"_listify",
"(",
"agents",
")",
":",
"static_mods",
"=",
"_get_mod_difference",
"(",
"agent",
".",
"mods",
",",
"gained_mods",
")",
"# NOTE: with the ActiveForm representation we cannot",
"# separate static_mods and gained_mods. We assume here",
"# that the static_mods are inconsequential and therefore",
"# are not mentioned as an Agent condition, following",
"# don't care don't write semantics. Therefore only the",
"# gained_mods are listed in the ActiveForm as Agent",
"# conditions.",
"if",
"gained_mods",
":",
"agent",
".",
"mods",
"=",
"gained_mods",
"stmt",
"=",
"ActiveForm",
"(",
"agent",
",",
"activity",
",",
"is_active",
",",
"evidence",
"=",
"ev",
")",
"self",
".",
"statements",
".",
"append",
"(",
"decode_obj",
"(",
"stmt",
",",
"encoding",
"=",
"'utf-8'",
")",
")"
]
| Extract INDRA ActiveForm statements from the BioPAX model.
This method extracts ActiveForm Statements that are due to
protein modifications. This method reuses the structure of
BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of a
modification occurring (phosphorylation, deubiquitination, etc.)
and the gain or loss of activity due to the modification state
change. | [
"Extract",
"INDRA",
"ActiveForm",
"statements",
"from",
"the",
"BioPAX",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L128-L185 | train |
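The gained/shared modification bookkeeping in get_activity_modification is plain set arithmetic once modifications are hashable; the residue tuples below are hypothetical stand-ins for INDRA ModCondition objects.

mods_in = {('phosphorylation', 'S', '218')}
mods_out = {('phosphorylation', 'S', '218'),
            ('phosphorylation', 'S', '222')}

gained_mods = mods_out - mods_in   # new on the output entity
mod_shared = mods_out & mods_in    # carried through unchanged
print(gained_mods)  # {('phosphorylation', 'S', '222')}
print(mod_shared)   # {('phosphorylation', 'S', '218')}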
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.get_regulate_amounts | def get_regulate_amounts(self):
"""Extract INDRA RegulateAmount Statements from the BioPAX model.
This method extracts IncreaseAmount/DecreaseAmount Statements from
the BioPAX model. It fully reuses BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsExpressionWithTemplateReac
pattern to find TemplateReactions which control the expression of
a protein.
"""
p = pb.controlsExpressionWithTemplateReac()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
stmts = []
for res in res_array:
# FIXME: for some reason labels are not accessible
# for these queries. It would be more reliable
# to get results by label instead of index.
'''
controller_er = res[p.indexOf('controller ER')]
generic_controller_er = res[p.indexOf('generic controller ER')]
controller_simple_pe = res[p.indexOf('controller simple PE')]
controller_pe = res[p.indexOf('controller PE')]
control = res[p.indexOf('Control')]
conversion = res[p.indexOf('Conversion')]
input_pe = res[p.indexOf('input PE')]
input_simple_pe = res[p.indexOf('input simple PE')]
changed_generic_er = res[p.indexOf('changed generic ER')]
output_pe = res[p.indexOf('output PE')]
output_simple_pe = res[p.indexOf('output simple PE')]
changed_er = res[p.indexOf('changed ER')]
'''
# TODO: here, res[3] is the complex physical entity
# for instance http://pathwaycommons.org/pc2/
# Complex_43c6b8330562c1b411d21e9d1185bae9
# consists of 3 components: JUN, FOS and NFAT
# where NFAT further contains 3 member physical entities.
#
# However, res[2] iterates over all 5 member physical entities
# of the complex which doesn't represent the underlying
# structure faithfully. It would be better to use res[3]
# (the complex itself) and look at components and then
# members. However, then, it would not be clear how to
# construct an INDRA Agent for the controller.
controller = self._get_agents_from_entity(res[2])
controlled_pe = res[6]
controlled = self._get_agents_from_entity(controlled_pe)
conversion = res[5]
direction = conversion.getTemplateDirection()
if direction is not None:
direction = direction.name()
if direction != 'FORWARD':
logger.warning('Unhandled conversion direction %s' %
direction)
continue
# Sometimes interaction type is annotated as
# term=='TRANSCRIPTION'. Other times this is not
# annotated.
int_type = conversion.getInteractionType().toArray()
if int_type:
for it in int_type:
for term in it.getTerm().toArray():
pass
control = res[4]
control_type = control.getControlType()
if control_type:
control_type = control_type.name()
ev = self._get_evidence(control)
for subj, obj in itertools.product(_listify(controller),
_listify(controlled)):
subj_act = ActivityCondition('transcription', True)
subj.activity = subj_act
if control_type == 'ACTIVATION':
st = IncreaseAmount(subj, obj, evidence=ev)
elif control_type == 'INHIBITION':
st = DecreaseAmount(subj, obj, evidence=ev)
else:
logger.warning('Unhandled control type %s' % control_type)
continue
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | python | def get_regulate_amounts(self):
"""Extract INDRA RegulateAmount Statements from the BioPAX model.
This method extracts IncreaseAmount/DecreaseAmount Statements from
the BioPAX model. It fully reuses BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsExpressionWithTemplateReac
pattern to find TemplateReactions which control the expression of
a protein.
"""
p = pb.controlsExpressionWithTemplateReac()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
stmts = []
for res in res_array:
# FIXME: for some reason labels are not accessible
# for these queries. It would be more reliable
# to get results by label instead of index.
'''
controller_er = res[p.indexOf('controller ER')]
generic_controller_er = res[p.indexOf('generic controller ER')]
controller_simple_pe = res[p.indexOf('controller simple PE')]
controller_pe = res[p.indexOf('controller PE')]
control = res[p.indexOf('Control')]
conversion = res[p.indexOf('Conversion')]
input_pe = res[p.indexOf('input PE')]
input_simple_pe = res[p.indexOf('input simple PE')]
changed_generic_er = res[p.indexOf('changed generic ER')]
output_pe = res[p.indexOf('output PE')]
output_simple_pe = res[p.indexOf('output simple PE')]
changed_er = res[p.indexOf('changed ER')]
'''
# TODO: here, res[3] is the complex physical entity
# for instance http://pathwaycommons.org/pc2/
# Complex_43c6b8330562c1b411d21e9d1185bae9
# consists of 3 components: JUN, FOS and NFAT
# where NFAT further contains 3 member physical entities.
#
# However, res[2] iterates over all 5 member physical entities
# of the complex which doesn't represent the underlying
# structure faithfully. It would be better to use res[3]
# (the complex itself) and look at components and then
# members. However, then, it would not be clear how to
# construct an INDRA Agent for the controller.
controller = self._get_agents_from_entity(res[2])
controlled_pe = res[6]
controlled = self._get_agents_from_entity(controlled_pe)
conversion = res[5]
direction = conversion.getTemplateDirection()
if direction is not None:
direction = direction.name()
if direction != 'FORWARD':
logger.warning('Unhandled conversion direction %s' %
direction)
continue
# Sometimes interaction type is annotated as
# term=='TRANSCRIPTION'. Other times this is not
# annotated.
int_type = conversion.getInteractionType().toArray()
if int_type:
for it in int_type:
for term in it.getTerm().toArray():
pass
control = res[4]
control_type = control.getControlType()
if control_type:
control_type = control_type.name()
ev = self._get_evidence(control)
for subj, obj in itertools.product(_listify(controller),
_listify(controlled)):
subj_act = ActivityCondition('transcription', True)
subj.activity = subj_act
if control_type == 'ACTIVATION':
st = IncreaseAmount(subj, obj, evidence=ev)
elif control_type == 'INHIBITION':
st = DecreaseAmount(subj, obj, evidence=ev)
else:
logger.warning('Unhandled control type %s' % control_type)
continue
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | [
"def",
"get_regulate_amounts",
"(",
"self",
")",
":",
"p",
"=",
"pb",
".",
"controlsExpressionWithTemplateReac",
"(",
")",
"s",
"=",
"_bpp",
"(",
"'Searcher'",
")",
"res",
"=",
"s",
".",
"searchPlain",
"(",
"self",
".",
"model",
",",
"p",
")",
"res_array",
"=",
"[",
"_match_to_array",
"(",
"m",
")",
"for",
"m",
"in",
"res",
".",
"toArray",
"(",
")",
"]",
"stmts",
"=",
"[",
"]",
"for",
"res",
"in",
"res_array",
":",
"# FIXME: for some reason labels are not accessible",
"# for these queries. It would be more reliable",
"# to get results by label instead of index.",
"'''\n controller_er = res[p.indexOf('controller ER')]\n generic_controller_er = res[p.indexOf('generic controller ER')]\n controller_simple_pe = res[p.indexOf('controller simple PE')]\n controller_pe = res[p.indexOf('controller PE')]\n control = res[p.indexOf('Control')]\n conversion = res[p.indexOf('Conversion')]\n input_pe = res[p.indexOf('input PE')]\n input_simple_pe = res[p.indexOf('input simple PE')]\n changed_generic_er = res[p.indexOf('changed generic ER')]\n output_pe = res[p.indexOf('output PE')]\n output_simple_pe = res[p.indexOf('output simple PE')]\n changed_er = res[p.indexOf('changed ER')]\n '''",
"# TODO: here, res[3] is the complex physical entity",
"# for instance http://pathwaycommons.org/pc2/",
"# Complex_43c6b8330562c1b411d21e9d1185bae9",
"# consists of 3 components: JUN, FOS and NFAT",
"# where NFAT further contains 3 member physical entities.",
"#",
"# However, res[2] iterates over all 5 member physical entities",
"# of the complex which doesn't represent the underlying",
"# structure faithfully. It would be better to use res[3]",
"# (the complex itself) and look at components and then",
"# members. However, then, it would not be clear how to",
"# construct an INDRA Agent for the controller.",
"controller",
"=",
"self",
".",
"_get_agents_from_entity",
"(",
"res",
"[",
"2",
"]",
")",
"controlled_pe",
"=",
"res",
"[",
"6",
"]",
"controlled",
"=",
"self",
".",
"_get_agents_from_entity",
"(",
"controlled_pe",
")",
"conversion",
"=",
"res",
"[",
"5",
"]",
"direction",
"=",
"conversion",
".",
"getTemplateDirection",
"(",
")",
"if",
"direction",
"is",
"not",
"None",
":",
"direction",
"=",
"direction",
".",
"name",
"(",
")",
"if",
"direction",
"!=",
"'FORWARD'",
":",
"logger",
".",
"warning",
"(",
"'Unhandled conversion direction %s'",
"%",
"direction",
")",
"continue",
"# Sometimes interaction type is annotated as",
"# term=='TRANSCRIPTION'. Other times this is not",
"# annotated.",
"int_type",
"=",
"conversion",
".",
"getInteractionType",
"(",
")",
".",
"toArray",
"(",
")",
"if",
"int_type",
":",
"for",
"it",
"in",
"int_type",
":",
"for",
"term",
"in",
"it",
".",
"getTerm",
"(",
")",
".",
"toArray",
"(",
")",
":",
"pass",
"control",
"=",
"res",
"[",
"4",
"]",
"control_type",
"=",
"control",
".",
"getControlType",
"(",
")",
"if",
"control_type",
":",
"control_type",
"=",
"control_type",
".",
"name",
"(",
")",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"control",
")",
"for",
"subj",
",",
"obj",
"in",
"itertools",
".",
"product",
"(",
"_listify",
"(",
"controller",
")",
",",
"_listify",
"(",
"controlled",
")",
")",
":",
"subj_act",
"=",
"ActivityCondition",
"(",
"'transcription'",
",",
"True",
")",
"subj",
".",
"activity",
"=",
"subj_act",
"if",
"control_type",
"==",
"'ACTIVATION'",
":",
"st",
"=",
"IncreaseAmount",
"(",
"subj",
",",
"obj",
",",
"evidence",
"=",
"ev",
")",
"elif",
"control_type",
"==",
"'INHIBITION'",
":",
"st",
"=",
"DecreaseAmount",
"(",
"subj",
",",
"obj",
",",
"evidence",
"=",
"ev",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Unhandled control type %s'",
"%",
"control_type",
")",
"continue",
"st_dec",
"=",
"decode_obj",
"(",
"st",
",",
"encoding",
"=",
"'utf-8'",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st_dec",
")"
]
| Extract INDRA RegulateAmount Statements from the BioPAX model.
This method extracts IncreaseAmount/DecreaseAmount Statements from
the BioPAX model. It fully reuses BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsExpressionWithTemplateReac
pattern to find TemplateReactions which control the expression of
a protein. | [
"Extract",
"INDRA",
"RegulateAmount",
"Statements",
"from",
"the",
"BioPAX",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L261-L342 | train |
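The control-type dispatch at the end of get_regulate_amounts() maps ACTIVATION/INHIBITION onto increase/decrease statements. In this sketch tuples stand in for IncreaseAmount/DecreaseAmount Statements and the agent names are made up.

def regulate_amount(control_type, subj, obj):
    if control_type == 'ACTIVATION':
        return ('IncreaseAmount', subj, obj)
    if control_type == 'INHIBITION':
        return ('DecreaseAmount', subj, obj)
    return None  # unhandled control types are skipped with a warning

print(regulate_amount('ACTIVATION', 'JUN', 'MMP1'))
# ('IncreaseAmount', 'JUN', 'MMP1')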
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.get_gef | def get_gef(self):
"""Extract Gef INDRA Statements from the BioPAX model.
This method uses a custom BioPAX Pattern
    (one that is not implemented in PatternBox) to query for controlled
BiochemicalReactions in which the same protein is in complex with
GDP on the left hand side and in complex with GTP on the
right hand side. This implies that the controller is a GEF for the
GDP/GTP-bound protein.
"""
p = self._gef_gap_base()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_pe = r[p.indexOf('output PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
# Make sure the GEF is not a complex
# TODO: it could be possible to extract certain complexes here, for
# instance ones that only have a single protein
if _is_complex(controller_pe):
continue
members_in = self._get_complex_members(input_pe)
members_out = self._get_complex_members(output_pe)
if not (members_in and members_out):
continue
# Make sure the outgoing complex has exactly 2 members
# TODO: by finding matching proteins on either side, in principle
# it would be possible to find Gef relationships in complexes
# with more members
if len(members_out) != 2:
continue
# Make sure complex starts with GDP that becomes GTP
gdp_in = False
for member in members_in:
if isinstance(member, Agent) and member.name == 'GDP':
gdp_in = True
gtp_out = False
for member in members_out:
if isinstance(member, Agent) and member.name == 'GTP':
gtp_out = True
if not (gdp_in and gtp_out):
continue
ras_list = self._get_agents_from_entity(input_spe)
gef_list = self._get_agents_from_entity(controller_pe)
ev = self._get_evidence(control)
for gef, ras in itertools.product(_listify(gef_list),
_listify(ras_list)):
st = Gef(gef, ras, evidence=ev)
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | python | def get_gef(self):
"""Extract Gef INDRA Statements from the BioPAX model.
This method uses a custom BioPAX Pattern
(one that is not implemented in PatternBox) to query for controlled
BiochemicalReactions in which the same protein is in complex with
GDP on the left hand side and in complex with GTP on the
right hand side. This implies that the controller is a GEF for the
GDP/GTP-bound protein.
"""
p = self._gef_gap_base()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_pe = r[p.indexOf('output PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
# Make sure the GEF is not a complex
# TODO: it could be possible to extract certain complexes here, for
# instance ones that only have a single protein
if _is_complex(controller_pe):
continue
members_in = self._get_complex_members(input_pe)
members_out = self._get_complex_members(output_pe)
if not (members_in and members_out):
continue
# Make sure the outgoing complex has exactly 2 members
# TODO: by finding matching proteins on either side, in principle
# it would be possible to find Gef relationships in complexes
# with more members
if len(members_out) != 2:
continue
# Make sure complex starts with GDP that becomes GTP
gdp_in = False
for member in members_in:
if isinstance(member, Agent) and member.name == 'GDP':
gdp_in = True
gtp_out = False
for member in members_out:
if isinstance(member, Agent) and member.name == 'GTP':
gtp_out = True
if not (gdp_in and gtp_out):
continue
ras_list = self._get_agents_from_entity(input_spe)
gef_list = self._get_agents_from_entity(controller_pe)
ev = self._get_evidence(control)
for gef, ras in itertools.product(_listify(gef_list),
_listify(ras_list)):
st = Gef(gef, ras, evidence=ev)
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | [
"def",
"get_gef",
"(",
"self",
")",
":",
"p",
"=",
"self",
".",
"_gef_gap_base",
"(",
")",
"s",
"=",
"_bpp",
"(",
"'Searcher'",
")",
"res",
"=",
"s",
".",
"searchPlain",
"(",
"self",
".",
"model",
",",
"p",
")",
"res_array",
"=",
"[",
"_match_to_array",
"(",
"m",
")",
"for",
"m",
"in",
"res",
".",
"toArray",
"(",
")",
"]",
"for",
"r",
"in",
"res_array",
":",
"controller_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'controller PE'",
")",
"]",
"input_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'input PE'",
")",
"]",
"input_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'input simple PE'",
")",
"]",
"output_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'output PE'",
")",
"]",
"output_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'output simple PE'",
")",
"]",
"reaction",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'Conversion'",
")",
"]",
"control",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'Control'",
")",
"]",
"# Make sure the GEF is not a complex",
"# TODO: it could be possible to extract certain complexes here, for",
"# instance ones that only have a single protein",
"if",
"_is_complex",
"(",
"controller_pe",
")",
":",
"continue",
"members_in",
"=",
"self",
".",
"_get_complex_members",
"(",
"input_pe",
")",
"members_out",
"=",
"self",
".",
"_get_complex_members",
"(",
"output_pe",
")",
"if",
"not",
"(",
"members_in",
"and",
"members_out",
")",
":",
"continue",
"# Make sure the outgoing complex has exactly 2 members",
"# TODO: by finding matching proteins on either side, in principle",
"# it would be possible to find Gef relationships in complexes",
"# with more members",
"if",
"len",
"(",
"members_out",
")",
"!=",
"2",
":",
"continue",
"# Make sure complex starts with GDP that becomes GTP",
"gdp_in",
"=",
"False",
"for",
"member",
"in",
"members_in",
":",
"if",
"isinstance",
"(",
"member",
",",
"Agent",
")",
"and",
"member",
".",
"name",
"==",
"'GDP'",
":",
"gdp_in",
"=",
"True",
"gtp_out",
"=",
"False",
"for",
"member",
"in",
"members_out",
":",
"if",
"isinstance",
"(",
"member",
",",
"Agent",
")",
"and",
"member",
".",
"name",
"==",
"'GTP'",
":",
"gtp_out",
"=",
"True",
"if",
"not",
"(",
"gdp_in",
"and",
"gtp_out",
")",
":",
"continue",
"ras_list",
"=",
"self",
".",
"_get_agents_from_entity",
"(",
"input_spe",
")",
"gef_list",
"=",
"self",
".",
"_get_agents_from_entity",
"(",
"controller_pe",
")",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"control",
")",
"for",
"gef",
",",
"ras",
"in",
"itertools",
".",
"product",
"(",
"_listify",
"(",
"gef_list",
")",
",",
"_listify",
"(",
"ras_list",
")",
")",
":",
"st",
"=",
"Gef",
"(",
"gef",
",",
"ras",
",",
"evidence",
"=",
"ev",
")",
"st_dec",
"=",
"decode_obj",
"(",
"st",
",",
"encoding",
"=",
"'utf-8'",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st_dec",
")"
]
| Extract Gef INDRA Statements from the BioPAX model.
This method uses a custom BioPAX Pattern
(one that is not implemented in PatternBox) to query for controlled
BiochemicalReactions in which the same protein is in complex with
GDP on the left hand side and in complex with GTP on the
right hand side. This implies that the controller is a GEF for the
GDP/GTP-bound protein. | [
"Extract",
"Gef",
"INDRA",
"Statements",
"from",
"the",
"BioPAX",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L474-L531 | train |
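A self-contained restatement of the GDP-in/GTP-out test at the heart of get_gef, over plain member names instead of Agent objects (a sketch, not part of the processor's API):

def is_gef_exchange(names_in, names_out):
    # A GEF-controlled reaction swaps GDP on the input side of the
    # GTPase complex for GTP on the output side.
    return 'GDP' in names_in and 'GTP' in names_out

print(is_gef_exchange({'KRAS', 'GDP'}, {'KRAS', 'GTP'}))  # True
print(is_gef_exchange({'KRAS', 'GTP'}, {'KRAS', 'GDP'}))  # False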
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor.get_gap | def get_gap(self):
"""Extract Gap INDRA Statements from the BioPAX model.
This method uses a custom BioPAX Pattern
(one that is not implemented in PatternBox) to query for controlled
BiochemicalReactions in which the same protein is in complex with
GTP on the left hand side and in complex with GDP on the
right hand side. This implies that the controller is a GAP for the
GDP/GTP-bound protein.
"""
p = self._gef_gap_base()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_pe = r[p.indexOf('output PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
# Make sure the GAP is not a complex
# TODO: it could be possible to extract certain complexes here, for
# instance ones that only have a single protein
if _is_complex(controller_pe):
continue
members_in = self._get_complex_members(input_pe)
members_out = self._get_complex_members(output_pe)
if not (members_in and members_out):
continue
# Make sure the outgoing complex has exactly 2 members
# TODO: by finding matching proteins on either side, in principle
# it would be possible to find Gap relationships in complexes
# with more members
if len(members_out) != 2:
continue
# Make sure complex starts with GTP that becomes GDP
gtp_in = False
for member in members_in:
if isinstance(member, Agent) and member.name == 'GTP':
gtp_in = True
gdp_out = False
for member in members_out:
if isinstance(member, Agent) and member.name == 'GDP':
gdp_out = True
if not (gtp_in and gdp_out):
continue
ras_list = self._get_agents_from_entity(input_spe)
gap_list = self._get_agents_from_entity(controller_pe)
ev = self._get_evidence(control)
for gap, ras in itertools.product(_listify(gap_list),
_listify(ras_list)):
st = Gap(gap, ras, evidence=ev)
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | python | def get_gap(self):
"""Extract Gap INDRA Statements from the BioPAX model.
This method uses a custom BioPAX Pattern
(one that is not implemented in PatternBox) to query for controlled
BiochemicalReactions in which the same protein is in complex with
GTP on the left hand side and in complex with GDP on the
right hand side. This implies that the controller is a GAP for the
GDP/GTP-bound protein.
"""
p = self._gef_gap_base()
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_pe = r[p.indexOf('output PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
# Make sure the GAP is not a complex
# TODO: it could be possible to extract certain complexes here, for
# instance ones that only have a single protein
if _is_complex(controller_pe):
continue
members_in = self._get_complex_members(input_pe)
members_out = self._get_complex_members(output_pe)
if not (members_in and members_out):
continue
# Make sure the outgoing complex has exactly 2 members
# TODO: by finding matching proteins on either side, in principle
# it would be possible to find Gap relationships in complexes
# with more members
if len(members_out) != 2:
continue
# Make sure complex starts with GTP that becomes GDP
gtp_in = False
for member in members_in:
if isinstance(member, Agent) and member.name == 'GTP':
gtp_in = True
gdp_out = False
for member in members_out:
if isinstance(member, Agent) and member.name == 'GDP':
gdp_out = True
if not (gtp_in and gdp_out):
continue
ras_list = self._get_agents_from_entity(input_spe)
gap_list = self._get_agents_from_entity(controller_pe)
ev = self._get_evidence(control)
for gap, ras in itertools.product(_listify(gap_list),
_listify(ras_list)):
st = Gap(gap, ras, evidence=ev)
st_dec = decode_obj(st, encoding='utf-8')
self.statements.append(st_dec) | [
"def",
"get_gap",
"(",
"self",
")",
":",
"p",
"=",
"self",
".",
"_gef_gap_base",
"(",
")",
"s",
"=",
"_bpp",
"(",
"'Searcher'",
")",
"res",
"=",
"s",
".",
"searchPlain",
"(",
"self",
".",
"model",
",",
"p",
")",
"res_array",
"=",
"[",
"_match_to_array",
"(",
"m",
")",
"for",
"m",
"in",
"res",
".",
"toArray",
"(",
")",
"]",
"for",
"r",
"in",
"res_array",
":",
"controller_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'controller PE'",
")",
"]",
"input_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'input PE'",
")",
"]",
"input_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'input simple PE'",
")",
"]",
"output_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'output PE'",
")",
"]",
"output_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'output simple PE'",
")",
"]",
"reaction",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'Conversion'",
")",
"]",
"control",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'Control'",
")",
"]",
"# Make sure the GAP is not a complex",
"# TODO: it could be possible to extract certain complexes here, for",
"# instance ones that only have a single protein",
"if",
"_is_complex",
"(",
"controller_pe",
")",
":",
"continue",
"members_in",
"=",
"self",
".",
"_get_complex_members",
"(",
"input_pe",
")",
"members_out",
"=",
"self",
".",
"_get_complex_members",
"(",
"output_pe",
")",
"if",
"not",
"(",
"members_in",
"and",
"members_out",
")",
":",
"continue",
"# Make sure the outgoing complex has exactly 2 members",
"# TODO: by finding matching proteins on either side, in principle",
"# it would be possible to find Gap relationships in complexes",
"# with more members",
"if",
"len",
"(",
"members_out",
")",
"!=",
"2",
":",
"continue",
"# Make sure complex starts with GDP that becomes GTP",
"gtp_in",
"=",
"False",
"for",
"member",
"in",
"members_in",
":",
"if",
"isinstance",
"(",
"member",
",",
"Agent",
")",
"and",
"member",
".",
"name",
"==",
"'GTP'",
":",
"gtp_in",
"=",
"True",
"gdp_out",
"=",
"False",
"for",
"member",
"in",
"members_out",
":",
"if",
"isinstance",
"(",
"member",
",",
"Agent",
")",
"and",
"member",
".",
"name",
"==",
"'GDP'",
":",
"gdp_out",
"=",
"True",
"if",
"not",
"(",
"gtp_in",
"and",
"gdp_out",
")",
":",
"continue",
"ras_list",
"=",
"self",
".",
"_get_agents_from_entity",
"(",
"input_spe",
")",
"gap_list",
"=",
"self",
".",
"_get_agents_from_entity",
"(",
"controller_pe",
")",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"control",
")",
"for",
"gap",
",",
"ras",
"in",
"itertools",
".",
"product",
"(",
"_listify",
"(",
"gap_list",
")",
",",
"_listify",
"(",
"ras_list",
")",
")",
":",
"st",
"=",
"Gap",
"(",
"gap",
",",
"ras",
",",
"evidence",
"=",
"ev",
")",
"st_dec",
"=",
"decode_obj",
"(",
"st",
",",
"encoding",
"=",
"'utf-8'",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st_dec",
")"
]
| Extract Gap INDRA Statements from the BioPAX model.
This method uses a custom BioPAX Pattern
(one that is not implemented in PatternBox) to query for controlled
BiochemicalReactions in which the same protein is in complex with
GTP on the left hand side and in complex with GDP on the
right hand side. This implies that the controller is a GAP for the
GDP/GTP-bound protein. | [
"Extract",
"Gap",
"INDRA",
"Statements",
"from",
"the",
"BioPAX",
"model",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L533-L590 | train |
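The two extractors above emit Gef and Gap Statements, which can also be constructed directly from indra.statements (assuming indra is installed; SOS1 and RASA1 are textbook examples of a KRAS GEF and GAP):

from indra.statements import Agent, Gap, Gef

print(Gef(Agent('SOS1'), Agent('KRAS')))   # SOS1 promotes GDP->GTP exchange
print(Gap(Agent('RASA1'), Agent('KRAS')))  # RASA1 promotes GTP hydrolysis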
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor._get_entity_mods | def _get_entity_mods(bpe):
"""Get all the modifications of an entity in INDRA format"""
if _is_entity(bpe):
features = bpe.getFeature().toArray()
else:
features = bpe.getEntityFeature().toArray()
mods = []
for feature in features:
if not _is_modification(feature):
continue
mc = BiopaxProcessor._extract_mod_from_feature(feature)
if mc is not None:
mods.append(mc)
return mods | python | def _get_entity_mods(bpe):
"""Get all the modifications of an entity in INDRA format"""
if _is_entity(bpe):
features = bpe.getFeature().toArray()
else:
features = bpe.getEntityFeature().toArray()
mods = []
for feature in features:
if not _is_modification(feature):
continue
mc = BiopaxProcessor._extract_mod_from_feature(feature)
if mc is not None:
mods.append(mc)
return mods | [
"def",
"_get_entity_mods",
"(",
"bpe",
")",
":",
"if",
"_is_entity",
"(",
"bpe",
")",
":",
"features",
"=",
"bpe",
".",
"getFeature",
"(",
")",
".",
"toArray",
"(",
")",
"else",
":",
"features",
"=",
"bpe",
".",
"getEntityFeature",
"(",
")",
".",
"toArray",
"(",
")",
"mods",
"=",
"[",
"]",
"for",
"feature",
"in",
"features",
":",
"if",
"not",
"_is_modification",
"(",
"feature",
")",
":",
"continue",
"mc",
"=",
"BiopaxProcessor",
".",
"_extract_mod_from_feature",
"(",
"feature",
")",
"if",
"mc",
"is",
"not",
"None",
":",
"mods",
".",
"append",
"(",
"mc",
")",
"return",
"mods"
]
| Get all the modifications of an entity in INDRA format | [
"Get",
"all",
"the",
"modifications",
"of",
"an",
"entity",
"in",
"INDRA",
"format"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L633-L646 | train |
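The helper above returns modifications as indra.statements.ModCondition objects; a short sketch of that output format (assuming indra is installed; the site shown is illustrative):

from indra.statements import ModCondition

mc = ModCondition('phosphorylation', residue='S', position='473',
                  is_modified=True)
print(mc.mod_type, mc.residue, mc.position, mc.is_modified)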
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor._get_generic_modification | def _get_generic_modification(self, mod_class):
"""Get all modification reactions given a Modification class."""
mod_type = modclass_to_modtype[mod_class]
if issubclass(mod_class, RemoveModification):
mod_gain_const = mcct.LOSS
mod_type = modtype_to_inverse[mod_type]
else:
mod_gain_const = mcct.GAIN
mod_filter = mod_type[:5]
# Start with a generic modification pattern
p = BiopaxProcessor._construct_modification_pattern()
p.add(mcc(mod_gain_const, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
stmts = []
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
if not _is_catalysis(control):
continue
cat_dir = control.getCatalysisDirection()
if cat_dir is not None and cat_dir.name() != 'LEFT_TO_RIGHT':
logger.debug('Unexpected catalysis direction: %s.' % \
control.getCatalysisDirection())
continue
enzs = BiopaxProcessor._get_primary_controller(controller_pe)
if not enzs:
continue
'''
if _is_complex(input_pe):
sub_members_in = self._get_complex_members(input_pe)
sub_members_out = self._get_complex_members(output_pe)
# TODO: It is possible to find which member of the complex is
# actually modified. That member will be the substrate and
# all other members of the complex will be bound to it.
logger.info('Cannot handle complex substrates.')
continue
'''
subs = BiopaxProcessor._get_agents_from_entity(input_spe,
expand_pe=False)
ev = self._get_evidence(control)
for enz, sub in itertools.product(_listify(enzs), _listify(subs)):
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
sub.mods = _get_mod_intersection(mod_in, mod_out)
if issubclass(mod_class, AddModification):
gained_mods = _get_mod_difference(mod_out, mod_in)
else:
gained_mods = _get_mod_difference(mod_in, mod_out)
for mod in gained_mods:
# Is it guaranteed that these are all modifications
# of the type we are extracting?
if mod.mod_type not in (mod_type,
modtype_to_inverse[mod_type]):
continue
stmt = mod_class(enz, sub, mod.residue, mod.position,
evidence=ev)
stmts.append(decode_obj(stmt, encoding='utf-8'))
return stmts | python | def _get_generic_modification(self, mod_class):
"""Get all modification reactions given a Modification class."""
mod_type = modclass_to_modtype[mod_class]
if issubclass(mod_class, RemoveModification):
mod_gain_const = mcct.LOSS
mod_type = modtype_to_inverse[mod_type]
else:
mod_gain_const = mcct.GAIN
mod_filter = mod_type[:5]
# Start with a generic modification pattern
p = BiopaxProcessor._construct_modification_pattern()
p.add(mcc(mod_gain_const, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
stmts = []
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
if not _is_catalysis(control):
continue
cat_dir = control.getCatalysisDirection()
if cat_dir is not None and cat_dir.name() != 'LEFT_TO_RIGHT':
logger.debug('Unexpected catalysis direction: %s.' % \
control.getCatalysisDirection())
continue
enzs = BiopaxProcessor._get_primary_controller(controller_pe)
if not enzs:
continue
'''
if _is_complex(input_pe):
sub_members_in = self._get_complex_members(input_pe)
sub_members_out = self._get_complex_members(output_pe)
# TODO: It is possible to find which member of the complex is
# actually modified. That member will be the substrate and
# all other members of the complex will be bound to it.
logger.info('Cannot handle complex substrates.')
continue
'''
subs = BiopaxProcessor._get_agents_from_entity(input_spe,
expand_pe=False)
ev = self._get_evidence(control)
for enz, sub in itertools.product(_listify(enzs), _listify(subs)):
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
sub.mods = _get_mod_intersection(mod_in, mod_out)
if issubclass(mod_class, AddModification):
gained_mods = _get_mod_difference(mod_out, mod_in)
else:
gained_mods = _get_mod_difference(mod_in, mod_out)
for mod in gained_mods:
# Is it guaranteed that these are all modifications
# of the type we are extracting?
if mod.mod_type not in (mod_type,
modtype_to_inverse[mod_type]):
continue
stmt = mod_class(enz, sub, mod.residue, mod.position,
evidence=ev)
stmts.append(decode_obj(stmt, encoding='utf-8'))
return stmts | [
"def",
"_get_generic_modification",
"(",
"self",
",",
"mod_class",
")",
":",
"mod_type",
"=",
"modclass_to_modtype",
"[",
"mod_class",
"]",
"if",
"issubclass",
"(",
"mod_class",
",",
"RemoveModification",
")",
":",
"mod_gain_const",
"=",
"mcct",
".",
"LOSS",
"mod_type",
"=",
"modtype_to_inverse",
"[",
"mod_type",
"]",
"else",
":",
"mod_gain_const",
"=",
"mcct",
".",
"GAIN",
"mod_filter",
"=",
"mod_type",
"[",
":",
"5",
"]",
"# Start with a generic modification pattern",
"p",
"=",
"BiopaxProcessor",
".",
"_construct_modification_pattern",
"(",
")",
"p",
".",
"add",
"(",
"mcc",
"(",
"mod_gain_const",
",",
"mod_filter",
")",
",",
"\"input simple PE\"",
",",
"\"output simple PE\"",
")",
"s",
"=",
"_bpp",
"(",
"'Searcher'",
")",
"res",
"=",
"s",
".",
"searchPlain",
"(",
"self",
".",
"model",
",",
"p",
")",
"res_array",
"=",
"[",
"_match_to_array",
"(",
"m",
")",
"for",
"m",
"in",
"res",
".",
"toArray",
"(",
")",
"]",
"stmts",
"=",
"[",
"]",
"for",
"r",
"in",
"res_array",
":",
"controller_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'controller PE'",
")",
"]",
"input_pe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'input PE'",
")",
"]",
"input_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'input simple PE'",
")",
"]",
"output_spe",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'output simple PE'",
")",
"]",
"reaction",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'Conversion'",
")",
"]",
"control",
"=",
"r",
"[",
"p",
".",
"indexOf",
"(",
"'Control'",
")",
"]",
"if",
"not",
"_is_catalysis",
"(",
"control",
")",
":",
"continue",
"cat_dir",
"=",
"control",
".",
"getCatalysisDirection",
"(",
")",
"if",
"cat_dir",
"is",
"not",
"None",
"and",
"cat_dir",
".",
"name",
"(",
")",
"!=",
"'LEFT_TO_RIGHT'",
":",
"logger",
".",
"debug",
"(",
"'Unexpected catalysis direction: %s.'",
"%",
"control",
".",
"getCatalysisDirection",
"(",
")",
")",
"continue",
"enzs",
"=",
"BiopaxProcessor",
".",
"_get_primary_controller",
"(",
"controller_pe",
")",
"if",
"not",
"enzs",
":",
"continue",
"'''\n if _is_complex(input_pe):\n sub_members_in = self._get_complex_members(input_pe)\n sub_members_out = self._get_complex_members(output_pe)\n # TODO: It is possible to find which member of the complex is\n # actually modified. That member will be the substrate and\n # all other members of the complex will be bound to it.\n logger.info('Cannot handle complex substrates.')\n continue\n '''",
"subs",
"=",
"BiopaxProcessor",
".",
"_get_agents_from_entity",
"(",
"input_spe",
",",
"expand_pe",
"=",
"False",
")",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"control",
")",
"for",
"enz",
",",
"sub",
"in",
"itertools",
".",
"product",
"(",
"_listify",
"(",
"enzs",
")",
",",
"_listify",
"(",
"subs",
")",
")",
":",
"# Get the modifications",
"mod_in",
"=",
"BiopaxProcessor",
".",
"_get_entity_mods",
"(",
"input_spe",
")",
"mod_out",
"=",
"BiopaxProcessor",
".",
"_get_entity_mods",
"(",
"output_spe",
")",
"sub",
".",
"mods",
"=",
"_get_mod_intersection",
"(",
"mod_in",
",",
"mod_out",
")",
"if",
"issubclass",
"(",
"mod_class",
",",
"AddModification",
")",
":",
"gained_mods",
"=",
"_get_mod_difference",
"(",
"mod_out",
",",
"mod_in",
")",
"else",
":",
"gained_mods",
"=",
"_get_mod_difference",
"(",
"mod_in",
",",
"mod_out",
")",
"for",
"mod",
"in",
"gained_mods",
":",
"# Is it guaranteed that these are all modifications",
"# of the type we are extracting?",
"if",
"mod",
".",
"mod_type",
"not",
"in",
"(",
"mod_type",
",",
"modtype_to_inverse",
"[",
"mod_type",
"]",
")",
":",
"continue",
"stmt",
"=",
"mod_class",
"(",
"enz",
",",
"sub",
",",
"mod",
".",
"residue",
",",
"mod",
".",
"position",
",",
"evidence",
"=",
"ev",
")",
"stmts",
".",
"append",
"(",
"decode_obj",
"(",
"stmt",
",",
"encoding",
"=",
"'utf-8'",
")",
")",
"return",
"stmts"
]
| Get all modification reactions given a Modification class. | [
"Get",
"all",
"modification",
"reactions",
"given",
"a",
"Modification",
"class",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L648-L721 | train |
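A standalone sketch of the gained-modification logic above: the Statement's site comes from the difference between output-side and input-side modifications. Tuples stand in for ModCondition here, and a plain list difference only approximates _get_mod_difference:

def mod_difference(mods_a, mods_b):
    # Keep modifications present in mods_a but not in mods_b.
    return [m for m in mods_a if m not in mods_b]

mods_in = [('phosphorylation', 'T', '185')]
mods_out = [('phosphorylation', 'T', '185'),
            ('phosphorylation', 'Y', '187')]
# For an AddModification subclass, gained = output - input:
print(mod_difference(mods_out, mods_in))  # [('phosphorylation', 'Y', '187')]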
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor._construct_modification_pattern | def _construct_modification_pattern():
"""Construct the BioPAX pattern to extract modification reactions."""
# The following constraints were pieced together based on the
# following two higher level constraints: pb.controlsStateChange(),
# pb.controlsPhosphorylation().
p = _bpp('Pattern')(_bpimpl('PhysicalEntity')().getModelInterface(),
'controller PE')
# Getting the control itself
p.add(cb.peToControl(), "controller PE", "Control")
# Link the control to the conversion that it controls
p.add(cb.controlToConv(), "Control", "Conversion")
# The controller shouldn't be a participant of the conversion
p.add(_bpp('constraint.NOT')(cb.participant()),
"Conversion", "controller PE")
# Get the input participant of the conversion
p.add(pt(rt.INPUT, True), "Control", "Conversion", "input PE")
# Get the specific PhysicalEntity
p.add(cb.linkToSpecific(), "input PE", "input simple PE")
# Link to ER
p.add(cb.peToER(), "input simple PE", "input simple ER")
# Make sure the participant is a protein
p.add(tp(_bpimpl('Protein')().getModelInterface()), "input simple PE")
# Link to the other side of the conversion
p.add(cs(cst.OTHER_SIDE), "input PE", "Conversion", "output PE")
# Make sure the two sides are not the same
p.add(_bpp('constraint.Equality')(False), "input PE", "output PE")
# Get the specific PhysicalEntity
p.add(cb.linkToSpecific(), "output PE", "output simple PE")
# Link to ER
p.add(cb.peToER(), "output simple PE", "output simple ER")
p.add(_bpp('constraint.Equality')(True), "input simple ER",
"output simple ER")
# Make sure the output is a Protein
p.add(tp(_bpimpl('Protein')().getModelInterface()), "output simple PE")
p.add(_bpp('constraint.NOT')(cb.linkToSpecific()),
"input PE", "output simple PE")
p.add(_bpp('constraint.NOT')(cb.linkToSpecific()),
"output PE", "input simple PE")
return p | python | def _construct_modification_pattern():
"""Construct the BioPAX pattern to extract modification reactions."""
# The following constraints were pieced together based on the
# following two higher level constraints: pb.controlsStateChange(),
# pb.controlsPhosphorylation().
p = _bpp('Pattern')(_bpimpl('PhysicalEntity')().getModelInterface(),
'controller PE')
# Getting the control itself
p.add(cb.peToControl(), "controller PE", "Control")
# Link the control to the conversion that it controls
p.add(cb.controlToConv(), "Control", "Conversion")
# The controller shouldn't be a participant of the conversion
p.add(_bpp('constraint.NOT')(cb.participant()),
"Conversion", "controller PE")
# Get the input participant of the conversion
p.add(pt(rt.INPUT, True), "Control", "Conversion", "input PE")
# Get the specific PhysicalEntity
p.add(cb.linkToSpecific(), "input PE", "input simple PE")
# Link to ER
p.add(cb.peToER(), "input simple PE", "input simple ER")
# Make sure the participant is a protein
p.add(tp(_bpimpl('Protein')().getModelInterface()), "input simple PE")
# Link to the other side of the conversion
p.add(cs(cst.OTHER_SIDE), "input PE", "Conversion", "output PE")
# Make sure the two sides are not the same
p.add(_bpp('constraint.Equality')(False), "input PE", "output PE")
# Get the specific PhysicalEntity
p.add(cb.linkToSpecific(), "output PE", "output simple PE")
# Link to ER
p.add(cb.peToER(), "output simple PE", "output simple ER")
p.add(_bpp('constraint.Equality')(True), "input simple ER",
"output simple ER")
# Make sure the output is a Protein
p.add(tp(_bpimpl('Protein')().getModelInterface()), "output simple PE")
p.add(_bpp('constraint.NOT')(cb.linkToSpecific()),
"input PE", "output simple PE")
p.add(_bpp('constraint.NOT')(cb.linkToSpecific()),
"output PE", "input simple PE")
return p | [
"def",
"_construct_modification_pattern",
"(",
")",
":",
"# The following constraints were pieced together based on the",
"# following two higher level constrains: pb.controlsStateChange(),",
"# pb.controlsPhosphorylation().",
"p",
"=",
"_bpp",
"(",
"'Pattern'",
")",
"(",
"_bpimpl",
"(",
"'PhysicalEntity'",
")",
"(",
")",
".",
"getModelInterface",
"(",
")",
",",
"'controller PE'",
")",
"# Getting the control itself",
"p",
".",
"add",
"(",
"cb",
".",
"peToControl",
"(",
")",
",",
"\"controller PE\"",
",",
"\"Control\"",
")",
"# Link the control to the conversion that it controls",
"p",
".",
"add",
"(",
"cb",
".",
"controlToConv",
"(",
")",
",",
"\"Control\"",
",",
"\"Conversion\"",
")",
"# The controller shouldn't be a participant of the conversion",
"p",
".",
"add",
"(",
"_bpp",
"(",
"'constraint.NOT'",
")",
"(",
"cb",
".",
"participant",
"(",
")",
")",
",",
"\"Conversion\"",
",",
"\"controller PE\"",
")",
"# Get the input participant of the conversion",
"p",
".",
"add",
"(",
"pt",
"(",
"rt",
".",
"INPUT",
",",
"True",
")",
",",
"\"Control\"",
",",
"\"Conversion\"",
",",
"\"input PE\"",
")",
"# Get the specific PhysicalEntity",
"p",
".",
"add",
"(",
"cb",
".",
"linkToSpecific",
"(",
")",
",",
"\"input PE\"",
",",
"\"input simple PE\"",
")",
"# Link to ER",
"p",
".",
"add",
"(",
"cb",
".",
"peToER",
"(",
")",
",",
"\"input simple PE\"",
",",
"\"input simple ER\"",
")",
"# Make sure the participant is a protein",
"p",
".",
"add",
"(",
"tp",
"(",
"_bpimpl",
"(",
"'Protein'",
")",
"(",
")",
".",
"getModelInterface",
"(",
")",
")",
",",
"\"input simple PE\"",
")",
"# Link to the other side of the conversion",
"p",
".",
"add",
"(",
"cs",
"(",
"cst",
".",
"OTHER_SIDE",
")",
",",
"\"input PE\"",
",",
"\"Conversion\"",
",",
"\"output PE\"",
")",
"# Make sure the two sides are not the same",
"p",
".",
"add",
"(",
"_bpp",
"(",
"'constraint.Equality'",
")",
"(",
"False",
")",
",",
"\"input PE\"",
",",
"\"output PE\"",
")",
"# Get the specific PhysicalEntity",
"p",
".",
"add",
"(",
"cb",
".",
"linkToSpecific",
"(",
")",
",",
"\"output PE\"",
",",
"\"output simple PE\"",
")",
"# Link to ER",
"p",
".",
"add",
"(",
"cb",
".",
"peToER",
"(",
")",
",",
"\"output simple PE\"",
",",
"\"output simple ER\"",
")",
"p",
".",
"add",
"(",
"_bpp",
"(",
"'constraint.Equality'",
")",
"(",
"True",
")",
",",
"\"input simple ER\"",
",",
"\"output simple ER\"",
")",
"# Make sure the output is a Protein",
"p",
".",
"add",
"(",
"tp",
"(",
"_bpimpl",
"(",
"'Protein'",
")",
"(",
")",
".",
"getModelInterface",
"(",
")",
")",
",",
"\"output simple PE\"",
")",
"p",
".",
"add",
"(",
"_bpp",
"(",
"'constraint.NOT'",
")",
"(",
"cb",
".",
"linkToSpecific",
"(",
")",
")",
",",
"\"input PE\"",
",",
"\"output simple PE\"",
")",
"p",
".",
"add",
"(",
"_bpp",
"(",
"'constraint.NOT'",
")",
"(",
"cb",
".",
"linkToSpecific",
"(",
")",
")",
",",
"\"output PE\"",
",",
"\"input simple PE\"",
")",
"return",
"p"
]
| Construct the BioPAX pattern to extract modification reactions. | [
"Construct",
"the",
"BioPAX",
"pattern",
"to",
"extract",
"modification",
"reactions",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L788-L826 | train |
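A plain-Python analogue of the Pattern/indexOf idiom that consumers of this pattern rely on: each match comes back as an array aligned with the pattern's named slots, so values are read by slot name. The slot names mirror the pattern; the match values are hypothetical:

slot_names = ['controller PE', 'Control', 'Conversion', 'input PE',
              'input simple PE', 'input simple ER', 'output PE',
              'output simple PE', 'output simple ER']
index_of = {name: i for i, name in enumerate(slot_names)}

match = ['MAP2K1', 'ctrl1', 'rxn1', 'ERK2(in)', 'ERK2', 'ERK2-ER',
         'ERK2(out)', 'ERK2-p', 'ERK2-ER']
print(match[index_of['controller PE']], 'modifies',
      match[index_of['input simple PE']])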
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor._extract_mod_from_feature | def _extract_mod_from_feature(mf):
"""Extract the type of modification and the position from
a ModificationFeature object in the INDRA format."""
# ModificationFeature / SequenceModificationVocabulary
mf_type = mf.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
known_mf_type = None
for t in mf_type_terms:
if t.startswith('MOD_RES '):
t = t[8:]
mf_type_indra = _mftype_dict.get(t)
if mf_type_indra is not None:
known_mf_type = mf_type_indra
break
if not known_mf_type:
logger.debug('Skipping modification with unknown terms: %s' %
', '.join(mf_type_terms))
return None
mod_type, residue = known_mf_type
# getFeatureLocation returns SequenceLocation, which is the
# generic parent class of SequenceSite and SequenceInterval.
# Here we need to cast to SequenceSite in order to get to
# the sequence position.
mf_pos = mf.getFeatureLocation()
if mf_pos is not None:
# If it is not a SequenceSite we can't handle it
if not mf_pos.modelInterface.getName() == \
'org.biopax.paxtools.model.level3.SequenceSite':
mod_pos = None
else:
mf_site = cast(_bp('SequenceSite'), mf_pos)
mf_pos_status = mf_site.getPositionStatus()
if mf_pos_status is None:
mod_pos = None
elif mf_pos_status and mf_pos_status.toString() != 'EQUAL':
logger.debug('Modification site position is %s' %
mf_pos_status.toString())
# Position status other than EQUAL: leave the position unset
mod_pos = None
else:
mod_pos = mf_site.getSequencePosition()
mod_pos = '%s' % mod_pos
else:
mod_pos = None
mc = ModCondition(mod_type, residue, mod_pos, True)
return mc | python | def _extract_mod_from_feature(mf):
"""Extract the type of modification and the position from
a ModificationFeature object in the INDRA format."""
# ModificationFeature / SequenceModificationVocabulary
mf_type = mf.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
known_mf_type = None
for t in mf_type_terms:
if t.startswith('MOD_RES '):
t = t[8:]
mf_type_indra = _mftype_dict.get(t)
if mf_type_indra is not None:
known_mf_type = mf_type_indra
break
if not known_mf_type:
logger.debug('Skipping modification with unknown terms: %s' %
', '.join(mf_type_terms))
return None
mod_type, residue = known_mf_type
# getFeatureLocation returns SequenceLocation, which is the
# generic parent class of SequenceSite and SequenceInterval.
# Here we need to cast to SequenceSite in order to get to
# the sequence position.
mf_pos = mf.getFeatureLocation()
if mf_pos is not None:
# If it is not a SequenceSite we can't handle it
if not mf_pos.modelInterface.getName() == \
'org.biopax.paxtools.model.level3.SequenceSite':
mod_pos = None
else:
mf_site = cast(_bp('SequenceSite'), mf_pos)
mf_pos_status = mf_site.getPositionStatus()
if mf_pos_status is None:
mod_pos = None
elif mf_pos_status and mf_pos_status.toString() != 'EQUAL':
logger.debug('Modification site position is %s' %
mf_pos_status.toString())
# Position status other than EQUAL: leave the position unset
mod_pos = None
else:
mod_pos = mf_site.getSequencePosition()
mod_pos = '%s' % mod_pos
else:
mod_pos = None
mc = ModCondition(mod_type, residue, mod_pos, True)
return mc | [
"def",
"_extract_mod_from_feature",
"(",
"mf",
")",
":",
"# ModificationFeature / SequenceModificationVocabulary",
"mf_type",
"=",
"mf",
".",
"getModificationType",
"(",
")",
"if",
"mf_type",
"is",
"None",
":",
"return",
"None",
"mf_type_terms",
"=",
"mf_type",
".",
"getTerm",
"(",
")",
".",
"toArray",
"(",
")",
"known_mf_type",
"=",
"None",
"for",
"t",
"in",
"mf_type_terms",
":",
"if",
"t",
".",
"startswith",
"(",
"'MOD_RES '",
")",
":",
"t",
"=",
"t",
"[",
"8",
":",
"]",
"mf_type_indra",
"=",
"_mftype_dict",
".",
"get",
"(",
"t",
")",
"if",
"mf_type_indra",
"is",
"not",
"None",
":",
"known_mf_type",
"=",
"mf_type_indra",
"break",
"if",
"not",
"known_mf_type",
":",
"logger",
".",
"debug",
"(",
"'Skipping modification with unknown terms: %s'",
"%",
"', '",
".",
"join",
"(",
"mf_type_terms",
")",
")",
"return",
"None",
"mod_type",
",",
"residue",
"=",
"known_mf_type",
"# getFeatureLocation returns SequenceLocation, which is the",
"# generic parent class of SequenceSite and SequenceInterval.",
"# Here we need to cast to SequenceSite in order to get to",
"# the sequence position.",
"mf_pos",
"=",
"mf",
".",
"getFeatureLocation",
"(",
")",
"if",
"mf_pos",
"is",
"not",
"None",
":",
"# If it is not a SequenceSite we can't handle it",
"if",
"not",
"mf_pos",
".",
"modelInterface",
".",
"getName",
"(",
")",
"==",
"'org.biopax.paxtools.model.level3.SequenceSite'",
":",
"mod_pos",
"=",
"None",
"else",
":",
"mf_site",
"=",
"cast",
"(",
"_bp",
"(",
"'SequenceSite'",
")",
",",
"mf_pos",
")",
"mf_pos_status",
"=",
"mf_site",
".",
"getPositionStatus",
"(",
")",
"if",
"mf_pos_status",
"is",
"None",
":",
"mod_pos",
"=",
"None",
"elif",
"mf_pos_status",
"and",
"mf_pos_status",
".",
"toString",
"(",
")",
"!=",
"'EQUAL'",
":",
"logger",
".",
"debug",
"(",
"'Modification site position is %s'",
"%",
"mf_pos_status",
".",
"toString",
"(",
")",
")",
"else",
":",
"mod_pos",
"=",
"mf_site",
".",
"getSequencePosition",
"(",
")",
"mod_pos",
"=",
"'%s'",
"%",
"mod_pos",
"else",
":",
"mod_pos",
"=",
"None",
"mc",
"=",
"ModCondition",
"(",
"mod_type",
",",
"residue",
",",
"mod_pos",
",",
"True",
")",
"return",
"mc"
]
| Extract the type of modification and the position from
a ModificationFeature object in the INDRA format. | [
"Extract",
"the",
"type",
"of",
"modification",
"and",
"the",
"position",
"from",
"a",
"ModificationFeature",
"object",
"in",
"the",
"INDRA",
"format",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L875-L922 | train |
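A self-contained sketch of the term normalization step above: strip an optional 'MOD_RES ' prefix, then look the term up in a term-to-(type, residue) table. The two entries shown are illustrative and do not reproduce the full _mftype_dict:

mftype_demo = {
    'O-phospho-L-serine': ('phosphorylation', 'S'),
    'O-phospho-L-threonine': ('phosphorylation', 'T'),
}

def lookup_mod_term(term):
    if term.startswith('MOD_RES '):
        term = term[8:]
    return mftype_demo.get(term)

print(lookup_mod_term('MOD_RES O-phospho-L-serine'))  # ('phosphorylation', 'S')
print(lookup_mod_term('unknown term'))                # None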
sorgerlab/indra | indra/sources/biopax/processor.py | BiopaxProcessor._get_entref | def _get_entref(bpe):
"""Returns the entity reference of an entity if it exists or
return the entity reference that was passed in as argument."""
if not _is_reference(bpe):
try:
er = bpe.getEntityReference()
except AttributeError:
return None
return er
else:
return bpe | python | def _get_entref(bpe):
"""Returns the entity reference of an entity if it exists or
return the entity reference that was passed in as argument."""
if not _is_reference(bpe):
try:
er = bpe.getEntityReference()
except AttributeError:
return None
return er
else:
return bpe | [
"def",
"_get_entref",
"(",
"bpe",
")",
":",
"if",
"not",
"_is_reference",
"(",
"bpe",
")",
":",
"try",
":",
"er",
"=",
"bpe",
".",
"getEntityReference",
"(",
")",
"except",
"AttributeError",
":",
"return",
"None",
"return",
"er",
"else",
":",
"return",
"bpe"
]
| Returns the entity reference of an entity if it exists, or
returns the entity reference that was passed in as an argument. | [
"Returns",
"the",
"entity",
"reference",
"of",
"an",
"entity",
"if",
"it",
"exists",
"or",
"return",
"the",
"entity",
"reference",
"that",
"was",
"passed",
"in",
"as",
"argument",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1224-L1234 | train |
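A toy stand-in for the duck-typed fallback above, with plain classes in place of the paxtools objects (the reference-passthrough branch is omitted for brevity):

class EntityRef:
    pass

class ProteinEntity:
    def __init__(self, ref):
        self.ref = ref

    def getEntityReference(self):
        return self.ref

def get_entref_demo(bpe):
    try:
        return bpe.getEntityReference()
    except AttributeError:
        return None  # the object carries no entity reference

ref = EntityRef()
print(get_entref_demo(ProteinEntity(ref)) is ref)  # True
print(get_entref_demo(object()))                   # None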
sorgerlab/indra | indra/sources/trips/processor.py | _stmt_location_to_agents | def _stmt_location_to_agents(stmt, location):
"""Apply an event location to the Agents in the corresponding Statement.
If a Statement is in a given location, we represent that by requiring all
Agents in the Statement to be in that location.
"""
if location is None:
return
agents = stmt.agent_list()
for a in agents:
if a is not None:
a.location = location | python | def _stmt_location_to_agents(stmt, location):
"""Apply an event location to the Agents in the corresponding Statement.
If a Statement is in a given location, we represent that by requiring all
Agents in the Statement to be in that location.
"""
if location is None:
return
agents = stmt.agent_list()
for a in agents:
if a is not None:
a.location = location | [
"def",
"_stmt_location_to_agents",
"(",
"stmt",
",",
"location",
")",
":",
"if",
"location",
"is",
"None",
":",
"return",
"agents",
"=",
"stmt",
".",
"agent_list",
"(",
")",
"for",
"a",
"in",
"agents",
":",
"if",
"a",
"is",
"not",
"None",
":",
"a",
".",
"location",
"=",
"location"
]
| Apply an event location to the Agents in the corresponding Statement.
If a Statement is in a given location, we represent that by requiring all
Agents in the Statement to be in that location. | [
"Apply",
"an",
"event",
"location",
"to",
"the",
"Agents",
"in",
"the",
"corresponding",
"Statement",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L1710-L1721 | train |
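The propagation rule above, applied with the public indra.statements classes (assuming indra is installed): putting the whole Statement in a compartment means setting each Agent's location.

from indra.statements import Activation, Agent

stmt = Activation(Agent('BRAF'), Agent('MAP2K1'))
for a in stmt.agent_list():
    if a is not None:
        a.location = 'cytoplasm'
print([a.location for a in stmt.agent_list()])  # ['cytoplasm', 'cytoplasm']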
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_all_events | def get_all_events(self):
"""Make a list of all events in the TRIPS EKB.
The events are stored in self.all_events.
"""
self.all_events = {}
events = self.tree.findall('EVENT')
events += self.tree.findall('CC')
for e in events:
event_id = e.attrib['id']
if event_id in self._static_events:
continue
event_type = e.find('type').text
try:
self.all_events[event_type].append(event_id)
except KeyError:
self.all_events[event_type] = [event_id] | python | def get_all_events(self):
"""Make a list of all events in the TRIPS EKB.
The events are stored in self.all_events.
"""
self.all_events = {}
events = self.tree.findall('EVENT')
events += self.tree.findall('CC')
for e in events:
event_id = e.attrib['id']
if event_id in self._static_events:
continue
event_type = e.find('type').text
try:
self.all_events[event_type].append(event_id)
except KeyError:
self.all_events[event_type] = [event_id] | [
"def",
"get_all_events",
"(",
"self",
")",
":",
"self",
".",
"all_events",
"=",
"{",
"}",
"events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"'EVENT'",
")",
"events",
"+=",
"self",
".",
"tree",
".",
"findall",
"(",
"'CC'",
")",
"for",
"e",
"in",
"events",
":",
"event_id",
"=",
"e",
".",
"attrib",
"[",
"'id'",
"]",
"if",
"event_id",
"in",
"self",
".",
"_static_events",
":",
"continue",
"event_type",
"=",
"e",
".",
"find",
"(",
"'type'",
")",
".",
"text",
"try",
":",
"self",
".",
"all_events",
"[",
"event_type",
"]",
".",
"append",
"(",
"event_id",
")",
"except",
"KeyError",
":",
"self",
".",
"all_events",
"[",
"event_type",
"]",
"=",
"[",
"event_id",
"]"
]
| Make a list of all events in the TRIPS EKB.
The events are stored in self.all_events. | [
"Make",
"a",
"list",
"of",
"all",
"events",
"in",
"the",
"TRIPS",
"EKB",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L98-L114 | train |
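The try/except KeyError accumulation above is equivalent to a collections.defaultdict(list); a standalone illustration with hypothetical event IDs:

from collections import defaultdict

all_events = defaultdict(list)
for event_type, event_id in [('ONT::ACTIVATE', 'V1'),
                             ('ONT::PHOSPHORYLATION', 'V2'),
                             ('ONT::ACTIVATE', 'V3')]:
    all_events[event_type].append(event_id)
print(dict(all_events))
# {'ONT::ACTIVATE': ['V1', 'V3'], 'ONT::PHOSPHORYLATION': ['V2']}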
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_activations | def get_activations(self):
"""Extract direct Activation INDRA Statements."""
act_events = self.tree.findall("EVENT/[type='ONT::ACTIVATE']")
inact_events = self.tree.findall("EVENT/[type='ONT::DEACTIVATE']")
inact_events += self.tree.findall("EVENT/[type='ONT::INHIBIT']")
for event in (act_events + inact_events):
event_id = event.attrib['id']
if event_id in self._static_events:
continue
# Get the activating agent in the event
agent = event.find(".//*[@role=':AGENT']")
if agent is None:
continue
agent_id = agent.attrib.get('id')
if agent_id is None:
logger.debug(
'Skipping activation with missing activator agent')
continue
activator_agent = self._get_agent_by_id(agent_id, event_id)
if activator_agent is None:
continue
# Get the activated agent in the event
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
affected_id = affected.attrib.get('id')
if affected_id is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
affected_agent = self._get_agent_by_id(affected_id, event_id)
if affected_agent is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
is_activation = True
if _is_type(event, 'ONT::ACTIVATE'):
self._add_extracted('ONT::ACTIVATE', event.attrib['id'])
elif _is_type(event, 'ONT::INHIBIT'):
is_activation = False
self._add_extracted('ONT::INHIBIT', event.attrib['id'])
elif _is_type(event, 'ONT::DEACTIVATE'):
is_activation = False
self._add_extracted('ONT::DEACTIVATE', event.attrib['id'])
ev = self._get_evidence(event)
location = self._get_event_location(event)
for a1, a2 in _agent_list_product((activator_agent,
affected_agent)):
if is_activation:
st = Activation(a1, a2, evidence=[deepcopy(ev)])
else:
st = Inhibition(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | python | def get_activations(self):
"""Extract direct Activation INDRA Statements."""
act_events = self.tree.findall("EVENT/[type='ONT::ACTIVATE']")
inact_events = self.tree.findall("EVENT/[type='ONT::DEACTIVATE']")
inact_events += self.tree.findall("EVENT/[type='ONT::INHIBIT']")
for event in (act_events + inact_events):
event_id = event.attrib['id']
if event_id in self._static_events:
continue
# Get the activating agent in the event
agent = event.find(".//*[@role=':AGENT']")
if agent is None:
continue
agent_id = agent.attrib.get('id')
if agent_id is None:
logger.debug(
'Skipping activation with missing activator agent')
continue
activator_agent = self._get_agent_by_id(agent_id, event_id)
if activator_agent is None:
continue
# Get the activated agent in the event
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
affected_id = affected.attrib.get('id')
if affected_id is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
affected_agent = self._get_agent_by_id(affected_id, event_id)
if affected_agent is None:
logger.debug(
'Skipping activation with missing affected agent')
continue
is_activation = True
if _is_type(event, 'ONT::ACTIVATE'):
self._add_extracted('ONT::ACTIVATE', event.attrib['id'])
elif _is_type(event, 'ONT::INHIBIT'):
is_activation = False
self._add_extracted('ONT::INHIBIT', event.attrib['id'])
elif _is_type(event, 'ONT::DEACTIVATE'):
is_activation = False
self._add_extracted('ONT::DEACTIVATE', event.attrib['id'])
ev = self._get_evidence(event)
location = self._get_event_location(event)
for a1, a2 in _agent_list_product((activator_agent,
affected_agent)):
if is_activation:
st = Activation(a1, a2, evidence=[deepcopy(ev)])
else:
st = Inhibition(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | [
"def",
"get_activations",
"(",
"self",
")",
":",
"act_events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::ACTIVATE']\"",
")",
"inact_events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::DEACTIVATE']\"",
")",
"inact_events",
"+=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::INHIBIT']\"",
")",
"for",
"event",
"in",
"(",
"act_events",
"+",
"inact_events",
")",
":",
"event_id",
"=",
"event",
".",
"attrib",
"[",
"'id'",
"]",
"if",
"event_id",
"in",
"self",
".",
"_static_events",
":",
"continue",
"# Get the activating agent in the event",
"agent",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':AGENT']\"",
")",
"if",
"agent",
"is",
"None",
":",
"continue",
"agent_id",
"=",
"agent",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"agent_id",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Skipping activation with missing activator agent'",
")",
"continue",
"activator_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"agent_id",
",",
"event_id",
")",
"if",
"activator_agent",
"is",
"None",
":",
"continue",
"# Get the activated agent in the event",
"affected",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':AFFECTED']\"",
")",
"if",
"affected",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Skipping activation with missing affected agent'",
")",
"continue",
"affected_id",
"=",
"affected",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"affected_id",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Skipping activation with missing affected agent'",
")",
"continue",
"affected_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"affected_id",
",",
"event_id",
")",
"if",
"affected_agent",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Skipping activation with missing affected agent'",
")",
"continue",
"is_activation",
"=",
"True",
"if",
"_is_type",
"(",
"event",
",",
"'ONT::ACTIVATE'",
")",
":",
"self",
".",
"_add_extracted",
"(",
"'ONT::ACTIVATE'",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"elif",
"_is_type",
"(",
"event",
",",
"'ONT::INHIBIT'",
")",
":",
"is_activation",
"=",
"False",
"self",
".",
"_add_extracted",
"(",
"'ONT::INHIBIT'",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"elif",
"_is_type",
"(",
"event",
",",
"'ONT::DEACTIVATE'",
")",
":",
"is_activation",
"=",
"False",
"self",
".",
"_add_extracted",
"(",
"'ONT::DEACTIVATE'",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"event",
")",
"location",
"=",
"self",
".",
"_get_event_location",
"(",
"event",
")",
"for",
"a1",
",",
"a2",
"in",
"_agent_list_product",
"(",
"(",
"activator_agent",
",",
"affected_agent",
")",
")",
":",
"if",
"is_activation",
":",
"st",
"=",
"Activation",
"(",
"a1",
",",
"a2",
",",
"evidence",
"=",
"[",
"deepcopy",
"(",
"ev",
")",
"]",
")",
"else",
":",
"st",
"=",
"Inhibition",
"(",
"a1",
",",
"a2",
",",
"evidence",
"=",
"[",
"deepcopy",
"(",
"ev",
")",
"]",
")",
"_stmt_location_to_agents",
"(",
"st",
",",
"location",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract direct Activation INDRA Statements. | [
"Extract",
"direct",
"Activation",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L116-L174 | train |
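In normal use this extraction runs as part of processing TRIPS output through the module-level API; a hedged end-to-end sketch (requires the indra package and network access to the TRIPS/DRUM web service, so output depends on the service):

from indra.sources import trips

tp = trips.process_text('BRAF activates MAP2K1.')
if tp is not None:
    for st in tp.statements:
        print(type(st).__name__, st.agent_list())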
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_activations_causal | def get_activations_causal(self):
"""Extract causal Activation INDRA Statements."""
# Search for causal connectives of type ONT::CAUSE
ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
for cc in ccs:
factor = cc.find("arg/[@role=':FACTOR']")
outcome = cc.find("arg/[@role=':OUTCOME']")
# If either the factor or the outcome is missing, skip
if factor is None or outcome is None:
continue
factor_id = factor.attrib.get('id')
# Here, implicitly, we require that the factor is a TERM
# and not an EVENT
factor_term = self.tree.find("TERM/[@id='%s']" % factor_id)
outcome_id = outcome.attrib.get('id')
# Here it is implicit that the outcome is an event not
# a TERM
outcome_event = self.tree.find("EVENT/[@id='%s']" % outcome_id)
if factor_term is None or outcome_event is None:
continue
factor_term_type = factor_term.find('type')
# The factor term must be a molecular entity
if factor_term_type is None or \
factor_term_type.text not in molecule_types:
continue
factor_agent = self._get_agent_by_id(factor_id, None)
if factor_agent is None:
continue
outcome_event_type = outcome_event.find('type')
if outcome_event_type is None:
continue
# Construct evidence
ev = self._get_evidence(cc)
ev.epistemics['direct'] = False
location = self._get_event_location(outcome_event)
if outcome_event_type.text in ['ONT::ACTIVATE', 'ONT::ACTIVITY',
'ONT::DEACTIVATE']:
if outcome_event_type.text in ['ONT::ACTIVATE',
'ONT::DEACTIVATE']:
agent_tag = outcome_event.find(".//*[@role=':AFFECTED']")
elif outcome_event_type.text == 'ONT::ACTIVITY':
agent_tag = outcome_event.find(".//*[@role=':AGENT']")
if agent_tag is None or agent_tag.attrib.get('id') is None:
continue
outcome_agent = self._get_agent_by_id(agent_tag.attrib['id'],
outcome_id)
if outcome_agent is None:
continue
if outcome_event_type.text == 'ONT::DEACTIVATE':
is_activation = False
else:
is_activation = True
for a1, a2 in _agent_list_product((factor_agent,
outcome_agent)):
if is_activation:
st = Activation(a1, a2, evidence=[deepcopy(ev)])
else:
st = Inhibition(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | python | def get_activations_causal(self):
"""Extract causal Activation INDRA Statements."""
# Search for causal connectives of type ONT::CAUSE
ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
for cc in ccs:
factor = cc.find("arg/[@role=':FACTOR']")
outcome = cc.find("arg/[@role=':OUTCOME']")
# If either the factor or the outcome is missing, skip
if factor is None or outcome is None:
continue
factor_id = factor.attrib.get('id')
# Here, implicitly, we require that the factor is a TERM
# and not an EVENT
factor_term = self.tree.find("TERM/[@id='%s']" % factor_id)
outcome_id = outcome.attrib.get('id')
# Here it is implicit that the outcome is an event not
# a TERM
outcome_event = self.tree.find("EVENT/[@id='%s']" % outcome_id)
if factor_term is None or outcome_event is None:
continue
factor_term_type = factor_term.find('type')
# The factor term must be a molecular entity
if factor_term_type is None or \
factor_term_type.text not in molecule_types:
continue
factor_agent = self._get_agent_by_id(factor_id, None)
if factor_agent is None:
continue
outcome_event_type = outcome_event.find('type')
if outcome_event_type is None:
continue
# Construct evidence
ev = self._get_evidence(cc)
ev.epistemics['direct'] = False
location = self._get_event_location(outcome_event)
if outcome_event_type.text in ['ONT::ACTIVATE', 'ONT::ACTIVITY',
'ONT::DEACTIVATE']:
if outcome_event_type.text in ['ONT::ACTIVATE',
'ONT::DEACTIVATE']:
agent_tag = outcome_event.find(".//*[@role=':AFFECTED']")
elif outcome_event_type.text == 'ONT::ACTIVITY':
agent_tag = outcome_event.find(".//*[@role=':AGENT']")
if agent_tag is None or agent_tag.attrib.get('id') is None:
continue
outcome_agent = self._get_agent_by_id(agent_tag.attrib['id'],
outcome_id)
if outcome_agent is None:
continue
if outcome_event_type.text == 'ONT::DEACTIVATE':
is_activation = False
else:
is_activation = True
for a1, a2 in _agent_list_product((factor_agent,
outcome_agent)):
if is_activation:
st = Activation(a1, a2, evidence=[deepcopy(ev)])
else:
st = Inhibition(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | [
"def",
"get_activations_causal",
"(",
"self",
")",
":",
"# Search for causal connectives of type ONT::CAUSE",
"ccs",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"CC/[type='ONT::CAUSE']\"",
")",
"for",
"cc",
"in",
"ccs",
":",
"factor",
"=",
"cc",
".",
"find",
"(",
"\"arg/[@role=':FACTOR']\"",
")",
"outcome",
"=",
"cc",
".",
"find",
"(",
"\"arg/[@role=':OUTCOME']\"",
")",
"# If either the factor or the outcome is missing, skip",
"if",
"factor",
"is",
"None",
"or",
"outcome",
"is",
"None",
":",
"continue",
"factor_id",
"=",
"factor",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"# Here, implicitly, we require that the factor is a TERM",
"# and not an EVENT",
"factor_term",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"\"TERM/[@id='%s']\"",
"%",
"factor_id",
")",
"outcome_id",
"=",
"outcome",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"# Here it is implicit that the outcome is an event not",
"# a TERM",
"outcome_event",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"\"EVENT/[@id='%s']\"",
"%",
"outcome_id",
")",
"if",
"factor_term",
"is",
"None",
"or",
"outcome_event",
"is",
"None",
":",
"continue",
"factor_term_type",
"=",
"factor_term",
".",
"find",
"(",
"'type'",
")",
"# The factor term must be a molecular entity",
"if",
"factor_term_type",
"is",
"None",
"or",
"factor_term_type",
".",
"text",
"not",
"in",
"molecule_types",
":",
"continue",
"factor_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"factor_id",
",",
"None",
")",
"if",
"factor_agent",
"is",
"None",
":",
"continue",
"outcome_event_type",
"=",
"outcome_event",
".",
"find",
"(",
"'type'",
")",
"if",
"outcome_event_type",
"is",
"None",
":",
"continue",
"# Construct evidence",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"cc",
")",
"ev",
".",
"epistemics",
"[",
"'direct'",
"]",
"=",
"False",
"location",
"=",
"self",
".",
"_get_event_location",
"(",
"outcome_event",
")",
"if",
"outcome_event_type",
".",
"text",
"in",
"[",
"'ONT::ACTIVATE'",
",",
"'ONT::ACTIVITY'",
",",
"'ONT::DEACTIVATE'",
"]",
":",
"if",
"outcome_event_type",
".",
"text",
"in",
"[",
"'ONT::ACTIVATE'",
",",
"'ONT::DEACTIVATE'",
"]",
":",
"agent_tag",
"=",
"outcome_event",
".",
"find",
"(",
"\".//*[@role=':AFFECTED']\"",
")",
"elif",
"outcome_event_type",
".",
"text",
"==",
"'ONT::ACTIVITY'",
":",
"agent_tag",
"=",
"outcome_event",
".",
"find",
"(",
"\".//*[@role=':AGENT']\"",
")",
"if",
"agent_tag",
"is",
"None",
"or",
"agent_tag",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"is",
"None",
":",
"continue",
"outcome_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"agent_tag",
".",
"attrib",
"[",
"'id'",
"]",
",",
"outcome_id",
")",
"if",
"outcome_agent",
"is",
"None",
":",
"continue",
"if",
"outcome_event_type",
".",
"text",
"==",
"'ONT::DEACTIVATE'",
":",
"is_activation",
"=",
"False",
"else",
":",
"is_activation",
"=",
"True",
"for",
"a1",
",",
"a2",
"in",
"_agent_list_product",
"(",
"(",
"factor_agent",
",",
"outcome_agent",
")",
")",
":",
"if",
"is_activation",
":",
"st",
"=",
"Activation",
"(",
"a1",
",",
"a2",
",",
"evidence",
"=",
"[",
"deepcopy",
"(",
"ev",
")",
"]",
")",
"else",
":",
"st",
"=",
"Inhibition",
"(",
"a1",
",",
"a2",
",",
"evidence",
"=",
"[",
"deepcopy",
"(",
"ev",
")",
"]",
")",
"_stmt_location_to_agents",
"(",
"st",
",",
"location",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract causal Activation INDRA Statements. | [
"Extract",
"causal",
"Activation",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L176-L235 | train |
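The record above walks an EKB parse: causal connectives (CC elements typed ONT::CAUSE) are resolved by following their :FACTOR arg to a TERM and their :OUTCOME arg to an EVENT, whose type then decides between Activation and Inhibition. Below is a minimal, self-contained sketch of that traversal using only the standard library; the toy EKB fragment is an illustrative assumption, not real TRIPS output.

import xml.etree.ElementTree as ET

# Toy EKB-like fragment (made up for illustration).
ekb = """
<ekb>
  <CC id="CC1"><type>ONT::CAUSE</type>
    <arg id="T1" role=":FACTOR"/>
    <arg id="E1" role=":OUTCOME"/>
  </CC>
  <TERM id="T1"><type>ONT::PROTEIN</type><name>BRAF</name></TERM>
  <EVENT id="E1"><type>ONT::ACTIVATE</type>
    <arg id="T2" role=":AFFECTED"/>
  </EVENT>
  <TERM id="T2"><type>ONT::PROTEIN</type><name>MAP2K1</name></TERM>
</ekb>
"""

tree = ET.fromstring(ekb)
for cc in tree.findall("CC/[type='ONT::CAUSE']"):
    factor = cc.find("arg/[@role=':FACTOR']")
    outcome = cc.find("arg/[@role=':OUTCOME']")
    if factor is None or outcome is None:
        continue
    # The factor must resolve to a TERM, the outcome to an EVENT.
    factor_term = tree.find("TERM/[@id='%s']" % factor.attrib.get('id'))
    outcome_event = tree.find("EVENT/[@id='%s']" % outcome.attrib.get('id'))
    if factor_term is None or outcome_event is None:
        continue
    # ONT::ACTIVATE / ONT::ACTIVITY -> Activation; ONT::DEACTIVATE -> Inhibition
    print(factor_term.find('name').text, outcome_event.find('type').text)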
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_activations_stimulate | def get_activations_stimulate(self):
"""Extract Activation INDRA Statements via stimulation."""
# TODO: extract to other patterns:
# - Stimulation by EGF activates ERK
# - Stimulation by EGF leads to ERK activation
# Search for stimulation event
stim_events = self.tree.findall("EVENT/[type='ONT::STIMULATE']")
for event in stim_events:
event_id = event.attrib.get('id')
if event_id in self._static_events:
continue
controller = event.find("arg1/[@role=':AGENT']")
affected = event.find("arg2/[@role=':AFFECTED']")
# If either the controller or the affected is missing, skip
if controller is None or affected is None:
continue
controller_id = controller.attrib.get('id')
# Here, implicitly, we require that the controller is a TERM
# and not an EVENT
controller_term = self.tree.find("TERM/[@id='%s']" % controller_id)
affected_id = affected.attrib.get('id')
# Here it is implicit that the affected is an event not
# a TERM
affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
if controller_term is None or affected_event is None:
continue
controller_term_type = controller_term.find('type')
# The controller term must be a molecular entity
if controller_term_type is None or \
controller_term_type.text not in molecule_types:
continue
controller_agent = self._get_agent_by_id(controller_id, None)
if controller_agent is None:
continue
affected_event_type = affected_event.find('type')
if affected_event_type is None:
continue
# Construct evidence
ev = self._get_evidence(event)
ev.epistemics['direct'] = False
location = self._get_event_location(affected_event)
if affected_event_type.text == 'ONT::ACTIVATE':
affected = affected_event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_agent = self._get_agent_by_id(affected.attrib['id'],
affected_id)
if affected_agent is None:
continue
for a1, a2 in _agent_list_product((controller_agent,
affected_agent)):
st = Activation(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st)
elif affected_event_type.text == 'ONT::ACTIVITY':
agent_tag = affected_event.find(".//*[@role=':AGENT']")
if agent_tag is None:
continue
affected_agent = self._get_agent_by_id(agent_tag.attrib['id'],
affected_id)
if affected_agent is None:
continue
for a1, a2 in _agent_list_product((controller_agent,
affected_agent)):
st = Activation(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | python | def get_activations_stimulate(self):
"""Extract Activation INDRA Statements via stimulation."""
# TODO: extract to other patterns:
# - Stimulation by EGF activates ERK
# - Stimulation by EGF leads to ERK activation
# Search for stimulation event
stim_events = self.tree.findall("EVENT/[type='ONT::STIMULATE']")
for event in stim_events:
event_id = event.attrib.get('id')
if event_id in self._static_events:
continue
controller = event.find("arg1/[@role=':AGENT']")
affected = event.find("arg2/[@role=':AFFECTED']")
# If either the controller or the affected is missing, skip
if controller is None or affected is None:
continue
controller_id = controller.attrib.get('id')
# Here, implicitly, we require that the controller is a TERM
# and not an EVENT
controller_term = self.tree.find("TERM/[@id='%s']" % controller_id)
affected_id = affected.attrib.get('id')
# Here it is implicit that the affected is an event not
# a TERM
affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
if controller_term is None or affected_event is None:
continue
controller_term_type = controller_term.find('type')
# The controller term must be a molecular entity
if controller_term_type is None or \
controller_term_type.text not in molecule_types:
continue
controller_agent = self._get_agent_by_id(controller_id, None)
if controller_agent is None:
continue
affected_event_type = affected_event.find('type')
if affected_event_type is None:
continue
# Construct evidence
ev = self._get_evidence(event)
ev.epistemics['direct'] = False
location = self._get_event_location(affected_event)
if affected_event_type.text == 'ONT::ACTIVATE':
affected = affected_event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_agent = self._get_agent_by_id(affected.attrib['id'],
affected_id)
if affected_agent is None:
continue
for a1, a2 in _agent_list_product((controller_agent,
affected_agent)):
st = Activation(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st)
elif affected_event_type.text == 'ONT::ACTIVITY':
agent_tag = affected_event.find(".//*[@role=':AGENT']")
if agent_tag is None:
continue
affected_agent = self._get_agent_by_id(agent_tag.attrib['id'],
affected_id)
if affected_agent is None:
continue
for a1, a2 in _agent_list_product((controller_agent,
affected_agent)):
st = Activation(a1, a2, evidence=[deepcopy(ev)])
_stmt_location_to_agents(st, location)
self.statements.append(st) | [
"def",
"get_activations_stimulate",
"(",
"self",
")",
":",
"# TODO: extract to other patterns:",
"# - Stimulation by EGF activates ERK",
"# - Stimulation by EGF leads to ERK activation",
"# Search for stimulation event",
"stim_events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::STIMULATE']\"",
")",
"for",
"event",
"in",
"stim_events",
":",
"event_id",
"=",
"event",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"event_id",
"in",
"self",
".",
"_static_events",
":",
"continue",
"controller",
"=",
"event",
".",
"find",
"(",
"\"arg1/[@role=':AGENT']\"",
")",
"affected",
"=",
"event",
".",
"find",
"(",
"\"arg2/[@role=':AFFECTED']\"",
")",
"# If either the controller or the affected is missing, skip",
"if",
"controller",
"is",
"None",
"or",
"affected",
"is",
"None",
":",
"continue",
"controller_id",
"=",
"controller",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"# Here, implicitly, we require that the controller is a TERM",
"# and not an EVENT",
"controller_term",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"\"TERM/[@id='%s']\"",
"%",
"controller_id",
")",
"affected_id",
"=",
"affected",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"# Here it is implicit that the affected is an event not",
"# a TERM",
"affected_event",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"\"EVENT/[@id='%s']\"",
"%",
"affected_id",
")",
"if",
"controller_term",
"is",
"None",
"or",
"affected_event",
"is",
"None",
":",
"continue",
"controller_term_type",
"=",
"controller_term",
".",
"find",
"(",
"'type'",
")",
"# The controller term must be a molecular entity",
"if",
"controller_term_type",
"is",
"None",
"or",
"controller_term_type",
".",
"text",
"not",
"in",
"molecule_types",
":",
"continue",
"controller_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"controller_id",
",",
"None",
")",
"if",
"controller_agent",
"is",
"None",
":",
"continue",
"affected_event_type",
"=",
"affected_event",
".",
"find",
"(",
"'type'",
")",
"if",
"affected_event_type",
"is",
"None",
":",
"continue",
"# Construct evidence",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"event",
")",
"ev",
".",
"epistemics",
"[",
"'direct'",
"]",
"=",
"False",
"location",
"=",
"self",
".",
"_get_event_location",
"(",
"affected_event",
")",
"if",
"affected_event_type",
".",
"text",
"==",
"'ONT::ACTIVATE'",
":",
"affected",
"=",
"affected_event",
".",
"find",
"(",
"\".//*[@role=':AFFECTED']\"",
")",
"if",
"affected",
"is",
"None",
":",
"continue",
"affected_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"affected",
".",
"attrib",
"[",
"'id'",
"]",
",",
"affected_id",
")",
"if",
"affected_agent",
"is",
"None",
":",
"continue",
"for",
"a1",
",",
"a2",
"in",
"_agent_list_product",
"(",
"(",
"controller_agent",
",",
"affected_agent",
")",
")",
":",
"st",
"=",
"Activation",
"(",
"a1",
",",
"a2",
",",
"evidence",
"=",
"[",
"deepcopy",
"(",
"ev",
")",
"]",
")",
"_stmt_location_to_agents",
"(",
"st",
",",
"location",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")",
"elif",
"affected_event_type",
".",
"text",
"==",
"'ONT::ACTIVITY'",
":",
"agent_tag",
"=",
"affected_event",
".",
"find",
"(",
"\".//*[@role=':AGENT']\"",
")",
"if",
"agent_tag",
"is",
"None",
":",
"continue",
"affected_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"agent_tag",
".",
"attrib",
"[",
"'id'",
"]",
",",
"affected_id",
")",
"if",
"affected_agent",
"is",
"None",
":",
"continue",
"for",
"a1",
",",
"a2",
"in",
"_agent_list_product",
"(",
"(",
"controller_agent",
",",
"affected_agent",
")",
")",
":",
"st",
"=",
"Activation",
"(",
"a1",
",",
"a2",
",",
"evidence",
"=",
"[",
"deepcopy",
"(",
"ev",
")",
"]",
")",
"_stmt_location_to_agents",
"(",
"st",
",",
"location",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
]
| Extract Activation INDRA Statements via stimulation. | [
"Extract",
"Activation",
"INDRA",
"Statements",
"via",
"stimulation",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L237-L303 | train |
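Both extraction loops above defer to a helper, _agent_list_product, before building Statements. Its implementation is not shown in this excerpt; here is a plausible stand-in, assuming it simply takes the Cartesian product over inputs that may be either a single Agent or a list of alternative Agents (e.g. a family resolved to its members).

from itertools import product

def agent_list_product(agents):
    # Hypothetical reimplementation: wrap scalars in lists, then take the
    # Cartesian product so each combination becomes one Statement.
    lists = [a if isinstance(a, list) else [a] for a in agents]
    return product(*lists)

# One controller, two candidate targets -> two (controller, target) pairs,
# each of which would get its own Activation with deep-copied evidence.
print(list(agent_list_product(('EGF', ['ERK1', 'ERK2']))))
# [('EGF', 'ERK1'), ('EGF', 'ERK2')]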
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_degradations | def get_degradations(self):
"""Extract Degradation INDRA Statements."""
deg_events = self.tree.findall("EVENT/[type='ONT::CONSUME']")
for event in deg_events:
if event.attrib['id'] in self._static_events:
continue
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
msg = 'Skipping degradation event with no affected term.'
logger.debug(msg)
continue
# Make sure the degradation is affecting a molecule type
# Temporarily removed for CwC compatibility with no type tag
#affected_type = affected.find('type')
#if affected_type is None or \
# affected_type.text not in molecule_types:
# continue
affected_id = affected.attrib.get('id')
if affected_id is None:
logger.debug(
'Skipping degradation event with missing affected agent')
continue
affected_agent = self._get_agent_by_id(affected_id,
event.attrib['id'])
if affected_agent is None:
logger.debug(
'Skipping degradation event with missing affected agent')
continue
agent = event.find(".//*[@role=':AGENT']")
if agent is None:
agent_agent = None
else:
agent_id = agent.attrib.get('id')
if agent_id is None:
agent_agent = None
else:
agent_agent = self._get_agent_by_id(agent_id,
event.attrib['id'])
ev = self._get_evidence(event)
location = self._get_event_location(event)
for subj, obj in \
_agent_list_product((agent_agent, affected_agent)):
st = DecreaseAmount(subj, obj, evidence=deepcopy(ev))
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted(_get_type(event), event.attrib['id']) | python | def get_degradations(self):
"""Extract Degradation INDRA Statements."""
deg_events = self.tree.findall("EVENT/[type='ONT::CONSUME']")
for event in deg_events:
if event.attrib['id'] in self._static_events:
continue
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
msg = 'Skipping degradation event with no affected term.'
logger.debug(msg)
continue
# Make sure the degradation is affecting a molecule type
# Temporarily removed for CwC compatibility with no type tag
#affected_type = affected.find('type')
#if affected_type is None or \
# affected_type.text not in molecule_types:
# continue
affected_id = affected.attrib.get('id')
if affected_id is None:
logger.debug(
'Skipping degradation event with missing affected agent')
continue
affected_agent = self._get_agent_by_id(affected_id,
event.attrib['id'])
if affected_agent is None:
logger.debug(
'Skipping degradation event with missing affected agent')
continue
agent = event.find(".//*[@role=':AGENT']")
if agent is None:
agent_agent = None
else:
agent_id = agent.attrib.get('id')
if agent_id is None:
agent_agent = None
else:
agent_agent = self._get_agent_by_id(agent_id,
event.attrib['id'])
ev = self._get_evidence(event)
location = self._get_event_location(event)
for subj, obj in \
_agent_list_product((agent_agent, affected_agent)):
st = DecreaseAmount(subj, obj, evidence=deepcopy(ev))
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted(_get_type(event), event.attrib['id']) | [
"def",
"get_degradations",
"(",
"self",
")",
":",
"deg_events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::CONSUME']\"",
")",
"for",
"event",
"in",
"deg_events",
":",
"if",
"event",
".",
"attrib",
"[",
"'id'",
"]",
"in",
"self",
".",
"_static_events",
":",
"continue",
"affected",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':AFFECTED']\"",
")",
"if",
"affected",
"is",
"None",
":",
"msg",
"=",
"'Skipping degradation event with no affected term.'",
"logger",
".",
"debug",
"(",
"msg",
")",
"continue",
"# Make sure the degradation is affecting a molecule type",
"# Temporarily removed for CwC compatibility with no type tag",
"#affected_type = affected.find('type')",
"#if affected_type is None or \\",
"# affected_type.text not in molecule_types:",
"# continue",
"affected_id",
"=",
"affected",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"affected_id",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Skipping degradation event with missing affected agent'",
")",
"continue",
"affected_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"affected_id",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"if",
"affected_agent",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Skipping degradation event with missing affected agent'",
")",
"continue",
"agent",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':AGENT']\"",
")",
"if",
"agent",
"is",
"None",
":",
"agent_agent",
"=",
"None",
"else",
":",
"agent_id",
"=",
"agent",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"agent_id",
"is",
"None",
":",
"agent_agent",
"=",
"None",
"else",
":",
"agent_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"agent_id",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"event",
")",
"location",
"=",
"self",
".",
"_get_event_location",
"(",
"event",
")",
"for",
"subj",
",",
"obj",
"in",
"_agent_list_product",
"(",
"(",
"agent_agent",
",",
"affected_agent",
")",
")",
":",
"st",
"=",
"DecreaseAmount",
"(",
"subj",
",",
"obj",
",",
"evidence",
"=",
"deepcopy",
"(",
"ev",
")",
")",
"_stmt_location_to_agents",
"(",
"st",
",",
"location",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")",
"self",
".",
"_add_extracted",
"(",
"_get_type",
"(",
"event",
")",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")"
]
| Extract Degradation INDRA Statements. | [
"Extract",
"Degradation",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L305-L354 | train |
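A detail worth noting in get_degradations: the :AGENT role is optional, so the assembled DecreaseAmount may have subj=None ("X is degraded", with no stated regulator). The optional-subject pattern, reduced to a runnable sketch with a made-up event:

import xml.etree.ElementTree as ET

event = ET.fromstring("""
<EVENT id="E1"><type>ONT::CONSUME</type>
  <arg id="T1" role=":AFFECTED"/>
</EVENT>
""")

affected = event.find(".//*[@role=':AFFECTED']")
agent = event.find(".//*[@role=':AGENT']")  # legitimately absent here
subj = None if agent is None else agent.attrib.get('id')
obj = None if affected is None else affected.attrib.get('id')
print(subj, obj)  # None T1 -> would become DecreaseAmount(None, T1)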
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_complexes | def get_complexes(self):
"""Extract Complex INDRA Statements."""
bind_events = self.tree.findall("EVENT/[type='ONT::BIND']")
bind_events += self.tree.findall("EVENT/[type='ONT::INTERACT']")
for event in bind_events:
if event.attrib['id'] in self._static_events:
continue
arg1 = event.find("arg1")
arg2 = event.find("arg2")
# EKB-AGENT
if arg1 is None and arg2 is None:
args = list(event.findall('arg'))
if len(args) < 2:
continue
arg1 = args[0]
arg2 = args[1]
if (arg1 is None or arg1.attrib.get('id') is None) or \
(arg2 is None or arg2.attrib.get('id') is None):
logger.debug('Skipping complex with less than 2 members')
continue
agent1 = self._get_agent_by_id(arg1.attrib['id'],
event.attrib['id'])
agent2 = self._get_agent_by_id(arg2.attrib['id'],
event.attrib['id'])
if agent1 is None or agent2 is None:
logger.debug('Skipping complex with less than 2 members')
continue
# Information on binding site is either attached to the agent term
# in a features/site tag or attached to the event itself in
# a site tag
'''
site_feature = self._find_in_term(arg1.attrib['id'], 'features/site')
if site_feature is not None:
sites, positions = self._get_site_by_id(site_id)
print sites, positions
site_feature = self._find_in_term(arg2.attrib['id'], 'features/site')
if site_feature is not None:
sites, positions = self._get_site_by_id(site_id)
print sites, positions
site = event.find("site")
if site is not None:
sites, positions = self._get_site_by_id(site.attrib['id'])
print sites, positions
'''
ev = self._get_evidence(event)
location = self._get_event_location(event)
for a1, a2 in _agent_list_product((agent1, agent2)):
st = Complex([a1, a2], evidence=deepcopy(ev))
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted(_get_type(event), event.attrib['id']) | python | def get_complexes(self):
"""Extract Complex INDRA Statements."""
bind_events = self.tree.findall("EVENT/[type='ONT::BIND']")
bind_events += self.tree.findall("EVENT/[type='ONT::INTERACT']")
for event in bind_events:
if event.attrib['id'] in self._static_events:
continue
arg1 = event.find("arg1")
arg2 = event.find("arg2")
# EKB-AGENT
if arg1 is None and arg2 is None:
args = list(event.findall('arg'))
if len(args) < 2:
continue
arg1 = args[0]
arg2 = args[1]
if (arg1 is None or arg1.attrib.get('id') is None) or \
(arg2 is None or arg2.attrib.get('id') is None):
logger.debug('Skipping complex with less than 2 members')
continue
agent1 = self._get_agent_by_id(arg1.attrib['id'],
event.attrib['id'])
agent2 = self._get_agent_by_id(arg2.attrib['id'],
event.attrib['id'])
if agent1 is None or agent2 is None:
logger.debug('Skipping complex with less than 2 members')
continue
# Information on binding site is either attached to the agent term
# in a features/site tag or attached to the event itself in
# a site tag
'''
site_feature = self._find_in_term(arg1.attrib['id'], 'features/site')
if site_feature is not None:
sites, positions = self._get_site_by_id(site_id)
print sites, positions
site_feature = self._find_in_term(arg2.attrib['id'], 'features/site')
if site_feature is not None:
sites, positions = self._get_site_by_id(site_id)
print sites, positions
site = event.find("site")
if site is not None:
sites, positions = self._get_site_by_id(site.attrib['id'])
print sites, positions
'''
ev = self._get_evidence(event)
location = self._get_event_location(event)
for a1, a2 in _agent_list_product((agent1, agent2)):
st = Complex([a1, a2], evidence=deepcopy(ev))
_stmt_location_to_agents(st, location)
self.statements.append(st)
self._add_extracted(_get_type(event), event.attrib['id']) | [
"def",
"get_complexes",
"(",
"self",
")",
":",
"bind_events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::BIND']\"",
")",
"bind_events",
"+=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::INTERACT']\"",
")",
"for",
"event",
"in",
"bind_events",
":",
"if",
"event",
".",
"attrib",
"[",
"'id'",
"]",
"in",
"self",
".",
"_static_events",
":",
"continue",
"arg1",
"=",
"event",
".",
"find",
"(",
"\"arg1\"",
")",
"arg2",
"=",
"event",
".",
"find",
"(",
"\"arg2\"",
")",
"# EKB-AGENT",
"if",
"arg1",
"is",
"None",
"and",
"arg2",
"is",
"None",
":",
"args",
"=",
"list",
"(",
"event",
".",
"findall",
"(",
"'arg'",
")",
")",
"if",
"len",
"(",
"args",
")",
"<",
"2",
":",
"continue",
"arg1",
"=",
"args",
"[",
"0",
"]",
"arg2",
"=",
"args",
"[",
"1",
"]",
"if",
"(",
"arg1",
"is",
"None",
"or",
"arg1",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"is",
"None",
")",
"or",
"(",
"arg2",
"is",
"None",
"or",
"arg2",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"is",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'Skipping complex with less than 2 members'",
")",
"continue",
"agent1",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"arg1",
".",
"attrib",
"[",
"'id'",
"]",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"agent2",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"arg2",
".",
"attrib",
"[",
"'id'",
"]",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"if",
"agent1",
"is",
"None",
"or",
"agent2",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Skipping complex with less than 2 members'",
")",
"continue",
"# Information on binding site is either attached to the agent term",
"# in a features/site tag or attached to the event itself in",
"# a site tag",
"'''\n site_feature = self._find_in_term(arg1.attrib['id'], 'features/site')\n if site_feature is not None:\n sites, positions = self._get_site_by_id(site_id)\n print sites, positions\n\n site_feature = self._find_in_term(arg2.attrib['id'], 'features/site')\n if site_feature is not None:\n sites, positions = self._get_site_by_id(site_id)\n print sites, positions\n\n site = event.find(\"site\")\n if site is not None:\n sites, positions = self._get_site_by_id(site.attrib['id'])\n print sites, positions\n '''",
"ev",
"=",
"self",
".",
"_get_evidence",
"(",
"event",
")",
"location",
"=",
"self",
".",
"_get_event_location",
"(",
"event",
")",
"for",
"a1",
",",
"a2",
"in",
"_agent_list_product",
"(",
"(",
"agent1",
",",
"agent2",
")",
")",
":",
"st",
"=",
"Complex",
"(",
"[",
"a1",
",",
"a2",
"]",
",",
"evidence",
"=",
"deepcopy",
"(",
"ev",
")",
")",
"_stmt_location_to_agents",
"(",
"st",
",",
"location",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")",
"self",
".",
"_add_extracted",
"(",
"_get_type",
"(",
"event",
")",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")"
]
| Extract Complex INDRA Statements. | [
"Extract",
"Complex",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L637-L693 | train |
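get_complexes accepts two EKB layouts: named arg1/arg2 children, or (in the "EKB-AGENT" form) a flat list of <arg> children from which the first two are taken. A small runnable sketch of that fallback, on a toy event:

import xml.etree.ElementTree as ET

event = ET.fromstring("""
<EVENT id="E1"><type>ONT::BIND</type>
  <arg id="T1"/><arg id="T2"/><arg id="T3"/>
</EVENT>
""")

arg1, arg2 = event.find('arg1'), event.find('arg2')
if arg1 is None and arg2 is None:        # EKB-AGENT layout: plain <arg> list
    args = event.findall('arg')
    if len(args) >= 2:
        arg1, arg2 = args[0], args[1]    # members beyond the first two are ignored
print(arg1.attrib['id'], arg2.attrib['id'])  # T1 T2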
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_modifications | def get_modifications(self):
"""Extract all types of Modification INDRA Statements."""
# Get all the specific mod types
mod_event_types = list(ont_to_mod_type.keys())
# Add ONT::PTMs as a special case
mod_event_types += ['ONT::PTM']
mod_events = []
for mod_event_type in mod_event_types:
events = self.tree.findall("EVENT/[type='%s']" % mod_event_type)
mod_extracted = self.extracted_events.get(mod_event_type, [])
for event in events:
event_id = event.attrib.get('id')
if event_id not in mod_extracted:
mod_events.append(event)
# Iterate over all modification events
for event in mod_events:
stmts = self._get_modification_event(event)
if stmts:
for stmt in stmts:
self.statements.append(stmt) | python | def get_modifications(self):
"""Extract all types of Modification INDRA Statements."""
# Get all the specific mod types
mod_event_types = list(ont_to_mod_type.keys())
# Add ONT::PTMs as a special case
mod_event_types += ['ONT::PTM']
mod_events = []
for mod_event_type in mod_event_types:
events = self.tree.findall("EVENT/[type='%s']" % mod_event_type)
mod_extracted = self.extracted_events.get(mod_event_type, [])
for event in events:
event_id = event.attrib.get('id')
if event_id not in mod_extracted:
mod_events.append(event)
# Iterate over all modification events
for event in mod_events:
stmts = self._get_modification_event(event)
if stmts:
for stmt in stmts:
self.statements.append(stmt) | [
"def",
"get_modifications",
"(",
"self",
")",
":",
"# Get all the specific mod types",
"mod_event_types",
"=",
"list",
"(",
"ont_to_mod_type",
".",
"keys",
"(",
")",
")",
"# Add ONT::PTMs as a special case",
"mod_event_types",
"+=",
"[",
"'ONT::PTM'",
"]",
"mod_events",
"=",
"[",
"]",
"for",
"mod_event_type",
"in",
"mod_event_types",
":",
"events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='%s']\"",
"%",
"mod_event_type",
")",
"mod_extracted",
"=",
"self",
".",
"extracted_events",
".",
"get",
"(",
"mod_event_type",
",",
"[",
"]",
")",
"for",
"event",
"in",
"events",
":",
"event_id",
"=",
"event",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"event_id",
"not",
"in",
"mod_extracted",
":",
"mod_events",
".",
"append",
"(",
"event",
")",
"# Iterate over all modification events",
"for",
"event",
"in",
"mod_events",
":",
"stmts",
"=",
"self",
".",
"_get_modification_event",
"(",
"event",
")",
"if",
"stmts",
":",
"for",
"stmt",
"in",
"stmts",
":",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")"
]
| Extract all types of Modification INDRA Statements. | [
"Extract",
"all",
"types",
"of",
"Modification",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L695-L715 | train |
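The filtering against self.extracted_events keeps get_modifications from re-emitting events that an earlier pass (e.g. the indirect-modification pass below) already consumed. The bookkeeping, distilled to plain dicts with made-up ids:

extracted_events = {'ONT::PHOSPHORYLATION': ['E1']}          # already handled
events_by_type = {'ONT::PHOSPHORYLATION': ['E1', 'E2'],      # found in the EKB
                  'ONT::PTM': ['E3']}

mod_events = []
for ev_type, event_ids in events_by_type.items():
    seen = extracted_events.get(ev_type, [])
    mod_events += [e for e in event_ids if e not in seen]
print(mod_events)  # ['E2', 'E3']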
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_modifications_indirect | def get_modifications_indirect(self):
"""Extract indirect Modification INDRA Statements."""
# Get all the specific mod types
mod_event_types = list(ont_to_mod_type.keys())
# Add ONT::PTMs as a special case
mod_event_types += ['ONT::PTM']
def get_increase_events(mod_event_types):
mod_events = []
events = self.tree.findall("EVENT/[type='ONT::INCREASE']")
for event in events:
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_id = affected.attrib.get('id')
if not affected_id:
continue
pattern = "EVENT/[@id='%s']" % affected_id
affected_event = self.tree.find(pattern)
if affected_event is not None:
affected_type = affected_event.find('type')
if affected_type is not None and \
affected_type.text in mod_event_types:
mod_events.append(event)
return mod_events
def get_cause_events(mod_event_types):
mod_events = []
ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
for cc in ccs:
outcome = cc.find(".//*[@role=':OUTCOME']")
if outcome is None:
continue
outcome_id = outcome.attrib.get('id')
if not outcome_id:
continue
pattern = "EVENT/[@id='%s']" % outcome_id
outcome_event = self.tree.find(pattern)
if outcome_event is not None:
outcome_type = outcome_event.find('type')
if outcome_type is not None and \
outcome_type.text in mod_event_types:
mod_events.append(cc)
return mod_events
mod_events = get_increase_events(mod_event_types)
mod_events += get_cause_events(mod_event_types)
# Iterate over all modification events
for event in mod_events:
event_id = event.attrib['id']
if event_id in self._static_events:
continue
event_type = _get_type(event)
# Get enzyme Agent
enzyme = event.find(".//*[@role=':AGENT']")
if enzyme is None:
enzyme = event.find(".//*[@role=':FACTOR']")
if enzyme is None:
return
enzyme_id = enzyme.attrib.get('id')
if enzyme_id is None:
continue
enzyme_agent = self._get_agent_by_id(enzyme_id, event_id)
affected_event_tag = event.find(".//*[@role=':AFFECTED']")
if affected_event_tag is None:
affected_event_tag = event.find(".//*[@role=':OUTCOME']")
if affected_event_tag is None:
return
affected_id = affected_event_tag.attrib.get('id')
if not affected_id:
return
affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
if affected_event is None:
return
# Iterate over all enzyme agents if there are multiple ones
for enz_t in _agent_list_product((enzyme_agent, )):
# enz_t comes out as a tuple so we need to take the first
# element here
enz = enz_t[0]
# Note that we re-run the extraction code here potentially
# multiple times. This is mainly to make sure each Statement
# object created here is independent (i.e. has different UUIDs)
# without having to manipulate it after creation.
stmts = self._get_modification_event(affected_event)
stmts_to_make = []
if stmts:
for stmt in stmts:
# The affected event should have no enzyme but should
# have a substrate
if stmt.enz is None and stmt.sub is not None:
stmts_to_make.append(stmt)
for stmt in stmts_to_make:
stmt.enz = enz
for ev in stmt.evidence:
ev.epistemics['direct'] = False
self.statements.append(stmt)
self._add_extracted(event_type, event.attrib['id'])
self._add_extracted(affected_event.find('type').text, affected_id) | python | def get_modifications_indirect(self):
"""Extract indirect Modification INDRA Statements."""
# Get all the specific mod types
mod_event_types = list(ont_to_mod_type.keys())
# Add ONT::PTMs as a special case
mod_event_types += ['ONT::PTM']
def get_increase_events(mod_event_types):
mod_events = []
events = self.tree.findall("EVENT/[type='ONT::INCREASE']")
for event in events:
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_id = affected.attrib.get('id')
if not affected_id:
continue
pattern = "EVENT/[@id='%s']" % affected_id
affected_event = self.tree.find(pattern)
if affected_event is not None:
affected_type = affected_event.find('type')
if affected_type is not None and \
affected_type.text in mod_event_types:
mod_events.append(event)
return mod_events
def get_cause_events(mod_event_types):
mod_events = []
ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
for cc in ccs:
outcome = cc.find(".//*[@role=':OUTCOME']")
if outcome is None:
continue
outcome_id = outcome.attrib.get('id')
if not outcome_id:
continue
pattern = "EVENT/[@id='%s']" % outcome_id
outcome_event = self.tree.find(pattern)
if outcome_event is not None:
outcome_type = outcome_event.find('type')
if outcome_type is not None and \
outcome_type.text in mod_event_types:
mod_events.append(cc)
return mod_events
mod_events = get_increase_events(mod_event_types)
mod_events += get_cause_events(mod_event_types)
# Iterate over all modification events
for event in mod_events:
event_id = event.attrib['id']
if event_id in self._static_events:
continue
event_type = _get_type(event)
# Get enzyme Agent
enzyme = event.find(".//*[@role=':AGENT']")
if enzyme is None:
enzyme = event.find(".//*[@role=':FACTOR']")
if enzyme is None:
return
enzyme_id = enzyme.attrib.get('id')
if enzyme_id is None:
continue
enzyme_agent = self._get_agent_by_id(enzyme_id, event_id)
affected_event_tag = event.find(".//*[@role=':AFFECTED']")
if affected_event_tag is None:
affected_event_tag = event.find(".//*[@role=':OUTCOME']")
if affected_event_tag is None:
return
affected_id = affected_event_tag.attrib.get('id')
if not affected_id:
return
affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
if affected_event is None:
return
# Iterate over all enzyme agents if there are multiple ones
for enz_t in _agent_list_product((enzyme_agent, )):
# enz_t comes out as a tuple so we need to take the first
# element here
enz = enz_t[0]
# Note that we re-run the extraction code here potentially
# multiple times. This is mainly to make sure each Statement
# object created here is independent (i.e. has different UUIDs)
# without having to manipulate it after creation.
stmts = self._get_modification_event(affected_event)
stmts_to_make = []
if stmts:
for stmt in stmts:
# The affected event should have no enzyme but should
# have a substrate
if stmt.enz is None and stmt.sub is not None:
stmts_to_make.append(stmt)
for stmt in stmts_to_make:
stmt.enz = enz
for ev in stmt.evidence:
ev.epistemics['direct'] = False
self.statements.append(stmt)
self._add_extracted(event_type, event.attrib['id'])
self._add_extracted(affected_event.find('type').text, affected_id) | [
"def",
"get_modifications_indirect",
"(",
"self",
")",
":",
"# Get all the specific mod types",
"mod_event_types",
"=",
"list",
"(",
"ont_to_mod_type",
".",
"keys",
"(",
")",
")",
"# Add ONT::PTMs as a special case",
"mod_event_types",
"+=",
"[",
"'ONT::PTM'",
"]",
"def",
"get_increase_events",
"(",
"mod_event_types",
")",
":",
"mod_events",
"=",
"[",
"]",
"events",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"EVENT/[type='ONT::INCREASE']\"",
")",
"for",
"event",
"in",
"events",
":",
"affected",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':AFFECTED']\"",
")",
"if",
"affected",
"is",
"None",
":",
"continue",
"affected_id",
"=",
"affected",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"not",
"affected_id",
":",
"continue",
"pattern",
"=",
"\"EVENT/[@id='%s']\"",
"%",
"affected_id",
"affected_event",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"pattern",
")",
"if",
"affected_event",
"is",
"not",
"None",
":",
"affected_type",
"=",
"affected_event",
".",
"find",
"(",
"'type'",
")",
"if",
"affected_type",
"is",
"not",
"None",
"and",
"affected_type",
".",
"text",
"in",
"mod_event_types",
":",
"mod_events",
".",
"append",
"(",
"event",
")",
"return",
"mod_events",
"def",
"get_cause_events",
"(",
"mod_event_types",
")",
":",
"mod_events",
"=",
"[",
"]",
"ccs",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"\"CC/[type='ONT::CAUSE']\"",
")",
"for",
"cc",
"in",
"ccs",
":",
"outcome",
"=",
"cc",
".",
"find",
"(",
"\".//*[@role=':OUTCOME']\"",
")",
"if",
"outcome",
"is",
"None",
":",
"continue",
"outcome_id",
"=",
"outcome",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"not",
"outcome_id",
":",
"continue",
"pattern",
"=",
"\"EVENT/[@id='%s']\"",
"%",
"outcome_id",
"outcome_event",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"pattern",
")",
"if",
"outcome_event",
"is",
"not",
"None",
":",
"outcome_type",
"=",
"outcome_event",
".",
"find",
"(",
"'type'",
")",
"if",
"outcome_type",
"is",
"not",
"None",
"and",
"outcome_type",
".",
"text",
"in",
"mod_event_types",
":",
"mod_events",
".",
"append",
"(",
"cc",
")",
"return",
"mod_events",
"mod_events",
"=",
"get_increase_events",
"(",
"mod_event_types",
")",
"mod_events",
"+=",
"get_cause_events",
"(",
"mod_event_types",
")",
"# Iterate over all modification events",
"for",
"event",
"in",
"mod_events",
":",
"event_id",
"=",
"event",
".",
"attrib",
"[",
"'id'",
"]",
"if",
"event_id",
"in",
"self",
".",
"_static_events",
":",
"continue",
"event_type",
"=",
"_get_type",
"(",
"event",
")",
"# Get enzyme Agent",
"enzyme",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':AGENT']\"",
")",
"if",
"enzyme",
"is",
"None",
":",
"enzyme",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':FACTOR']\"",
")",
"if",
"enzyme",
"is",
"None",
":",
"return",
"enzyme_id",
"=",
"enzyme",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"enzyme_id",
"is",
"None",
":",
"continue",
"enzyme_agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"enzyme_id",
",",
"event_id",
")",
"affected_event_tag",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':AFFECTED']\"",
")",
"if",
"affected_event_tag",
"is",
"None",
":",
"affected_event_tag",
"=",
"event",
".",
"find",
"(",
"\".//*[@role=':OUTCOME']\"",
")",
"if",
"affected_event_tag",
"is",
"None",
":",
"return",
"affected_id",
"=",
"affected_event_tag",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"not",
"affected_id",
":",
"return",
"affected_event",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"\"EVENT/[@id='%s']\"",
"%",
"affected_id",
")",
"if",
"affected_event",
"is",
"None",
":",
"return",
"# Iterate over all enzyme agents if there are multiple ones",
"for",
"enz_t",
"in",
"_agent_list_product",
"(",
"(",
"enzyme_agent",
",",
")",
")",
":",
"# enz_t comes out as a tuple so we need to take the first",
"# element here",
"enz",
"=",
"enz_t",
"[",
"0",
"]",
"# Note that we re-run the extraction code here potentially",
"# multiple times. This is mainly to make sure each Statement",
"# object created here is independent (i.e. has different UUIDs)",
"# without having to manipulate it after creation.",
"stmts",
"=",
"self",
".",
"_get_modification_event",
"(",
"affected_event",
")",
"stmts_to_make",
"=",
"[",
"]",
"if",
"stmts",
":",
"for",
"stmt",
"in",
"stmts",
":",
"# The affected event should have no enzyme but should",
"# have a substrate",
"if",
"stmt",
".",
"enz",
"is",
"None",
"and",
"stmt",
".",
"sub",
"is",
"not",
"None",
":",
"stmts_to_make",
".",
"append",
"(",
"stmt",
")",
"for",
"stmt",
"in",
"stmts_to_make",
":",
"stmt",
".",
"enz",
"=",
"enz",
"for",
"ev",
"in",
"stmt",
".",
"evidence",
":",
"ev",
".",
"epistemics",
"[",
"'direct'",
"]",
"=",
"False",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")",
"self",
".",
"_add_extracted",
"(",
"event_type",
",",
"event",
".",
"attrib",
"[",
"'id'",
"]",
")",
"self",
".",
"_add_extracted",
"(",
"affected_event",
".",
"find",
"(",
"'type'",
")",
".",
"text",
",",
"affected_id",
")"
]
| Extract indirect Modification INDRA Statements. | [
"Extract",
"indirect",
"Modification",
"INDRA",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L717-L821 | train |
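The indirect case is a two-hop lookup: an ONT::INCREASE event (or an ONT::CAUSE connective) points via :AFFECTED (or :OUTCOME) at a second EVENT, which must itself be a modification type; the outer subject is then grafted onto the inner Modification as its enzyme. A toy version of the resolution step follows; where the record above uses early returns inside the loop, this sketch uses continue so one malformed event does not end the scan.

import xml.etree.ElementTree as ET

MOD_TYPES = {'ONT::PHOSPHORYLATION', 'ONT::PTM'}  # illustrative subset

tree = ET.fromstring("""
<ekb>
  <EVENT id="E1"><type>ONT::INCREASE</type>
    <arg id="E2" role=":AFFECTED"/>
  </EVENT>
  <EVENT id="E2"><type>ONT::PHOSPHORYLATION</type></EVENT>
</ekb>
""")

for event in tree.findall("EVENT/[type='ONT::INCREASE']"):
    affected = event.find(".//*[@role=':AFFECTED']")
    if affected is None:
        continue
    inner = tree.find("EVENT/[@id='%s']" % affected.attrib.get('id'))
    if inner is None or inner.find('type').text not in MOD_TYPES:
        continue
    print(event.attrib['id'], '->', inner.attrib['id'])  # E1 -> E2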
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_agents | def get_agents(self):
"""Return list of INDRA Agents corresponding to TERMs in the EKB.
This is meant to be used when entities, e.g. "phosphorylated ERK",
rather than events, need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents.
Returns
-------
agents : list[indra.statements.Agent]
List of INDRA Agents extracted from EKB.
"""
agents_dict = self.get_term_agents()
agents = [a for a in agents_dict.values() if a is not None]
return agents | python | def get_agents(self):
"""Return list of INDRA Agents corresponding to TERMs in the EKB.
This is meant to be used when entities, e.g. "phosphorylated ERK",
rather than events, need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents.
Returns
-------
agents : list[indra.statements.Agent]
List of INDRA Agents extracted from EKB.
"""
agents_dict = self.get_term_agents()
agents = [a for a in agents_dict.values() if a is not None]
return agents | [
"def",
"get_agents",
"(",
"self",
")",
":",
"agents_dict",
"=",
"self",
".",
"get_term_agents",
"(",
")",
"agents",
"=",
"[",
"a",
"for",
"a",
"in",
"agents_dict",
".",
"values",
"(",
")",
"if",
"a",
"is",
"not",
"None",
"]",
"return",
"agents"
]
| Return list of INDRA Agents corresponding to TERMs in the EKB.
This is meant to be used when entities, e.g. "phosphorylated ERK",
rather than events, need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents.
Returns
-------
agents : list[indra.statements.Agent]
List of INDRA Agents extracted from EKB. | [
"Return",
"list",
"of",
"INDRA",
"Agents",
"corresponding",
"to",
"TERMs",
"in",
"the",
"EKB",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L1059-L1074 | train |
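get_agents is a thin convenience wrapper: take the id -> Agent mapping from get_term_agents and drop the entries whose extraction failed. The same one-liner works on any such mapping; the values here are string stand-ins for Agent objects.

agents_dict = {'T1': 'ERK (phosphorylated)', 'T2': None}  # None: extraction failed
agents = [a for a in agents_dict.values() if a is not None]
print(agents)  # ['ERK (phosphorylated)']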
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor.get_term_agents | def get_term_agents(self):
"""Return dict of INDRA Agents keyed by corresponding TERMs in the EKB.
This is meant to be used when entities, e.g. "phosphorylated ERK",
rather than events, need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents. Further, each key of the dictionary corresponds to
the ID assigned by TRIPS to the given TERM that the Agent was
extracted from.
Returns
-------
agents : dict[str, indra.statements.Agent]
Dict of INDRA Agents extracted from EKB.
"""
terms = self.tree.findall('TERM')
agents = {}
assoc_links = []
for term in terms:
term_id = term.attrib.get('id')
if term_id:
agent = self._get_agent_by_id(term_id, None)
agents[term_id] = agent
# Handle assoc-with links
aw = term.find('assoc-with')
if aw is not None:
aw_id = aw.attrib.get('id')
if aw_id:
assoc_links.append((term_id, aw_id))
        # We only keep the target end of assoc-with links if both
# source and target are in the list
for source, target in assoc_links:
if target in agents and source in agents:
agents.pop(source)
return agents | python | def get_term_agents(self):
"""Return dict of INDRA Agents keyed by corresponding TERMs in the EKB.
This is meant to be used when entities, e.g. "phosphorylated ERK",
rather than events, need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents. Further, each key of the dictionary corresponds to
the ID assigned by TRIPS to the given TERM that the Agent was
extracted from.
Returns
-------
agents : dict[str, indra.statements.Agent]
Dict of INDRA Agents extracted from EKB.
"""
terms = self.tree.findall('TERM')
agents = {}
assoc_links = []
for term in terms:
term_id = term.attrib.get('id')
if term_id:
agent = self._get_agent_by_id(term_id, None)
agents[term_id] = agent
# Handle assoc-with links
aw = term.find('assoc-with')
if aw is not None:
aw_id = aw.attrib.get('id')
if aw_id:
assoc_links.append((term_id, aw_id))
        # We only keep the target end of assoc-with links if both
# source and target are in the list
for source, target in assoc_links:
if target in agents and source in agents:
agents.pop(source)
return agents | [
"def",
"get_term_agents",
"(",
"self",
")",
":",
"terms",
"=",
"self",
".",
"tree",
".",
"findall",
"(",
"'TERM'",
")",
"agents",
"=",
"{",
"}",
"assoc_links",
"=",
"[",
"]",
"for",
"term",
"in",
"terms",
":",
"term_id",
"=",
"term",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"term_id",
":",
"agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"term_id",
",",
"None",
")",
"agents",
"[",
"term_id",
"]",
"=",
"agent",
"# Handle assoc-with links",
"aw",
"=",
"term",
".",
"find",
"(",
"'assoc-with'",
")",
"if",
"aw",
"is",
"not",
"None",
":",
"aw_id",
"=",
"aw",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"if",
"aw_id",
":",
"assoc_links",
".",
"append",
"(",
"(",
"term_id",
",",
"aw_id",
")",
")",
"# We only keep the target end of assoc with links if both",
"# source and target are in the list",
"for",
"source",
",",
"target",
"in",
"assoc_links",
":",
"if",
"target",
"in",
"agents",
"and",
"source",
"in",
"agents",
":",
"agents",
".",
"pop",
"(",
"source",
")",
"return",
"agents"
]
| Return dict of INDRA Agents keyed by corresponding TERMs in the EKB.
This is meant to be used when entities, e.g. "phosphorylated ERK",
rather than events, need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents. Further, each key of the dictionary corresponds to
the ID assigned by TRIPS to the given TERM that the Agent was
extracted from.
Returns
-------
agents : dict[str, indra.statements.Agent]
Dict of INDRA Agents extracted from EKB. | [
"Return",
"dict",
"of",
"INDRA",
"Agents",
"keyed",
"by",
"corresponding",
"TERMs",
"in",
"the",
"EKB",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L1076-L1110 | train |
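The assoc-with pruning at the end keeps only the target of each link when both endpoints were extracted, so a TERM that merely points at another TERM does not yield a duplicate Agent. Distilled to plain dicts (ids and names are made up):

agents = {'T1': 'BRAF', 'T2': 'BRAF', 'T3': 'MAP2K1'}
assoc_links = [('T2', 'T1')]  # (source, target) pairs from <assoc-with> tags

for source, target in assoc_links:
    if target in agents and source in agents:
        agents.pop(source)  # drop the source end, keep the target
print(agents)  # {'T1': 'BRAF', 'T3': 'MAP2K1'}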
sorgerlab/indra | indra/sources/trips/processor.py | TripsProcessor._get_evidence_text | def _get_evidence_text(self, event_tag):
"""Extract the evidence for an event.
Pieces of text linked to an EVENT are fragments of a sentence. The
EVENT refers to the paragraph ID and the "uttnum", which corresponds
to a sentence ID. Here we find and return the full sentence from which
the event was taken.
"""
par_id = event_tag.attrib.get('paragraph')
uttnum = event_tag.attrib.get('uttnum')
event_text = event_tag.find('text')
if self.sentences is not None and uttnum is not None:
sentence = self.sentences[uttnum]
elif event_text is not None:
sentence = event_text.text
else:
sentence = None
return sentence | python | def _get_evidence_text(self, event_tag):
"""Extract the evidence for an event.
Pieces of text linked to an EVENT are fragments of a sentence. The
EVENT refers to the paragraph ID and the "uttnum", which corresponds
to a sentence ID. Here we find and return the full sentence from which
the event was taken.
"""
par_id = event_tag.attrib.get('paragraph')
uttnum = event_tag.attrib.get('uttnum')
event_text = event_tag.find('text')
if self.sentences is not None and uttnum is not None:
sentence = self.sentences[uttnum]
elif event_text is not None:
sentence = event_text.text
else:
sentence = None
return sentence | [
"def",
"_get_evidence_text",
"(",
"self",
",",
"event_tag",
")",
":",
"par_id",
"=",
"event_tag",
".",
"attrib",
".",
"get",
"(",
"'paragraph'",
")",
"uttnum",
"=",
"event_tag",
".",
"attrib",
".",
"get",
"(",
"'uttnum'",
")",
"event_text",
"=",
"event_tag",
".",
"find",
"(",
"'text'",
")",
"if",
"self",
".",
"sentences",
"is",
"not",
"None",
"and",
"uttnum",
"is",
"not",
"None",
":",
"sentence",
"=",
"self",
".",
"sentences",
"[",
"uttnum",
"]",
"elif",
"event_text",
"is",
"not",
"None",
":",
"sentence",
"=",
"event_text",
".",
"text",
"else",
":",
"sentence",
"=",
"None",
"return",
"sentence"
]
| Extract the evidence for an event.
Pieces of text linked to an EVENT are fragments of a sentence. The
EVENT refers to the paragraph ID and the "uttnum", which corresponds
to a sentence ID. Here we find and return the full sentence from which
the event was taken. | [
"Extract",
"the",
"evidence",
"for",
"an",
"event",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L1596-L1613 | train |
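Sentence recovery prefers the processor's sentence table keyed by uttnum and only falls back to the event's own, possibly fragmentary, <text> tag (the paragraph id is read in the record above but not used in the branch). The same decision chain on a toy event:

import xml.etree.ElementTree as ET

sentences = {'1': 'MEK phosphorylates ERK in the cytoplasm.'}
event = ET.fromstring('<EVENT uttnum="1"><text>phosphorylates ERK</text></EVENT>')

uttnum = event.attrib.get('uttnum')
event_text = event.find('text')
if sentences is not None and uttnum is not None:
    sentence = sentences[uttnum]          # full sentence, preferred
elif event_text is not None:
    sentence = event_text.text            # fragment, fallback
else:
    sentence = None
print(sentence)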
sorgerlab/indra | indra/assemblers/pybel/assembler.py | get_causal_edge | def get_causal_edge(stmt, activates):
"""Returns the causal, polar edge with the correct "contact"."""
any_contact = any(
evidence.epistemics.get('direct', False)
for evidence in stmt.evidence
)
if any_contact:
return pc.DIRECTLY_INCREASES if activates else pc.DIRECTLY_DECREASES
return pc.INCREASES if activates else pc.DECREASES | python | def get_causal_edge(stmt, activates):
"""Returns the causal, polar edge with the correct "contact"."""
any_contact = any(
evidence.epistemics.get('direct', False)
for evidence in stmt.evidence
)
if any_contact:
return pc.DIRECTLY_INCREASES if activates else pc.DIRECTLY_DECREASES
return pc.INCREASES if activates else pc.DECREASES | [
"def",
"get_causal_edge",
"(",
"stmt",
",",
"activates",
")",
":",
"any_contact",
"=",
"any",
"(",
"evidence",
".",
"epistemics",
".",
"get",
"(",
"'direct'",
",",
"False",
")",
"for",
"evidence",
"in",
"stmt",
".",
"evidence",
")",
"if",
"any_contact",
":",
"return",
"pc",
".",
"DIRECTLY_INCREASES",
"if",
"activates",
"else",
"pc",
".",
"DIRECTLY_DECREASES",
"return",
"pc",
".",
"INCREASES",
"if",
"activates",
"else",
"pc",
".",
"DECREASES"
]
| Returns the causal, polar edge with the correct "contact". | [
"Returns",
"the",
"causal",
"polar",
"edge",
"with",
"the",
"correct",
"contact",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pybel/assembler.py#L568-L577 | train |
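get_causal_edge reduces to a 2x2 table over (any direct evidence?, activates?). The same logic with stand-in relation constants; pc.* are PyBEL's constants, and the BEL-style arrows below are just placeholders for illustration.

DIRECTLY_INCREASES, DIRECTLY_DECREASES = '=>', '=|'   # placeholders
INCREASES, DECREASES = '->', '-|'

def causal_edge(direct_flags, activates):
    # direct_flags: the epistemics.get('direct', False) value per evidence
    if any(direct_flags):
        return DIRECTLY_INCREASES if activates else DIRECTLY_DECREASES
    return INCREASES if activates else DECREASES

print(causal_edge([False, True], True))   # '=>' (direct, activating)
print(causal_edge([False], False))        # '-|' (indirect, inhibitory)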
sorgerlab/indra | indra/assemblers/pybel/assembler.py | PybelAssembler.to_database | def to_database(self, manager=None):
"""Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails.
"""
network = pybel.to_database(self.model, manager=manager)
return network | python | def to_database(self, manager=None):
"""Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails.
"""
network = pybel.to_database(self.model, manager=manager)
return network | [
"def",
"to_database",
"(",
"self",
",",
"manager",
"=",
"None",
")",
":",
"network",
"=",
"pybel",
".",
"to_database",
"(",
"self",
".",
"model",
",",
"manager",
"=",
"manager",
")",
"return",
"network"
]
| Send the model to the PyBEL database.
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails. | [
"Send",
"the",
"model",
"to",
"the",
"PyBEL",
"database"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pybel/assembler.py#L149-L170 | train |
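A hedged usage sketch for the wrapper above. The helper name is hypothetical, and the Manager(connection) constructor form is an assumption about the PyBEL version in use; with connection=None the call falls through to PyBEL's configured default, as the docstring describes.

def upload_model(assembler, connection=None):
    # Hypothetical convenience around PybelAssembler.to_database.
    manager = None
    if connection is not None:
        from pybel.manager import Manager  # constructor signature assumed
        manager = Manager(connection)      # e.g. 'sqlite:///pybel.db'
    network = assembler.to_database(manager=manager)
    if network is None:
        print('PyBEL upload failed')
    return network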
sorgerlab/indra | indra/assemblers/pysb/sites.py | get_binding_site_name | def get_binding_site_name(agent):
"""Return a binding site name from a given agent."""
# Try to construct a binding site name based on parent
grounding = agent.get_grounding()
if grounding != (None, None):
uri = hierarchies['entity'].get_uri(grounding[0], grounding[1])
# Get highest level parents in hierarchy
parents = hierarchies['entity'].get_parents(uri, 'top')
if parents:
# Choose the first parent if there are more than one
parent_uri = sorted(parents)[0]
parent_agent = _agent_from_uri(parent_uri)
binding_site = _n(parent_agent.name).lower()
return binding_site
# Fall back to Agent's own name if one from parent can't be constructed
binding_site = _n(agent.name).lower()
return binding_site | python | def get_binding_site_name(agent):
"""Return a binding site name from a given agent."""
# Try to construct a binding site name based on parent
grounding = agent.get_grounding()
if grounding != (None, None):
uri = hierarchies['entity'].get_uri(grounding[0], grounding[1])
# Get highest level parents in hierarchy
parents = hierarchies['entity'].get_parents(uri, 'top')
if parents:
# Choose the first parent if there are more than one
parent_uri = sorted(parents)[0]
parent_agent = _agent_from_uri(parent_uri)
binding_site = _n(parent_agent.name).lower()
return binding_site
# Fall back to Agent's own name if one from parent can't be constructed
binding_site = _n(agent.name).lower()
return binding_site | [
"def",
"get_binding_site_name",
"(",
"agent",
")",
":",
"# Try to construct a binding site name based on parent",
"grounding",
"=",
"agent",
".",
"get_grounding",
"(",
")",
"if",
"grounding",
"!=",
"(",
"None",
",",
"None",
")",
":",
"uri",
"=",
"hierarchies",
"[",
"'entity'",
"]",
".",
"get_uri",
"(",
"grounding",
"[",
"0",
"]",
",",
"grounding",
"[",
"1",
"]",
")",
"# Get highest level parents in hierarchy",
"parents",
"=",
"hierarchies",
"[",
"'entity'",
"]",
".",
"get_parents",
"(",
"uri",
",",
"'top'",
")",
"if",
"parents",
":",
"# Choose the first parent if there are more than one",
"parent_uri",
"=",
"sorted",
"(",
"parents",
")",
"[",
"0",
"]",
"parent_agent",
"=",
"_agent_from_uri",
"(",
"parent_uri",
")",
"binding_site",
"=",
"_n",
"(",
"parent_agent",
".",
"name",
")",
".",
"lower",
"(",
")",
"return",
"binding_site",
"# Fall back to Agent's own name if one from parent can't be constructed",
"binding_site",
"=",
"_n",
"(",
"agent",
".",
"name",
")",
".",
"lower",
"(",
")",
"return",
"binding_site"
]
| Return a binding site name from a given agent. | [
"Return",
"a",
"binding",
"site",
"name",
"from",
"a",
"given",
"agent",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/sites.py#L68-L84 | train |
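The naming rule in get_binding_site_name is "prefer the top-level parent (e.g. the family) from the entity hierarchy, else fall back to the agent's own name", sanitized and lower-cased. A self-contained approximation with a toy parent table and a stand-in for the _n() sanitizer (both are illustrative assumptions):

import re

def sanitize(name):
    # Stand-in for _n(): make the name safe as a PySB site identifier.
    return re.sub(r'\W', '_', name)

parents = {'MAPK1': 'ERK'}  # hypothetical agent-name -> family table

def binding_site_name(agent_name):
    base = parents.get(agent_name, agent_name)  # parent if any, else own name
    return sanitize(base).lower()

print(binding_site_name('MAPK1'))  # 'erk'  (named after the family)
print(binding_site_name('GTP'))    # 'gtp'  (no parent, own name used)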
sorgerlab/indra | indra/assemblers/pysb/sites.py | get_mod_site_name | def get_mod_site_name(mod_condition):
"""Return site names for a modification."""
if mod_condition.residue is None:
mod_str = abbrevs[mod_condition.mod_type]
else:
mod_str = mod_condition.residue
mod_pos = mod_condition.position if \
mod_condition.position is not None else ''
name = ('%s%s' % (mod_str, mod_pos))
return name | python | def get_mod_site_name(mod_condition):
"""Return site names for a modification."""
if mod_condition.residue is None:
mod_str = abbrevs[mod_condition.mod_type]
else:
mod_str = mod_condition.residue
mod_pos = mod_condition.position if \
mod_condition.position is not None else ''
name = ('%s%s' % (mod_str, mod_pos))
return name | [
"def",
"get_mod_site_name",
"(",
"mod_condition",
")",
":",
"if",
"mod_condition",
".",
"residue",
"is",
"None",
":",
"mod_str",
"=",
"abbrevs",
"[",
"mod_condition",
".",
"mod_type",
"]",
"else",
":",
"mod_str",
"=",
"mod_condition",
".",
"residue",
"mod_pos",
"=",
"mod_condition",
".",
"position",
"if",
"mod_condition",
".",
"position",
"is",
"not",
"None",
"else",
"''",
"name",
"=",
"(",
"'%s%s'",
"%",
"(",
"mod_str",
",",
"mod_pos",
")",
")",
"return",
"name"
]
| Return site names for a modification. | [
"Return",
"site",
"names",
"for",
"a",
"modification",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/sites.py#L87-L96 | train |
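Site names concatenate the residue (or, when the residue is unknown, a modification-type abbreviation) with the position, if any. The same rule with a toy abbrevs entry; the real abbrevs table is defined elsewhere in the module.

abbrevs = {'phosphorylation': 'phospho'}  # illustrative entry

def mod_site_name(mod_type, residue=None, position=None):
    mod_str = residue if residue is not None else abbrevs[mod_type]
    return '%s%s' % (mod_str, position if position is not None else '')

print(mod_site_name('phosphorylation', 'T', '185'))  # 'T185'
print(mod_site_name('phosphorylation'))              # 'phospho'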
sorgerlab/indra | indra/sources/hprd/api.py | process_flat_files | def process_flat_files(id_mappings_file, complexes_file=None, ptm_file=None,
ppi_file=None, seq_file=None, motif_window=7):
"""Get INDRA Statements from HPRD data.
Of the arguments, `id_mappings_file` is required, and at least one of
`complexes_file`, `ptm_file`, and `ppi_file` must also be given. If
`ptm_file` is given, `seq_file` must also be given.
Note that many proteins (> 1,600) in the HPRD content are associated with
outdated RefSeq IDs that cannot be mapped to Uniprot IDs. For these, the
Uniprot ID obtained from the HGNC ID (itself obtained from the Entrez ID)
is used. Because the sequence referenced by the Uniprot ID obtained this
way may be different from the (outdated) RefSeq sequence included with the
HPRD content, it is possible that this will lead to invalid site positions
with respect to the Uniprot IDs.
To allow these site positions to be mapped during assembly, the
Modification statements produced by the HprdProcessor include an additional
key in the `annotations` field of their Evidence object. The annotations
field is called 'site_motif' and it maps to a dictionary with three
elements: 'motif', 'respos', and 'off_by_one'. 'motif' gives the peptide
sequence obtained from the RefSeq sequence included with HPRD. 'respos'
indicates the position in the peptide sequence containing the residue.
Note that these positions are ONE-INDEXED (not zero-indexed). Finally, the
'off_by_one' field contains a boolean value indicating whether the correct
position was inferred as being an off-by-one (methionine cleavage) error.
If True, it means that the given residue could not be found in the HPRD
RefSeq sequence at the given position, but a matching residue was found at
position+1, suggesting a sequence numbering based on the methionine-cleaved
sequence. The peptide included in the 'site_motif' dictionary is based on
this updated position.
Parameters
----------
id_mappings_file : str
Path to HPRD_ID_MAPPINGS.txt file.
complexes_file : Optional[str]
Path to PROTEIN_COMPLEXES.txt file.
ptm_file : Optional[str]
Path to POST_TRANSLATIONAL_MODIFICATIONS.txt file.
ppi_file : Optional[str]
Path to BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt file.
seq_file : Optional[str]
Path to PROTEIN_SEQUENCES.txt file.
motif_window : int
Number of flanking amino acids to include on each side of the
PTM target residue in the 'site_motif' annotations field of the
Evidence for Modification Statements. Default is 7.
Returns
-------
HprdProcessor
An HprdProcessor object which contains a list of extracted INDRA
Statements in its statements attribute.
"""
id_df = pd.read_csv(id_mappings_file, delimiter='\t', names=_hprd_id_cols,
dtype='str')
id_df = id_df.set_index('HPRD_ID')
if complexes_file is None and ptm_file is None and ppi_file is None:
raise ValueError('At least one of complexes_file, ptm_file, or '
'ppi_file must be given.')
if ptm_file and not seq_file:
raise ValueError('If ptm_file is given, seq_file must also be given.')
# Load complexes into dataframe
cplx_df = None
if complexes_file:
cplx_df = pd.read_csv(complexes_file, delimiter='\t', names=_cplx_cols,
dtype='str', na_values=['-', 'None'])
# Load ptm data into dataframe
ptm_df = None
seq_dict = None
if ptm_file:
ptm_df = pd.read_csv(ptm_file, delimiter='\t', names=_ptm_cols,
dtype='str', na_values='-')
# Load protein sequences as a dict keyed by RefSeq ID
seq_dict = load_fasta_sequences(seq_file, id_index=2)
# Load the PPI data into dataframe
ppi_df = None
if ppi_file:
ppi_df = pd.read_csv(ppi_file, delimiter='\t', names=_ppi_cols,
dtype='str')
# Create the processor
return HprdProcessor(id_df, cplx_df, ptm_df, ppi_df, seq_dict, motif_window) | python | def process_flat_files(id_mappings_file, complexes_file=None, ptm_file=None,
ppi_file=None, seq_file=None, motif_window=7):
"""Get INDRA Statements from HPRD data.
Of the arguments, `id_mappings_file` is required, and at least one of
`complexes_file`, `ptm_file`, and `ppi_file` must also be given. If
`ptm_file` is given, `seq_file` must also be given.
Note that many proteins (> 1,600) in the HPRD content are associated with
outdated RefSeq IDs that cannot be mapped to Uniprot IDs. For these, the
Uniprot ID obtained from the HGNC ID (itself obtained from the Entrez ID)
is used. Because the sequence referenced by the Uniprot ID obtained this
way may be different from the (outdated) RefSeq sequence included with the
HPRD content, it is possible that this will lead to invalid site positions
with respect to the Uniprot IDs.
To allow these site positions to be mapped during assembly, the
Modification statements produced by the HprdProcessor include an additional
key in the `annotations` field of their Evidence object. The annotations
field is called 'site_motif' and it maps to a dictionary with three
elements: 'motif', 'respos', and 'off_by_one'. 'motif' gives the peptide
sequence obtained from the RefSeq sequence included with HPRD. 'respos'
indicates the position in the peptide sequence containing the residue.
Note that these positions are ONE-INDEXED (not zero-indexed). Finally, the
'off_by_one' field contains a boolean value indicating whether the correct
position was inferred as being an off-by-one (methionine cleavage) error.
If True, it means that the given residue could not be found in the HPRD
RefSeq sequence at the given position, but a matching residue was found at
position+1, suggesting a sequence numbering based on the methionine-cleaved
sequence. The peptide included in the 'site_motif' dictionary is based on
this updated position.
Parameters
----------
id_mappings_file : str
Path to HPRD_ID_MAPPINGS.txt file.
complexes_file : Optional[str]
Path to PROTEIN_COMPLEXES.txt file.
ptm_file : Optional[str]
Path to POST_TRANSLATIONAL_MODIFICATIONS.txt file.
ppi_file : Optional[str]
Path to BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt file.
seq_file : Optional[str]
Path to PROTEIN_SEQUENCES.txt file.
motif_window : int
Number of flanking amino acids to include on each side of the
PTM target residue in the 'site_motif' annotations field of the
Evidence for Modification Statements. Default is 7.
Returns
-------
HprdProcessor
An HprdProcessor object which contains a list of extracted INDRA
Statements in its statements attribute.
"""
id_df = pd.read_csv(id_mappings_file, delimiter='\t', names=_hprd_id_cols,
dtype='str')
id_df = id_df.set_index('HPRD_ID')
if complexes_file is None and ptm_file is None and ppi_file is None:
raise ValueError('At least one of complexes_file, ptm_file, or '
'ppi_file must be given.')
if ptm_file and not seq_file:
raise ValueError('If ptm_file is given, seq_file must also be given.')
# Load complexes into dataframe
cplx_df = None
if complexes_file:
cplx_df = pd.read_csv(complexes_file, delimiter='\t', names=_cplx_cols,
dtype='str', na_values=['-', 'None'])
# Load ptm data into dataframe
ptm_df = None
seq_dict = None
if ptm_file:
ptm_df = pd.read_csv(ptm_file, delimiter='\t', names=_ptm_cols,
dtype='str', na_values='-')
# Load protein sequences as a dict keyed by RefSeq ID
seq_dict = load_fasta_sequences(seq_file, id_index=2)
# Load the PPI data into dataframe
ppi_df = None
if ppi_file:
ppi_df = pd.read_csv(ppi_file, delimiter='\t', names=_ppi_cols,
dtype='str')
# Create the processor
return HprdProcessor(id_df, cplx_df, ptm_df, ppi_df, seq_dict, motif_window) | [
"def",
"process_flat_files",
"(",
"id_mappings_file",
",",
"complexes_file",
"=",
"None",
",",
"ptm_file",
"=",
"None",
",",
"ppi_file",
"=",
"None",
",",
"seq_file",
"=",
"None",
",",
"motif_window",
"=",
"7",
")",
":",
"id_df",
"=",
"pd",
".",
"read_csv",
"(",
"id_mappings_file",
",",
"delimiter",
"=",
"'\\t'",
",",
"names",
"=",
"_hprd_id_cols",
",",
"dtype",
"=",
"'str'",
")",
"id_df",
"=",
"id_df",
".",
"set_index",
"(",
"'HPRD_ID'",
")",
"if",
"complexes_file",
"is",
"None",
"and",
"ptm_file",
"is",
"None",
"and",
"ppi_file",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'At least one of complexes_file, ptm_file, or '",
"'ppi_file must be given.'",
")",
"if",
"ptm_file",
"and",
"not",
"seq_file",
":",
"raise",
"ValueError",
"(",
"'If ptm_file is given, seq_file must also be given.'",
")",
"# Load complexes into dataframe",
"cplx_df",
"=",
"None",
"if",
"complexes_file",
":",
"cplx_df",
"=",
"pd",
".",
"read_csv",
"(",
"complexes_file",
",",
"delimiter",
"=",
"'\\t'",
",",
"names",
"=",
"_cplx_cols",
",",
"dtype",
"=",
"'str'",
",",
"na_values",
"=",
"[",
"'-'",
",",
"'None'",
"]",
")",
"# Load ptm data into dataframe",
"ptm_df",
"=",
"None",
"seq_dict",
"=",
"None",
"if",
"ptm_file",
":",
"ptm_df",
"=",
"pd",
".",
"read_csv",
"(",
"ptm_file",
",",
"delimiter",
"=",
"'\\t'",
",",
"names",
"=",
"_ptm_cols",
",",
"dtype",
"=",
"'str'",
",",
"na_values",
"=",
"'-'",
")",
"# Load protein sequences as a dict keyed by RefSeq ID",
"seq_dict",
"=",
"load_fasta_sequences",
"(",
"seq_file",
",",
"id_index",
"=",
"2",
")",
"# Load the PPI data into dataframe",
"ppi_df",
"=",
"None",
"if",
"ppi_file",
":",
"ppi_df",
"=",
"pd",
".",
"read_csv",
"(",
"ppi_file",
",",
"delimiter",
"=",
"'\\t'",
",",
"names",
"=",
"_ppi_cols",
",",
"dtype",
"=",
"'str'",
")",
"# Create the processor",
"return",
"HprdProcessor",
"(",
"id_df",
",",
"cplx_df",
",",
"ptm_df",
",",
"ppi_df",
",",
"seq_dict",
",",
"motif_window",
")"
]
| Get INDRA Statements from HPRD data.
Of the arguments, `id_mappings_file` is required, and at least one of
`complexes_file`, `ptm_file`, and `ppi_file` must also be given. If
`ptm_file` is given, `seq_file` must also be given.
Note that many proteins (> 1,600) in the HPRD content are associated with
outdated RefSeq IDs that cannot be mapped to Uniprot IDs. For these, the
Uniprot ID obtained from the HGNC ID (itself obtained from the Entrez ID)
is used. Because the sequence referenced by the Uniprot ID obtained this
way may be different from the (outdated) RefSeq sequence included with the
HPRD content, it is possible that this will lead to invalid site positions
with respect to the Uniprot IDs.
To allow these site positions to be mapped during assembly, the
Modification statements produced by the HprdProcessor include an additional
key in the `annotations` field of their Evidence object. The annotations
field is called 'site_motif' and it maps to a dictionary with three
elements: 'motif', 'respos', and 'off_by_one'. 'motif' gives the peptide
sequence obtained from the RefSeq sequence included with HPRD. 'respos'
indicates the position in the peptide sequence containing the residue.
Note that these positions are ONE-INDEXED (not zero-indexed). Finally, the
'off_by_one' field contains a boolean value indicating whether the correct
position was inferred as being an off-by-one (methionine cleavage) error.
If True, it means that the given residue could not be found in the HPRD
RefSeq sequence at the given position, but a matching residue was found at
position+1, suggesting a sequence numbering based on the methionine-cleaved
sequence. The peptide included in the 'site_motif' dictionary is based on
this updated position.
Parameters
----------
id_mappings_file : str
Path to HPRD_ID_MAPPINGS.txt file.
complexes_file : Optional[str]
Path to PROTEIN_COMPLEXES.txt file.
ptm_file : Optional[str]
Path to POST_TRANSLATIONAL_MODIFICATIONS.txt file.
ppi_file : Optional[str]
Path to BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt file.
seq_file : Optional[str]
Path to PROTEIN_SEQUENCES.txt file.
motif_window : int
Number of flanking amino acids to include on each side of the
PTM target residue in the 'site_motif' annotations field of the
Evidence for Modification Statements. Default is 7.
Returns
-------
HprdProcessor
An HprdProcessor object which contains a list of extracted INDRA
Statements in its statements attribute. | [
"Get",
"INDRA",
"Statements",
"from",
"HPRD",
"data",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hprd/api.py#L22-L104 | train |
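A minimal usage sketch for process_flat_files above (not part of the dataset row): the file names are hypothetical placeholders for the HPRD flat files, and per the docstring a ptm_file requires the matching seq_file.

from indra.sources.hprd.api import process_flat_files

# Hypothetical local paths to the HPRD distribution files
hp = process_flat_files('HPRD_ID_MAPPINGS.txt',
                        ptm_file='POST_TRANSLATIONAL_MODIFICATIONS.txt',
                        seq_file='PROTEIN_SEQUENCES.txt',
                        motif_window=7)
stmts = hp.statements  # extracted INDRA Statements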
sorgerlab/indra | indra/assemblers/pysb/preassembler.py | PysbPreassembler._gather_active_forms | def _gather_active_forms(self):
"""Collect all the active forms of each Agent in the Statements."""
for stmt in self.statements:
if isinstance(stmt, ActiveForm):
base_agent = self.agent_set.get_create_base_agent(stmt.agent)
# Handle the case where an activity flag is set
agent_to_add = stmt.agent
if stmt.agent.activity:
new_agent = fast_deepcopy(stmt.agent)
new_agent.activity = None
agent_to_add = new_agent
base_agent.add_activity_form(agent_to_add, stmt.is_active) | python | def _gather_active_forms(self):
"""Collect all the active forms of each Agent in the Statements."""
for stmt in self.statements:
if isinstance(stmt, ActiveForm):
base_agent = self.agent_set.get_create_base_agent(stmt.agent)
# Handle the case where an activity flag is set
agent_to_add = stmt.agent
if stmt.agent.activity:
new_agent = fast_deepcopy(stmt.agent)
new_agent.activity = None
agent_to_add = new_agent
base_agent.add_activity_form(agent_to_add, stmt.is_active) | [
"def",
"_gather_active_forms",
"(",
"self",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"ActiveForm",
")",
":",
"base_agent",
"=",
"self",
".",
"agent_set",
".",
"get_create_base_agent",
"(",
"stmt",
".",
"agent",
")",
"# Handle the case where an activity flag is set",
"agent_to_add",
"=",
"stmt",
".",
"agent",
"if",
"stmt",
".",
"agent",
".",
"activity",
":",
"new_agent",
"=",
"fast_deepcopy",
"(",
"stmt",
".",
"agent",
")",
"new_agent",
".",
"activity",
"=",
"None",
"agent_to_add",
"=",
"new_agent",
"base_agent",
".",
"add_activity_form",
"(",
"agent_to_add",
",",
"stmt",
".",
"is_active",
")"
]
| Collect all the active forms of each Agent in the Statements. | [
"Collect",
"all",
"the",
"active",
"forms",
"of",
"each",
"Agent",
"in",
"the",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/preassembler.py#L28-L39 | train |
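As an illustrative sketch (assuming the standard indra.statements constructors), this is the kind of ActiveForm that _gather_active_forms consumes; any activity flag on the agent is stripped from a copy before registration.

from indra.statements import Agent, ActiveForm, ModCondition

# Phosphorylated MAP2K1 declared to have kinase activity
mek_p = Agent('MAP2K1', mods=[ModCondition('phosphorylation', 'S', '222')])
af = ActiveForm(mek_p, 'kinase', True)
# _gather_active_forms registers mek_p as an active form of the MAP2K1
# base agent via base_agent.add_activity_form(agent, is_active).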
sorgerlab/indra | indra/assemblers/pysb/preassembler.py | PysbPreassembler.replace_activities | def replace_activities(self):
"""Replace ative flags with Agent states when possible."""
logger.debug('Running PySB Preassembler replace activities')
# TODO: handle activity hierarchies
new_stmts = []
def has_agent_activity(stmt):
"""Return True if any agents in the Statement have activity."""
for agent in stmt.agent_list():
if isinstance(agent, Agent) and agent.activity is not None:
return True
return False
# First collect all explicit active forms
self._gather_active_forms()
# Iterate over all statements
for j, stmt in enumerate(self.statements):
logger.debug('%d/%d %s' % (j + 1, len(self.statements), stmt))
# If the Statement doesn't have any activities, we can just
# keep it and move on
if not has_agent_activity(stmt):
new_stmts.append(stmt)
continue
stmt_agents = stmt.agent_list()
num_agents = len(stmt_agents)
# Make a list with an empty list for each Agent so that later
# we can build combinations of Agent forms
agent_forms = [[] for a in stmt_agents]
for i, agent in enumerate(stmt_agents):
# This is the case where there is an activity flag on an
# Agent which we will attempt to replace with an explicit
# active form
if agent is not None and isinstance(agent, Agent) and \
agent.activity is not None:
base_agent = self.agent_set.get_create_base_agent(agent)
# If it is an "active" state
if agent.activity.is_active:
active_forms = base_agent.active_forms
# If no explicit active forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent]
# If it is an "inactive" state
else:
active_forms = base_agent.inactive_forms
# If no explicit inactive forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent]
# We now iterate over the active agent forms and create
# new agents
for af in active_forms:
new_agent = fast_deepcopy(agent)
self._set_agent_context(af, new_agent)
agent_forms[i].append(new_agent)
# Otherwise we just copy over the agent as is
else:
agent_forms[i].append(agent)
# Now create all possible combinations of the agents and create new
# statements as needed
agent_combs = itertools.product(*agent_forms)
for agent_comb in agent_combs:
new_stmt = fast_deepcopy(stmt)
new_stmt.set_agent_list(agent_comb)
new_stmts.append(new_stmt)
self.statements = new_stmts | python | def replace_activities(self):
"""Replace ative flags with Agent states when possible."""
logger.debug('Running PySB Preassembler replace activities')
# TODO: handle activity hierarchies
new_stmts = []
def has_agent_activity(stmt):
"""Return True if any agents in the Statement have activity."""
for agent in stmt.agent_list():
if isinstance(agent, Agent) and agent.activity is not None:
return True
return False
# First collect all explicit active forms
self._gather_active_forms()
# Iterate over all statements
for j, stmt in enumerate(self.statements):
logger.debug('%d/%d %s' % (j + 1, len(self.statements), stmt))
# If the Statement doesn't have any activities, we can just
# keep it and move on
if not has_agent_activity(stmt):
new_stmts.append(stmt)
continue
stmt_agents = stmt.agent_list()
num_agents = len(stmt_agents)
# Make a list with an empty list for each Agent so that later
# we can build combinations of Agent forms
agent_forms = [[] for a in stmt_agents]
for i, agent in enumerate(stmt_agents):
# This is the case where there is an activity flag on an
# Agent which we will attempt to replace with an explicit
# active form
if agent is not None and isinstance(agent, Agent) and \
agent.activity is not None:
base_agent = self.agent_set.get_create_base_agent(agent)
# If it is an "active" state
if agent.activity.is_active:
active_forms = base_agent.active_forms
# If no explicit active forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent]
# If it is an "inactive" state
else:
active_forms = base_agent.inactive_forms
# If no explicit inactive forms are known then we use
# the generic one
if not active_forms:
active_forms = [agent]
# We now iterate over the active agent forms and create
# new agents
for af in active_forms:
new_agent = fast_deepcopy(agent)
self._set_agent_context(af, new_agent)
agent_forms[i].append(new_agent)
# Otherwise we just copy over the agent as is
else:
agent_forms[i].append(agent)
# Now create all possible combinations of the agents and create new
# statements as needed
agent_combs = itertools.product(*agent_forms)
for agent_comb in agent_combs:
new_stmt = fast_deepcopy(stmt)
new_stmt.set_agent_list(agent_comb)
new_stmts.append(new_stmt)
self.statements = new_stmts | [
"def",
"replace_activities",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Running PySB Preassembler replace activities'",
")",
"# TODO: handle activity hierarchies",
"new_stmts",
"=",
"[",
"]",
"def",
"has_agent_activity",
"(",
"stmt",
")",
":",
"\"\"\"Return True if any agents in the Statement have activity.\"\"\"",
"for",
"agent",
"in",
"stmt",
".",
"agent_list",
"(",
")",
":",
"if",
"isinstance",
"(",
"agent",
",",
"Agent",
")",
"and",
"agent",
".",
"activity",
"is",
"not",
"None",
":",
"return",
"True",
"return",
"False",
"# First collect all explicit active forms",
"self",
".",
"_gather_active_forms",
"(",
")",
"# Iterate over all statements",
"for",
"j",
",",
"stmt",
"in",
"enumerate",
"(",
"self",
".",
"statements",
")",
":",
"logger",
".",
"debug",
"(",
"'%d/%d %s'",
"%",
"(",
"j",
"+",
"1",
",",
"len",
"(",
"self",
".",
"statements",
")",
",",
"stmt",
")",
")",
"# If the Statement doesn't have any activities, we can just",
"# keep it and move on",
"if",
"not",
"has_agent_activity",
"(",
"stmt",
")",
":",
"new_stmts",
".",
"append",
"(",
"stmt",
")",
"continue",
"stmt_agents",
"=",
"stmt",
".",
"agent_list",
"(",
")",
"num_agents",
"=",
"len",
"(",
"stmt_agents",
")",
"# Make a list with an empty list for each Agent so that later",
"# we can build combinations of Agent forms",
"agent_forms",
"=",
"[",
"[",
"]",
"for",
"a",
"in",
"stmt_agents",
"]",
"for",
"i",
",",
"agent",
"in",
"enumerate",
"(",
"stmt_agents",
")",
":",
"# This is the case where there is an activity flag on an",
"# Agent which we will attempt to replace with an explicit",
"# active form",
"if",
"agent",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"agent",
",",
"Agent",
")",
"and",
"agent",
".",
"activity",
"is",
"not",
"None",
":",
"base_agent",
"=",
"self",
".",
"agent_set",
".",
"get_create_base_agent",
"(",
"agent",
")",
"# If it is an \"active\" state",
"if",
"agent",
".",
"activity",
".",
"is_active",
":",
"active_forms",
"=",
"base_agent",
".",
"active_forms",
"# If no explicit active forms are known then we use",
"# the generic one",
"if",
"not",
"active_forms",
":",
"active_forms",
"=",
"[",
"agent",
"]",
"# If it is an \"inactive\" state",
"else",
":",
"active_forms",
"=",
"base_agent",
".",
"inactive_forms",
"# If no explicit inactive forms are known then we use",
"# the generic one",
"if",
"not",
"active_forms",
":",
"active_forms",
"=",
"[",
"agent",
"]",
"# We now iterate over the active agent forms and create",
"# new agents",
"for",
"af",
"in",
"active_forms",
":",
"new_agent",
"=",
"fast_deepcopy",
"(",
"agent",
")",
"self",
".",
"_set_agent_context",
"(",
"af",
",",
"new_agent",
")",
"agent_forms",
"[",
"i",
"]",
".",
"append",
"(",
"new_agent",
")",
"# Otherwise we just copy over the agent as is",
"else",
":",
"agent_forms",
"[",
"i",
"]",
".",
"append",
"(",
"agent",
")",
"# Now create all possible combinations of the agents and create new",
"# statements as needed",
"agent_combs",
"=",
"itertools",
".",
"product",
"(",
"*",
"agent_forms",
")",
"for",
"agent_comb",
"in",
"agent_combs",
":",
"new_stmt",
"=",
"fast_deepcopy",
"(",
"stmt",
")",
"new_stmt",
".",
"set_agent_list",
"(",
"agent_comb",
")",
"new_stmts",
".",
"append",
"(",
"new_stmt",
")",
"self",
".",
"statements",
"=",
"new_stmts"
]
| Replace active flags with Agent states when possible. | [
"Replace",
"ative",
"flags",
"with",
"Agent",
"states",
"when",
"possible",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/preassembler.py#L41-L105 | train |
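A sketch of the expansion replace_activities performs, assuming a PysbPreassembler built from these Statements (the constructor signature is assumed, not shown in this row):

from indra.statements import Agent, ActivityCondition, Phosphorylation

# Input: "kinase-active MAP2K1 phosphorylates MAPK1"
mek = Agent('MAP2K1', activity=ActivityCondition('kinase', True))
stmt = Phosphorylation(mek, Agent('MAPK1'))
# With the phosphorylated-MAP2K1 ActiveForm registered, the method
# emits one copy of stmt per known active form, with the enzyme's
# activity flag replaced by that form's explicit state.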
sorgerlab/indra | indra/assemblers/pysb/preassembler.py | PysbPreassembler.add_reverse_effects | def add_reverse_effects(self):
"""Add Statements for the reverse effects of some Statements.
For instance, if a protein is phosphorylated but never dephosphorylated
in the model, we add a generic dephosphorylation here. This step is
usually optional in the assembly process.
"""
# TODO: generalize to other modification sites
pos_mod_sites = {}
neg_mod_sites = {}
syntheses = []
degradations = []
for stmt in self.statements:
if isinstance(stmt, Phosphorylation):
agent = stmt.sub.name
try:
pos_mod_sites[agent].append((stmt.residue, stmt.position))
except KeyError:
pos_mod_sites[agent] = [(stmt.residue, stmt.position)]
elif isinstance(stmt, Dephosphorylation):
agent = stmt.sub.name
try:
neg_mod_sites[agent].append((stmt.residue, stmt.position))
except KeyError:
neg_mod_sites[agent] = [(stmt.residue, stmt.position)]
elif isinstance(stmt, Influence):
if stmt.overall_polarity() == 1:
syntheses.append(stmt.obj.name)
elif stmt.overall_polarity() == -1:
degradations.append(stmt.obj.name)
elif isinstance(stmt, IncreaseAmount):
syntheses.append(stmt.obj.name)
elif isinstance(stmt, DecreaseAmount):
degradations.append(stmt.obj.name)
new_stmts = []
for agent_name, pos_sites in pos_mod_sites.items():
neg_sites = neg_mod_sites.get(agent_name, [])
no_neg_site = set(pos_sites).difference(set(neg_sites))
for residue, position in no_neg_site:
st = Dephosphorylation(Agent('phosphatase'),
Agent(agent_name),
residue, position)
new_stmts.append(st)
for agent_name in syntheses:
if agent_name not in degradations:
st = DecreaseAmount(None, Agent(agent_name))
new_stmts.append(st)
self.statements += new_stmts | python | def add_reverse_effects(self):
"""Add Statements for the reverse effects of some Statements.
For instance, if a protein is phosphorylated but never dephosphorylated
in the model, we add a generic dephosphorylation here. This step is
usually optional in the assembly process.
"""
# TODO: generalize to other modification sites
pos_mod_sites = {}
neg_mod_sites = {}
syntheses = []
degradations = []
for stmt in self.statements:
if isinstance(stmt, Phosphorylation):
agent = stmt.sub.name
try:
pos_mod_sites[agent].append((stmt.residue, stmt.position))
except KeyError:
pos_mod_sites[agent] = [(stmt.residue, stmt.position)]
elif isinstance(stmt, Dephosphorylation):
agent = stmt.sub.name
try:
neg_mod_sites[agent].append((stmt.residue, stmt.position))
except KeyError:
neg_mod_sites[agent] = [(stmt.residue, stmt.position)]
elif isinstance(stmt, Influence):
if stmt.overall_polarity() == 1:
syntheses.append(stmt.obj.name)
elif stmt.overall_polarity() == -1:
degradations.append(stmt.obj.name)
elif isinstance(stmt, IncreaseAmount):
syntheses.append(stmt.obj.name)
elif isinstance(stmt, DecreaseAmount):
degradations.append(stmt.obj.name)
new_stmts = []
for agent_name, pos_sites in pos_mod_sites.items():
neg_sites = neg_mod_sites.get(agent_name, [])
no_neg_site = set(pos_sites).difference(set(neg_sites))
for residue, position in no_neg_site:
st = Dephosphorylation(Agent('phosphatase'),
Agent(agent_name),
residue, position)
new_stmts.append(st)
for agent_name in syntheses:
if agent_name not in degradations:
st = DecreaseAmount(None, Agent(agent_name))
new_stmts.append(st)
self.statements += new_stmts | [
"def",
"add_reverse_effects",
"(",
"self",
")",
":",
"# TODO: generalize to other modification sites",
"pos_mod_sites",
"=",
"{",
"}",
"neg_mod_sites",
"=",
"{",
"}",
"syntheses",
"=",
"[",
"]",
"degradations",
"=",
"[",
"]",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"if",
"isinstance",
"(",
"stmt",
",",
"Phosphorylation",
")",
":",
"agent",
"=",
"stmt",
".",
"sub",
".",
"name",
"try",
":",
"pos_mod_sites",
"[",
"agent",
"]",
".",
"append",
"(",
"(",
"stmt",
".",
"residue",
",",
"stmt",
".",
"position",
")",
")",
"except",
"KeyError",
":",
"pos_mod_sites",
"[",
"agent",
"]",
"=",
"[",
"(",
"stmt",
".",
"residue",
",",
"stmt",
".",
"position",
")",
"]",
"elif",
"isinstance",
"(",
"stmt",
",",
"Dephosphorylation",
")",
":",
"agent",
"=",
"stmt",
".",
"sub",
".",
"name",
"try",
":",
"neg_mod_sites",
"[",
"agent",
"]",
".",
"append",
"(",
"(",
"stmt",
".",
"residue",
",",
"stmt",
".",
"position",
")",
")",
"except",
"KeyError",
":",
"neg_mod_sites",
"[",
"agent",
"]",
"=",
"[",
"(",
"stmt",
".",
"residue",
",",
"stmt",
".",
"position",
")",
"]",
"elif",
"isinstance",
"(",
"stmt",
",",
"Influence",
")",
":",
"if",
"stmt",
".",
"overall_polarity",
"(",
")",
"==",
"1",
":",
"syntheses",
".",
"append",
"(",
"stmt",
".",
"obj",
".",
"name",
")",
"elif",
"stmt",
".",
"overall_polarity",
"(",
")",
"==",
"-",
"1",
":",
"degradations",
".",
"append",
"(",
"stmt",
".",
"obj",
".",
"name",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"IncreaseAmount",
")",
":",
"syntheses",
".",
"append",
"(",
"stmt",
".",
"obj",
".",
"name",
")",
"elif",
"isinstance",
"(",
"stmt",
",",
"DecreaseAmount",
")",
":",
"degradations",
".",
"append",
"(",
"stmt",
".",
"obj",
".",
"name",
")",
"new_stmts",
"=",
"[",
"]",
"for",
"agent_name",
",",
"pos_sites",
"in",
"pos_mod_sites",
".",
"items",
"(",
")",
":",
"neg_sites",
"=",
"neg_mod_sites",
".",
"get",
"(",
"agent_name",
",",
"[",
"]",
")",
"no_neg_site",
"=",
"set",
"(",
"pos_sites",
")",
".",
"difference",
"(",
"set",
"(",
"neg_sites",
")",
")",
"for",
"residue",
",",
"position",
"in",
"no_neg_site",
":",
"st",
"=",
"Dephosphorylation",
"(",
"Agent",
"(",
"'phosphatase'",
")",
",",
"Agent",
"(",
"agent_name",
")",
",",
"residue",
",",
"position",
")",
"new_stmts",
".",
"append",
"(",
"st",
")",
"for",
"agent_name",
"in",
"syntheses",
":",
"if",
"agent_name",
"not",
"in",
"degradations",
":",
"st",
"=",
"DecreaseAmount",
"(",
"None",
",",
"Agent",
"(",
"agent_name",
")",
")",
"new_stmts",
".",
"append",
"(",
"st",
")",
"self",
".",
"statements",
"+=",
"new_stmts"
]
| Add Statements for the reverse effects of some Statements.
For instance, if a protein is phosphorylated but never dephosphorylated
in the model, we add a generic dephosphorylation here. This step is
usually optional in the assembly process. | [
"Add",
"Statements",
"for",
"the",
"reverse",
"effects",
"of",
"some",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/preassembler.py#L107-L156 | train |
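A sketch of the reverse-effect completion, grounded in the code above:

from indra.statements import Agent, Phosphorylation, Dephosphorylation

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), 'T', '185')]
# add_reverse_effects() sees that MAPK1 T185 is phosphorylated but never
# dephosphorylated, so it appends the generic reverse rule:
#   Dephosphorylation(Agent('phosphatase'), Agent('MAPK1'), 'T', '185')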
sorgerlab/indra | indra/preassembler/sitemapper.py | _get_uniprot_id | def _get_uniprot_id(agent):
"""Return the UniProt ID for an agent, looking up in HGNC if necessary.
If the UniProt ID is a list then return the first ID by default.
"""
up_id = agent.db_refs.get('UP')
hgnc_id = agent.db_refs.get('HGNC')
if up_id is None:
if hgnc_id is None:
# If both UniProt and HGNC refs are missing we can't
# sequence check and so don't report a failure.
return None
# Try to get UniProt ID from HGNC
up_id = hgnc_client.get_uniprot_id(hgnc_id)
# If this fails, again, we can't sequence check
if up_id is None:
return None
# If the UniProt ID is a list then choose the first one.
if not isinstance(up_id, basestring) and \
isinstance(up_id[0], basestring):
up_id = up_id[0]
return up_id | python | def _get_uniprot_id(agent):
"""Return the UniProt ID for an agent, looking up in HGNC if necessary.
If the UniProt ID is a list then return the first ID by default.
"""
up_id = agent.db_refs.get('UP')
hgnc_id = agent.db_refs.get('HGNC')
if up_id is None:
if hgnc_id is None:
# If both UniProt and HGNC refs are missing we can't
# sequence check and so don't report a failure.
return None
# Try to get UniProt ID from HGNC
up_id = hgnc_client.get_uniprot_id(hgnc_id)
# If this fails, again, we can't sequence check
if up_id is None:
return None
# If the UniProt ID is a list then choose the first one.
if not isinstance(up_id, basestring) and \
isinstance(up_id[0], basestring):
up_id = up_id[0]
return up_id | [
"def",
"_get_uniprot_id",
"(",
"agent",
")",
":",
"up_id",
"=",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'UP'",
")",
"hgnc_id",
"=",
"agent",
".",
"db_refs",
".",
"get",
"(",
"'HGNC'",
")",
"if",
"up_id",
"is",
"None",
":",
"if",
"hgnc_id",
"is",
"None",
":",
"# If both UniProt and HGNC refs are missing we can't",
"# sequence check and so don't report a failure.",
"return",
"None",
"# Try to get UniProt ID from HGNC",
"up_id",
"=",
"hgnc_client",
".",
"get_uniprot_id",
"(",
"hgnc_id",
")",
"# If this fails, again, we can't sequence check",
"if",
"up_id",
"is",
"None",
":",
"return",
"None",
"# If the UniProt ID is a list then choose the first one.",
"if",
"not",
"isinstance",
"(",
"up_id",
",",
"basestring",
")",
"and",
"isinstance",
"(",
"up_id",
"[",
"0",
"]",
",",
"basestring",
")",
":",
"up_id",
"=",
"up_id",
"[",
"0",
"]",
"return",
"up_id"
]
| Return the UniProt ID for an agent, looking up in HGNC if necessary.
If the UniProt ID is a list then return the first ID by default. | [
"Return",
"the",
"UniProt",
"ID",
"for",
"an",
"agent",
"looking",
"up",
"in",
"HGNC",
"if",
"necessary",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/sitemapper.py#L333-L354 | train |
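An illustrative sketch of the fallback logic (HGNC 1097 is BRAF; the expected UniProt ID is an assumption based on standard groundings, and the HGNC lookup requires the client's resources):

from indra.statements import Agent

# UP ref missing, HGNC present: resolved through hgnc_client
braf = Agent('BRAF', db_refs={'HGNC': '1097'})
up_id = _get_uniprot_id(braf)  # expected: 'P15056'
# Neither UP nor HGNC: no sequence check possible, returns None
assert _get_uniprot_id(Agent('XYZ', db_refs={'TEXT': 'XYZ'})) is None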
sorgerlab/indra | indra/preassembler/sitemapper.py | SiteMapper.map_sites | def map_sites(self, stmts):
"""Check a set of statements for invalid modification sites.
Statements are checked against Uniprot reference sequences to determine
if residues referred to by post-translational modifications exist at
the given positions.
If there is nothing amiss with a statement (modifications on any of the
agents, modifications made in the statement, etc.), then the statement
goes into the list of valid statements. If there is a problem with the
statement, the offending modifications are looked up in the site map
(:py:attr:`site_map`), and an instance of :py:class:`MappedStatement`
is added to the list of mapped statements.
Parameters
----------
stmts : list of :py:class:`indra.statement.Statement`
The statements to check for site errors.
Returns
-------
tuple
2-tuple containing (valid_statements, mapped_statements). The first
element of the tuple is a list of valid statements
(:py:class:`indra.statement.Statement`) that were not found to
contain any site errors. The second element of the tuple is a list
of mapped statements (:py:class:`MappedStatement`) with information
on the incorrect sites and corresponding statements with correctly
mapped sites.
"""
valid_statements = []
mapped_statements = []
for stmt in stmts:
mapped_stmt = self.map_stmt_sites(stmt)
# If we got a MappedStatement as a return value, we add that to the
# list of mapped statements, otherwise, the original Statement is
# not invalid so we add it to the other list directly.
if mapped_stmt is not None:
mapped_statements.append(mapped_stmt)
else:
valid_statements.append(stmt)
return valid_statements, mapped_statements | python | def map_sites(self, stmts):
"""Check a set of statements for invalid modification sites.
Statements are checked against Uniprot reference sequences to determine
if residues referred to by post-translational modifications exist at
the given positions.
If there is nothing amiss with a statement (modifications on any of the
agents, modifications made in the statement, etc.), then the statement
goes into the list of valid statements. If there is a problem with the
statement, the offending modifications are looked up in the site map
(:py:attr:`site_map`), and an instance of :py:class:`MappedStatement`
is added to the list of mapped statements.
Parameters
----------
stmts : list of :py:class:`indra.statement.Statement`
The statements to check for site errors.
Returns
-------
tuple
2-tuple containing (valid_statements, mapped_statements). The first
element of the tuple is a list of valid statements
(:py:class:`indra.statement.Statement`) that were not found to
contain any site errors. The second element of the tuple is a list
of mapped statements (:py:class:`MappedStatement`) with information
on the incorrect sites and corresponding statements with correctly
mapped sites.
"""
valid_statements = []
mapped_statements = []
for stmt in stmts:
mapped_stmt = self.map_stmt_sites(stmt)
# If we got a MappedStatement as a return value, we add that to the
# list of mapped statements, otherwise, the original Statement is
# not invalid so we add it to the other list directly.
if mapped_stmt is not None:
mapped_statements.append(mapped_stmt)
else:
valid_statements.append(stmt)
return valid_statements, mapped_statements | [
"def",
"map_sites",
"(",
"self",
",",
"stmts",
")",
":",
"valid_statements",
"=",
"[",
"]",
"mapped_statements",
"=",
"[",
"]",
"for",
"stmt",
"in",
"stmts",
":",
"mapped_stmt",
"=",
"self",
".",
"map_stmt_sites",
"(",
"stmt",
")",
"# If we got a MappedStatement as a return value, we add that to the",
"# list of mapped statements, otherwise, the original Statement is",
"# not invalid so we add it to the other list directly.",
"if",
"mapped_stmt",
"is",
"not",
"None",
":",
"mapped_statements",
".",
"append",
"(",
"mapped_stmt",
")",
"else",
":",
"valid_statements",
".",
"append",
"(",
"stmt",
")",
"return",
"valid_statements",
",",
"mapped_statements"
]
| Check a set of statements for invalid modification sites.
Statements are checked against Uniprot reference sequences to determine
if residues referred to by post-translational modifications exist at
the given positions.
If there is nothing amiss with a statement (modifications on any of the
agents, modifications made in the statement, etc.), then the statement
goes into the list of valid statements. If there is a problem with the
statement, the offending modifications are looked up in the site map
(:py:attr:`site_map`), and an instance of :py:class:`MappedStatement`
is added to the list of mapped statements.
Parameters
----------
stmts : list of :py:class:`indra.statement.Statement`
The statements to check for site errors.
Returns
-------
tuple
2-tuple containing (valid_statements, mapped_statements). The first
element of the tuple is a list of valid statements
(:py:class:`indra.statement.Statement`) that were not found to
contain any site errors. The second element of the tuple is a list
of mapped statements (:py:class:`MappedStatement`) with information
on the incorrect sites and corresponding statements with correctly
mapped sites. | [
"Check",
"a",
"set",
"of",
"statements",
"for",
"invalid",
"modification",
"sites",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/sitemapper.py#L203-L246 | train |
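A sketch of map_sites on a classic invalid-site case, assuming sm is a SiteMapper instance built with the module's default resources (construction is not shown in this row):

from indra.statements import Agent, Phosphorylation

# Human MAPK1 (ERK2) is often reported with mouse numbering T183/Y185
# instead of the human positions T185/Y187.
mapk1 = Agent('MAPK1', db_refs={'UP': 'P28482'})
stmt = Phosphorylation(Agent('MAP2K1'), mapk1, 'T', '183')
valid, mapped = sm.map_sites([stmt])
# valid would be empty; mapped would hold one MappedStatement whose
# corrected statement carries the site T185.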
sorgerlab/indra | indra/preassembler/sitemapper.py | SiteMapper._map_agent_sites | def _map_agent_sites(self, agent):
"""Check an agent for invalid sites and update if necessary.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
Agent to check for invalid modification sites.
Returns
-------
tuple
The first element is a list of MappedSite objects, the second
element is either the original Agent, if unchanged, or a copy
of it.
"""
# If there are no modifications on this agent, then we can return the
# copy of the agent
if agent is None or not agent.mods:
return [], agent
new_agent = deepcopy(agent)
mapped_sites = []
# Now iterate over all the modifications and map each one
for idx, mod_condition in enumerate(agent.mods):
mapped_site = \
self._map_agent_mod(agent, mod_condition)
# If we couldn't do the mapping or the mapped site isn't invalid
# then we don't need to change the existing ModCondition
if not mapped_site or mapped_site.not_invalid():
continue
# Otherwise, if there is a mapping, we replace the old ModCondition
# with the new one where only the residue and position are updated,
# the mod type and the is modified flag are kept.
if mapped_site.has_mapping():
mc = ModCondition(mod_condition.mod_type,
mapped_site.mapped_res,
mapped_site.mapped_pos,
mod_condition.is_modified)
new_agent.mods[idx] = mc
# Finally, whether or not we have a mapping, we keep track of mapped
# sites and make them available to the caller
mapped_sites.append(mapped_site)
return mapped_sites, new_agent | python | def _map_agent_sites(self, agent):
"""Check an agent for invalid sites and update if necessary.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
Agent to check for invalid modification sites.
Returns
-------
tuple
The first element is a list of MappedSite objects, the second
element is either the original Agent, if unchanged, or a copy
of it.
"""
# If there are no modifications on this agent, then we can return the
# copy of the agent
if agent is None or not agent.mods:
return [], agent
new_agent = deepcopy(agent)
mapped_sites = []
# Now iterate over all the modifications and map each one
for idx, mod_condition in enumerate(agent.mods):
mapped_site = \
self._map_agent_mod(agent, mod_condition)
# If we couldn't do the mapping or the mapped site isn't invalid
# then we don't need to change the existing ModCondition
if not mapped_site or mapped_site.not_invalid():
continue
# Otherwise, if there is a mapping, we replace the old ModCondition
# with the new one where only the residue and position are updated,
# the mod type and the is modified flag are kept.
if mapped_site.has_mapping():
mc = ModCondition(mod_condition.mod_type,
mapped_site.mapped_res,
mapped_site.mapped_pos,
mod_condition.is_modified)
new_agent.mods[idx] = mc
# Finally, whether or not we have a mapping, we keep track of mapped
# sites and make them available to the caller
mapped_sites.append(mapped_site)
return mapped_sites, new_agent | [
"def",
"_map_agent_sites",
"(",
"self",
",",
"agent",
")",
":",
"# If there are no modifications on this agent, then we can return the",
"# copy of the agent",
"if",
"agent",
"is",
"None",
"or",
"not",
"agent",
".",
"mods",
":",
"return",
"[",
"]",
",",
"agent",
"new_agent",
"=",
"deepcopy",
"(",
"agent",
")",
"mapped_sites",
"=",
"[",
"]",
"# Now iterate over all the modifications and map each one",
"for",
"idx",
",",
"mod_condition",
"in",
"enumerate",
"(",
"agent",
".",
"mods",
")",
":",
"mapped_site",
"=",
"self",
".",
"_map_agent_mod",
"(",
"agent",
",",
"mod_condition",
")",
"# If we couldn't do the mapping or the mapped site isn't invalid",
"# then we don't need to change the existing ModCondition",
"if",
"not",
"mapped_site",
"or",
"mapped_site",
".",
"not_invalid",
"(",
")",
":",
"continue",
"# Otherwise, if there is a mapping, we replace the old ModCondition",
"# with the new one where only the residue and position are updated,",
"# the mod type and the is modified flag are kept.",
"if",
"mapped_site",
".",
"has_mapping",
"(",
")",
":",
"mc",
"=",
"ModCondition",
"(",
"mod_condition",
".",
"mod_type",
",",
"mapped_site",
".",
"mapped_res",
",",
"mapped_site",
".",
"mapped_pos",
",",
"mod_condition",
".",
"is_modified",
")",
"new_agent",
".",
"mods",
"[",
"idx",
"]",
"=",
"mc",
"# Finally, whether or not we have a mapping, we keep track of mapped",
"# sites and make them available to the caller",
"mapped_sites",
".",
"append",
"(",
"mapped_site",
")",
"return",
"mapped_sites",
",",
"new_agent"
]
| Check an agent for invalid sites and update if necessary.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
Agent to check for invalid modification sites.
Returns
-------
tuple
The first element is a list of MappedSite objects, the second
element is either the original Agent, if unchanged, or a copy
of it. | [
"Check",
"an",
"agent",
"for",
"invalid",
"sites",
"and",
"update",
"if",
"necessary",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/sitemapper.py#L248-L289 | train |
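The agent-level counterpart, under the same assumptions about sm as above:

from indra.statements import Agent, ModCondition

mapk1 = Agent('MAPK1', db_refs={'UP': 'P28482'},
              mods=[ModCondition('phosphorylation', 'T', '183')])
mapped_sites, new_agent = sm._map_agent_sites(mapk1)
# new_agent.mods[0] would carry the corrected site ('T', '185'); the
# input agent is untouched because the method edits a deepcopy.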
sorgerlab/indra | indra/preassembler/sitemapper.py | SiteMapper._map_agent_mod | def _map_agent_mod(self, agent, mod_condition):
"""Map a single modification condition on an agent.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
Agent to check for invalid modification sites.
mod_condition : :py:class:`indra.statements.ModCondition`
Modification to check for validity and map.
Returns
-------
protmapper.MappedSite or None
A MappedSite object is returned if a UniProt ID was found for the
agent, and if both the position and residue for the modification
condition were available. Otherwise None is returned.
"""
# Get the UniProt ID of the agent, if not found, return
up_id = _get_uniprot_id(agent)
if not up_id:
logger.debug("No uniprot ID for %s" % agent.name)
return None
# If no site information for this residue, skip
if mod_condition.position is None or mod_condition.residue is None:
return None
# Otherwise, try to map it and return the mapped site
mapped_site = \
self.map_to_human_ref(up_id, 'uniprot',
mod_condition.residue,
mod_condition.position,
do_methionine_offset=self.do_methionine_offset,
do_orthology_mapping=self.do_orthology_mapping,
do_isoform_mapping=self.do_isoform_mapping)
return mapped_site | python | def _map_agent_mod(self, agent, mod_condition):
"""Map a single modification condition on an agent.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
Agent to check for invalid modification sites.
mod_condition : :py:class:`indra.statements.ModCondition`
Modification to check for validity and map.
Returns
-------
protmapper.MappedSite or None
A MappedSite object is returned if a UniProt ID was found for the
agent, and if both the position and residue for the modification
condition were available. Otherwise None is returned.
"""
# Get the UniProt ID of the agent, if not found, return
up_id = _get_uniprot_id(agent)
if not up_id:
logger.debug("No uniprot ID for %s" % agent.name)
return None
# If no site information for this residue, skip
if mod_condition.position is None or mod_condition.residue is None:
return None
# Otherwise, try to map it and return the mapped site
mapped_site = \
self.map_to_human_ref(up_id, 'uniprot',
mod_condition.residue,
mod_condition.position,
do_methionine_offset=self.do_methionine_offset,
do_orthology_mapping=self.do_orthology_mapping,
do_isoform_mapping=self.do_isoform_mapping)
return mapped_site | [
"def",
"_map_agent_mod",
"(",
"self",
",",
"agent",
",",
"mod_condition",
")",
":",
"# Get the UniProt ID of the agent, if not found, return",
"up_id",
"=",
"_get_uniprot_id",
"(",
"agent",
")",
"if",
"not",
"up_id",
":",
"logger",
".",
"debug",
"(",
"\"No uniprot ID for %s\"",
"%",
"agent",
".",
"name",
")",
"return",
"None",
"# If no site information for this residue, skip",
"if",
"mod_condition",
".",
"position",
"is",
"None",
"or",
"mod_condition",
".",
"residue",
"is",
"None",
":",
"return",
"None",
"# Otherwise, try to map it and return the mapped site",
"mapped_site",
"=",
"self",
".",
"map_to_human_ref",
"(",
"up_id",
",",
"'uniprot'",
",",
"mod_condition",
".",
"residue",
",",
"mod_condition",
".",
"position",
",",
"do_methionine_offset",
"=",
"self",
".",
"do_methionine_offset",
",",
"do_orthology_mapping",
"=",
"self",
".",
"do_orthology_mapping",
",",
"do_isoform_mapping",
"=",
"self",
".",
"do_isoform_mapping",
")",
"return",
"mapped_site"
]
| Map a single modification condition on an agent.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
Agent to check for invalid modification sites.
mod_condition : :py:class:`indra.statements.ModCondition`
Modification to check for validity and map.
Returns
-------
protmapper.MappedSite or None
A MappedSite object is returned if a UniProt ID was found for the
agent, and if both the position and residue for the modification
condition were available. Otherwise None is returned. | [
"Map",
"a",
"single",
"modification",
"condition",
"on",
"an",
"agent",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/sitemapper.py#L291-L324 | train |
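A sketch of the single-modification path; the MappedSite attribute names follow protmapper and should be treated as an assumption here:

from indra.statements import Agent, ModCondition

mapk1 = Agent('MAPK1', db_refs={'UP': 'P28482'})
mc = ModCondition('phosphorylation', 'T', '183')
ms = sm._map_agent_mod(mapk1, mc)
# ms.valid would be False and ms.mapped_res, ms.mapped_pos would give
# the corrected site ('T', '185'); None is returned if the agent has no
# resolvable UniProt ID or the residue/position is missing.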
sorgerlab/indra | indra/mechlinker/__init__.py | _get_graph_reductions | def _get_graph_reductions(graph):
"""Return transitive reductions on a DAG.
This is used to reduce the set of activities of a BaseAgent to the most
specific one(s) possible. For instance, if a BaseAgent is known to have
'activity', 'catalytic' and 'kinase' activity, then this function will
return {'activity': 'kinase', 'catalytic': 'kinase', 'kinase': 'kinase'}
as the set of reductions.
"""
def frontier(g, nd):
"""Return the nodes after nd in the topological sort that are at the
lowest possible level of the topological sort."""
if g.out_degree(nd) == 0:
return set([nd])
else:
frontiers = set()
for n in g.successors(nd):
frontiers = frontiers.union(frontier(graph, n))
return frontiers
reductions = {}
nodes_sort = list(networkx.algorithms.dag.topological_sort(graph))
frontiers = [frontier(graph, n) for n in nodes_sort]
# This loop ensures that if a node n2 comes after node n1 in the topological
# sort, and their frontiers are identical then n1 can be reduced to n2.
# If their frontiers aren't identical, the reduction cannot be done.
for i, n1 in enumerate(nodes_sort):
for j, n2 in enumerate(nodes_sort):
if i > j:
continue
if frontiers[i] == frontiers[j]:
reductions[n1] = n2
return reductions | python | def _get_graph_reductions(graph):
"""Return transitive reductions on a DAG.
This is used to reduce the set of activities of a BaseAgent to the most
specific one(s) possible. For instance, if a BaseAgent is known to have
'activity', 'catalytic' and 'kinase' activity, then this function will
return {'activity': 'kinase', 'catalytic': 'kinase', 'kinase': 'kinase'}
as the set of reductions.
"""
def frontier(g, nd):
"""Return the nodes after nd in the topological sort that are at the
lowest possible level of the topological sort."""
if g.out_degree(nd) == 0:
return set([nd])
else:
frontiers = set()
for n in g.successors(nd):
frontiers = frontiers.union(frontier(graph, n))
return frontiers
reductions = {}
nodes_sort = list(networkx.algorithms.dag.topological_sort(graph))
frontiers = [frontier(graph, n) for n in nodes_sort]
# This loop ensures that if a node n2 comes after node n1 in the topological
# sort, and their frontiers are identical then n1 can be reduced to n2.
# If their frontiers aren't identical, the reduction cannot be done.
for i, n1 in enumerate(nodes_sort):
for j, n2 in enumerate(nodes_sort):
if i > j:
continue
if frontiers[i] == frontiers[j]:
reductions[n1] = n2
return reductions | [
"def",
"_get_graph_reductions",
"(",
"graph",
")",
":",
"def",
"frontier",
"(",
"g",
",",
"nd",
")",
":",
"\"\"\"Return the nodes after nd in the topological sort that are at the\n lowest possible level of the topological sort.\"\"\"",
"if",
"g",
".",
"out_degree",
"(",
"nd",
")",
"==",
"0",
":",
"return",
"set",
"(",
"[",
"nd",
"]",
")",
"else",
":",
"frontiers",
"=",
"set",
"(",
")",
"for",
"n",
"in",
"g",
".",
"successors",
"(",
"nd",
")",
":",
"frontiers",
"=",
"frontiers",
".",
"union",
"(",
"frontier",
"(",
"graph",
",",
"n",
")",
")",
"return",
"frontiers",
"reductions",
"=",
"{",
"}",
"nodes_sort",
"=",
"list",
"(",
"networkx",
".",
"algorithms",
".",
"dag",
".",
"topological_sort",
"(",
"graph",
")",
")",
"frontiers",
"=",
"[",
"frontier",
"(",
"graph",
",",
"n",
")",
"for",
"n",
"in",
"nodes_sort",
"]",
"# This loop ensures that if a node n2 comes after node n1 in the topological",
"# sort, and their frontiers are identical then n1 can be reduced to n2.",
"# If their frontiers aren't identical, the reduction cannot be done.",
"for",
"i",
",",
"n1",
"in",
"enumerate",
"(",
"nodes_sort",
")",
":",
"for",
"j",
",",
"n2",
"in",
"enumerate",
"(",
"nodes_sort",
")",
":",
"if",
"i",
">",
"j",
":",
"continue",
"if",
"frontiers",
"[",
"i",
"]",
"==",
"frontiers",
"[",
"j",
"]",
":",
"reductions",
"[",
"n1",
"]",
"=",
"n2",
"return",
"reductions"
]
| Return transitive reductions on a DAG.
This is used to reduce the set of activities of a BaseAgent to the most
specific one(s) possible. For instance, if a BaseAgent is know to have
'activity', 'catalytic' and 'kinase' activity, then this function will
return {'activity': 'kinase', 'catalytic': 'kinase', 'kinase': 'kinase'}
as the set of reductions. | [
"Return",
"transitive",
"reductions",
"on",
"a",
"DAG",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L764-L795 | train |
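This reproduces the docstring's example on a three-node activity DAG; the printed result is exactly the reduction dictionary quoted above:

import networkx

g = networkx.DiGraph()
g.add_edges_from([('activity', 'catalytic'), ('catalytic', 'kinase')])
print(_get_graph_reductions(g))
# {'activity': 'kinase', 'catalytic': 'kinase', 'kinase': 'kinase'}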
sorgerlab/indra | indra/mechlinker/__init__.py | MechLinker.gather_explicit_activities | def gather_explicit_activities(self):
"""Aggregate all explicit activities and active forms of Agents.
This function iterates over self.statements and extracts explicitly
stated activity types and active forms for Agents.
"""
for stmt in self.statements:
agents = stmt.agent_list()
# Activity types given as ActivityConditions
for agent in agents:
if agent is not None and agent.activity is not None:
agent_base = self._get_base(agent)
agent_base.add_activity(agent.activity.activity_type)
# Object activities given in RegulateActivity statements
if isinstance(stmt, RegulateActivity):
if stmt.obj is not None:
obj_base = self._get_base(stmt.obj)
obj_base.add_activity(stmt.obj_activity)
# Activity types given in ActiveForms
elif isinstance(stmt, ActiveForm):
agent_base = self._get_base(stmt.agent)
agent_base.add_activity(stmt.activity)
if stmt.is_active:
agent_base.add_active_state(stmt.activity, stmt.agent,
stmt.evidence)
else:
agent_base.add_inactive_state(stmt.activity, stmt.agent,
stmt.evidence) | python | def gather_explicit_activities(self):
"""Aggregate all explicit activities and active forms of Agents.
This function iterates over self.statements and extracts explicitly
stated activity types and active forms for Agents.
"""
for stmt in self.statements:
agents = stmt.agent_list()
# Activity types given as ActivityConditions
for agent in agents:
if agent is not None and agent.activity is not None:
agent_base = self._get_base(agent)
agent_base.add_activity(agent.activity.activity_type)
# Object activities given in RegulateActivity statements
if isinstance(stmt, RegulateActivity):
if stmt.obj is not None:
obj_base = self._get_base(stmt.obj)
obj_base.add_activity(stmt.obj_activity)
# Activity types given in ActiveForms
elif isinstance(stmt, ActiveForm):
agent_base = self._get_base(stmt.agent)
agent_base.add_activity(stmt.activity)
if stmt.is_active:
agent_base.add_active_state(stmt.activity, stmt.agent,
stmt.evidence)
else:
agent_base.add_inactive_state(stmt.activity, stmt.agent,
stmt.evidence) | [
"def",
"gather_explicit_activities",
"(",
"self",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"agents",
"=",
"stmt",
".",
"agent_list",
"(",
")",
"# Activity types given as ActivityConditions",
"for",
"agent",
"in",
"agents",
":",
"if",
"agent",
"is",
"not",
"None",
"and",
"agent",
".",
"activity",
"is",
"not",
"None",
":",
"agent_base",
"=",
"self",
".",
"_get_base",
"(",
"agent",
")",
"agent_base",
".",
"add_activity",
"(",
"agent",
".",
"activity",
".",
"activity_type",
")",
"# Object activities given in RegulateActivity statements",
"if",
"isinstance",
"(",
"stmt",
",",
"RegulateActivity",
")",
":",
"if",
"stmt",
".",
"obj",
"is",
"not",
"None",
":",
"obj_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"obj",
")",
"obj_base",
".",
"add_activity",
"(",
"stmt",
".",
"obj_activity",
")",
"# Activity types given in ActiveForms",
"elif",
"isinstance",
"(",
"stmt",
",",
"ActiveForm",
")",
":",
"agent_base",
"=",
"self",
".",
"_get_base",
"(",
"stmt",
".",
"agent",
")",
"agent_base",
".",
"add_activity",
"(",
"stmt",
".",
"activity",
")",
"if",
"stmt",
".",
"is_active",
":",
"agent_base",
".",
"add_active_state",
"(",
"stmt",
".",
"activity",
",",
"stmt",
".",
"agent",
",",
"stmt",
".",
"evidence",
")",
"else",
":",
"agent_base",
".",
"add_inactive_state",
"(",
"stmt",
".",
"activity",
",",
"stmt",
".",
"agent",
",",
"stmt",
".",
"evidence",
")"
]
| Aggregate all explicit activities and active forms of Agents.
This function iterates over self.statements and extracts explicitly
stated activity types and active forms for Agents. | [
"Aggregate",
"all",
"explicit",
"activities",
"and",
"active",
"forms",
"of",
"Agents",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/mechlinker/__init__.py#L39-L66 | train |
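A usage sketch, assuming MechLinker accepts a list of Statements at construction (the signature is not shown in this row):

from indra.mechlinker import MechLinker
from indra.statements import Agent, ActiveForm, ModCondition

af = ActiveForm(Agent('MAP2K1',
                      mods=[ModCondition('phosphorylation', 'S', '222')]),
                'kinase', True)
ml = MechLinker([af])
ml.gather_explicit_activities()
# The MAP2K1 BaseAgent now records 'kinase' among its activity types
# and the phosphorylated form among its active states.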