Dataset schema:

    field              type    lengths / classes
    repo               string  7 to 55 chars
    path               string  4 to 127 chars
    func_name          string  1 to 88 chars
    original_string    string  75 to 19.8k chars
    language           string  1 class (python)
    code               string  75 to 19.8k chars
    code_tokens        list
    docstring          string  3 to 17.3k chars
    docstring_tokens   list
    sha                string  40 chars (fixed)
    url                string  87 to 242 chars
    partition          string  1 class (train)

repo: sorgerlab/indra
path: indra/sources/reach/api.py
func_name: process_pubmed_abstract
language: python
code:

def process_pubmed_abstract(pubmed_id, offline=False,
                            output_fname=default_output_fname, **kwargs):
    """Return a ReachProcessor by processing an abstract with a given
    Pubmed id.

    Uses the Pubmed client to get the abstract. If that fails, None is
    returned.

    Parameters
    ----------
    pubmed_id : str
        The ID of a Pubmed article. The string may start with PMID but
        passing just the ID also works.
        Examples: 27168024, PMID27168024
        https://www.ncbi.nlm.nih.gov/pubmed/
    offline : Optional[bool]
        If set to True, the REACH system is run offline. Otherwise (by
        default) the web service is called. Default: False
    output_fname : Optional[str]
        The file to output the REACH JSON output to.
        Defaults to reach_output.json in the current working directory.
    **kwargs : keyword arguments
        All other keyword arguments are passed directly to `process_text`.

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements in
        rp.statements.
    """
    abs_txt = pubmed_client.get_abstract(pubmed_id)
    if abs_txt is None:
        return None
    rp = process_text(abs_txt, citation=pubmed_id, offline=offline,
                      output_fname=output_fname, **kwargs)
    if rp and rp.statements:
        for st in rp.statements:
            for ev in st.evidence:
                # Mark all evidence as coming from an abstract
                ev.epistemics['section_type'] = 'abstract'
    return rp
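
A minimal usage sketch (not part of the source record): it assumes the indra package is installed and the REACH web service is reachable; the PMID is the example given in the docstring.

    from indra.sources.reach.api import process_pubmed_abstract

    # 27168024 is the example PMID from the docstring above
    rp = process_pubmed_abstract('27168024')
    if rp is not None:
        for stmt in rp.statements:
            print(stmt)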
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L77-L115
partition: train

repo: sorgerlab/indra
path: indra/sources/reach/api.py
func_name: process_text
language: python
code:

def process_text(text, citation=None, offline=False,
                 output_fname=default_output_fname, timeout=None):
    """Return a ReachProcessor by processing the given text.

    Parameters
    ----------
    text : str
        The text to be processed.
    citation : Optional[str]
        A PubMed ID passed to be used in the evidence for the extracted
        INDRA Statements. This is used when the text to be processed comes
        from a publication that is not otherwise identified. Default: None
    offline : Optional[bool]
        If set to True, the REACH system is run offline. Otherwise (by
        default) the web service is called. Default: False
    output_fname : Optional[str]
        The file to output the REACH JSON output to.
        Defaults to reach_output.json in the current working directory.
    timeout : Optional[float]
        This only applies when reading online (`offline=False`). Only wait
        for `timeout` seconds for the API to respond.

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements in
        rp.statements.
    """
    if offline:
        if not try_offline:
            logger.error('Offline reading is not available.')
            return None
        try:
            api_ruler = reach_reader.get_api_ruler()
        except ReachOfflineReadingError as e:
            logger.error(e)
            logger.error('Cannot read offline because the REACH ApiRuler '
                         'could not be instantiated.')
            return None
        try:
            result_map = api_ruler.annotateText(text, 'fries')
        except JavaException as e:
            logger.error('Could not process text.')
            logger.error(e)
            return None
        # REACH version < 1.3.3
        json_str = result_map.get('resultJson')
        if not json_str:
            # REACH version >= 1.3.3
            json_str = result_map.get('result')
        if not isinstance(json_str, bytes):
            json_str = json_str.encode('utf-8')
    else:
        data = {'text': text.encode('utf-8')}
        try:
            res = requests.post(reach_text_url, data, timeout=timeout)
        except requests.exceptions.RequestException as e:
            logger.error('Could not connect to REACH service:')
            logger.error(e)
            return None
        # TODO: we could use res.json() here to get a dict directly
        # This is a byte string
        json_str = res.content
    if not isinstance(json_str, bytes):
        raise TypeError('{} is {} instead of {}'.format(json_str,
                                                        json_str.__class__,
                                                        bytes))
    with open(output_fname, 'wb') as fh:
        fh.write(json_str)
    return process_json_str(json_str.decode('utf-8'), citation)
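
A minimal usage sketch (not part of the source record): the example sentence is made up, and the call assumes the REACH web service is reachable.

    from indra.sources.reach.api import process_text

    # Any short biomedical text works here; this sentence is illustrative
    rp = process_text('MEK1 phosphorylates ERK2.', timeout=60)
    if rp is not None:
        print(rp.statements)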
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L118-L187
partition: train

repo: sorgerlab/indra
path: indra/sources/reach/api.py
func_name: process_nxml_str
language: python
code:

def process_nxml_str(nxml_str, citation=None, offline=False,
                     output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML string.

    NXML is the format used by PubmedCentral for papers in the open
    access subset.

    Parameters
    ----------
    nxml_str : str
        The NXML string to be processed.
    citation : Optional[str]
        A PubMed ID passed to be used in the evidence for the extracted
        INDRA Statements. Default: None
    offline : Optional[bool]
        If set to True, the REACH system is run offline. Otherwise (by
        default) the web service is called. Default: False
    output_fname : Optional[str]
        The file to output the REACH JSON output to.
        Defaults to reach_output.json in the current working directory.

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements in
        rp.statements.
    """
    if offline:
        if not try_offline:
            logger.error('Offline reading is not available.')
            return None
        try:
            api_ruler = reach_reader.get_api_ruler()
        except ReachOfflineReadingError as e:
            logger.error(e)
            logger.error('Cannot read offline because the REACH ApiRuler '
                         'could not be instantiated.')
            return None
        try:
            result_map = api_ruler.annotateNxml(nxml_str, 'fries')
        except JavaException as e:
            logger.error('Could not process NXML.')
            logger.error(e)
            return None
        # REACH version < 1.3.3
        json_str = result_map.get('resultJson')
        if not json_str:
            # REACH version >= 1.3.3
            json_str = result_map.get('result')
        if json_str is None:
            logger.warning('No results retrieved')
            return None
        if isinstance(json_str, bytes):
            json_str = json_str.decode('utf-8')
        return process_json_str(json_str, citation)
    else:
        data = {'nxml': nxml_str}
        try:
            res = requests.post(reach_nxml_url, data)
        except requests.exceptions.RequestException as e:
            logger.error('Could not connect to REACH service:')
            logger.error(e)
            return None
        if res.status_code != 200:
            logger.error('Could not process NXML via REACH service. '
                         'Status code: %d' % res.status_code)
            return None
        json_str = res.text
        with open(output_fname, 'wb') as fh:
            fh.write(json_str.encode('utf-8'))
        return process_json_str(json_str, citation)
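
A usage sketch reflecting an assumed workflow (not from the source record): fetch open-access NXML with the pmc_client used elsewhere in this repo, then process it. The PMCID below is a placeholder, and importing pmc_client from indra.literature is an assumption about the package layout.

    from indra.literature import pmc_client
    from indra.sources.reach.api import process_nxml_str

    pmcid = 'PMC0000000'  # placeholder PMCID; substitute a real one
    nxml = pmc_client.get_xml(pmcid)
    if nxml is not None:
        rp = process_nxml_str(nxml)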
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L190-L261
partition: train

repo: sorgerlab/indra
path: indra/sources/reach/api.py
func_name: process_nxml_file
language: python
code:

def process_nxml_file(file_name, citation=None, offline=False,
                      output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML file.

    NXML is the format used by PubmedCentral for papers in the open
    access subset.

    Parameters
    ----------
    file_name : str
        The name of the NXML file to be processed.
    citation : Optional[str]
        A PubMed ID passed to be used in the evidence for the extracted
        INDRA Statements. Default: None
    offline : Optional[bool]
        If set to True, the REACH system is run offline. Otherwise (by
        default) the web service is called. Default: False
    output_fname : Optional[str]
        The file to output the REACH JSON output to.
        Defaults to reach_output.json in the current working directory.

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements in
        rp.statements.
    """
    with open(file_name, 'rb') as f:
        nxml_str = f.read().decode('utf-8')
    # Forward the offline flag so the documented offline option takes effect
    return process_nxml_str(nxml_str, citation, offline, output_fname)
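
A minimal usage sketch (not part of the source record); the file path is a placeholder.

    from indra.sources.reach.api import process_nxml_file

    # 'paper.nxml' is a placeholder path to a local PMC NXML file
    rp = process_nxml_file('paper.nxml')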
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L264-L293
partition: train

repo: sorgerlab/indra
path: indra/sources/reach/api.py
func_name: process_json_file
language: python
code:

def process_json_file(file_name, citation=None):
    """Return a ReachProcessor by processing the given REACH JSON file.

    The output from the REACH parser is in this JSON format. This function
    is useful if the output is saved as a file and needs to be processed.
    For more information on the format, see: https://github.com/clulab/reach

    Parameters
    ----------
    file_name : str
        The name of the JSON file to be processed.
    citation : Optional[str]
        A PubMed ID passed to be used in the evidence for the extracted
        INDRA Statements. Default: None

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements in
        rp.statements.
    """
    try:
        with open(file_name, 'rb') as fh:
            json_str = fh.read().decode('utf-8')
            return process_json_str(json_str, citation)
    except IOError:
        logger.error('Could not read file %s.' % file_name)
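
A minimal usage sketch (not part of the source record): reprocess a previously saved REACH output file, assuming the default file name written by the process_* functions above.

    from indra.sources.reach.api import process_json_file

    # reach_output.json is the default output file name used in this module
    rp = process_json_file('reach_output.json')
    if rp is not None:
        print(len(rp.statements), 'statements')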
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L296-L322
partition: train

repo: sorgerlab/indra
path: indra/sources/reach/api.py
func_name: process_json_str
language: python
code:

def process_json_str(json_str, citation=None):
    """Return a ReachProcessor by processing the given REACH JSON string.

    The output from the REACH parser is in this JSON format. For more
    information on the format, see: https://github.com/clulab/reach

    Parameters
    ----------
    json_str : str
        The JSON string to be processed.
    citation : Optional[str]
        A PubMed ID passed to be used in the evidence for the extracted
        INDRA Statements. Default: None

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements in
        rp.statements.
    """
    if not isinstance(json_str, basestring):
        raise TypeError('{} is {} instead of {}'.format(json_str,
                                                        json_str.__class__,
                                                        basestring))
    # Normalize hyphenated REACH JSON keys to underscored names
    json_str = json_str.replace('frame-id', 'frame_id')
    json_str = json_str.replace('argument-label', 'argument_label')
    json_str = json_str.replace('object-meta', 'object_meta')
    json_str = json_str.replace('doc-id', 'doc_id')
    json_str = json_str.replace('is-hypothesis', 'is_hypothesis')
    json_str = json_str.replace('is-negated', 'is_negated')
    json_str = json_str.replace('is-direct', 'is_direct')
    json_str = json_str.replace('found-by', 'found_by')
    try:
        json_dict = json.loads(json_str)
    except ValueError:
        logger.error('Could not decode JSON string.')
        return None
    rp = ReachProcessor(json_dict, citation)
    rp.get_modifications()
    rp.get_complexes()
    rp.get_activation()
    rp.get_translocation()
    rp.get_regulate_amounts()
    return rp
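
A minimal usage sketch (not part of the source record): read a saved REACH JSON file and process the string directly.

    from indra.sources.reach.api import process_json_str

    with open('reach_output.json', 'r') as fh:
        rp = process_json_str(fh.read())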
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L325-L369
partition: train

repo: sorgerlab/indra
path: indra/tools/reading/wait_for_complete.py
func_name: make_parser
language: python
code:

def make_parser():
    """Generate the parser for this script."""
    parser = ArgumentParser(
        'wait_for_complete.py',
        usage='%(prog)s [-h] queue_name [options]',
        description=('Wait for a set of batch jobs to complete, and monitor '
                     'them as they run.'),
        epilog=('Jobs can also be monitored, terminated, and otherwise '
                'managed on the AWS website. However this tool will also tag '
                'the instances, and should be run whenever a job is submitted '
                'to AWS.')
        )
    parser.add_argument(
        dest='queue_name',
        help=('The name of the queue to watch and wait for completion. If no '
              'jobs are specified, this will wait until all jobs in the queue '
              'are completed (either SUCCEEDED or FAILED).')
        )
    parser.add_argument(
        '--watch', '-w',
        dest='job_list',
        metavar='JOB_ID',
        nargs='+',
        help=('Specify particular jobs using their job ids, as reported by '
              'the submit command. Many ids may be specified.')
        )
    parser.add_argument(
        '--prefix', '-p',
        dest='job_name_prefix',
        help='Specify a prefix for the name of the jobs to watch and wait for.'
        )
    parser.add_argument(
        '--interval', '-i',
        dest='poll_interval',
        default=10,
        type=int,
        help=('The time interval to wait between job status checks, in '
              'seconds (default: %(default)d seconds).')
        )
    parser.add_argument(
        '--timeout', '-T',
        metavar='TIMEOUT',
        type=int,
        help=('If the logs are not updated for %(metavar)s seconds, '
              'print a warning. If the `--kill_on_timeout` flag is set, then '
              'the offending jobs will be automatically terminated.')
        )
    parser.add_argument(
        '--kill_on_timeout', '-K',
        action='store_true',
        help='If a log times out, terminate the offending job.'
        )
    parser.add_argument(
        '--stash_log_method', '-l',
        choices=['s3', 'local'],
        metavar='METHOD',
        help=('Select a method from: [%(choices)s] to store the job logs. '
              'If no method is specified, the logs will not be '
              'loaded off of AWS. If \'s3\' is specified, then '
              '`job_name_prefix` must also be given, as this will indicate '
              'where on s3 to store the logs.')
        )
    return parser
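
A minimal usage sketch (not part of the source record): build the parser and parse an illustrative argument list; the queue name is a placeholder.

    from indra.tools.reading.wait_for_complete import make_parser

    parser = make_parser()
    # 'reading_queue' is a placeholder AWS Batch queue name
    args = parser.parse_args(['reading_queue', '--interval', '30'])
    print(args.queue_name, args.poll_interval)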
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/wait_for_complete.py#L4-L66
partition: train

repo: sorgerlab/indra
path: indra/literature/__init__.py
func_name: id_lookup
language: python
code:

def id_lookup(paper_id, idtype):
    """Take an ID of type PMID, PMCID, or DOI and look up the other IDs.

    If the DOI is not found in Pubmed, try to obtain the DOI by doing a
    reverse-lookup of the DOI in CrossRef using article metadata.

    Parameters
    ----------
    paper_id : str
        ID of the article.
    idtype : str
        Type of the ID: 'pmid', 'pmcid', or 'doi'.

    Returns
    -------
    ids : dict
        A dictionary with the following keys: pmid, pmcid and doi.
    """
    if idtype not in ('pmid', 'pmcid', 'doi'):
        raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
                         "or 'doi'." % idtype)
    ids = {'doi': None, 'pmid': None, 'pmcid': None}
    pmc_id_results = pmc_client.id_lookup(paper_id, idtype)
    # Start with the results of the PMC lookup and then override with the
    # provided ID
    ids['pmid'] = pmc_id_results.get('pmid')
    ids['pmcid'] = pmc_id_results.get('pmcid')
    ids['doi'] = pmc_id_results.get('doi')
    ids[idtype] = paper_id
    # If we gave a DOI, then our work is done after looking for PMID and PMCID
    if idtype == 'doi':
        return ids
    # If we gave a PMID or PMCID, we need to check to see if we got a DOI.
    # If we got a DOI back, we're done.
    elif ids.get('doi'):
        return ids
    # If we get here, then we've given PMID or PMCID and don't have a DOI yet.
    # If we gave a PMCID and have neither a PMID nor a DOI, then we'll run
    # into problems later on when we try to do the reverse lookup using
    # CrossRef. So we bail here and return what we have (PMCID only) with a
    # warning.
    if ids.get('pmcid') and ids.get('doi') is None and ids.get('pmid') is None:
        logger.warning('%s: PMCID without PMID or DOI' % ids.get('pmcid'))
        return ids
    # To clarify the state of things at this point:
    assert ids.get('pmid') is not None
    assert ids.get('doi') is None
    # As a last resort, we try to get the DOI from CrossRef (which internally
    # tries to get the DOI from Pubmed in the process of collecting the
    # necessary metadata for the lookup):
    ids['doi'] = crossref_client.doi_query(ids['pmid'])
    # It may still be None, but at this point there's nothing we can do...
    return ids
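
A minimal usage sketch (not part of the source record): look up the sibling IDs for the example PMID used in the REACH docstrings above; network access to the PMC ID converter is assumed.

    from indra.literature import id_lookup

    ids = id_lookup('27168024', 'pmid')
    print(ids['pmid'], ids['pmcid'], ids['doi'])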
sha: 79a70415832c5702d7a820c7c9ccc8e25010124b
url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/__init__.py#L19-L71
partition: train

repo: sorgerlab/indra
path: indra/literature/__init__.py
func_name: get_full_text
language: python
code:

def get_full_text(paper_id, idtype, preferred_content_type='text/xml'):
    """Return the content and the content type of an article.

    This function retrieves the content of an article by its PubMed ID,
    PubMed Central ID, or DOI. It prioritizes full text content when
    available and returns an abstract from PubMed as a fallback.

    Parameters
    ----------
    paper_id : str
        ID of the article.
    idtype : str
        Type of the ID: 'pmid', 'pmcid', or 'doi'.
    preferred_content_type : Optional[str]
        Preference for full-text format, if available. Can be one of
        'text/xml', 'text/plain', 'application/pdf'. Default: 'text/xml'

    Returns
    -------
    content : str
        The content of the article.
    content_type : str
        The content type of the article.
    """
    if preferred_content_type not in \
            ('text/xml', 'text/plain', 'application/pdf'):
        raise ValueError("preferred_content_type must be one of 'text/xml', "
                         "'text/plain', or 'application/pdf'.")
    ids = id_lookup(paper_id, idtype)
    pmcid = ids.get('pmcid')
    pmid = ids.get('pmid')
    doi = ids.get('doi')
    # First try to find the paper via PMC
    if pmcid:
        nxml = pmc_client.get_xml(pmcid)
        if nxml:
            return nxml, 'pmc_oa_xml'
    # If we got here, it means we didn't find the full text in PMC, so we'll
    # need either the DOI (for lookup in CrossRef) and/or the PMID (so we
    # can fall back on the abstract). If by some strange turn we have
    # neither, give up now.
    if not doi and not pmid:
        return (None, None)
    # If it does not have PMC NXML then we attempt to obtain the full text
    # through the CrossRef Click-through API
    if doi:
        # Get the publisher
        publisher = crossref_client.get_publisher(doi)
        # First check whether this is Elsevier--if so, use the Elsevier
        # client directly, because the Clickthrough API key seems unreliable.
        # Return full XML.
        if publisher == 'Elsevier BV':
            logger.info('Elsevier: %s' % pmid)
            # article = elsevier_client.get_article(doi, output='txt')
            try:
                article_xml = elsevier_client.download_article(doi)
            except Exception as e:
                logger.error("Error downloading Elsevier article: %s" % e)
                article_xml = None
            if article_xml is not None:
                return (article_xml, 'elsevier_xml')
        # FIXME FIXME FIXME
        # Because we don't yet have a way to process non-Elsevier content
        # obtained from CrossRef, which includes both XML of unknown format
        # and PDFs, we just comment this section out for now
        """
        # Check if there are any full text links
        links = crossref_client.get_fulltext_links(doi)
        if links:
            headers = {}
            # Set the Cross Ref Clickthrough API key in the header, if we've
            # got one
            cr_api_key = crossref_client.get_api_key()
            if cr_api_key is not None:
                headers['CR-Clickthrough-Client-Token'] = cr_api_key
            # Utility function to get particular links by content-type
            def lookup_content_type(link_list, content_type):
                content_list = [l.get('URL') for l in link_list
                                if l.get('content-type') == content_type]
                return None if not content_list else content_list[0]
            # First check for what the user asked for
            if lookup_content_type(links, preferred_content_type):
                req = requests.get(lookup_content_type(links,
                                                       preferred_content_type),
                                   headers=headers)
                if req.status_code == 200:
                    req_content_type = req.headers['Content-Type']
                    return req.text, req_content_type
                elif req.status_code == 400:
                    logger.warning('Full text query returned 400 (Bad '
                                   'Request): Perhaps missing CrossRef '
                                   'Clickthrough API key?')
                    return (None, None)
            # Check for XML first
            if lookup_content_type(links, 'text/xml'):
                req = requests.get(lookup_content_type(links, 'text/xml'),
                                   headers=headers)
                if req.status_code == 200:
                    req_content_type = req.headers['Content-Type']
                    return req.text, req_content_type
                elif req.status_code == 400:
                    logger.warning('Full text query returned 400 (Bad '
                                   'Request): Perhaps missing CrossRef '
                                   'Clickthrough API key?')
                    return (None, None)
            # Next, plain text
            elif lookup_content_type(links, 'text/plain'):
                req = requests.get(lookup_content_type(links, 'text/plain'),
                                   headers=headers)
                if req.status_code == 200:
                    req_content_type = req.headers['Content-Type']
                    return req.text, req_content_type
                elif req.status_code == 400:
                    logger.warning('Full text query returned 400 (Bad '
                                   'Request): Perhaps missing CrossRef '
                                   'Clickthrough API key?')
                    return (None, None)
            elif lookup_content_type(links, 'application/pdf'):
                pass
            # Wiley's links are often of content-type 'unspecified'.
            elif lookup_content_type(links, 'unspecified'):
                req = requests.get(lookup_content_type(links, 'unspecified'),
                                   headers=headers)
                if req.status_code == 200:
                    req_content_type = req.headers['Content-Type']
                    return 'foo', req_content_type
                elif req.status_code == 400:
                    logger.warning('Full text query returned 400 (Bad '
                                   'Request): Perhaps missing CrossRef '
                                   'Clickthrough API key?')
                    return (None, None)
                elif req.status_code == 401:
                    logger.warning('Full text query returned 401 '
                                   '(Unauthorized)')
                    return (None, None)
                elif req.status_code == 403:
                    logger.warning('Full text query returned 403 (Forbidden)')
                    return (None, None)
            else:
                raise Exception("Unknown content type(s): %s" % links)
        elif publisher == 'American Society for Biochemistry & Molecular ' \
                          'Biology (ASBMB)':
            url = crossref_client.get_url(doi)
            return get_asbmb_full_text(url)
        """
        # end FIXME FIXME FIXME
        # No full text links and not a publisher we support. We'll have to
        # fall back to the abstract.
        # elif pmid:
        if pmid:
            abstract = pubmed_client.get_abstract(pmid)
            if abstract is None:
                return (None, None)
            else:
                return abstract, 'abstract'
        # We have a useless DOI and no PMID. Give up.
        else:
            return (None, None)
    # We don't have a DOI but we're guaranteed to have a PMID at this point,
    # so we fall back to the abstract:
    else:
        abstract = pubmed_client.get_abstract(pmid)
        if abstract is None:
            return (None, None)
        else:
            return abstract, 'abstract'
    # We'll only get here if we've missed a combination of conditions
    assert False
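
A minimal usage sketch (not part of the source record): fetch the best available content for the example PMID used above; network access to PMC, CrossRef, and PubMed is assumed.

    from indra.literature import get_full_text

    content, content_type = get_full_text('27168024', 'pmid')
    if content_type == 'pmc_oa_xml':
        print('Got full-text NXML from PMC.')
    elif content_type == 'abstract':
        print('Only the abstract was available.')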
Return the content and the content type of an article. This function retrieves the content of an article by its PubMed ID, PubMed Central ID, or DOI. It prioritizes full text content when available and returns an abstract from PubMed as a fallback. Parameters ---------- paper_id : string ID of the article. idtype : 'pmid', 'pmcid', or 'doi' Type of the ID. preferred_content_type : Optional[str] Preference for full-text format, if available. Can be one of 'text/xml', 'text/plain', 'application/pdf'. Default: 'text/xml' Returns ------- content : str The content of the article. content_type : str The content type of the article.
[ "Return", "the", "content", "and", "the", "content", "type", "of", "an", "article", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/__init__.py#L74-L244
train
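A minimal usage sketch for the record above, assuming network access; get_full_text is importable from indra.literature (the module given in the record's path), and the PMID below is purely illustrative.

# Usage sketch (assumes network access; the PMID is illustrative).
from indra.literature import get_full_text

content, content_type = get_full_text('27168024', 'pmid')
if content_type == 'pmc_oa_xml':
    print('Got full text from PMC')
elif content_type == 'abstract':
    print('Fell back to the PubMed abstract')
elif content_type is None:
    print('No content could be retrieved')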
sorgerlab/indra
indra/sources/reach/reader.py
ReachReader.get_api_ruler
def get_api_ruler(self): """Return the existing reader if it exists or launch a new one. Returns ------- api_ruler : org.clulab.reach.export.apis.ApiRuler An instance of the REACH ApiRuler class (java object). """ if self.api_ruler is None: try: self.api_ruler = \ autoclass('org.clulab.reach.export.apis.ApiRuler') except JavaException as e: raise ReachOfflineReadingError(e) return self.api_ruler
python
def get_api_ruler(self): """Return the existing reader if it exists or launch a new one. Returns ------- api_ruler : org.clulab.reach.export.apis.ApiRuler An instance of the REACH ApiRuler class (java object). """ if self.api_ruler is None: try: self.api_ruler = \ autoclass('org.clulab.reach.export.apis.ApiRuler') except JavaException as e: raise ReachOfflineReadingError(e) return self.api_ruler
[ "def", "get_api_ruler", "(", "self", ")", ":", "if", "self", ".", "api_ruler", "is", "None", ":", "try", ":", "self", ".", "api_ruler", "=", "autoclass", "(", "'org.clulab.reach.export.apis.ApiRuler'", ")", "except", "JavaException", "as", "e", ":", "raise", "ReachOfflineReadingError", "(", "e", ")", "return", "self", ".", "api_ruler" ]
Return the existing reader if it exists or launch a new one. Returns ------- api_ruler : org.clulab.reach.export.apis.ApiRuler An instance of the REACH ApiRuler class (java object).
[ "Return", "the", "existing", "reader", "if", "it", "exists", "or", "launch", "a", "new", "one", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/reader.py#L55-L69
train
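A brief sketch of the caching behavior documented above, assuming a working offline REACH setup (a local REACH jar and a JVM configured for pyjnius); that ReachReader can be constructed with no arguments is an assumption here.

# Sketch (assumes a local REACH jar and configured JVM; a no-argument
# ReachReader() constructor is an assumption).
from indra.sources.reach.reader import ReachReader

reader = ReachReader()
ruler = reader.get_api_ruler()        # first call launches the ApiRuler
ruler_again = reader.get_api_ruler()  # later calls reuse the same object
assert ruler is ruler_again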
sorgerlab/indra
indra/sources/biogrid.py
_download_biogrid_data
def _download_biogrid_data(url): """Downloads zipped, tab-separated Biogrid data in .tab2 format. Parameters ---------- url : str URL of the BioGrid zip file. Returns ------- csv.reader A csv.reader object for iterating over the rows (header has already been skipped). """ res = requests.get(url) if res.status_code != 200: raise Exception('Unable to download Biogrid data: status code %s' % res.status_code) zip_bytes = BytesIO(res.content) zip_file = ZipFile(zip_bytes) zip_info_list = zip_file.infolist() # There should be only one file in this zip archive if len(zip_info_list) != 1: raise Exception('There should be exactly one file in BioGrid zip ' 'archive: %s' % str(zip_info_list)) unzipped_bytes = zip_file.read(zip_info_list[0]) # Unzip the file biogrid_str = StringIO(unzipped_bytes.decode('utf8')) # Make file-like obj csv_reader = csv.reader(biogrid_str, delimiter='\t') # Get csv reader next(csv_reader) # Skip the header return csv_reader
python
def _download_biogrid_data(url): """Downloads zipped, tab-separated Biogrid data in .tab2 format. Parameters ---------- url : str URL of the BioGrid zip file. Returns ------- csv.reader A csv.reader object for iterating over the rows (header has already been skipped). """ res = requests.get(url) if res.status_code != 200: raise Exception('Unable to download Biogrid data: status code %s' % res.status_code) zip_bytes = BytesIO(res.content) zip_file = ZipFile(zip_bytes) zip_info_list = zip_file.infolist() # There should be only one file in this zip archive if len(zip_info_list) != 1: raise Exception('There should be exactly one file in BioGrid zip ' 'archive: %s' % str(zip_info_list)) unzipped_bytes = zip_file.read(zip_info_list[0]) # Unzip the file biogrid_str = StringIO(unzipped_bytes.decode('utf8')) # Make file-like obj csv_reader = csv.reader(biogrid_str, delimiter='\t') # Get csv reader next(csv_reader) # Skip the header return csv_reader
[ "def", "_download_biogrid_data", "(", "url", ")", ":", "res", "=", "requests", ".", "get", "(", "biogrid_file_url", ")", "if", "res", ".", "status_code", "!=", "200", ":", "raise", "Exception", "(", "'Unable to download Biogrid data: status code %s'", "%", "res", ".", "status_code", ")", "zip_bytes", "=", "BytesIO", "(", "res", ".", "content", ")", "zip_file", "=", "ZipFile", "(", "zip_bytes", ")", "zip_info_list", "=", "zip_file", ".", "infolist", "(", ")", "# There should be only one file in this zip archive", "if", "len", "(", "zip_info_list", ")", "!=", "1", ":", "raise", "Exception", "(", "'There should be exactly zipfile in BioGrid zip '", "'archive: %s'", "%", "str", "(", "zip_info_list", ")", ")", "unzipped_bytes", "=", "zip_file", ".", "read", "(", "zip_info_list", "[", "0", "]", ")", "# Unzip the file", "biogrid_str", "=", "StringIO", "(", "unzipped_bytes", ".", "decode", "(", "'utf8'", ")", ")", "# Make file-like obj", "csv_reader", "=", "csv", ".", "reader", "(", "biogrid_str", ",", "delimiter", "=", "'\\t'", ")", "# Get csv reader", "next", "(", "csv_reader", ")", "# Skip the header", "return", "csv_reader" ]
Downloads zipped, tab-separated Biogrid data in .tab2 format. Parameters ---------- url : str URL of the BioGrid zip file. Returns ------- csv.reader A csv.reader object for iterating over the rows (header has already been skipped).
[ "Downloads", "zipped", "tab", "-", "separated", "Biogrid", "data", "in", ".", "tab2", "format", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biogrid.py#L156-L185
train
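A short sketch of how the private helper above might be driven; the URL is hypothetical and stands in for the module-level biogrid_file_url (note that the helper, as fixed above, downloads from its url argument).

# Sketch; the URL is hypothetical and the helper is private to the module.
from indra.sources.biogrid import _download_biogrid_data

url = 'https://example.org/BIOGRID-ALL-LATEST.tab2.zip'  # hypothetical
reader = _download_biogrid_data(url)
first_row = next(reader)  # header row already skipped by the helper
print(len(first_row), 'tab-separated fields')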
sorgerlab/indra
indra/sources/biogrid.py
BiogridProcessor._make_agent
def _make_agent(self, entrez_id, text_id): """Make an Agent object, appropriately grounded. Parameters ---------- entrez_id : str Entrez id number text_id : str A plain text systematic name, or None if not listed. Returns ------- agent : indra.statements.Agent A grounded agent object. """ hgnc_name, db_refs = self._make_db_refs(entrez_id, text_id) if hgnc_name is not None: name = hgnc_name elif text_id is not None: name = text_id # Handle case where the name is None else: return None return Agent(name, db_refs=db_refs)
python
def _make_agent(self, entrez_id, text_id): """Make an Agent object, appropriately grounded. Parameters ---------- entrez_id : str Entrez id number text_id : str A plain text systematic name, or None if not listed. Returns ------- agent : indra.statements.Agent A grounded agent object. """ hgnc_name, db_refs = self._make_db_refs(entrez_id, text_id) if hgnc_name is not None: name = hgnc_name elif text_id is not None: name = text_id # Handle case where the name is None else: return None return Agent(name, db_refs=db_refs)
[ "def", "_make_agent", "(", "self", ",", "entrez_id", ",", "text_id", ")", ":", "hgnc_name", ",", "db_refs", "=", "self", ".", "_make_db_refs", "(", "entrez_id", ",", "text_id", ")", "if", "hgnc_name", "is", "not", "None", ":", "name", "=", "hgnc_name", "elif", "text_id", "is", "not", "None", ":", "name", "=", "text_id", "# Handle case where the name is None", "else", ":", "return", "None", "return", "Agent", "(", "name", ",", "db_refs", "=", "db_refs", ")" ]
Make an Agent object, appropriately grounded. Parameters ---------- entrez_id : str Entrez id number text_id : str A plain text systematic name, or None if not listed. Returns ------- agent : indra.statements.Agent A grounded agent object.
[ "Make", "an", "Agent", "object", "appropriately", "grounded", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biogrid.py#L97-L121
train
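A usage sketch for the method above, assuming an already-constructed BiogridProcessor instance named bp; Entrez ID 673 (BRAF) is used purely for illustration.

# Sketch (bp is an assumed, already-constructed BiogridProcessor).
agent = bp._make_agent('673', 'BRAF')
if agent is not None:
    print(agent.name)     # HGNC symbol when grounding succeeds
    print(agent.db_refs)  # e.g. TEXT/HGNC/UP entries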
sorgerlab/indra
indra/sources/biogrid.py
BiogridProcessor._make_db_refs
def _make_db_refs(self, entrez_id, text_id): """Looks up the HGNC ID and name, as well as the Uniprot ID. Parameters ---------- entrez_id : str Entrez gene ID. text_id : str or None A plain text systematic name, or None if not listed in the Biogrid data. Returns ------- hgnc_name : str Official HGNC symbol for the gene. db_refs : dict db_refs grounding dictionary, used when constructing the Agent object. """ db_refs = {} if text_id != '-' and text_id is not None: db_refs['TEXT'] = text_id hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id) hgnc_name = hgnc_client.get_hgnc_name(hgnc_id) if hgnc_id is not None: db_refs['HGNC'] = hgnc_id up_id = hgnc_client.get_uniprot_id(hgnc_id) if up_id is not None: db_refs['UP'] = up_id return (hgnc_name, db_refs)
python
def _make_db_refs(self, entrez_id, text_id): """Looks up the HGNC ID and name, as well as the Uniprot ID. Parameters ---------- entrez_id : str Entrez gene ID. text_id : str or None A plain text systematic name, or None if not listed in the Biogrid data. Returns ------- hgnc_name : str Official HGNC symbol for the gene. db_refs : dict db_refs grounding dictionary, used when constructing the Agent object. """ db_refs = {} if text_id != '-' and text_id is not None: db_refs['TEXT'] = text_id hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id) hgnc_name = hgnc_client.get_hgnc_name(hgnc_id) if hgnc_id is not None: db_refs['HGNC'] = hgnc_id up_id = hgnc_client.get_uniprot_id(hgnc_id) if up_id is not None: db_refs['UP'] = up_id return (hgnc_name, db_refs)
[ "def", "_make_db_refs", "(", "self", ",", "entrez_id", ",", "text_id", ")", ":", "db_refs", "=", "{", "}", "if", "text_id", "!=", "'-'", "and", "text_id", "is", "not", "None", ":", "db_refs", "[", "'TEXT'", "]", "=", "text_id", "hgnc_id", "=", "hgnc_client", ".", "get_hgnc_from_entrez", "(", "entrez_id", ")", "hgnc_name", "=", "hgnc_client", ".", "get_hgnc_name", "(", "hgnc_id", ")", "if", "hgnc_id", "is", "not", "None", ":", "db_refs", "[", "'HGNC'", "]", "=", "hgnc_id", "up_id", "=", "hgnc_client", ".", "get_uniprot_id", "(", "hgnc_id", ")", "if", "up_id", "is", "not", "None", ":", "db_refs", "[", "'UP'", "]", "=", "up_id", "return", "(", "hgnc_name", ",", "db_refs", ")" ]
Looks up the HGNC ID and name, as well as the Uniprot ID. Parameters ---------- entrez_id : str Entrez gene ID. text_id : str or None A plain text systematic name, or None if not listed in the Biogrid data. Returns ------- hgnc_name : str Official HGNC symbol for the gene. db_refs : dict db_refs grounding dictionary, used when constructing the Agent object.
[ "Looks", "up", "the", "HGNC", "ID", "and", "name", "as", "well", "as", "the", "Uniprot", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biogrid.py#L123-L153
train
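A companion sketch for the grounding helper above, again assuming an existing BiogridProcessor named bp; note that a '-' text_id is treated as missing, so no TEXT entry is added.

# Sketch (bp is an assumed BiogridProcessor instance).
hgnc_name, db_refs = bp._make_db_refs('673', '-')
print(hgnc_name)          # official HGNC symbol
print('TEXT' in db_refs)  # False: a text_id of '-' is ignored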
sorgerlab/indra
indra/assemblers/kami/assembler.py
KamiAssembler.make_model
def make_model(self, policies=None, initial_conditions=True, reverse_effects=False): """Assemble the Kami model from the collected INDRA Statements. This method assembles a Kami model from the set of INDRA Statements. The assembled model is both returned and set as the assembler's model argument. Parameters ---------- policies : Optional[Union[str, dict]] A string or dictionary of policies, as defined in :py:class:`indra.assemblers.KamiAssembler`. This set of policies locally supersedes the default setting in the assembler. This is useful when this function is called multiple times with different policies. initial_conditions : Optional[bool] If True, default initial conditions are generated for the agents in the model. Returns ------- model : dict The assembled Kami model. """ self.processed_policies = self.process_policies(policies) ppa = PysbPreassembler(self.statements) ppa.replace_activities() if reverse_effects: ppa.add_reverse_effects() self.statements = ppa.statements # Set local policies for this make_model call that overwrite # the global policies of the Kami assembler if policies is not None: global_policies = self.policies if isinstance(policies, basestring): local_policies = {'other': policies} else: local_policies = {'other': 'default'} local_policies.update(policies) self.policies = local_policies self.model = {} graphs = [] self.model['graphs'] = graphs self.model['typing'] = [] # Action graph generated here action_graph = {'id': 'action_graph', 'attrs': {'name': 'action_graph'}} action_graph['graph'] = {'nodes': [], 'edges': []} graphs.append(action_graph) # Iterate over the statements to generate rules self._assemble() # Add initial conditions #if initial_conditions: # self.add_default_initial_conditions() # If local policies were applied, revert to the global one if policies is not None: self.policies = global_policies return self.model
python
def make_model(self, policies=None, initial_conditions=True, reverse_effects=False): """Assemble the Kami model from the collected INDRA Statements. This method assembles a Kami model from the set of INDRA Statements. The assembled model is both returned and set as the assembler's model argument. Parameters ---------- policies : Optional[Union[str, dict]] A string or dictionary of policies, as defined in :py:class:`indra.assemblers.KamiAssembler`. This set of policies locally supersedes the default setting in the assembler. This is useful when this function is called multiple times with different policies. initial_conditions : Optional[bool] If True, default initial conditions are generated for the agents in the model. Returns ------- model : dict The assembled Kami model. """ self.processed_policies = self.process_policies(policies) ppa = PysbPreassembler(self.statements) ppa.replace_activities() if reverse_effects: ppa.add_reverse_effects() self.statements = ppa.statements # Set local policies for this make_model call that overwrite # the global policies of the Kami assembler if policies is not None: global_policies = self.policies if isinstance(policies, basestring): local_policies = {'other': policies} else: local_policies = {'other': 'default'} local_policies.update(policies) self.policies = local_policies self.model = {} graphs = [] self.model['graphs'] = graphs self.model['typing'] = [] # Action graph generated here action_graph = {'id': 'action_graph', 'attrs': {'name': 'action_graph'}} action_graph['graph'] = {'nodes': [], 'edges': []} graphs.append(action_graph) # Iterate over the statements to generate rules self._assemble() # Add initial conditions #if initial_conditions: # self.add_default_initial_conditions() # If local policies were applied, revert to the global one if policies is not None: self.policies = global_policies return self.model
[ "def", "make_model", "(", "self", ",", "policies", "=", "None", ",", "initial_conditions", "=", "True", ",", "reverse_effects", "=", "False", ")", ":", "self", ".", "processed_policies", "=", "self", ".", "process_policies", "(", "policies", ")", "ppa", "=", "PysbPreassembler", "(", "self", ".", "statements", ")", "ppa", ".", "replace_activities", "(", ")", "if", "reverse_effects", ":", "ppa", ".", "add_reverse_effects", "(", ")", "self", ".", "statements", "=", "ppa", ".", "statements", "# Set local policies for this make_model call that overwrite", "# the global policies of the Kami assembler", "if", "policies", "is", "not", "None", ":", "global_policies", "=", "self", ".", "policies", "if", "isinstance", "(", "policies", ",", "basestring", ")", ":", "local_policies", "=", "{", "'other'", ":", "policies", "}", "else", ":", "local_policies", "=", "{", "'other'", ":", "'default'", "}", "local_policies", ".", "update", "(", "policies", ")", "self", ".", "policies", "=", "local_policies", "self", ".", "model", "=", "{", "}", "graphs", "=", "[", "]", "self", ".", "model", "[", "'graphs'", "]", "=", "graphs", "self", ".", "model", "[", "'typing'", "]", "=", "[", "]", "# Action graph generated here", "action_graph", "=", "{", "'id'", ":", "'action_graph'", ",", "'attrs'", ":", "{", "'name'", ":", "'action_graph'", "}", "}", "action_graph", "[", "'graph'", "]", "=", "{", "'nodes'", ":", "[", "]", ",", "'edges'", ":", "[", "]", "}", "graphs", ".", "append", "(", "action_graph", ")", "# Iterate over the statements to generate rules", "self", ".", "_assemble", "(", ")", "# Add initial conditions", "#if initial_conditions:", "# self.add_default_initial_conditions()", "# If local policies were applied, revert to the global one", "if", "policies", "is", "not", "None", ":", "self", ".", "policies", "=", "global_policies", "return", "self", ".", "model" ]
Assemble the Kami model from the collected INDRA Statements. This method assembles a Kami model from the set of INDRA Statements. The assembled model is both returned and set as the assembler's model argument. Parameters ---------- policies : Optional[Union[str, dict]] A string or dictionary of policies, as defined in :py:class:`indra.assemblers.KamiAssembler`. This set of policies locally supersedes the default setting in the assembler. This is useful when this function is called multiple times with different policies. initial_conditions : Optional[bool] If True, default initial conditions are generated for the agents in the model. Returns ------- model : dict The assembled Kami model.
[ "Assemble", "the", "Kami", "model", "from", "the", "collected", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/kami/assembler.py#L24-L87
train
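A minimal assembly sketch for the method above; that KamiAssembler accepts a list of INDRA Statements at construction is an assumption here, and stmts stands for any such list.

# Sketch (stmts is a list of INDRA Statements; passing them to the
# constructor is an assumption).
from indra.assemblers.kami.assembler import KamiAssembler

ka = KamiAssembler(stmts)
model = ka.make_model()
print([g['id'] for g in model['graphs']])  # 'action_graph' plus nuggets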
sorgerlab/indra
indra/assemblers/kami/assembler.py
Nugget.add_agent
def add_agent(self, agent): """Add an INDRA Agent and its conditions to the Nugget.""" agent_id = self.add_node(agent.name) self.add_typing(agent_id, 'agent') # Handle bound conditions for bc in agent.bound_conditions: # Here we make the assumption that the binding site # is simply named after the binding partner if bc.is_bound: test_type = 'is_bnd' else: test_type = 'is_free' bound_name = bc.agent.name agent_bs = get_binding_site_name(bc.agent) test_name = '%s_bound_to_%s_test' % (agent_id, bound_name) agent_bs_id = self.add_node(agent_bs) test_id = self.add_node(test_name) self.add_edge(agent_bs_id, agent_id) self.add_edge(agent_bs_id, test_id) self.add_typing(agent_bs_id, 'locus') self.add_typing(test_id, test_type) for mod in agent.mods: mod_site_str = abbrevs[mod.mod_type] if mod.residue is not None: mod_site_str = mod.residue mod_pos_str = mod.position if mod.position is not None else '' mod_site = ('%s%s' % (mod_site_str, mod_pos_str)) site_states = states[mod.mod_type] if mod.is_modified: val = site_states[1] else: val = site_states[0] mod_site_id = self.add_node(mod_site, {'val': val}) self.add_edge(mod_site_id, agent_id) self.add_typing(mod_site_id, 'state') return agent_id
python
def add_agent(self, agent): """Add an INDRA Agent and its conditions to the Nugget.""" agent_id = self.add_node(agent.name) self.add_typing(agent_id, 'agent') # Handle bound conditions for bc in agent.bound_conditions: # Here we make the assumption that the binding site # is simply named after the binding partner if bc.is_bound: test_type = 'is_bnd' else: test_type = 'is_free' bound_name = bc.agent.name agent_bs = get_binding_site_name(bc.agent) test_name = '%s_bound_to_%s_test' % (agent_id, bound_name) agent_bs_id = self.add_node(agent_bs) test_id = self.add_node(test_name) self.add_edge(agent_bs_id, agent_id) self.add_edge(agent_bs_id, test_id) self.add_typing(agent_bs_id, 'locus') self.add_typing(test_id, test_type) for mod in agent.mods: mod_site_str = abbrevs[mod.mod_type] if mod.residue is not None: mod_site_str = mod.residue mod_pos_str = mod.position if mod.position is not None else '' mod_site = ('%s%s' % (mod_site_str, mod_pos_str)) site_states = states[mod.mod_type] if mod.is_modified: val = site_states[1] else: val = site_states[0] mod_site_id = self.add_node(mod_site, {'val': val}) self.add_edge(mod_site_id, agent_id) self.add_typing(mod_site_id, 'state') return agent_id
[ "def", "add_agent", "(", "self", ",", "agent", ")", ":", "agent_id", "=", "self", ".", "add_node", "(", "agent", ".", "name", ")", "self", ".", "add_typing", "(", "agent_id", ",", "'agent'", ")", "# Handle bound conditions", "for", "bc", "in", "agent", ".", "bound_conditions", ":", "# Here we make the assumption that the binding site", "# is simply named after the binding partner", "if", "bc", ".", "is_bound", ":", "test_type", "=", "'is_bnd'", "else", ":", "test_type", "=", "'is_free'", "bound_name", "=", "bc", ".", "agent", ".", "name", "agent_bs", "=", "get_binding_site_name", "(", "bc", ".", "agent", ")", "test_name", "=", "'%s_bound_to_%s_test'", "%", "(", "agent_id", ",", "bound_name", ")", "agent_bs_id", "=", "self", ".", "add_node", "(", "agent_bs", ")", "test_id", "=", "self", ".", "add_node", "(", "test_name", ")", "self", ".", "add_edge", "(", "agent_bs_id", ",", "agent_id", ")", "self", ".", "add_edge", "(", "agent_bs_id", ",", "test_id", ")", "self", ".", "add_typing", "(", "agent_bs_id", ",", "'locus'", ")", "self", ".", "add_typing", "(", "test_id", ",", "test_type", ")", "for", "mod", "in", "agent", ".", "mods", ":", "mod_site_str", "=", "abbrevs", "[", "mod", ".", "mod_type", "]", "if", "mod", ".", "residue", "is", "not", "None", ":", "mod_site_str", "=", "mod", ".", "residue", "mod_pos_str", "=", "mod", ".", "position", "if", "mod", ".", "position", "is", "not", "None", "else", "''", "mod_site", "=", "(", "'%s%s'", "%", "(", "mod_site_str", ",", "mod_pos_str", ")", ")", "site_states", "=", "states", "[", "mod", ".", "mod_type", "]", "if", "mod", ".", "is_modified", ":", "val", "=", "site_states", "[", "1", "]", "else", ":", "val", "=", "site_states", "[", "0", "]", "mod_site_id", "=", "self", ".", "add_node", "(", "mod_site", ",", "{", "'val'", ":", "val", "}", ")", "self", ".", "add_edge", "(", "mod_site_id", ",", "agent_id", ")", "self", ".", "add_typing", "(", "mod_site_id", ",", "'state'", ")", "return", "agent_id" ]
Add an INDRA Agent and its conditions to the Nugget.
[ "Add", "an", "INDRA", "Agent", "and", "its", "conditions", "to", "the", "Nugget", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/kami/assembler.py#L134-L170
train
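A sketch of adding a modified, bound Agent to a Nugget, assuming a Nugget instance named nugget whose construction is elided here.

# Sketch (nugget is an assumed, already-constructed Nugget).
from indra.statements import Agent, BoundCondition, ModCondition

braf = Agent('BRAF',
             mods=[ModCondition('phosphorylation', 'S', '602')],
             bound_conditions=[BoundCondition(Agent('RAF1'), True)])
agent_id = nugget.add_agent(braf)
print(agent_id)  # node ID for the agent, e.g. 'BRAF'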
sorgerlab/indra
indra/assemblers/kami/assembler.py
Nugget.add_node
def add_node(self, name_base, attrs=None): """Add a node with a given base name to the Nugget and return ID.""" if name_base not in self.counters: node_id = name_base else: node_id = '%s_%d' % (name_base, self.counters[name_base]) node = {'id': node_id} if attrs: node['attrs'] = attrs self.nodes.append(node) self.counters[name_base] += 1 return node_id
python
def add_node(self, name_base, attrs=None): """Add a node with a given base name to the Nugget and return ID.""" if name_base not in self.counters: node_id = name_base else: node_id = '%s_%d' % (name_base, self.counters[name_base]) node = {'id': node_id} if attrs: node['attrs'] = attrs self.nodes.append(node) self.counters[name_base] += 1 return node_id
[ "def", "add_node", "(", "self", ",", "name_base", ",", "attrs", "=", "None", ")", ":", "if", "name_base", "not", "in", "self", ".", "counters", ":", "node_id", "=", "name_base", "else", ":", "node_id", "=", "'%s_%d'", "%", "(", "name_base", ",", "self", ".", "counters", "[", "name_base", "]", ")", "node", "=", "{", "'id'", ":", "node_id", "}", "if", "attrs", ":", "node", "[", "'attrs'", "]", "=", "attrs", "self", ".", "nodes", ".", "append", "(", "node", ")", "self", ".", "counters", "[", "node_id", "]", "+=", "1", "return", "node_id" ]
Add a node with a given base name to the Nugget and return ID.
[ "Add", "a", "node", "with", "a", "given", "base", "name", "to", "the", "Nugget", "and", "return", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/kami/assembler.py#L172-L183
train
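A sketch of the ID-disambiguation behavior, which (with the counter fix above) suffixes repeated base names; that self.counters is a defaultdict(int) is assumed from the unconditional increment.

# Sketch (nugget is an assumed Nugget; counters assumed defaultdict(int)).
first = nugget.add_node('BRAF')               # -> 'BRAF'
second = nugget.add_node('BRAF')              # -> 'BRAF_1'
site = nugget.add_node('S602', {'val': 'p'})  # node carrying attrs
print(first, second, site)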
sorgerlab/indra
indra/assemblers/kami/assembler.py
Nugget.get_nugget_dict
def get_nugget_dict(self): """Return the Nugget as a dictionary.""" nugget_dict = \ {'id': self.id, 'graph': { 'nodes': self.nodes, 'edges': self.edges }, 'attrs': { 'name': self.name, 'rate': self.rate } } return nugget_dict
python
def get_nugget_dict(self): """Return the Nugget as a dictionary.""" nugget_dict = \ {'id': self.id, 'graph': { 'nodes': self.nodes, 'edges': self.edges }, 'attrs': { 'name': self.name, 'rate': self.rate } } return nugget_dict
[ "def", "get_nugget_dict", "(", "self", ")", ":", "nugget_dict", "=", "{", "'id'", ":", "self", ".", "id", ",", "'graph'", ":", "{", "'nodes'", ":", "self", ".", "nodes", ",", "'edges'", ":", "self", ".", "edges", "}", ",", "'attrs'", ":", "{", "'name'", ":", "self", ".", "name", ",", "'rate'", ":", "self", ".", "rate", "}", "}", "return", "nugget_dict" ]
Return the Nugget as a dictionary.
[ "Return", "the", "Nugget", "as", "a", "dictionary", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/kami/assembler.py#L193-L206
train
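A sketch reading back the assembled structure; the dictionary layout follows directly from the method above.

# Sketch (nugget is an assumed, populated Nugget).
nd = nugget.get_nugget_dict()
print(nd['id'], nd['attrs']['name'], nd['attrs']['rate'])
print(len(nd['graph']['nodes']), 'nodes,', len(nd['graph']['edges']), 'edges')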
sorgerlab/indra
indra/sources/tees/api.py
process_text
def process_text(text, pmid=None, python2_path=None): """Processes the specified plain text with TEES and converts output to supported INDRA statements. Checks for the TEES installation in the TEES_PATH environment variable and configuration file; if not found, checks candidate paths in tees_candidate_paths. Raises an exception if TEES cannot be found in any of these places. Parameters ---------- text : str Plain text to process with TEES pmid : str The PMID of the paper the text comes from, to be stored in the Evidence object of statements. Set to None if this is unspecified. python2_path : str TEES is only compatible with python 2. This processor invokes this external python 2 interpreter so that the processor can be run in either python 2 or python 3. If None, searches for an executable named python2.7 in the PATH environment variable. Returns ------- tp : TEESProcessor A TEESProcessor object which contains a list of INDRA statements extracted from TEES extractions """ # Try to locate python2 in one of the directories of the PATH environment # variable if it is not provided if python2_path is None: for path in os.environ["PATH"].split(os.pathsep): proposed_python2_path = os.path.join(path, 'python2.7') if os.path.isfile(proposed_python2_path): python2_path = proposed_python2_path print('Found python 2 interpreter at', python2_path) break if python2_path is None: raise Exception('Could not find python2 in the directories ' + 'listed in the PATH environment variable. ' + 'Need python2 to run TEES.') # Run TEES a1_text, a2_text, sentence_segmentations = run_on_text(text, python2_path) # Run the TEES processor tp = TEESProcessor(a1_text, a2_text, sentence_segmentations, pmid) return tp
python
def process_text(text, pmid=None, python2_path=None): """Processes the specified plain text with TEES and converts output to supported INDRA statements. Checks for the TEES installation in the TEES_PATH environment variable and configuration file; if not found, checks candidate paths in tees_candidate_paths. Raises an exception if TEES cannot be found in any of these places. Parameters ---------- text : str Plain text to process with TEES pmid : str The PMID of the paper the text comes from, to be stored in the Evidence object of statements. Set to None if this is unspecified. python2_path : str TEES is only compatible with python 2. This processor invokes this external python 2 interpreter so that the processor can be run in either python 2 or python 3. If None, searches for an executable named python2.7 in the PATH environment variable. Returns ------- tp : TEESProcessor A TEESProcessor object which contains a list of INDRA statements extracted from TEES extractions """ # Try to locate python2 in one of the directories of the PATH environment # variable if it is not provided if python2_path is None: for path in os.environ["PATH"].split(os.pathsep): proposed_python2_path = os.path.join(path, 'python2.7') if os.path.isfile(proposed_python2_path): python2_path = proposed_python2_path print('Found python 2 interpreter at', python2_path) break if python2_path is None: raise Exception('Could not find python2 in the directories ' + 'listed in the PATH environment variable. ' + 'Need python2 to run TEES.') # Run TEES a1_text, a2_text, sentence_segmentations = run_on_text(text, python2_path) # Run the TEES processor tp = TEESProcessor(a1_text, a2_text, sentence_segmentations, pmid) return tp
[ "def", "process_text", "(", "text", ",", "pmid", "=", "None", ",", "python2_path", "=", "None", ")", ":", "# Try to locate python2 in one of the directories of the PATH environment", "# variable if it is not provided", "if", "python2_path", "is", "None", ":", "for", "path", "in", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "proposed_python2_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'python2.7'", ")", "if", "os", ".", "path", ".", "isfile", "(", "proposed_python2_path", ")", ":", "python2_path", "=", "proposed_python2_path", "print", "(", "'Found python 2 interpreter at'", ",", "python2_path", ")", "break", "if", "python2_path", "is", "None", ":", "raise", "Exception", "(", "'Could not find python2 in the directories '", "+", "'listed in the PATH environment variable. '", "+", "'Need python2 to run TEES.'", ")", "# Run TEES", "a1_text", ",", "a2_text", ",", "sentence_segmentations", "=", "run_on_text", "(", "text", ",", "python2_path", ")", "# Run the TEES processor", "tp", "=", "TEESProcessor", "(", "a1_text", ",", "a2_text", ",", "sentence_segmentations", ",", "pmid", ")", "return", "tp" ]
Processes the specified plain text with TEES and converts output to supported INDRA statements. Checks for the TEES installation in the TEES_PATH environment variable and configuration file; if not found, checks candidate paths in tees_candidate_paths. Raises an exception if TEES cannot be found in any of these places. Parameters ---------- text : str Plain text to process with TEES pmid : str The PMID of the paper the text comes from, to be stored in the Evidence object of statements. Set to None if this is unspecified. python2_path : str TEES is only compatible with python 2. This processor invokes this external python 2 interpreter so that the processor can be run in either python 2 or python 3. If None, searches for an executable named python2.7 in the PATH environment variable. Returns ------- tp : TEESProcessor A TEESProcessor object which contains a list of INDRA statements extracted from TEES extractions
[ "Processes", "the", "specified", "plain", "text", "with", "TEES", "and", "converts", "output", "to", "supported", "INDRA", "statements", ".", "Check", "for", "the", "TEES", "installation", "is", "the", "TEES_PATH", "environment", "variable", "and", "configuration", "file", ";", "if", "not", "found", "checks", "candidate", "paths", "in", "tees_candidate_paths", ".", "Raises", "an", "exception", "if", "TEES", "cannot", "be", "found", "in", "any", "of", "these", "places", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/api.py#L46-L93
train
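A usage sketch for the TEES entry point above; it requires a local TEES installation and a python2.7 interpreter, and the input text and PMID are illustrative. That the processor exposes its results as tp.statements is inferred from the docstring rather than shown in this record.

# Sketch (requires a local TEES installation and python2.7 on PATH;
# the text and PMID are illustrative).
from indra.sources.tees.api import process_text

tp = process_text('BRAF phosphorylates MEK1.', pmid='12345678')
if tp is not None:
    for stmt in tp.statements:  # attribute name inferred from the docstring
        print(stmt)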
sorgerlab/indra
indra/sources/tees/api.py
run_on_text
def run_on_text(text, python2_path): """Runs TEES on the given text in a temporary directory and returns the extracted TEES output. The temporary directory is deleted before this function returns. This function runs TEES and produces TEES output files but does not process TEES output into INDRA statements. Parameters ---------- text : str Text from which to extract relationships python2_path : str The path to the python 2 interpreter Returns ------- a1_text : str Contents of the TEES a1 file (specifying the entities) a2_text : str Contents of the TEES a2 file (specifying the event graph) sentence_segmentations : str Contents of the XML file specifying the sentence segmentation """ tees_path = get_config('TEES_PATH') if tees_path is None: # If TEES directory is not specifies, see if any of the candidate paths # exist and contain all of the files expected for a TEES installation. for cpath in tees_candidate_paths: cpath = os.path.expanduser(cpath) if os.path.isdir(cpath): # Check to see if it has all of the expected files and # directories has_expected_files = True for f in tees_installation_files: fpath = os.path.join(cpath, f) present = os.path.isfile(fpath) has_expected_files = has_expected_files and present has_expected_dirs = True for d in tees_installation_dirs: dpath = os.path.join(cpath, d) present = os.path.isdir(dpath) has_expected_dirs = has_expected_dirs and present if has_expected_files and has_expected_dirs: # We found a directory with all of the files and # directories we expected in a TEES installation - let's # assume it's a TEES installation tees_path = cpath print('Found TEES installation at ' + cpath) break # Make sure the provided TEES directory exists if not os.path.isdir(tees_path): raise Exception('Provided TEES directory does not exist.') # Make sure the classify.py script exists within this directory classify_path = 'classify.py' # if not os.path.isfile(classify_path): # raise Exception('classify.py does not exist in provided TEES path.') # Create a temporary directory to tag the shared-task files tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor') pwd = os.path.abspath(os.getcwd()) try: # Write text to a file in the temporary directory text_path = os.path.join(tmp_dir, 'text.txt') # Had some trouble with non-ascii characters. A possible TODO item in # the future is to look into resolving this, for now just ignoring # non-latin-1 characters with codecs.open(text_path, 'w', encoding='latin-1', errors='ignore') \ as f: f.write(text) # Run TEES output_path = os.path.join(tmp_dir, 'output') model_path = os.path.join(tees_path, 'tees_data/models/GE11-test/') command = [python2_path, classify_path, '-m', model_path, '-i', text_path, '-o', output_path] try: pwd = os.path.abspath(os.getcwd()) os.chdir(tees_path) # Change to TEES directory # print('cwd is:', os.getcwd()) # out = subprocess.check_output(command, stderr=subprocess.STDOUT) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tees_path) p.wait() (so, se) = p.communicate() print(so) print(se) os.chdir(pwd) # Change back to previous directory # print('cwd is:', os.getcwd()) # print(out.decode('utf-8')) except BaseException as e: # If there's an error, print it out and then propagate the # exception os.chdir(pwd) # Change back to previous directory # print (e.output.decode('utf-8')) raise e except BaseException as e: # If there was an exception, delete the temporary directory and # pass on the exception shutil.rmtree(tmp_dir) raise e # Return the temporary directory with the TEES output output_tuple = extract_output(tmp_dir) shutil.rmtree(tmp_dir) return output_tuple
python
def run_on_text(text, python2_path): """Runs TEES on the given text in a temporary directory and returns the extracted TEES output. The temporary directory is deleted before this function returns. This function runs TEES and produces TEES output files but does not process TEES output into INDRA statements. Parameters ---------- text : str Text from which to extract relationships python2_path : str The path to the python 2 interpreter Returns ------- a1_text : str Contents of the TEES a1 file (specifying the entities) a2_text : str Contents of the TEES a2 file (specifying the event graph) sentence_segmentations : str Contents of the XML file specifying the sentence segmentation """ tees_path = get_config('TEES_PATH') if tees_path is None: # If TEES directory is not specifies, see if any of the candidate paths # exist and contain all of the files expected for a TEES installation. for cpath in tees_candidate_paths: cpath = os.path.expanduser(cpath) if os.path.isdir(cpath): # Check to see if it has all of the expected files and # directories has_expected_files = True for f in tees_installation_files: fpath = os.path.join(cpath, f) present = os.path.isfile(fpath) has_expected_files = has_expected_files and present has_expected_dirs = True for d in tees_installation_dirs: dpath = os.path.join(cpath, d) present = os.path.isdir(dpath) has_expected_dirs = has_expected_dirs and present if has_expected_files and has_expected_dirs: # We found a directory with all of the files and # directories we expected in a TEES installation - let's # assume it's a TEES installation tees_path = cpath print('Found TEES installation at ' + cpath) break # Make sure the provided TEES directory exists if not os.path.isdir(tees_path): raise Exception('Provided TEES directory does not exist.') # Make sure the classify.py script exists within this directory classify_path = 'classify.py' # if not os.path.isfile(classify_path): # raise Exception('classify.py does not exist in provided TEES path.') # Create a temporary directory to tag the shared-task files tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor') pwd = os.path.abspath(os.getcwd()) try: # Write text to a file in the temporary directory text_path = os.path.join(tmp_dir, 'text.txt') # Had some trouble with non-ascii characters. A possible TODO item in # the future is to look into resolving this, for now just ignoring # non-latin-1 characters with codecs.open(text_path, 'w', encoding='latin-1', errors='ignore') \ as f: f.write(text) # Run TEES output_path = os.path.join(tmp_dir, 'output') model_path = os.path.join(tees_path, 'tees_data/models/GE11-test/') command = [python2_path, classify_path, '-m', model_path, '-i', text_path, '-o', output_path] try: pwd = os.path.abspath(os.getcwd()) os.chdir(tees_path) # Change to TEES directory # print('cwd is:', os.getcwd()) # out = subprocess.check_output(command, stderr=subprocess.STDOUT) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tees_path) p.wait() (so, se) = p.communicate() print(so) print(se) os.chdir(pwd) # Change back to previous directory # print('cwd is:', os.getcwd()) # print(out.decode('utf-8')) except BaseException as e: # If there's an error, print it out and then propagate the # exception os.chdir(pwd) # Change back to previous directory # print (e.output.decode('utf-8')) raise e except BaseException as e: # If there was an exception, delete the temporary directory and # pass on the exception shutil.rmtree(tmp_dir) raise e # Return the temporary directory with the TEES output output_tuple = extract_output(tmp_dir) shutil.rmtree(tmp_dir) return output_tuple
[ "def", "run_on_text", "(", "text", ",", "python2_path", ")", ":", "tees_path", "=", "get_config", "(", "'TEES_PATH'", ")", "if", "tees_path", "is", "None", ":", "# If TEES directory is not specifies, see if any of the candidate paths", "# exist and contain all of the files expected for a TEES installation.", "for", "cpath", "in", "tees_candidate_paths", ":", "cpath", "=", "os", ".", "path", ".", "expanduser", "(", "cpath", ")", "if", "os", ".", "path", ".", "isdir", "(", "cpath", ")", ":", "# Check to see if it has all of the expected files and", "# directories", "has_expected_files", "=", "True", "for", "f", "in", "tees_installation_files", ":", "fpath", "=", "os", ".", "path", ".", "join", "(", "cpath", ",", "f", ")", "present", "=", "os", ".", "path", ".", "isfile", "(", "fpath", ")", "has_expected_files", "=", "has_expected_files", "and", "present", "has_expected_dirs", "=", "True", "for", "d", "in", "tees_installation_dirs", ":", "dpath", "=", "os", ".", "path", ".", "join", "(", "cpath", ",", "d", ")", "present", "=", "os", ".", "path", ".", "isdir", "(", "dpath", ")", "has_expected_dirs", "=", "has_expected_dirs", "and", "present", "if", "has_expected_files", "and", "has_expected_dirs", ":", "# We found a directory with all of the files and", "# directories we expected in a TEES installation - let's", "# assume it's a TEES installation", "tees_path", "=", "cpath", "print", "(", "'Found TEES installation at '", "+", "cpath", ")", "break", "# Make sure the provided TEES directory exists", "if", "not", "os", ".", "path", ".", "isdir", "(", "tees_path", ")", ":", "raise", "Exception", "(", "'Provided TEES directory does not exist.'", ")", "# Make sure the classify.py script exists within this directory", "classify_path", "=", "'classify.py'", "# if not os.path.isfile(classify_path):", "# raise Exception('classify.py does not exist in provided TEES path.')", "# Create a temporary directory to tag the shared-task files", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", "suffix", "=", "'indra_tees_processor'", ")", "pwd", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "getcwd", "(", ")", ")", "try", ":", "# Write text to a file in the temporary directory", "text_path", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "'text.txt'", ")", "# Had some trouble with non-ascii characters. 
A possible TODO item in", "# the future is to look into resolving this, for now just ignoring", "# non-latin-1 characters", "with", "codecs", ".", "open", "(", "text_path", ",", "'w'", ",", "encoding", "=", "'latin-1'", ",", "errors", "=", "'ignore'", ")", "as", "f", ":", "f", ".", "write", "(", "text", ")", "# Run TEES", "output_path", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "'output'", ")", "model_path", "=", "os", ".", "path", ".", "join", "(", "tees_path", ",", "'tees_data/models/GE11-test/'", ")", "command", "=", "[", "python2_path", ",", "classify_path", ",", "'-m'", ",", "model_path", ",", "'-i'", ",", "text_path", ",", "'-o'", ",", "output_path", "]", "try", ":", "pwd", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "getcwd", "(", ")", ")", "os", ".", "chdir", "(", "tees_path", ")", "# Change to TEES directory", "# print('cwd is:', os.getcwd())", "# out = subprocess.check_output(command, stderr=subprocess.STDOUT)", "p", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "cwd", "=", "tees_path", ")", "p", ".", "wait", "(", ")", "(", "so", ",", "se", ")", "=", "p", ".", "communicate", "(", ")", "print", "(", "so", ")", "print", "(", "se", ")", "os", ".", "chdir", "(", "pwd", ")", "# Change back to previous directory", "# print('cwd is:', os.getcwd())", "# print(out.decode('utf-8'))", "except", "BaseException", "as", "e", ":", "# If there's an error, print it out and then propagate the", "# exception", "os", ".", "chdir", "(", "pwd", ")", "# Change back to previous directory", "# print (e.output.decode('utf-8'))", "raise", "e", "except", "BaseException", "as", "e", ":", "# If there was an exception, delete the temporary directory and", "# pass on the exception", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "raise", "e", "# Return the temporary directory with the TEES output", "output_tuple", "=", "extract_output", "(", "tmp_dir", ")", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "return", "output_tuple" ]
Runs TEES on the given text in a temporary directory and returns the extracted TEES output. The temporary directory is deleted before this function returns. This function runs TEES and produces TEES output files but does not process TEES output into INDRA statements. Parameters ---------- text : str Text from which to extract relationships python2_path : str The path to the python 2 interpreter Returns ------- a1_text : str Contents of the TEES a1 file (specifying the entities) a2_text : str Contents of the TEES a2 file (specifying the event graph) sentence_segmentations : str Contents of the XML file specifying the sentence segmentation
[ "Runs", "TEES", "on", "the", "given", "text", "in", "a", "temporary", "directory", "and", "returns", "a", "temporary", "directory", "with", "TEES", "output", ".", "The", "caller", "should", "delete", "this", "directory", "when", "done", "with", "it", ".", "This", "function", "runs", "TEES", "and", "produces", "TEES", "output", "files", "but", "does", "not", "process", "TEES", "output", "into", "INDRA", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/api.py#L95-L206
train
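A sketch of the lower-level call; per the corrected docstring above, it returns the extracted a1/a2 texts and sentence segmentation rather than a directory. The interpreter path below is an assumption, and a local TEES installation is required.

# Sketch (the python2.7 path is an assumption; requires TEES locally).
from indra.sources.tees.api import run_on_text

a1_text, a2_text, sentences = run_on_text('BRAF phosphorylates MEK1.',
                                          '/usr/bin/python2.7')
print(a1_text.splitlines()[:3])  # first entity lines in BioNLP a1 format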
sorgerlab/indra
indra/sources/tees/api.py
extract_output
def extract_output(output_dir): """Extract the text of the a1, a2, and sentence segmentation files from the TEES output directory. These files are located within a compressed archive. Parameters ---------- output_dir : str Directory containing the output of the TEES system Returns ------- a1_text : str The text of the TEES a1 file (specifying the entities) a2_text : str The text of the TEES a2 file (specifying the event graph) sentence_segmentations : str The text of the XML file specifying the sentence segmentation """ # Locate the file of sentences segmented by the TEES system, described # in a compressed xml document sentences_glob = os.path.join(output_dir, '*-preprocessed.xml.gz') sentences_filename_candidates = glob.glob(sentences_glob) # Make sure there is exactly one such file if len(sentences_filename_candidates) != 1: m = 'Looking for exactly one file matching %s but found %d matches' raise Exception(m % ( sentences_glob, len(sentences_filename_candidates))) return None, None, None # Read in the sentence segmentation XML sentence_segmentation_filename = sentences_filename_candidates[0] with gzip.GzipFile(sentences_filename_candidates[0], 'r') as f: sentence_segmentations = f.read().decode('utf-8') # Create a temporary directory to which to extract the a1 and a2 files from # the tarball tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor') try: # Make sure the tarfile with the extracted events is in shared task # format is in the output directory tarfile_glob = os.path.join(output_dir, '*-events.tar.gz') candidate_tarfiles = glob.glob(tarfile_glob) if len(candidate_tarfiles) != 1: raise Exception('Expected exactly one match for glob %s' % tarfile_glob) return None, None, None # Decide what tar files to extract # (We're not blindly extracting all files because of the security # warning in the documentation for TarFile.extractall # In particular, we want to make sure that the filename doesn't # try to specify a relative or absolute path other than the current # directory by making sure the filename starts with an alphanumeric # character. # We're also only interested in files with the .a1 or .a2 extension tar_file = tarfile.open(candidate_tarfiles[0]) a1_file = None a2_file = None extract_these = [] for m in tar_file.getmembers(): if re.match('[a-zA-Z0-9].*.a[12]', m.name): extract_these.append(m) if m.name.endswith('.a1'): a1_file = m.name elif m.name.endswith('.a2'): a2_file = m.name else: assert(False) # There should be exactly two files that match these criteria if len(extract_these) != 2 or a1_file is None or a2_file is None: raise Exception('We thought there would be one .a1 and one .a2' + ' file in the tarball, but we got %d files total' % len(extract_these)) return None, None, None # Extract the files that we decided to extract tar_file.extractall(path=tmp_dir, members=extract_these) # Read the text of the a1 (entities) file with codecs.open(os.path.join(tmp_dir, a1_file), 'r', encoding='utf-8') as f: a1_text = f.read() # Read the text of the a2 (events) file with codecs.open(os.path.join(tmp_dir, a2_file), 'r', encoding='utf-8') as f: a2_text = f.read() # Now that we're done, remove the temporary directory shutil.rmtree(tmp_dir) # Return the extracted text return a1_text, a2_text, sentence_segmentations except BaseException as e: # If there was an exception, delete the temporary directory and # pass on the exception print('Not removing temporary directory: ' + tmp_dir) shutil.rmtree(tmp_dir) raise e return None, None, None
python
def extract_output(output_dir): """Extract the text of the a1, a2, and sentence segmentation files from the TEES output directory. These files are located within a compressed archive. Parameters ---------- output_dir : str Directory containing the output of the TEES system Returns ------- a1_text : str The text of the TEES a1 file (specifying the entities) a2_text : str The text of the TEES a2 file (specifying the event graph) sentence_segmentations : str The text of the XML file specifying the sentence segmentation """ # Locate the file of sentences segmented by the TEES system, described # in a compressed xml document sentences_glob = os.path.join(output_dir, '*-preprocessed.xml.gz') sentences_filename_candidates = glob.glob(sentences_glob) # Make sure there is exactly one such file if len(sentences_filename_candidates) != 1: m = 'Looking for exactly one file matching %s but found %d matches' raise Exception(m % ( sentences_glob, len(sentences_filename_candidates))) return None, None, None # Read in the sentence segmentation XML sentence_segmentation_filename = sentences_filename_candidates[0] with gzip.GzipFile(sentences_filename_candidates[0], 'r') as f: sentence_segmentations = f.read().decode('utf-8') # Create a temporary directory to which to extract the a1 and a2 files from # the tarball tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor') try: # Make sure the tarfile with the extracted events is in shared task # format is in the output directory tarfile_glob = os.path.join(output_dir, '*-events.tar.gz') candidate_tarfiles = glob.glob(tarfile_glob) if len(candidate_tarfiles) != 1: raise Exception('Expected exactly one match for glob %s' % tarfile_glob) return None, None, None # Decide what tar files to extract # (We're not blindly extracting all files because of the security # warning in the documentation for TarFile.extractall # In particular, we want to make sure that the filename doesn't # try to specify a relative or absolute path other than the current # directory by making sure the filename starts with an alphanumeric # character. # We're also only interested in files with the .a1 or .a2 extension tar_file = tarfile.open(candidate_tarfiles[0]) a1_file = None a2_file = None extract_these = [] for m in tar_file.getmembers(): if re.match('[a-zA-Z0-9].*.a[12]', m.name): extract_these.append(m) if m.name.endswith('.a1'): a1_file = m.name elif m.name.endswith('.a2'): a2_file = m.name else: assert(False) # There should be exactly two files that match these criteria if len(extract_these) != 2 or a1_file is None or a2_file is None: raise Exception('We thought there would be one .a1 and one .a2' + ' file in the tarball, but we got %d files total' % len(extract_these)) return None, None, None # Extract the files that we decided to extract tar_file.extractall(path=tmp_dir, members=extract_these) # Read the text of the a1 (entities) file with codecs.open(os.path.join(tmp_dir, a1_file), 'r', encoding='utf-8') as f: a1_text = f.read() # Read the text of the a2 (events) file with codecs.open(os.path.join(tmp_dir, a2_file), 'r', encoding='utf-8') as f: a2_text = f.read() # Now that we're done, remove the temporary directory shutil.rmtree(tmp_dir) # Return the extracted text return a1_text, a2_text, sentence_segmentations except BaseException as e: # If there was an exception, delete the temporary directory and # pass on the exception print('Not removing temporary directory: ' + tmp_dir) shutil.rmtree(tmp_dir) raise e return None, None, None
[ "def", "extract_output", "(", "output_dir", ")", ":", "# Locate the file of sentences segmented by the TEES system, described", "# in a compressed xml document", "sentences_glob", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'*-preprocessed.xml.gz'", ")", "sentences_filename_candidates", "=", "glob", ".", "glob", "(", "sentences_glob", ")", "# Make sure there is exactly one such file", "if", "len", "(", "sentences_filename_candidates", ")", "!=", "1", ":", "m", "=", "'Looking for exactly one file matching %s but found %d matches'", "raise", "Exception", "(", "m", "%", "(", "sentences_glob", ",", "len", "(", "sentences_filename_candidates", ")", ")", ")", "return", "None", ",", "None", ",", "None", "# Read in the sentence segmentation XML", "sentence_segmentation_filename", "=", "sentences_filename_candidates", "[", "0", "]", "with", "gzip", ".", "GzipFile", "(", "sentences_filename_candidates", "[", "0", "]", ",", "'r'", ")", "as", "f", ":", "sentence_segmentations", "=", "f", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "# Create a temporary directory to which to extract the a1 and a2 files from", "# the tarball", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", "suffix", "=", "'indra_tees_processor'", ")", "try", ":", "# Make sure the tarfile with the extracted events is in shared task", "# format is in the output directory", "tarfile_glob", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'*-events.tar.gz'", ")", "candidate_tarfiles", "=", "glob", ".", "glob", "(", "tarfile_glob", ")", "if", "len", "(", "candidate_tarfiles", ")", "!=", "1", ":", "raise", "Exception", "(", "'Expected exactly one match for glob %s'", "%", "tarfile_glob", ")", "return", "None", ",", "None", ",", "None", "# Decide what tar files to extract", "# (We're not blindly extracting all files because of the security", "# warning in the documentation for TarFile.extractall", "# In particular, we want to make sure that the filename doesn't", "# try to specify a relative or absolute path other than the current", "# directory by making sure the filename starts with an alphanumeric", "# character.", "# We're also only interested in files with the .a1 or .a2 extension", "tar_file", "=", "tarfile", ".", "open", "(", "candidate_tarfiles", "[", "0", "]", ")", "a1_file", "=", "None", "a2_file", "=", "None", "extract_these", "=", "[", "]", "for", "m", "in", "tar_file", ".", "getmembers", "(", ")", ":", "if", "re", ".", "match", "(", "'[a-zA-Z0-9].*.a[12]'", ",", "m", ".", "name", ")", ":", "extract_these", ".", "append", "(", "m", ")", "if", "m", ".", "name", ".", "endswith", "(", "'.a1'", ")", ":", "a1_file", "=", "m", ".", "name", "elif", "m", ".", "name", ".", "endswith", "(", "'.a2'", ")", ":", "a2_file", "=", "m", ".", "name", "else", ":", "assert", "(", "False", ")", "# There should be exactly two files that match these criteria", "if", "len", "(", "extract_these", ")", "!=", "2", "or", "a1_file", "is", "None", "or", "a2_file", "is", "None", ":", "raise", "Exception", "(", "'We thought there would be one .a1 and one .a2'", "+", "' file in the tarball, but we got %d files total'", "%", "len", "(", "extract_these", ")", ")", "return", "None", ",", "None", ",", "None", "# Extract the files that we decided to extract", "tar_file", ".", "extractall", "(", "path", "=", "tmp_dir", ",", "members", "=", "extract_these", ")", "# Read the text of the a1 (entities) file", "with", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "a1_file", ")", ",", "'r'", 
",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "a1_text", "=", "f", ".", "read", "(", ")", "# Read the text of the a2 (events) file", "with", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "a2_file", ")", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "a2_text", "=", "f", ".", "read", "(", ")", "# Now that we're done, remove the temporary directory", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "# Return the extracted text", "return", "a1_text", ",", "a2_text", ",", "sentence_segmentations", "except", "BaseException", "as", "e", ":", "# If there was an exception, delete the temporary directory and", "# pass on the exception", "print", "(", "'Not removing temporary directory: '", "+", "tmp_dir", ")", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "raise", "e", "return", "None", ",", "None", ",", "None" ]
Extract the text of the a1, a2, and sentence segmentation files from the TEES output directory. These files are located within a compressed archive. Parameters ---------- output_dir : str Directory containing the output of the TEES system Returns ------- a1_text : str The text of the TEES a1 file (specifying the entities) a2_text : str The text of the TEES a2 file (specifying the event graph) sentence_segmentations : str The text of the XML file specifying the sentence segmentation
[ "Extract", "the", "text", "of", "the", "a1", "a2", "and", "sentence", "segmentation", "files", "from", "the", "TEES", "output", "directory", ".", "These", "files", "are", "located", "within", "a", "compressed", "archive", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/api.py#L208-L312
train
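A minimal usage sketch for extract_output above, assuming a directory populated by a prior TEES run (the path is hypothetical; the module path is taken from the record's url field):

from indra.sources.tees.api import extract_output

# Hypothetical TEES output directory; it must contain exactly one
# *-preprocessed.xml.gz and one *-events.tar.gz file
a1_text, a2_text, sentence_xml = extract_output('/tmp/tees_output')
print(a1_text.splitlines()[:3])  # first few entity annotation lines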
sorgerlab/indra
indra/sources/eidos/reader.py
_list_to_seq
def _list_to_seq(lst): """Return a scala.collection.Seq from a Python list.""" ml = autoclass('scala.collection.mutable.MutableList')() for element in lst: ml.appendElem(element) return ml
python
def _list_to_seq(lst): """Return a scala.collection.Seq from a Python list.""" ml = autoclass('scala.collection.mutable.MutableList')() for element in lst: ml.appendElem(element) return ml
[ "def", "_list_to_seq", "(", "lst", ")", ":", "ml", "=", "autoclass", "(", "'scala.collection.mutable.MutableList'", ")", "(", ")", "for", "element", "in", "lst", ":", "ml", ".", "appendElem", "(", "element", ")", "return", "ml" ]
Return a scala.collection.Seq from a Python list.
[ "Return", "a", "scala", ".", "collection", ".", "Seq", "from", "a", "Python", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/reader.py#L130-L135
train
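An illustrative sketch of the private helper above; it assumes Pyjnius is installed, a JVM with the Scala standard library on the classpath has been configured before the import, and that Pyjnius auto-converts the Python strings passed in:

from indra.sources.eidos.reader import _list_to_seq  # private helper, shown for exposition

seq = _list_to_seq(['a', 'b', 'c'])  # a scala.collection.mutable.MutableList
print(seq.size())  # -> 3, assuming the Scala size() method is exposed as usual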
sorgerlab/indra
indra/sources/eidos/reader.py
EidosReader.process_text
def process_text(self, text, format='json'): """Return a mentions JSON object given text. Parameters ---------- text : str Text to be processed. format : str The format of the output to produce, one of "json" or "json_ld". Default: "json" Returns ------- json_dict : dict A JSON object of mentions extracted from text. """ if self.eidos_reader is None: self.initialize_reader() default_arg = lambda x: autoclass('scala.Some')(x) today = datetime.date.today().strftime("%Y-%m-%d") fname = 'default_file_name' annot_doc = self.eidos_reader.extractFromText( text, True, # keep text False, # CAG-relevant only default_arg(today), # doc creation time default_arg(fname) # file name ) if format == 'json': mentions = annot_doc.odinMentions() ser = autoclass(eidos_package + '.serialization.json.WMJSONSerializer') mentions_json = ser.toJsonStr(mentions) elif format == 'json_ld': # We need to get a Scala Seq of annot docs here ml = _list_to_seq([annot_doc]) # We currently do not need to instantiate the adjective grounder # if we want to reinstate it, we would need to do the following # ag = EidosAdjectiveGrounder.fromConfig( # EidosSystem.defaultConfig.getConfig("adjectiveGrounder")) # We now create a JSON-LD corpus jc = autoclass(eidos_package + '.serialization.json.JLDCorpus') corpus = jc(ml) # Finally, serialize the corpus into a JSON string mentions_json = corpus.toJsonStr() json_dict = json.loads(mentions_json) return json_dict
python
def process_text(self, text, format='json'): """Return a mentions JSON object given text. Parameters ---------- text : str Text to be processed. format : str The format of the output to produce, one of "json" or "json_ld". Default: "json" Returns ------- json_dict : dict A JSON object of mentions extracted from text. """ if self.eidos_reader is None: self.initialize_reader() default_arg = lambda x: autoclass('scala.Some')(x) today = datetime.date.today().strftime("%Y-%m-%d") fname = 'default_file_name' annot_doc = self.eidos_reader.extractFromText( text, True, # keep text False, # CAG-relevant only default_arg(today), # doc creation time default_arg(fname) # file name ) if format == 'json': mentions = annot_doc.odinMentions() ser = autoclass(eidos_package + '.serialization.json.WMJSONSerializer') mentions_json = ser.toJsonStr(mentions) elif format == 'json_ld': # We need to get a Scala Seq of annot docs here ml = _list_to_seq([annot_doc]) # We currently do not need to instantiate the adjective grounder # if we want to reinstate it, we would need to do the following # ag = EidosAdjectiveGrounder.fromConfig( # EidosSystem.defaultConfig.getConfig("adjectiveGrounder")) # We now create a JSON-LD corpus jc = autoclass(eidos_package + '.serialization.json.JLDCorpus') corpus = jc(ml) # Finally, serialize the corpus into a JSON string mentions_json = corpus.toJsonStr() json_dict = json.loads(mentions_json) return json_dict
[ "def", "process_text", "(", "self", ",", "text", ",", "format", "=", "'json'", ")", ":", "if", "self", ".", "eidos_reader", "is", "None", ":", "self", ".", "initialize_reader", "(", ")", "default_arg", "=", "lambda", "x", ":", "autoclass", "(", "'scala.Some'", ")", "(", "x", ")", "today", "=", "datetime", ".", "date", ".", "today", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "fname", "=", "'default_file_name'", "annot_doc", "=", "self", ".", "eidos_reader", ".", "extractFromText", "(", "text", ",", "True", ",", "# keep text", "False", ",", "# CAG-relevant only", "default_arg", "(", "today", ")", ",", "# doc creation time", "default_arg", "(", "fname", ")", "# file name", ")", "if", "format", "==", "'json'", ":", "mentions", "=", "annot_doc", ".", "odinMentions", "(", ")", "ser", "=", "autoclass", "(", "eidos_package", "+", "'.serialization.json.WMJSONSerializer'", ")", "mentions_json", "=", "ser", ".", "toJsonStr", "(", "mentions", ")", "elif", "format", "==", "'json_ld'", ":", "# We need to get a Scala Seq of annot docs here", "ml", "=", "_list_to_seq", "(", "[", "annot_doc", "]", ")", "# We currently do not need to instantiate the adjective grounder", "# if we want to reinstate it, we would need to do the following", "# ag = EidosAdjectiveGrounder.fromConfig(", "# EidosSystem.defaultConfig.getConfig(\"adjectiveGrounder\"))", "# We now create a JSON-LD corpus", "jc", "=", "autoclass", "(", "eidos_package", "+", "'.serialization.json.JLDCorpus'", ")", "corpus", "=", "jc", "(", "ml", ")", "# Finally, serialize the corpus into a JSON string", "mentions_json", "=", "corpus", ".", "toJsonStr", "(", ")", "json_dict", "=", "json", ".", "loads", "(", "mentions_json", ")", "return", "json_dict" ]
Return a mentions JSON object given text. Parameters ---------- text : str Text to be processed. format : str The format of the output to produce, one of "json" or "json_ld". Default: "json" Returns ------- json_dict : dict A JSON object of mentions extracted from text.
[ "Return", "a", "mentions", "JSON", "object", "given", "text", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/reader.py#L80-L127
train
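A hedged usage sketch for the method above; it assumes the Eidos JAR is on the JVM classpath and that EidosReader can be constructed without arguments (the reader itself is initialized lazily on the first call, which is slow):

from indra.sources.eidos.reader import EidosReader

er = EidosReader()
json_dict = er.process_text('Rainfall causes flooding.', format='json_ld')
print(sorted(json_dict.keys()))  # top-level keys of the JSON-LD corpus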
sorgerlab/indra
indra/sources/eidos/api.py
process_text
def process_text(text, out_format='json_ld', save_json='eidos_output.json', webservice=None): """Return an EidosProcessor by processing the given text. This constructs a reader object via Java and extracts mentions from the text. It then serializes the mentions into JSON and processes the result with process_json. Parameters ---------- text : str The text to be processed. out_format : Optional[str] The type of Eidos output to read into and process. Currently only 'json_ld' is supported, which is also the default value used. save_json : Optional[str] The name of a file in which to dump the JSON output of Eidos. webservice : Optional[str] An Eidos reader web service URL to send the request to. If None, the reading is assumed to be done with the Eidos JAR rather than via a web service. Default: None Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute. """ if not webservice: if eidos_reader is None: logger.error('Eidos reader is not available.') return None json_dict = eidos_reader.process_text(text, out_format) else: res = requests.post('%s/process_text' % webservice, json={'text': text}) json_dict = res.json() if save_json: with open(save_json, 'wt') as fh: json.dump(json_dict, fh, indent=2) return process_json(json_dict)
python
def process_text(text, out_format='json_ld', save_json='eidos_output.json', webservice=None): """Return an EidosProcessor by processing the given text. This constructs a reader object via Java and extracts mentions from the text. It then serializes the mentions into JSON and processes the result with process_json. Parameters ---------- text : str The text to be processed. out_format : Optional[str] The type of Eidos output to read into and process. Currently only 'json_ld' is supported, which is also the default value used. save_json : Optional[str] The name of a file in which to dump the JSON output of Eidos. webservice : Optional[str] An Eidos reader web service URL to send the request to. If None, the reading is assumed to be done with the Eidos JAR rather than via a web service. Default: None Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute. """ if not webservice: if eidos_reader is None: logger.error('Eidos reader is not available.') return None json_dict = eidos_reader.process_text(text, out_format) else: res = requests.post('%s/process_text' % webservice, json={'text': text}) json_dict = res.json() if save_json: with open(save_json, 'wt') as fh: json.dump(json_dict, fh, indent=2) return process_json(json_dict)
[ "def", "process_text", "(", "text", ",", "out_format", "=", "'json_ld'", ",", "save_json", "=", "'eidos_output.json'", ",", "webservice", "=", "None", ")", ":", "if", "not", "webservice", ":", "if", "eidos_reader", "is", "None", ":", "logger", ".", "error", "(", "'Eidos reader is not available.'", ")", "return", "None", "json_dict", "=", "eidos_reader", ".", "process_text", "(", "text", ",", "out_format", ")", "else", ":", "res", "=", "requests", ".", "post", "(", "'%s/process_text'", "%", "webservice", ",", "json", "=", "{", "'text'", ":", "text", "}", ")", "json_dict", "=", "res", ".", "json", "(", ")", "if", "save_json", ":", "with", "open", "(", "save_json", ",", "'wt'", ")", "as", "fh", ":", "json", ".", "dump", "(", "json_dict", ",", "fh", ",", "indent", "=", "2", ")", "return", "process_json", "(", "json_dict", ")" ]
Return an EidosProcessor by processing the given text. This constructs a reader object via Java and extracts mentions from the text. It then serializes the mentions into JSON and processes the result with process_json. Parameters ---------- text : str The text to be processed. out_format : Optional[str] The type of Eidos output to read into and process. Currently only 'json_ld' is supported, which is also the default value used. save_json : Optional[str] The name of a file in which to dump the JSON output of Eidos. webservice : Optional[str] An Eidos reader web service URL to send the request to. If None, the reading is assumed to be done with the Eidos JAR rather than via a web service. Default: None Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute.
[ "Return", "an", "EidosProcessor", "by", "processing", "the", "given", "text", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/api.py#L22-L62
train
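A sketch of the two invocation modes of process_text above; the web service URL is hypothetical:

from indra.sources.eidos.api import process_text

# Local JVM-based reading (requires the Eidos JAR to be available)
ep = process_text('Drought increases food insecurity.')

# Alternatively, delegate to a running Eidos web service (URL is hypothetical)
ep = process_text('Drought increases food insecurity.',
                  webservice='http://localhost:9000')
if ep is not None:
    print(len(ep.statements))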
sorgerlab/indra
indra/sources/eidos/api.py
process_json_file
def process_json_file(file_name): """Return an EidosProcessor by processing the given Eidos JSON-LD file. This function is useful if the output from Eidos is saved as a file and needs to be processed. Parameters ---------- file_name : str The name of the JSON-LD file to be processed. Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute. """ try: with open(file_name, 'rb') as fh: json_str = fh.read().decode('utf-8') return process_json_str(json_str) except IOError: logger.exception('Could not read file %s.' % file_name)
python
def process_json_file(file_name): """Return an EidosProcessor by processing the given Eidos JSON-LD file. This function is useful if the output from Eidos is saved as a file and needs to be processed. Parameters ---------- file_name : str The name of the JSON-LD file to be processed. Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute. """ try: with open(file_name, 'rb') as fh: json_str = fh.read().decode('utf-8') return process_json_str(json_str) except IOError: logger.exception('Could not read file %s.' % file_name)
[ "def", "process_json_file", "(", "file_name", ")", ":", "try", ":", "with", "open", "(", "file_name", ",", "'rb'", ")", "as", "fh", ":", "json_str", "=", "fh", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "return", "process_json_str", "(", "json_str", ")", "except", "IOError", ":", "logger", ".", "exception", "(", "'Could not read file %s.'", "%", "file_name", ")" ]
Return an EidosProcessor by processing the given Eidos JSON-LD file. This function is useful if the output from Eidos is saved as a file and needs to be processed. Parameters ---------- file_name : str The name of the JSON-LD file to be processed. Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute.
[ "Return", "an", "EidosProcessor", "by", "processing", "the", "given", "Eidos", "JSON", "-", "LD", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/api.py#L65-L87
train
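A short sketch pairing process_json_file with the default save_json dump produced by process_text above:

from indra.sources.eidos.api import process_json_file

ep = process_json_file('eidos_output.json')  # default dump file name from process_text
if ep is not None:  # None is returned (implicitly) if the file could not be read
    print(ep.statements)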
sorgerlab/indra
indra/sources/eidos/api.py
process_json
def process_json(json_dict): """Return an EidosProcessor by processing an Eidos JSON-LD dict. Parameters ---------- json_dict : dict The JSON-LD dict to be processed. Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute. """ ep = EidosProcessor(json_dict) ep.extract_causal_relations() ep.extract_correlations() ep.extract_events() return ep
python
def process_json(json_dict): """Return an EidosProcessor by processing an Eidos JSON-LD dict. Parameters ---------- json_dict : dict The JSON-LD dict to be processed. Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute. """ ep = EidosProcessor(json_dict) ep.extract_causal_relations() ep.extract_correlations() ep.extract_events() return ep
[ "def", "process_json", "(", "json_dict", ")", ":", "ep", "=", "EidosProcessor", "(", "json_dict", ")", "ep", ".", "extract_causal_relations", "(", ")", "ep", ".", "extract_correlations", "(", ")", "ep", ".", "extract_events", "(", ")", "return", "ep" ]
Return an EidosProcessor by processing an Eidos JSON-LD dict. Parameters ---------- json_dict : dict The JSON-LD dict to be processed. Returns ------- ep : EidosProcessor An EidosProcessor containing the extracted INDRA Statements in its statements attribute.
[ "Return", "an", "EidosProcessor", "by", "processing", "an", "Eidos", "JSON", "-", "LD", "dict", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/api.py#L108-L126
train
sorgerlab/indra
indra/databases/chembl_client.py
get_drug_inhibition_stmts
def get_drug_inhibition_stmts(drug): """Query ChEMBL for kinetics data given a drug Agent and return statements Parameters ---------- drug : Agent Agent representing drug with MESH or CHEBI grounding Returns ------- stmts : list of INDRA statements INDRA statements generated by querying ChEMBL for all kinetics data of a drug interacting with protein targets """ chebi_id = drug.db_refs.get('CHEBI') mesh_id = drug.db_refs.get('MESH') if chebi_id: drug_chembl_id = chebi_client.get_chembl_id(chebi_id) elif mesh_id: drug_chembl_id = get_chembl_id(mesh_id) else: logger.error('Drug missing ChEBI or MESH grounding.') return None logger.info('Drug: %s' % (drug_chembl_id)) query_dict = {'query': 'activity', 'params': {'molecule_chembl_id': drug_chembl_id, 'limit': 10000} } res = send_query(query_dict) activities = res['activities'] targ_act_dict = activities_by_target(activities) target_chembl_ids = [x for x in targ_act_dict] protein_targets = get_protein_targets_only(target_chembl_ids) filtered_targ_act_dict = {t: targ_act_dict[t] for t in [x for x in protein_targets]} stmts = [] for target_chembl_id in filtered_targ_act_dict: target_activity_ids = filtered_targ_act_dict[target_chembl_id] target_activites = [x for x in activities if x['activity_id'] in target_activity_ids] target_upids = [] targ_comp = protein_targets[target_chembl_id]['target_components'] for t_c in targ_comp: target_upids.append(t_c['accession']) evidence = [] for assay in target_activites: ev = get_evidence(assay) if not ev: continue evidence.append(ev) if len(evidence) > 0: for target_upid in target_upids: agent_name = uniprot_client.get_gene_name(target_upid) target_agent = Agent(agent_name, db_refs={'UP': target_upid}) st = Inhibition(drug, target_agent, evidence=evidence) stmts.append(st) return stmts
python
def get_drug_inhibition_stmts(drug): """Query ChEMBL for kinetics data given a drug Agent and return statements Parameters ---------- drug : Agent Agent representing drug with MESH or CHEBI grounding Returns ------- stmts : list of INDRA statements INDRA statements generated by querying ChEMBL for all kinetics data of a drug interacting with protein targets """ chebi_id = drug.db_refs.get('CHEBI') mesh_id = drug.db_refs.get('MESH') if chebi_id: drug_chembl_id = chebi_client.get_chembl_id(chebi_id) elif mesh_id: drug_chembl_id = get_chembl_id(mesh_id) else: logger.error('Drug missing ChEBI or MESH grounding.') return None logger.info('Drug: %s' % (drug_chembl_id)) query_dict = {'query': 'activity', 'params': {'molecule_chembl_id': drug_chembl_id, 'limit': 10000} } res = send_query(query_dict) activities = res['activities'] targ_act_dict = activities_by_target(activities) target_chembl_ids = [x for x in targ_act_dict] protein_targets = get_protein_targets_only(target_chembl_ids) filtered_targ_act_dict = {t: targ_act_dict[t] for t in [x for x in protein_targets]} stmts = [] for target_chembl_id in filtered_targ_act_dict: target_activity_ids = filtered_targ_act_dict[target_chembl_id] target_activites = [x for x in activities if x['activity_id'] in target_activity_ids] target_upids = [] targ_comp = protein_targets[target_chembl_id]['target_components'] for t_c in targ_comp: target_upids.append(t_c['accession']) evidence = [] for assay in target_activites: ev = get_evidence(assay) if not ev: continue evidence.append(ev) if len(evidence) > 0: for target_upid in target_upids: agent_name = uniprot_client.get_gene_name(target_upid) target_agent = Agent(agent_name, db_refs={'UP': target_upid}) st = Inhibition(drug, target_agent, evidence=evidence) stmts.append(st) return stmts
[ "def", "get_drug_inhibition_stmts", "(", "drug", ")", ":", "chebi_id", "=", "drug", ".", "db_refs", ".", "get", "(", "'CHEBI'", ")", "mesh_id", "=", "drug", ".", "db_refs", ".", "get", "(", "'MESH'", ")", "if", "chebi_id", ":", "drug_chembl_id", "=", "chebi_client", ".", "get_chembl_id", "(", "chebi_id", ")", "elif", "mesh_id", ":", "drug_chembl_id", "=", "get_chembl_id", "(", "mesh_id", ")", "else", ":", "logger", ".", "error", "(", "'Drug missing ChEBI or MESH grounding.'", ")", "return", "None", "logger", ".", "info", "(", "'Drug: %s'", "%", "(", "drug_chembl_id", ")", ")", "query_dict", "=", "{", "'query'", ":", "'activity'", ",", "'params'", ":", "{", "'molecule_chembl_id'", ":", "drug_chembl_id", ",", "'limit'", ":", "10000", "}", "}", "res", "=", "send_query", "(", "query_dict", ")", "activities", "=", "res", "[", "'activities'", "]", "targ_act_dict", "=", "activities_by_target", "(", "activities", ")", "target_chembl_ids", "=", "[", "x", "for", "x", "in", "targ_act_dict", "]", "protein_targets", "=", "get_protein_targets_only", "(", "target_chembl_ids", ")", "filtered_targ_act_dict", "=", "{", "t", ":", "targ_act_dict", "[", "t", "]", "for", "t", "in", "[", "x", "for", "x", "in", "protein_targets", "]", "}", "stmts", "=", "[", "]", "for", "target_chembl_id", "in", "filtered_targ_act_dict", ":", "target_activity_ids", "=", "filtered_targ_act_dict", "[", "target_chembl_id", "]", "target_activites", "=", "[", "x", "for", "x", "in", "activities", "if", "x", "[", "'activity_id'", "]", "in", "target_activity_ids", "]", "target_upids", "=", "[", "]", "targ_comp", "=", "protein_targets", "[", "target_chembl_id", "]", "[", "'target_components'", "]", "for", "t_c", "in", "targ_comp", ":", "target_upids", ".", "append", "(", "t_c", "[", "'accession'", "]", ")", "evidence", "=", "[", "]", "for", "assay", "in", "target_activites", ":", "ev", "=", "get_evidence", "(", "assay", ")", "if", "not", "ev", ":", "continue", "evidence", ".", "append", "(", "ev", ")", "if", "len", "(", "evidence", ")", ">", "0", ":", "for", "target_upid", "in", "target_upids", ":", "agent_name", "=", "uniprot_client", ".", "get_gene_name", "(", "target_upid", ")", "target_agent", "=", "Agent", "(", "agent_name", ",", "db_refs", "=", "{", "'UP'", ":", "target_upid", "}", ")", "st", "=", "Inhibition", "(", "drug", ",", "target_agent", ",", "evidence", "=", "evidence", ")", "stmts", ".", "append", "(", "st", ")", "return", "stmts" ]
Query ChEMBL for kinetics data given a drug Agent and return statements Parameters ---------- drug : Agent Agent representing drug with MESH or CHEBI grounding Returns ------- stmts : list of INDRA statements INDRA statements generated by querying ChEMBL for all kinetics data of a drug interacting with protein targets
[ "Query", "ChEMBL", "for", "kinetics", "data", "given", "a", "drug", "Agent", "and", "return", "statements" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L46-L102
train
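A usage sketch for get_drug_inhibition_stmts; the ChEBI grounding shown is illustrative, and the call issues several live ChEMBL and UniProt requests:

from indra.statements import Agent
from indra.databases.chembl_client import get_drug_inhibition_stmts

# Drug Agent with a ChEBI grounding (identifier shown is illustrative)
drug = Agent('VEMURAFENIB', db_refs={'CHEBI': 'CHEBI:63637'})
stmts = get_drug_inhibition_stmts(drug)
if stmts:
    print(stmts[0])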
sorgerlab/indra
indra/databases/chembl_client.py
send_query
def send_query(query_dict): """Query ChEMBL API Parameters ---------- query_dict : dict 'query' : string of the endpoint to query 'params' : dict of params for the query Returns ------- js : dict dict parsed from json that is unique to the submitted query """ query = query_dict['query'] params = query_dict['params'] url = 'https://www.ebi.ac.uk/chembl/api/data/' + query + '.json' r = requests.get(url, params=params) r.raise_for_status() js = r.json() return js
python
def send_query(query_dict): """Query ChEMBL API Parameters ---------- query_dict : dict 'query' : string of the endpoint to query 'params' : dict of params for the query Returns ------- js : dict dict parsed from json that is unique to the submitted query """ query = query_dict['query'] params = query_dict['params'] url = 'https://www.ebi.ac.uk/chembl/api/data/' + query + '.json' r = requests.get(url, params=params) r.raise_for_status() js = r.json() return js
[ "def", "send_query", "(", "query_dict", ")", ":", "query", "=", "query_dict", "[", "'query'", "]", "params", "=", "query_dict", "[", "'params'", "]", "url", "=", "'https://www.ebi.ac.uk/chembl/api/data/'", "+", "query", "+", "'.json'", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "r", ".", "raise_for_status", "(", ")", "js", "=", "r", ".", "json", "(", ")", "return", "js" ]
Query ChEMBL API Parameters ---------- query_dict : dict 'query' : string of the endpoint to query 'params' : dict of params for the query Returns ------- js : dict dict parsed from json that is unique to the submitted query
[ "Query", "ChEMBL", "API" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L105-L125
train
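For example, the same activity query that get_drug_inhibition_stmts builds internally can be issued directly; CHEMBL25 (aspirin) is used as a well-known molecule id:

from indra.databases.chembl_client import send_query

query_dict = {'query': 'activity',
              'params': {'molecule_chembl_id': 'CHEMBL25', 'limit': 5}}
res = send_query(query_dict)
print(len(res['activities']))  # up to 5 activity records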
sorgerlab/indra
indra/databases/chembl_client.py
query_target
def query_target(target_chembl_id): """Query ChEMBL API target by id Parameters ---------- target_chembl_id : str Returns ------- target : dict dict parsed from json that is unique for the target """ query_dict = {'query': 'target', 'params': {'target_chembl_id': target_chembl_id, 'limit': 1}} res = send_query(query_dict) target = res['targets'][0] return target
python
def query_target(target_chembl_id): """Query ChEMBL API target by id Parameters ---------- target_chembl_id : str Returns ------- target : dict dict parsed from json that is unique for the target """ query_dict = {'query': 'target', 'params': {'target_chembl_id': target_chembl_id, 'limit': 1}} res = send_query(query_dict) target = res['targets'][0] return target
[ "def", "query_target", "(", "target_chembl_id", ")", ":", "query_dict", "=", "{", "'query'", ":", "'target'", ",", "'params'", ":", "{", "'target_chembl_id'", ":", "target_chembl_id", ",", "'limit'", ":", "1", "}", "}", "res", "=", "send_query", "(", "query_dict", ")", "target", "=", "res", "[", "'targets'", "]", "[", "0", "]", "return", "target" ]
Query ChEMBL API target by id Parameters ---------- target_chembl_id : str Returns ------- target : dict dict parsed from json that is unique for the target
[ "Query", "ChEMBL", "API", "target", "by", "id" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L128-L145
train
sorgerlab/indra
indra/databases/chembl_client.py
activities_by_target
def activities_by_target(activities): """Get back lists of activities in a dict keyed by ChEMBL target id Parameters ---------- activities : list response from a query returning activities for a drug Returns ------- targ_act_dict : dict dictionary keyed to ChEMBL target ids with lists of activity ids """ targ_act_dict = defaultdict(lambda: []) for activity in activities: target_chembl_id = activity['target_chembl_id'] activity_id = activity['activity_id'] targ_act_dict[target_chembl_id].append(activity_id) for target_chembl_id in targ_act_dict: targ_act_dict[target_chembl_id] = \ list(set(targ_act_dict[target_chembl_id])) return targ_act_dict
python
def activities_by_target(activities): """Get back lists of activities in a dict keyed by ChEMBL target id Parameters ---------- activities : list response from a query returning activities for a drug Returns ------- targ_act_dict : dict dictionary keyed to ChEMBL target ids with lists of activity ids """ targ_act_dict = defaultdict(lambda: []) for activity in activities: target_chembl_id = activity['target_chembl_id'] activity_id = activity['activity_id'] targ_act_dict[target_chembl_id].append(activity_id) for target_chembl_id in targ_act_dict: targ_act_dict[target_chembl_id] = \ list(set(targ_act_dict[target_chembl_id])) return targ_act_dict
[ "def", "activities_by_target", "(", "activities", ")", ":", "targ_act_dict", "=", "defaultdict", "(", "lambda", ":", "[", "]", ")", "for", "activity", "in", "activities", ":", "target_chembl_id", "=", "activity", "[", "'target_chembl_id'", "]", "activity_id", "=", "activity", "[", "'activity_id'", "]", "targ_act_dict", "[", "target_chembl_id", "]", ".", "append", "(", "activity_id", ")", "for", "target_chembl_id", "in", "targ_act_dict", ":", "targ_act_dict", "[", "target_chembl_id", "]", "=", "list", "(", "set", "(", "targ_act_dict", "[", "target_chembl_id", "]", ")", ")", "return", "targ_act_dict" ]
Get back lists of activities in a dict keyed by ChEMBL target id Parameters ---------- activities : list response from a query returning activities for a drug Returns ------- targ_act_dict : dict dictionary keyed to ChEMBL target ids with lists of activity ids
[ "Get", "back", "lists", "of", "activities", "in", "a", "dict", "keyed", "by", "ChEMBL", "target", "id" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L148-L169
train
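Since activities_by_target only reads the 'target_chembl_id' and 'activity_id' keys, its behavior can be demonstrated with a tiny hand-made activities list (the ids are illustrative):

from indra.databases.chembl_client import activities_by_target

activities = [
    {'target_chembl_id': 'CHEMBL203', 'activity_id': 1},
    {'target_chembl_id': 'CHEMBL203', 'activity_id': 1},  # duplicate, removed below
    {'target_chembl_id': 'CHEMBL204', 'activity_id': 2},
]
print(dict(activities_by_target(activities)))
# -> {'CHEMBL203': [1], 'CHEMBL204': [2]}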
sorgerlab/indra
indra/databases/chembl_client.py
get_protein_targets_only
def get_protein_targets_only(target_chembl_ids): """Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets Parameters ---------- target_chembl_ids : list list of chembl_ids as strings Returns ------- protein_targets : dict dictionary keyed to ChEMBL target ids with lists of activity ids """ protein_targets = {} for target_chembl_id in target_chembl_ids: target = query_target(target_chembl_id) if 'SINGLE PROTEIN' in target['target_type']: protein_targets[target_chembl_id] = target return protein_targets
python
def get_protein_targets_only(target_chembl_ids): """Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets Parameters ---------- target_chembl_ids : list list of chembl_ids as strings Returns ------- protein_targets : dict dictionary keyed to ChEMBL target ids with lists of activity ids """ protein_targets = {} for target_chembl_id in target_chembl_ids: target = query_target(target_chembl_id) if 'SINGLE PROTEIN' in target['target_type']: protein_targets[target_chembl_id] = target return protein_targets
[ "def", "get_protein_targets_only", "(", "target_chembl_ids", ")", ":", "protein_targets", "=", "{", "}", "for", "target_chembl_id", "in", "target_chembl_ids", ":", "target", "=", "query_target", "(", "target_chembl_id", ")", "if", "'SINGLE PROTEIN'", "in", "target", "[", "'target_type'", "]", ":", "protein_targets", "[", "target_chembl_id", "]", "=", "target", "return", "protein_targets" ]
Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets Parameters ---------- target_chembl_ids : list list of chembl_ids as strings Returns ------- protein_targets : dict dictionary keyed to ChEMBL target ids with lists of activity ids
[ "Given", "list", "of", "ChEMBL", "target", "ids", "return", "dict", "of", "SINGLE", "PROTEIN", "targets" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L172-L190
train
sorgerlab/indra
indra/databases/chembl_client.py
get_evidence
def get_evidence(assay): """Given an activity, return an INDRA Evidence object. Parameters ---------- assay : dict an activity from the activities list returned by a query to the API Returns ------- ev : :py:class:`Evidence` an :py:class:`Evidence` object containing the kinetics of the assay """ kin = get_kinetics(assay) source_id = assay.get('assay_chembl_id') if not kin: return None annotations = {'kinetics': kin} chembl_doc_id = str(assay.get('document_chembl_id')) pmid = get_pmid(chembl_doc_id) ev = Evidence(source_api='chembl', pmid=pmid, source_id=source_id, annotations=annotations) return ev
python
def get_evidence(assay): """Given an activity, return an INDRA Evidence object. Parameters ---------- assay : dict an activity from the activities list returned by a query to the API Returns ------- ev : :py:class:`Evidence` an :py:class:`Evidence` object containing the kinetics of the assay """ kin = get_kinetics(assay) source_id = assay.get('assay_chembl_id') if not kin: return None annotations = {'kinetics': kin} chembl_doc_id = str(assay.get('document_chembl_id')) pmid = get_pmid(chembl_doc_id) ev = Evidence(source_api='chembl', pmid=pmid, source_id=source_id, annotations=annotations) return ev
[ "def", "get_evidence", "(", "assay", ")", ":", "kin", "=", "get_kinetics", "(", "assay", ")", "source_id", "=", "assay", ".", "get", "(", "'assay_chembl_id'", ")", "if", "not", "kin", ":", "return", "None", "annotations", "=", "{", "'kinetics'", ":", "kin", "}", "chembl_doc_id", "=", "str", "(", "assay", ".", "get", "(", "'document_chembl_id'", ")", ")", "pmid", "=", "get_pmid", "(", "chembl_doc_id", ")", "ev", "=", "Evidence", "(", "source_api", "=", "'chembl'", ",", "pmid", "=", "pmid", ",", "source_id", "=", "source_id", ",", "annotations", "=", "annotations", ")", "return", "ev" ]
Given an activity, return an INDRA Evidence object. Parameters ---------- assay : dict an activity from the activities list returned by a query to the API Returns ------- ev : :py:class:`Evidence` an :py:class:`Evidence` object containing the kinetics of the assay
[ "Given", "an", "activity", "return", "an", "INDRA", "Evidence", "object", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L193-L215
train
sorgerlab/indra
indra/databases/chembl_client.py
get_kinetics
def get_kinetics(assay): """Given an activity, return its kinetics values. Parameters ---------- assay : dict an activity from the activities list returned by a query to the API Returns ------- kin : dict dictionary of values with units keyed to value types 'IC50', 'EC50', 'INH', 'Potency', 'Kd' """ try: val = float(assay.get('standard_value')) except (TypeError, ValueError): logger.warning('Invalid assay value: %s' % assay.get('standard_value')) return None unit = assay.get('standard_units') if unit == 'nM': unit_sym = 1e-9 * units.mol / units.liter elif unit == 'uM': unit_sym = 1e-6 * units.mol / units.liter else: logger.warning('Unhandled unit: %s' % unit) return None param_type = assay.get('standard_type') if param_type not in ['IC50', 'EC50', 'INH', 'Potency', 'Kd']: logger.warning('Unhandled parameter type: %s' % param_type) logger.info(str(assay)) return None kin = {param_type: val * unit_sym} return kin
python
def get_kinetics(assay): """Given an activity, return its kinetics values. Parameters ---------- assay : dict an activity from the activities list returned by a query to the API Returns ------- kin : dict dictionary of values with units keyed to value types 'IC50', 'EC50', 'INH', 'Potency', 'Kd' """ try: val = float(assay.get('standard_value')) except (TypeError, ValueError): logger.warning('Invalid assay value: %s' % assay.get('standard_value')) return None unit = assay.get('standard_units') if unit == 'nM': unit_sym = 1e-9 * units.mol / units.liter elif unit == 'uM': unit_sym = 1e-6 * units.mol / units.liter else: logger.warning('Unhandled unit: %s' % unit) return None param_type = assay.get('standard_type') if param_type not in ['IC50', 'EC50', 'INH', 'Potency', 'Kd']: logger.warning('Unhandled parameter type: %s' % param_type) logger.info(str(assay)) return None kin = {param_type: val * unit_sym} return kin
[ "def", "get_kinetics", "(", "assay", ")", ":", "try", ":", "val", "=", "float", "(", "assay", ".", "get", "(", "'standard_value'", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "logger", ".", "warning", "(", "'Invalid assay value: %s'", "%", "assay", ".", "get", "(", "'standard_value'", ")", ")", "return", "None", "unit", "=", "assay", ".", "get", "(", "'standard_units'", ")", "if", "unit", "==", "'nM'", ":", "unit_sym", "=", "1e-9", "*", "units", ".", "mol", "/", "units", ".", "liter", "elif", "unit", "==", "'uM'", ":", "unit_sym", "=", "1e-6", "*", "units", ".", "mol", "/", "units", ".", "liter", "else", ":", "logger", ".", "warning", "(", "'Unhandled unit: %s'", "%", "unit", ")", "return", "None", "param_type", "=", "assay", ".", "get", "(", "'standard_type'", ")", "if", "param_type", "not", "in", "[", "'IC50'", ",", "'EC50'", ",", "'INH'", ",", "'Potency'", ",", "'Kd'", "]", ":", "logger", ".", "warning", "(", "'Unhandled parameter type: %s'", "%", "param_type", ")", "logger", ".", "info", "(", "str", "(", "assay", ")", ")", "return", "None", "kin", "=", "{", "param_type", ":", "val", "*", "unit_sym", "}", "return", "kin" ]
Given an activity, return its kinetics values. Parameters ---------- assay : dict an activity from the activities list returned by a query to the API Returns ------- kin : dict dictionary of values with units keyed to value types 'IC50', 'EC50', 'INH', 'Potency', 'Kd'
[ "Given", "an", "activity", "return", "its", "kinetics", "values", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L218-L251
train
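The assay dict shape that get_kinetics expects can be sketched directly; only the three 'standard_*' keys are read, and the returned value carries units from the module's units registry:

from indra.databases.chembl_client import get_kinetics

assay = {'standard_value': '50',
         'standard_units': 'nM',
         'standard_type': 'IC50'}
print(get_kinetics(assay))  # {'IC50': 50 nM expressed in mol/liter}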
sorgerlab/indra
indra/databases/chembl_client.py
get_pmid
def get_pmid(doc_id): """Get PMID from document_chembl_id Parameters ---------- doc_id : str Returns ------- pmid : str """ url_pmid = 'https://www.ebi.ac.uk/chembl/api/data/document.json' params = {'document_chembl_id': doc_id} res = requests.get(url_pmid, params=params) js = res.json() pmid = str(js['documents'][0]['pubmed_id']) return pmid
python
def get_pmid(doc_id): """Get PMID from document_chembl_id Parameters ---------- doc_id : str Returns ------- pmid : str """ url_pmid = 'https://www.ebi.ac.uk/chembl/api/data/document.json' params = {'document_chembl_id': doc_id} res = requests.get(url_pmid, params=params) js = res.json() pmid = str(js['documents'][0]['pubmed_id']) return pmid
[ "def", "get_pmid", "(", "doc_id", ")", ":", "url_pmid", "=", "'https://www.ebi.ac.uk/chembl/api/data/document.json'", "params", "=", "{", "'document_chembl_id'", ":", "doc_id", "}", "res", "=", "requests", ".", "get", "(", "url_pmid", ",", "params", "=", "params", ")", "js", "=", "res", ".", "json", "(", ")", "pmid", "=", "str", "(", "js", "[", "'documents'", "]", "[", "0", "]", "[", "'pubmed_id'", "]", ")", "return", "pmid" ]
Get PMID from document_chembl_id Parameters ---------- doc_id : str Returns ------- pmid : str
[ "Get", "PMID", "from", "document_chembl_id" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L254-L270
train
sorgerlab/indra
indra/databases/chembl_client.py
get_target_chemblid
def get_target_chemblid(target_upid): """Get ChEMBL ID from UniProt upid Parameters ---------- target_upid : str Returns ------- target_chembl_id : str """ url = 'https://www.ebi.ac.uk/chembl/api/data/target.json' params = {'target_components__accession': target_upid} r = requests.get(url, params=params) r.raise_for_status() js = r.json() target_chemblid = js['targets'][0]['target_chembl_id'] return target_chemblid
python
def get_target_chemblid(target_upid): """Get ChEMBL ID from UniProt upid Parameters ---------- target_upid : str Returns ------- target_chembl_id : str """ url = 'https://www.ebi.ac.uk/chembl/api/data/target.json' params = {'target_components__accession': target_upid} r = requests.get(url, params=params) r.raise_for_status() js = r.json() target_chemblid = js['targets'][0]['target_chembl_id'] return target_chemblid
[ "def", "get_target_chemblid", "(", "target_upid", ")", ":", "url", "=", "'https://www.ebi.ac.uk/chembl/api/data/target.json'", "params", "=", "{", "'target_components__accession'", ":", "target_upid", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "r", ".", "raise_for_status", "(", ")", "js", "=", "r", ".", "json", "(", ")", "target_chemblid", "=", "js", "[", "'targets'", "]", "[", "0", "]", "[", "'target_chembl_id'", "]", "return", "target_chemblid" ]
Get ChEMBL ID from UniProt upid Parameters ---------- target_upid : str Returns ------- target_chembl_id : str
[ "Get", "ChEMBL", "ID", "from", "UniProt", "upid" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L273-L290
train
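A one-line sketch for get_target_chemblid, using the UniProt ID of EGFR (P00533); the expected ChEMBL id is shown only as an illustration and the call hits the live API:

from indra.databases.chembl_client import get_target_chemblid

print(get_target_chemblid('P00533'))  # expected to be something like 'CHEMBL203'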
sorgerlab/indra
indra/databases/chembl_client.py
get_mesh_id
def get_mesh_id(nlm_mesh): """Get MESH ID from NLM MESH Parameters ---------- nlm_mesh : str Returns ------- mesh_id : str """ url_nlm2mesh = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi' params = {'db': 'mesh', 'term': nlm_mesh, 'retmode': 'JSON'} r = requests.get(url_nlm2mesh, params=params) res = r.json() mesh_id = res['esearchresult']['idlist'][0] return mesh_id
python
def get_mesh_id(nlm_mesh): """Get MESH ID from NLM MESH Parameters ---------- nlm_mesh : str Returns ------- mesh_id : str """ url_nlm2mesh = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi' params = {'db': 'mesh', 'term': nlm_mesh, 'retmode': 'JSON'} r = requests.get(url_nlm2mesh, params=params) res = r.json() mesh_id = res['esearchresult']['idlist'][0] return mesh_id
[ "def", "get_mesh_id", "(", "nlm_mesh", ")", ":", "url_nlm2mesh", "=", "'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'", "params", "=", "{", "'db'", ":", "'mesh'", ",", "'term'", ":", "nlm_mesh", ",", "'retmode'", ":", "'JSON'", "}", "r", "=", "requests", ".", "get", "(", "url_nlm2mesh", ",", "params", "=", "params", ")", "res", "=", "r", ".", "json", "(", ")", "mesh_id", "=", "res", "[", "'esearchresult'", "]", "[", "'idlist'", "]", "[", "0", "]", "return", "mesh_id" ]
Get MESH ID from NLM MESH Parameters ---------- nlm_mesh : str Returns ------- mesh_id : str
[ "Get", "MESH", "ID", "from", "NLM", "MESH" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L293-L309
train
sorgerlab/indra
indra/databases/chembl_client.py
get_pcid
def get_pcid(mesh_id): """Get PC ID from MESH ID Parameters ---------- mesh_id : str Returns ------- pcid : str """ url_mesh2pcid = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi' params = {'dbfrom': 'mesh', 'id': mesh_id, 'db': 'pccompound', 'retmode': 'JSON'} r = requests.get(url_mesh2pcid, params=params) res = r.json() pcid = res['linksets'][0]['linksetdbs'][0]['links'][0] return pcid
python
def get_pcid(mesh_id): """Get PC ID from MESH ID Parameters ---------- mesh_id : str Returns ------- pcid : str """ url_mesh2pcid = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi' params = {'dbfrom': 'mesh', 'id': mesh_id, 'db': 'pccompound', 'retmode': 'JSON'} r = requests.get(url_mesh2pcid, params=params) res = r.json() pcid = res['linksets'][0]['linksetdbs'][0]['links'][0] return pcid
[ "def", "get_pcid", "(", "mesh_id", ")", ":", "url_mesh2pcid", "=", "'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi'", "params", "=", "{", "'dbfrom'", ":", "'mesh'", ",", "'id'", ":", "mesh_id", ",", "'db'", ":", "'pccompound'", ",", "'retmode'", ":", "'JSON'", "}", "r", "=", "requests", ".", "get", "(", "url_mesh2pcid", ",", "params", "=", "params", ")", "res", "=", "r", ".", "json", "(", ")", "pcid", "=", "res", "[", "'linksets'", "]", "[", "0", "]", "[", "'linksetdbs'", "]", "[", "0", "]", "[", "'links'", "]", "[", "0", "]", "return", "pcid" ]
Get PC ID from MESH ID Parameters ---------- mesh_id : str Returns ------- pcid : str
[ "Get", "PC", "ID", "from", "MESH", "ID" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L312-L329
train
sorgerlab/indra
indra/databases/chembl_client.py
get_chembl_id
def get_chembl_id(nlm_mesh): """Get ChEMBL ID from NLM MESH Parameters ---------- nlm_mesh : str Returns ------- chembl_id : str """ mesh_id = get_mesh_id(nlm_mesh) pcid = get_pcid(mesh_id) url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \ 'cid/%s/synonyms/JSON' % pcid r = requests.get(url_mesh2pcid) res = r.json() synonyms = res['InformationList']['Information'][0]['Synonym'] chembl_id = [syn for syn in synonyms if 'CHEMBL' in syn and 'SCHEMBL' not in syn][0] return chembl_id
python
def get_chembl_id(nlm_mesh): """Get ChEMBL ID from NLM MESH Parameters ---------- nlm_mesh : str Returns ------- chembl_id : str """ mesh_id = get_mesh_id(nlm_mesh) pcid = get_pcid(mesh_id) url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \ 'cid/%s/synonyms/JSON' % pcid r = requests.get(url_mesh2pcid) res = r.json() synonyms = res['InformationList']['Information'][0]['Synonym'] chembl_id = [syn for syn in synonyms if 'CHEMBL' in syn and 'SCHEMBL' not in syn][0] return chembl_id
[ "def", "get_chembl_id", "(", "nlm_mesh", ")", ":", "mesh_id", "=", "get_mesh_id", "(", "nlm_mesh", ")", "pcid", "=", "get_pcid", "(", "mesh_id", ")", "url_mesh2pcid", "=", "'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/'", "+", "'cid/%s/synonyms/JSON'", "%", "pcid", "r", "=", "requests", ".", "get", "(", "url_mesh2pcid", ")", "res", "=", "r", ".", "json", "(", ")", "synonyms", "=", "res", "[", "'InformationList'", "]", "[", "'Information'", "]", "[", "0", "]", "[", "'Synonym'", "]", "chembl_id", "=", "[", "syn", "for", "syn", "in", "synonyms", "if", "'CHEMBL'", "in", "syn", "and", "'SCHEMBL'", "not", "in", "syn", "]", "[", "0", "]", "return", "chembl_id" ]
Get ChEMBL ID from NLM MESH Parameters ---------- nlm_mesh : str Returns ------- chembl_id : str
[ "Get", "ChEMBL", "ID", "from", "NLM", "MESH" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/chembl_client.py#L332-L352
train
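The three NLM/PubChem helpers above chain together inside get_chembl_id; a hedged end-to-end sketch in which the MeSH term is illustrative and every step is a live Entrez/PubChem request:

from indra.databases.chembl_client import get_mesh_id, get_pcid, get_chembl_id

nlm_mesh = 'vemurafenib'             # NLM MeSH term (illustrative)
mesh_id = get_mesh_id(nlm_mesh)      # MeSH term -> MeSH id
pcid = get_pcid(mesh_id)             # MeSH id -> PubChem compound id
chembl_id = get_chembl_id(nlm_mesh)  # or run the whole chain in one call
print(mesh_id, pcid, chembl_id)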
sorgerlab/indra
indra/sources/geneways/find_full_text_sentence.py
FullTextMention.get_sentences
def get_sentences(self, root_element, block_tags): """Returns a list of plain-text sentences by iterating through XML tags except for those listed in block_tags.""" sentences = [] for element in root_element: if not self.any_ends_with(block_tags, element.tag): # tag not in block_tags if element.text is not None and not re.match(r'^\s*$', element.text): sentences.extend(self.sentence_tokenize(element.text)) sentences.extend(self.get_sentences(element, block_tags)) f = open('sentence_debug.txt', 'w') for s in sentences: f.write(s.lower() + '\n') f.close() return sentences
python
def get_sentences(self, root_element, block_tags): """Returns a list of plain-text sentences by iterating through XML tags except for those listed in block_tags.""" sentences = [] for element in root_element: if not self.any_ends_with(block_tags, element.tag): # tag not in block_tags if element.text is not None and not re.match(r'^\s*$', element.text): sentences.extend(self.sentence_tokenize(element.text)) sentences.extend(self.get_sentences(element, block_tags)) f = open('sentence_debug.txt', 'w') for s in sentences: f.write(s.lower() + '\n') f.close() return sentences
[ "def", "get_sentences", "(", "self", ",", "root_element", ",", "block_tags", ")", ":", "sentences", "=", "[", "]", "for", "element", "in", "root_element", ":", "if", "not", "self", ".", "any_ends_with", "(", "block_tags", ",", "element", ".", "tag", ")", ":", "# tag not in block_tags", "if", "element", ".", "text", "is", "not", "None", "and", "not", "re", ".", "match", "(", "r'^\\s*$'", ",", "element", ".", "text", ")", ":", "sentences", ".", "extend", "(", "self", ".", "sentence_tokenize", "(", "element", ".", "text", ")", ")", "sentences", ".", "extend", "(", "self", ".", "get_sentences", "(", "element", ",", "block_tags", ")", ")", "f", "=", "open", "(", "'sentence_debug.txt'", ",", "'w'", ")", "for", "s", "in", "sentences", ":", "f", ".", "write", "(", "s", ".", "lower", "(", ")", "+", "'\\n'", ")", "f", ".", "close", "(", ")", "return", "sentences" ]
Returns a list of plain-text sentences by iterating through XML tags except for those listed in block_tags.
[ "Returns", "a", "list", "of", "plain", "-", "text", "sentences", "by", "iterating", "through", "XML", "tags", "except", "for", "those", "listed", "in", "block_tags", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L33-L49
train
sorgerlab/indra
indra/sources/geneways/find_full_text_sentence.py
FullTextMention.any_ends_with
def any_ends_with(self, string_list, pattern): """Returns true iff pattern ends with one of the strings in string_list.""" try: s_base = basestring except NameError: s_base = str is_string = isinstance(pattern, s_base) if not is_string: return False for s in string_list: if pattern.endswith(s): return True return False
python
def any_ends_with(self, string_list, pattern): """Returns true iff pattern ends with one of the strings in string_list.""" try: s_base = basestring except NameError: s_base = str is_string = isinstance(pattern, s_base) if not is_string: return False for s in string_list: if pattern.endswith(s): return True return False
[ "def", "any_ends_with", "(", "self", ",", "string_list", ",", "pattern", ")", ":", "try", ":", "s_base", "=", "basestring", "except", "NameError", ":", "s_base", "=", "str", "is_string", "=", "isinstance", "(", "pattern", ",", "s_base", ")", "if", "not", "is_string", ":", "return", "False", "for", "s", "in", "string_list", ":", "if", "pattern", ".", "endswith", "(", "s", ")", ":", "return", "True", "return", "False" ]
Returns true iff pattern ends with one of the strings in string_list.
[ "Returns", "true", "iff", "pattern", "ends", "with", "one", "of", "the", "strings", "in", "string_list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L51-L66
train
sorgerlab/indra
indra/sources/geneways/find_full_text_sentence.py
FullTextMention.get_tag_names
def get_tag_names(self): """Returns the set of tag names present in the XML.""" root = etree.fromstring(self.xml_full_text.encode('utf-8')) return self.get_children_tag_names(root)
python
def get_tag_names(self): """Returns the set of tag names present in the XML.""" root = etree.fromstring(self.xml_full_text.encode('utf-8')) return self.get_children_tag_names(root)
[ "def", "get_tag_names", "(", "self", ")", ":", "root", "=", "etree", ".", "fromstring", "(", "self", ".", "xml_full_text", ".", "encode", "(", "'utf-8'", ")", ")", "return", "self", ".", "get_children_tag_names", "(", "root", ")" ]
Returns the set of tag names present in the XML.
[ "Returns", "the", "set", "of", "tag", "names", "present", "in", "the", "XML", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L120-L123
train
sorgerlab/indra
indra/sources/geneways/find_full_text_sentence.py
FullTextMention.get_children_tag_names
def get_children_tag_names(self, xml_element): """Returns all tag names of xml element and its children.""" tags = set() tags.add(self.remove_namespace_from_tag(xml_element.tag)) for element in xml_element.iter(tag=etree.Element): if element != xml_element: new_tags = self.get_children_tag_names(element) if new_tags is not None: tags.update(new_tags) return tags
python
def get_children_tag_names(self, xml_element): """Returns all tag names of xml element and its children.""" tags = set() tags.add(self.remove_namespace_from_tag(xml_element.tag)) for element in xml_element.iter(tag=etree.Element): if element != xml_element: new_tags = self.get_children_tag_names(element) if new_tags is not None: tags.update(new_tags) return tags
[ "def", "get_children_tag_names", "(", "self", ",", "xml_element", ")", ":", "tags", "=", "set", "(", ")", "tags", ".", "add", "(", "self", ".", "remove_namespace_from_tag", "(", "xml_element", ".", "tag", ")", ")", "for", "element", "in", "xml_element", ".", "iter", "(", "tag", "=", "etree", ".", "Element", ")", ":", "if", "element", "!=", "xml_element", ":", "new_tags", "=", "self", ".", "get_children_tag_names", "(", "element", ")", "if", "new_tags", "is", "not", "None", ":", "tags", ".", "update", "(", "new_tags", ")", "return", "tags" ]
Returns all tag names of xml element and its children.
[ "Returns", "all", "tag", "names", "of", "xml", "element", "and", "its", "children", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L125-L135
train
sorgerlab/indra
indra/sources/geneways/find_full_text_sentence.py
FullTextMention.string_matches_sans_whitespace
def string_matches_sans_whitespace(self, str1, str2_fuzzy_whitespace): """Check if two strings match, modulo their whitespace.""" str2_fuzzy_whitespace = re.sub(r'\s+', r'\\s*', str2_fuzzy_whitespace) return re.search(str2_fuzzy_whitespace, str1) is not None
python
def string_matches_sans_whitespace(self, str1, str2_fuzzy_whitespace): """Check if two strings match, modulo their whitespace.""" str2_fuzzy_whitespace = re.sub(r'\s+', r'\\s*', str2_fuzzy_whitespace) return re.search(str2_fuzzy_whitespace, str1) is not None
[ "def", "string_matches_sans_whitespace", "(", "self", ",", "str1", ",", "str2_fuzzy_whitespace", ")", ":", "str2_fuzzy_whitespace", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "r'\\\\s*'", ",", "str2_fuzzy_whitespace", ")", "return", "re", ".", "search", "(", "str2_fuzzy_whitespace", ",", "str1", ")", "is", "not", "None" ]
Check if two strings match, modulo their whitespace.
[ "Check", "if", "two", "strings", "match", "modulo", "their", "whitespace", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L137-L140
train
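The effect of the whitespace-elastic rewrite, shown with plain re (note that str2 is not passed through re.escape, so any regex metacharacters in it are interpreted as regex syntax):

    import re

    str2 = 'tumor necrosis factor'
    pattern = re.sub(r'\s+', r'\\s*', str2)  # pattern == r'tumor\s*necrosis\s*factor'
    print(re.search(pattern, 'a role for tumornecrosis factor alpha') is not None)  # True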
sorgerlab/indra
indra/sources/geneways/find_full_text_sentence.py
FullTextMention.sentence_matches
def sentence_matches(self, sentence_text): """Returns true iff the sentence contains this mention's upstream and downstream participants, and one of the stemmed verbs in the sentence is the same as the stemmed action type.""" has_upstream = False has_downstream = False has_verb = False # Get the first word of the action type and assume this is the verb # (e.g. 'depends' for 'depends on') actiontype_words = word_tokenize(self.mention.actiontype) actiontype_verb_stemmed = stem(actiontype_words[0]) words = word_tokenize(sentence_text) if self.string_matches_sans_whitespace(sentence_text.lower(), self.mention.upstream.lower()): has_upstream = True if self.string_matches_sans_whitespace(sentence_text.lower(), self.mention.downstream.lower()): has_downstream = True for word in words: if actiontype_verb_stemmed == stem(word): has_verb = True return has_upstream and has_downstream and has_verb
python
def sentence_matches(self, sentence_text): """Returns true iff the sentence contains this mention's upstream and downstream participants, and one of the stemmed verbs in the sentence is the same as the stemmed action type.""" has_upstream = False has_downstream = False has_verb = False # Get the first word of the action type and assume this is the verb # (e.g. 'depends' for 'depends on') actiontype_words = word_tokenize(self.mention.actiontype) actiontype_verb_stemmed = stem(actiontype_words[0]) words = word_tokenize(sentence_text) if self.string_matches_sans_whitespace(sentence_text.lower(), self.mention.upstream.lower()): has_upstream = True if self.string_matches_sans_whitespace(sentence_text.lower(), self.mention.downstream.lower()): has_downstream = True for word in words: if actiontype_verb_stemmed == stem(word): has_verb = True return has_upstream and has_downstream and has_verb
[ "def", "sentence_matches", "(", "self", ",", "sentence_text", ")", ":", "has_upstream", "=", "False", "has_downstream", "=", "False", "has_verb", "=", "False", "# Get the first word of the action type and assume this is the verb", "# (Ex. get depends for depends on)", "actiontype_words", "=", "word_tokenize", "(", "self", ".", "mention", ".", "actiontype", ")", "actiontype_verb_stemmed", "=", "stem", "(", "actiontype_words", "[", "0", "]", ")", "words", "=", "word_tokenize", "(", "sentence_text", ")", "if", "self", ".", "string_matches_sans_whitespace", "(", "sentence_text", ".", "lower", "(", ")", ",", "self", ".", "mention", ".", "upstream", ".", "lower", "(", ")", ")", ":", "has_upstream", "=", "True", "if", "self", ".", "string_matches_sans_whitespace", "(", "sentence_text", ".", "lower", "(", ")", ",", "self", ".", "mention", ".", "downstream", ".", "lower", "(", ")", ")", ":", "has_downstream", "=", "True", "for", "word", "in", "words", ":", "if", "actiontype_verb_stemmed", "==", "stem", "(", "word", ")", ":", "has_verb", "=", "True", "return", "has_upstream", "and", "has_downstream", "and", "has_verb" ]
Returns true iff the sentence contains this mention's upstream and downstream participants, and one of the stemmed verbs in the sentence is the same as the stemmed action type.
[ "Returns", "true", "iff", "the", "sentence", "contains", "this", "mention", "s", "upstream", "and", "downstream", "participants", "and", "if", "one", "of", "the", "stemmed", "verbs", "in", "the", "sentence", "is", "the", "same", "as", "the", "stemmed", "action", "type", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L142-L169
train
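A rough sketch of the verb-stem check using NLTK directly (an assumption: the module's word_tokenize and stem helpers behave like NLTK's tokenizer and Porter stemmer; NLTK's punkt data must be installed):

    from nltk.tokenize import word_tokenize
    from nltk.stem.porter import PorterStemmer

    stem = PorterStemmer().stem
    actiontype = 'depends on'
    verb_stem = stem(word_tokenize(actiontype)[0])  # 'depend'
    sentence = 'Expression of A depends on the level of B.'
    print(any(stem(word) == verb_stem for word in word_tokenize(sentence)))  # True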
sorgerlab/indra
indra/databases/__init__.py
get_identifiers_url
def get_identifiers_url(db_name, db_id): """Return an identifiers.org URL for a given database name and ID. Parameters ---------- db_name : str An internal database name: HGNC, UP, CHEBI, etc. db_id : str An identifier in the given database. Returns ------- url : str An identifiers.org URL corresponding to the given database name and ID. """ identifiers_url = 'http://identifiers.org/' bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/' if db_name == 'UP': url = identifiers_url + 'uniprot/%s' % db_id elif db_name == 'HGNC': url = identifiers_url + 'hgnc/HGNC:%s' % db_id elif db_name == 'IP': url = identifiers_url + 'interpro/%s' % db_id elif db_name == 'IPR': url = identifiers_url + 'interpro/%s' % db_id elif db_name == 'CHEBI': url = identifiers_url + 'chebi/%s' % db_id elif db_name == 'NCIT': url = identifiers_url + 'ncit/%s' % db_id elif db_name == 'GO': if db_id.startswith('GO:'): url = identifiers_url + 'go/%s' % db_id else: url = identifiers_url + 'go/GO:%s' % db_id elif db_name in ('PUBCHEM', 'PCID'): # Assuming PCID = PubChem compound ID if db_id.startswith('PUBCHEM:'): db_id = db_id[8:] elif db_id.startswith('PCID:'): db_id = db_id[5:] url = identifiers_url + 'pubchem.compound/%s' % db_id elif db_name == 'PF': url = identifiers_url + 'pfam/%s' % db_id elif db_name == 'MIRBASEM': url = identifiers_url + 'mirbase.mature/%s' % db_id elif db_name == 'MIRBASE': url = identifiers_url + 'mirbase/%s' % db_id elif db_name == 'MESH': url = identifiers_url + 'mesh/%s' % db_id elif db_name == 'EGID': url = identifiers_url + 'ncbigene/%s' % db_id elif db_name == 'HMDB': url = identifiers_url + 'hmdb/%s' % db_id elif db_name == 'LINCS': if db_id.startswith('LSM-'): # Lincs Small Molecule ID url = identifiers_url + 'lincs.smallmolecule/%s' % db_id elif db_id.startswith('LCL-'): # Lincs Cell Line ID url = identifiers_url + 'lincs.cell/%s' % db_id else: # Assume LINCS Protein url = identifiers_url + 'lincs.protein/%s' % db_id elif db_name == 'HMS-LINCS': url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id # Special cases with no identifiers entry elif db_name == 'SCHEM': url = bel_scai_url + 'selventa-legacy-chemicals/' + \ 'selventa-legacy-chemicals-20150601.belns' elif db_name == 'SCOMP': url = bel_scai_url + 'selventa-named-complexes/' + \ 'selventa-named-complexes-20150601.belns' elif db_name == 'SFAM': url = bel_scai_url + 'selventa-protein-families/' + \ 'selventa-protein-families-20150601.belns' elif db_name == 'FPLX': url = 'http://identifiers.org/fplx/%s' % db_id elif db_name == 'LNCRNADB': if db_id.startswith('ENSG'): url = 'http://www.lncrnadb.org/search/?q=%s' % db_id else: # Assuming HGNC symbol url = 'http://www.lncrnadb.org/%s/' % db_id elif db_name == 'NXPFA': url = 'https://www.nextprot.org/term/FA-%s' % db_id elif db_name in ('UN', 'WDI', 'FAO'): url = 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/%s' % \ db_id elif db_name == 'HUME': url = ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies/' 'hume_ontology/%s' % db_id) elif db_name == 'CWMS': url = 'http://trips.ihmc.us/%s' % db_id elif db_name == 'SIGNOR': # Assuming db_id == Primary ID url = 'https://signor.uniroma2.it/relation_result.php?id=%s' % db_id elif db_name == 'SOFIA': url = 'http://cs.cmu.edu/sofia/%s' % db_id elif db_name == 'CHEMBL': if not db_id.startswith('CHEMBL'): db_id = 'CHEMBL%s' % db_id url = identifiers_url + 'chembl.compound/%s' % db_id elif db_name == 'NONCODE': url = 'http://www.noncode.org/show_gene.php?id=NONHSAG%s' % db_id elif db_name == 'TEXT': return None else: logger.warning('Unhandled name space %s' % db_name) url = None return url
python
def get_identifiers_url(db_name, db_id): """Return an identifiers.org URL for a given database name and ID. Parameters ---------- db_name : str An internal database name: HGNC, UP, CHEBI, etc. db_id : str An identifier in the given database. Returns ------- url : str An identifiers.org URL corresponding to the given database name and ID. """ identifiers_url = 'http://identifiers.org/' bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/' if db_name == 'UP': url = identifiers_url + 'uniprot/%s' % db_id elif db_name == 'HGNC': url = identifiers_url + 'hgnc/HGNC:%s' % db_id elif db_name == 'IP': url = identifiers_url + 'interpro/%s' % db_id elif db_name == 'IPR': url = identifiers_url + 'interpro/%s' % db_id elif db_name == 'CHEBI': url = identifiers_url + 'chebi/%s' % db_id elif db_name == 'NCIT': url = identifiers_url + 'ncit/%s' % db_id elif db_name == 'GO': if db_id.startswith('GO:'): url = identifiers_url + 'go/%s' % db_id else: url = identifiers_url + 'go/GO:%s' % db_id elif db_name in ('PUBCHEM', 'PCID'): # Assuming PCID = PubChem compound ID if db_id.startswith('PUBCHEM:'): db_id = db_id[8:] elif db_id.startswith('PCID:'): db_id = db_id[5:] url = identifiers_url + 'pubchem.compound/%s' % db_id elif db_name == 'PF': url = identifiers_url + 'pfam/%s' % db_id elif db_name == 'MIRBASEM': url = identifiers_url + 'mirbase.mature/%s' % db_id elif db_name == 'MIRBASE': url = identifiers_url + 'mirbase/%s' % db_id elif db_name == 'MESH': url = identifiers_url + 'mesh/%s' % db_id elif db_name == 'EGID': url = identifiers_url + 'ncbigene/%s' % db_id elif db_name == 'HMDB': url = identifiers_url + 'hmdb/%s' % db_id elif db_name == 'LINCS': if db_id.startswith('LSM-'): # Lincs Small Molecule ID url = identifiers_url + 'lincs.smallmolecule/%s' % db_id elif db_id.startswith('LCL-'): # Lincs Cell Line ID url = identifiers_url + 'lincs.cell/%s' % db_id else: # Assume LINCS Protein url = identifiers_url + 'lincs.protein/%s' % db_id elif db_name == 'HMS-LINCS': url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id # Special cases with no identifiers entry elif db_name == 'SCHEM': url = bel_scai_url + 'selventa-legacy-chemicals/' + \ 'selventa-legacy-chemicals-20150601.belns' elif db_name == 'SCOMP': url = bel_scai_url + 'selventa-named-complexes/' + \ 'selventa-named-complexes-20150601.belns' elif db_name == 'SFAM': url = bel_scai_url + 'selventa-protein-families/' + \ 'selventa-protein-families-20150601.belns' elif db_name == 'FPLX': url = 'http://identifiers.org/fplx/%s' % db_id elif db_name == 'LNCRNADB': if db_id.startswith('ENSG'): url = 'http://www.lncrnadb.org/search/?q=%s' % db_id else: # Assuming HGNC symbol url = 'http://www.lncrnadb.org/%s/' % db_id elif db_name == 'NXPFA': url = 'https://www.nextprot.org/term/FA-%s' % db_id elif db_name in ('UN', 'WDI', 'FAO'): url = 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/%s' % \ db_id elif db_name == 'HUME': url = ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies/' 'hume_ontology/%s' % db_id) elif db_name == 'CWMS': url = 'http://trips.ihmc.us/%s' % db_id elif db_name == 'SIGNOR': # Assuming db_id == Primary ID url = 'https://signor.uniroma2.it/relation_result.php?id=%s' % db_id elif db_name == 'SOFIA': url = 'http://cs.cmu.edu/sofia/%s' % db_id elif db_name == 'CHEMBL': if not db_id.startswith('CHEMBL'): db_id = 'CHEMBL%s' % db_id url = identifiers_url + 'chembl.compound/%s' % db_id elif db_name == 'NONCODE': url = 'http://www.noncode.org/show_gene.php?id=NONHSAG%s' % db_id elif db_name == 'TEXT': return None else: logger.warning('Unhandled name space %s' % db_name) url = None return url
[ "def", "get_identifiers_url", "(", "db_name", ",", "db_id", ")", ":", "identifiers_url", "=", "'http://identifiers.org/'", "bel_scai_url", "=", "'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'", "if", "db_name", "==", "'UP'", ":", "url", "=", "identifiers_url", "+", "'uniprot/%s'", "%", "db_id", "elif", "db_name", "==", "'HGNC'", ":", "url", "=", "identifiers_url", "+", "'hgnc/HGNC:%s'", "%", "db_id", "elif", "db_name", "==", "'IP'", ":", "url", "=", "identifiers_url", "+", "'interpro/%s'", "%", "db_id", "elif", "db_name", "==", "'IPR'", ":", "url", "=", "identifiers_url", "+", "'interpro/%s'", "%", "db_id", "elif", "db_name", "==", "'CHEBI'", ":", "url", "=", "identifiers_url", "+", "'chebi/%s'", "%", "db_id", "elif", "db_name", "==", "'NCIT'", ":", "url", "=", "identifiers_url", "+", "'ncit/%s'", "%", "db_id", "elif", "db_name", "==", "'GO'", ":", "if", "db_id", ".", "startswith", "(", "'GO:'", ")", ":", "url", "=", "identifiers_url", "+", "'go/%s'", "%", "db_id", "else", ":", "url", "=", "identifiers_url", "+", "'go/GO:%s'", "%", "db_id", "elif", "db_name", "in", "(", "'PUBCHEM'", ",", "'PCID'", ")", ":", "# Assuming PCID = PubChem compound ID", "if", "db_id", ".", "startswith", "(", "'PUBCHEM:'", ")", ":", "db_id", "=", "db_id", "[", "8", ":", "]", "elif", "db_id", ".", "startswith", "(", "'PCID:'", ")", ":", "db_id", "=", "db_id", "[", "5", ":", "]", "url", "=", "identifiers_url", "+", "'pubchem.compound/%s'", "%", "db_id", "elif", "db_name", "==", "'PF'", ":", "url", "=", "identifiers_url", "+", "'pfam/%s'", "%", "db_id", "elif", "db_name", "==", "'MIRBASEM'", ":", "url", "=", "identifiers_url", "+", "'mirbase.mature/%s'", "%", "db_id", "elif", "db_name", "==", "'MIRBASE'", ":", "url", "=", "identifiers_url", "+", "'mirbase/%s'", "%", "db_id", "elif", "db_name", "==", "'MESH'", ":", "url", "=", "identifiers_url", "+", "'mesh/%s'", "%", "db_id", "elif", "db_name", "==", "'EGID'", ":", "url", "=", "identifiers_url", "+", "'ncbigene/%s'", "%", "db_id", "elif", "db_name", "==", "'HMDB'", ":", "url", "=", "identifiers_url", "+", "'hmdb/%s'", "%", "db_id", "elif", "db_name", "==", "'LINCS'", ":", "if", "db_id", ".", "startswith", "(", "'LSM-'", ")", ":", "# Lincs Small Molecule ID", "url", "=", "identifiers_url", "+", "'lincs.smallmolecule/%s'", "%", "db_id", "elif", "db_id", ".", "startswith", "(", "'LCL-'", ")", ":", "# Lincs Cell Line ID", "url", "=", "identifiers_url", "+", "'lincs.cell/%s'", "%", "db_id", "else", ":", "# Assume LINCS Protein", "url", "=", "identifiers_url", "+", "'lincs.protein/%s'", "%", "db_id", "elif", "db_name", "==", "'HMS-LINCS'", ":", "url", "=", "'http://lincs.hms.harvard.edu/db/sm/%s-101'", "%", "db_id", "# Special cases with no identifiers entry", "elif", "db_name", "==", "'SCHEM'", ":", "url", "=", "bel_scai_url", "+", "'selventa-legacy-chemicals/'", "+", "'selventa-legacy-chemicals-20150601.belns'", "elif", "db_name", "==", "'SCOMP'", ":", "url", "=", "bel_scai_url", "+", "'selventa-named-complexes/'", "+", "'selventa-named-complexes-20150601.belns'", "elif", "db_name", "==", "'SFAM'", ":", "url", "=", "bel_scai_url", "+", "'selventa-protein-families/'", "+", "'selventa-protein-families-20150601.belns'", "elif", "db_name", "==", "'FPLX'", ":", "url", "=", "'http://identifiers.org/fplx/%s'", "%", "db_id", "elif", "db_name", "==", "'LNCRNADB'", ":", "if", "db_id", ".", "startswith", "(", "'ENSG'", ")", ":", "url", "=", "'http://www.lncrnadb.org/search/?q=%s'", "%", "db_id", "else", ":", "# Assmuing HGNC symbol", "url", "=", 
"'http://www.lncrnadb.org/%s/'", "%", "db_id", "elif", "db_name", "==", "'NXPFA'", ":", "url", "=", "'https://www.nextprot.org/term/FA-%s'", "%", "db_id", "elif", "db_name", "in", "(", "'UN'", ",", "'WDI'", ",", "'FAO'", ")", ":", "url", "=", "'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/%s'", "%", "db_id", "elif", "db_name", "==", "'HUME'", ":", "url", "=", "(", "'https://github.com/BBN-E/Hume/blob/master/resource/ontologies/'", "'hume_ontology/%s'", "%", "db_id", ")", "elif", "db_name", "==", "'CWMS'", ":", "url", "=", "'http://trips.ihmc.us/%s'", "%", "db_id", "elif", "db_name", "==", "'SIGNOR'", ":", "# Assuming db_id == Primary ID", "url", "=", "'https://signor.uniroma2.it/relation_result.php?id=%s'", "%", "db_id", "elif", "db_name", "==", "'SOFIA'", ":", "url", "=", "'http://cs.cmu.edu/sofia/%s'", "%", "db_id", "elif", "db_name", "==", "'CHEMBL'", ":", "if", "not", "db_id", ".", "startswith", "(", "'CHEMBL'", ")", ":", "db_id", "=", "'CHEMBL%s'", "%", "db_id", "url", "=", "identifiers_url", "+", "'chembl.compound/%s'", "%", "db_id", "elif", "db_name", "==", "'NONCODE'", ":", "url", "=", "'http://www.noncode.org/show_gene.php?id=NONHSAG%s'", "%", "db_id", "elif", "db_name", "==", "'TEXT'", ":", "return", "None", "else", ":", "logger", ".", "warning", "(", "'Unhandled name space %s'", "%", "db_name", ")", "url", "=", "None", "return", "url" ]
Return an identifiers.org URL for a given database name and ID. Parameters ---------- db_name : str An internal database name: HGNC, UP, CHEBI, etc. db_id : str An identifier in the given database. Returns ------- url : str An identifiers.org URL corresponding to the given database name and ID.
[ "Return", "an", "identifiers", ".", "org", "URL", "for", "a", "given", "database", "name", "and", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/__init__.py#L6-L110
train
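Example calls, each following directly from one of the branches above:

    from indra.databases import get_identifiers_url

    print(get_identifiers_url('HGNC', '6840'))       # http://identifiers.org/hgnc/HGNC:6840
    print(get_identifiers_url('GO', '0006915'))      # http://identifiers.org/go/GO:0006915
    print(get_identifiers_url('CHEMBL', '25'))       # http://identifiers.org/chembl.compound/CHEMBL25
    print(get_identifiers_url('TEXT', 'apoptosis'))  # None: raw text groundings have no URL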
sorgerlab/indra
indra/tools/assemble_corpus.py
dump_statements
def dump_statements(stmts, fname, protocol=4): """Dump a list of statements into a pickle file. Parameters ---------- stmts : list[indra.statements.Statement] The list of statements to dump into the pickle file. fname : str The name of the pickle file to dump statements into. protocol : Optional[int] The pickle protocol to use (use 2 for Python 2 compatibility). Default: 4 """ logger.info('Dumping %d statements into %s...' % (len(stmts), fname)) with open(fname, 'wb') as fh: pickle.dump(stmts, fh, protocol=protocol)
python
def dump_statements(stmts, fname, protocol=4): """Dump a list of statements into a pickle file. Parameters ---------- stmts : list[indra.statements.Statement] The list of statements to dump into the pickle file. fname : str The name of the pickle file to dump statements into. protocol : Optional[int] The pickle protocol to use (use 2 for Python 2 compatibility). Default: 4 """ logger.info('Dumping %d statements into %s...' % (len(stmts), fname)) with open(fname, 'wb') as fh: pickle.dump(stmts, fh, protocol=protocol)
[ "def", "dump_statements", "(", "stmts", ",", "fname", ",", "protocol", "=", "4", ")", ":", "logger", ".", "info", "(", "'Dumping %d statements into %s...'", "%", "(", "len", "(", "stmts", ")", ",", "fname", ")", ")", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "fh", ":", "pickle", ".", "dump", "(", "stmts", ",", "fh", ",", "protocol", "=", "protocol", ")" ]
Dump a list of statements into a pickle file. Parameters ---------- stmts : list[indra.statements.Statement] The list of statements to dump into the pickle file. fname : str The name of the pickle file to dump statements into. protocol : Optional[int] The pickle protocol to use (use 2 for Python 2 compatibility). Default: 4
[ "Dump", "a", "list", "of", "statements", "into", "a", "pickle", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L27-L40
train
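A short usage sketch (the statement constructed here is just an illustration):

    from indra.statements import Agent, Phosphorylation
    from indra.tools import assemble_corpus as ac

    stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
    ac.dump_statements(stmts, 'stmts.pkl')                  # default protocol 4
    ac.dump_statements(stmts, 'stmts_py2.pkl', protocol=2)  # readable from Python 2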
sorgerlab/indra
indra/tools/assemble_corpus.py
load_statements
def load_statements(fname, as_dict=False): """Load statements from a pickle file. Parameters ---------- fname : str The name of the pickle file to load statements from. as_dict : Optional[bool] If True and the pickle file contains a dictionary of statements, it is returned as a dictionary. If False, the statements are always returned in a list. Default: False Returns ------- stmts : list A list or dict of statements that were loaded. """ logger.info('Loading %s...' % fname) with open(fname, 'rb') as fh: # Encoding argument not available in pickle for Python 2 if sys.version_info[0] < 3: stmts = pickle.load(fh) # Encoding argument specified here to enable compatibility with # pickle files created with Python 2 else: stmts = pickle.load(fh, encoding='latin1') if isinstance(stmts, dict): if as_dict: return stmts st = [] for pmid, st_list in stmts.items(): st += st_list stmts = st logger.info('Loaded %d statements' % len(stmts)) return stmts
python
def load_statements(fname, as_dict=False): """Load statements from a pickle file. Parameters ---------- fname : str The name of the pickle file to load statements from. as_dict : Optional[bool] If True and the pickle file contains a dictionary of statements, it is returned as a dictionary. If False, the statements are always returned in a list. Default: False Returns ------- stmts : list A list or dict of statements that were loaded. """ logger.info('Loading %s...' % fname) with open(fname, 'rb') as fh: # Encoding argument not available in pickle for Python 2 if sys.version_info[0] < 3: stmts = pickle.load(fh) # Encoding argument specified here to enable compatibility with # pickle files created with Python 2 else: stmts = pickle.load(fh, encoding='latin1') if isinstance(stmts, dict): if as_dict: return stmts st = [] for pmid, st_list in stmts.items(): st += st_list stmts = st logger.info('Loaded %d statements' % len(stmts)) return stmts
[ "def", "load_statements", "(", "fname", ",", "as_dict", "=", "False", ")", ":", "logger", ".", "info", "(", "'Loading %s...'", "%", "fname", ")", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "fh", ":", "# Encoding argument not available in pickle for Python 2", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "stmts", "=", "pickle", ".", "load", "(", "fh", ")", "# Encoding argument specified here to enable compatibility with", "# pickle files created with Python 2", "else", ":", "stmts", "=", "pickle", ".", "load", "(", "fh", ",", "encoding", "=", "'latin1'", ")", "if", "isinstance", "(", "stmts", ",", "dict", ")", ":", "if", "as_dict", ":", "return", "stmts", "st", "=", "[", "]", "for", "pmid", ",", "st_list", "in", "stmts", ".", "items", "(", ")", ":", "st", "+=", "st_list", "stmts", "=", "st", "logger", ".", "info", "(", "'Loaded %d statements'", "%", "len", "(", "stmts", ")", ")", "return", "stmts" ]
Load statements from a pickle file. Parameters ---------- fname : str The name of the pickle file to load statements from. as_dict : Optional[bool] If True and the pickle file contains a dictionary of statements, it is returned as a dictionary. If False, the statements are always returned in a list. Default: False Returns ------- stmts : list A list or dict of statements that were loaded.
[ "Load", "statements", "from", "a", "pickle", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L43-L78
train
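Usage sketch (file names are placeholders):

    from indra.tools import assemble_corpus as ac

    stmts = ac.load_statements('stmts.pkl')  # always a flat list by default
    # If the pickle holds a dict of statements keyed by PMID, keep that structure:
    stmts_by_pmid = ac.load_statements('stmts_by_pmid.pkl', as_dict=True)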
sorgerlab/indra
indra/tools/assemble_corpus.py
map_grounding
def map_grounding(stmts_in, **kwargs): """Map grounding using the GroundingMapper. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to map. do_rename : Optional[bool] If True, Agents are renamed based on their mapped grounding. grounding_map : Optional[dict] A user supplied grounding map which maps a string to a dictionary of database IDs (in the format used by Agents' db_refs). use_deft : Optional[bool] If True, Deft will be used, if available, for acronym disambiguation. Default: True save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of mapped statements. """ from indra.preassembler.grounding_mapper import GroundingMapper from indra.preassembler.grounding_mapper import gm as grounding_map from indra.preassembler.grounding_mapper import \ default_agent_map as agent_map logger.info('Mapping grounding on %d statements...' % len(stmts_in)) do_rename = kwargs.get('do_rename') gm = kwargs.get('grounding_map', grounding_map) if do_rename is None: do_rename = True gm = GroundingMapper(gm, agent_map, use_deft=kwargs.get('use_deft', True)) stmts_out = gm.map_agents(stmts_in, do_rename=do_rename) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def map_grounding(stmts_in, **kwargs): """Map grounding using the GroundingMapper. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to map. do_rename : Optional[bool] If True, Agents are renamed based on their mapped grounding. grounding_map : Optional[dict] A user supplied grounding map which maps a string to a dictionary of database IDs (in the format used by Agents' db_refs). use_deft : Optional[bool] If True, Deft will be used, if available, for acronym disambiguation. Default: True save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of mapped statements. """ from indra.preassembler.grounding_mapper import GroundingMapper from indra.preassembler.grounding_mapper import gm as grounding_map from indra.preassembler.grounding_mapper import \ default_agent_map as agent_map logger.info('Mapping grounding on %d statements...' % len(stmts_in)) do_rename = kwargs.get('do_rename') gm = kwargs.get('grounding_map', grounding_map) if do_rename is None: do_rename = True gm = GroundingMapper(gm, agent_map, use_deft=kwargs.get('use_deft', True)) stmts_out = gm.map_agents(stmts_in, do_rename=do_rename) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "map_grounding", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "from", "indra", ".", "preassembler", ".", "grounding_mapper", "import", "GroundingMapper", "from", "indra", ".", "preassembler", ".", "grounding_mapper", "import", "gm", "as", "grounding_map", "from", "indra", ".", "preassembler", ".", "grounding_mapper", "import", "default_agent_map", "as", "agent_map", "logger", ".", "info", "(", "'Mapping grounding on %d statements...'", "%", "len", "(", "stmts_in", ")", ")", "do_rename", "=", "kwargs", ".", "get", "(", "'do_rename'", ")", "gm", "=", "kwargs", ".", "get", "(", "'grounding_map'", ",", "grounding_map", ")", "if", "do_rename", "is", "None", ":", "do_rename", "=", "True", "gm", "=", "GroundingMapper", "(", "gm", ",", "agent_map", ",", "use_deft", "=", "kwargs", ".", "get", "(", "'use_deft'", ",", "True", ")", ")", "stmts_out", "=", "gm", ".", "map_agents", "(", "stmts_in", ",", "do_rename", "=", "do_rename", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Map grounding using the GroundingMapper. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to map. do_rename : Optional[bool] If True, Agents are renamed based on their mapped grounding. grounding_map : Optional[dict] A user supplied grounding map which maps a string to a dictionary of database IDs (in the format used by Agents' db_refs). use_deft : Optional[bool] If True, Deft will be used, if available, for acronym disambiguation. Default: True save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of mapped statements.
[ "Map", "grounding", "using", "the", "GroundingMapper", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L81-L119
train
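A usage sketch; the custom grounding map entry is illustrative and follows the db_refs format described in the docstring:

    from indra.tools import assemble_corpus as ac

    stmts = ac.load_statements('raw_stmts.pkl')
    mapped = ac.map_grounding(stmts, save='grounded_stmts.pkl')
    # With a user-supplied map and Deft-based disambiguation turned off:
    mapped = ac.map_grounding(stmts, grounding_map={'ERK': {'FPLX': 'ERK'}}, do_rename=True, use_deft=False)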
sorgerlab/indra
indra/tools/assemble_corpus.py
merge_groundings
def merge_groundings(stmts_in): """Gather and merge original grounding information from evidences. Each Statement's evidences are traversed to find original grounding information. These groundings are then merged into an overall consensus grounding dict with as much detail as possible. The current implementation is only applicable to Statements whose concept/agent roles are fixed. Complexes, Associations and Conversions cannot be handled correctly. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of INDRA Statements whose groundings should be merged. These Statements are meant to have been preassembled and potentially have multiple pieces of evidence. Returns ------- stmts_out : list[indra.statements.Statement] The list of Statements now with groundings merged at the Statement level. """ def surface_grounding(stmt): # Find the "best" grounding for a given concept and its evidences # and surface that for idx, concept in enumerate(stmt.agent_list()): if concept is None: continue aggregate_groundings = {} for ev in stmt.evidence: if 'agents' in ev.annotations: groundings = ev.annotations['agents']['raw_grounding'][idx] for ns, value in groundings.items(): if ns not in aggregate_groundings: aggregate_groundings[ns] = [] if isinstance(value, list): aggregate_groundings[ns] += value else: aggregate_groundings[ns].append(value) best_groundings = get_best_groundings(aggregate_groundings) concept.db_refs = best_groundings def get_best_groundings(aggregate_groundings): best_groundings = {} for ns, values in aggregate_groundings.items(): # There are 3 possibilities here # 1. All the entries in the list are scored in which case we # get unique entries and sort them by score if all([isinstance(v, (tuple, list)) for v in values]): best_groundings[ns] = [] for unique_value in {v[0] for v in values}: scores = [v[1] for v in values if v[0] == unique_value] best_groundings[ns].append((unique_value, max(scores))) best_groundings[ns] = \ sorted(best_groundings[ns], key=lambda x: x[1], reverse=True) # 2. All the entries in the list are unscored in which case we # get the highest frequency entry elif all([not isinstance(v, (tuple, list)) for v in values]): best_groundings[ns] = max(set(values), key=values.count) # 3. There is a mixture, which can happen when some entries were # mapped with scores and others had no scores to begin with. # In this case, we again pick the highest frequency non-scored # entry assuming that the unmapped version is more reliable. else: unscored_vals = [v for v in values if not isinstance(v, (tuple, list))] best_groundings[ns] = max(set(unscored_vals), key=unscored_vals.count) return best_groundings stmts_out = [] for stmt in stmts_in: if not isinstance(stmt, (Complex, Conversion)): surface_grounding(stmt) stmts_out.append(stmt) return stmts_out
python
def merge_groundings(stmts_in): """Gather and merge original grounding information from evidences. Each Statement's evidences are traversed to find original grounding information. These groundings are then merged into an overall consensus grounding dict with as much detail as possible. The current implementation is only applicable to Statements whose concept/agent roles are fixed. Complexes, Associations and Conversions cannot be handled correctly. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of INDRA Statements whose groundings should be merged. These Statements are meant to have been preassembled and potentially have multiple pieces of evidence. Returns ------- stmts_out : list[indra.statements.Statement] The list of Statements now with groundings merged at the Statement level. """ def surface_grounding(stmt): # Find the "best" grounding for a given concept and its evidences # and surface that for idx, concept in enumerate(stmt.agent_list()): if concept is None: continue aggregate_groundings = {} for ev in stmt.evidence: if 'agents' in ev.annotations: groundings = ev.annotations['agents']['raw_grounding'][idx] for ns, value in groundings.items(): if ns not in aggregate_groundings: aggregate_groundings[ns] = [] if isinstance(value, list): aggregate_groundings[ns] += value else: aggregate_groundings[ns].append(value) best_groundings = get_best_groundings(aggregate_groundings) concept.db_refs = best_groundings def get_best_groundings(aggregate_groundings): best_groundings = {} for ns, values in aggregate_groundings.items(): # There are 3 possibilities here # 1. All the entries in the list are scored in which case we # get unique entries and sort them by score if all([isinstance(v, (tuple, list)) for v in values]): best_groundings[ns] = [] for unique_value in {v[0] for v in values}: scores = [v[1] for v in values if v[0] == unique_value] best_groundings[ns].append((unique_value, max(scores))) best_groundings[ns] = \ sorted(best_groundings[ns], key=lambda x: x[1], reverse=True) # 2. All the entries in the list are unscored in which case we # get the highest frequency entry elif all([not isinstance(v, (tuple, list)) for v in values]): best_groundings[ns] = max(set(values), key=values.count) # 3. There is a mixture, which can happen when some entries were # mapped with scores and others had no scores to begin with. # In this case, we again pick the highest frequency non-scored # entry assuming that the unmapped version is more reliable. else: unscored_vals = [v for v in values if not isinstance(v, (tuple, list))] best_groundings[ns] = max(set(unscored_vals), key=unscored_vals.count) return best_groundings stmts_out = [] for stmt in stmts_in: if not isinstance(stmt, (Complex, Conversion)): surface_grounding(stmt) stmts_out.append(stmt) return stmts_out
[ "def", "merge_groundings", "(", "stmts_in", ")", ":", "def", "surface_grounding", "(", "stmt", ")", ":", "# Find the \"best\" grounding for a given concept and its evidences", "# and surface that", "for", "idx", ",", "concept", "in", "enumerate", "(", "stmt", ".", "agent_list", "(", ")", ")", ":", "if", "concept", "is", "None", ":", "continue", "aggregate_groundings", "=", "{", "}", "for", "ev", "in", "stmt", ".", "evidence", ":", "if", "'agents'", "in", "ev", ".", "annotations", ":", "groundings", "=", "ev", ".", "annotations", "[", "'agents'", "]", "[", "'raw_grounding'", "]", "[", "idx", "]", "for", "ns", ",", "value", "in", "groundings", ".", "items", "(", ")", ":", "if", "ns", "not", "in", "aggregate_groundings", ":", "aggregate_groundings", "[", "ns", "]", "=", "[", "]", "if", "isinstance", "(", "value", ",", "list", ")", ":", "aggregate_groundings", "[", "ns", "]", "+=", "value", "else", ":", "aggregate_groundings", "[", "ns", "]", ".", "append", "(", "value", ")", "best_groundings", "=", "get_best_groundings", "(", "aggregate_groundings", ")", "concept", ".", "db_refs", "=", "best_groundings", "def", "get_best_groundings", "(", "aggregate_groundings", ")", ":", "best_groundings", "=", "{", "}", "for", "ns", ",", "values", "in", "aggregate_groundings", ".", "items", "(", ")", ":", "# There are 3 possibilities here", "# 1. All the entries in the list are scored in which case we", "# get unique entries and sort them by score", "if", "all", "(", "[", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ")", ")", "for", "v", "in", "values", "]", ")", ":", "best_groundings", "[", "ns", "]", "=", "[", "]", "for", "unique_value", "in", "{", "v", "[", "0", "]", "for", "v", "in", "values", "}", ":", "scores", "=", "[", "v", "[", "1", "]", "for", "v", "in", "values", "if", "v", "[", "0", "]", "==", "unique_value", "]", "best_groundings", "[", "ns", "]", ".", "append", "(", "(", "unique_value", ",", "max", "(", "scores", ")", ")", ")", "best_groundings", "[", "ns", "]", "=", "sorted", "(", "best_groundings", "[", "ns", "]", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "# 2. All the entries in the list are unscored in which case we", "# get the highest frequency entry", "elif", "all", "(", "[", "not", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ")", ")", "for", "v", "in", "values", "]", ")", ":", "best_groundings", "[", "ns", "]", "=", "max", "(", "set", "(", "values", ")", ",", "key", "=", "values", ".", "count", ")", "# 3. There is a mixture, which can happen when some entries were", "# mapped with scores and others had no scores to begin with.", "# In this case, we again pick the highest frequency non-scored", "# entry assuming that the unmapped version is more reliable.", "else", ":", "unscored_vals", "=", "[", "v", "for", "v", "in", "values", "if", "not", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ")", ")", "]", "best_groundings", "[", "ns", "]", "=", "max", "(", "set", "(", "unscored_vals", ")", ",", "key", "=", "unscored_vals", ".", "count", ")", "return", "best_groundings", "stmts_out", "=", "[", "]", "for", "stmt", "in", "stmts_in", ":", "if", "not", "isinstance", "(", "stmt", ",", "(", "Complex", ",", "Conversion", ")", ")", ":", "surface_grounding", "(", "stmt", ")", "stmts_out", ".", "append", "(", "stmt", ")", "return", "stmts_out" ]
Gather and merge original grounding information from evidences. Each Statement's evidences are traversed to find original grounding information. These groundings are then merged into an overall consensus grounding dict with as much detail as possible. The current implementation is only applicable to Statements whose concept/agent roles are fixed. Complexes, Associations and Conversions cannot be handled correctly. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of INDRA Statements whose groundings should be merged. These Statements are meant to have been preassembled and potentially have multiple pieces of evidence. Returns ------- stmts_out : list[indra.statements.Statement] The list of Statements now with groundings merged at the Statement level.
[ "Gather", "and", "merge", "original", "grounding", "information", "from", "evidences", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L122-L201
train
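Sketch of a typical call on preassembled statements (merge_groundings reads the raw_grounding entries that upstream processing leaves in each evidence's annotations):

    from indra.tools import assemble_corpus as ac

    stmts = ac.merge_groundings(ac.load_statements('preassembled_stmts.pkl'))
    for stmt in stmts:
        for agent in stmt.agent_list():
            if agent is not None:
                print(agent.name, agent.db_refs)  # merged consensus grounding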
sorgerlab/indra
indra/tools/assemble_corpus.py
merge_deltas
def merge_deltas(stmts_in): """Gather and merge original Influence delta information from evidence. This function is only applicable to Influence Statements that have subj and obj deltas. All other statement types are passed through unchanged. Polarities and adjectives for subjects and objects respectively are collected and merged by traversing all evidences of a Statement. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of INDRA Statements whose influence deltas should be merged. These Statements are meant to have been preassembled and potentially have multiple pieces of evidence. Returns ------- stmts_out : list[indra.statements.Statement] The list of Statements now with deltas merged at the Statement level. """ stmts_out = [] for stmt in stmts_in: # This operation is only applicable to Influences if not isinstance(stmt, Influence): stmts_out.append(stmt) continue # At this point this is guaranteed to be an Influence deltas = {} for role in ('subj', 'obj'): for info in ('polarity', 'adjectives'): key = (role, info) deltas[key] = [] for ev in stmt.evidence: entry = ev.annotations.get('%s_%s' % key) deltas[key].append(entry if entry else None) # POLARITY # For polarity we need to work in pairs polarity_pairs = list(zip(deltas[('subj', 'polarity')], deltas[('obj', 'polarity')])) # If we have some fully defined pairs, we take the most common one both_pols = [pair for pair in polarity_pairs if pair[0] is not None and pair[1] is not None] if both_pols: subj_pol, obj_pol = max(set(both_pols), key=both_pols.count) stmt.subj.delta['polarity'] = subj_pol stmt.obj.delta['polarity'] = obj_pol # Otherwise we prefer the case when at least one entry of the # pair is given else: one_pol = [pair for pair in polarity_pairs if pair[0] is not None or pair[1] is not None] if one_pol: subj_pol, obj_pol = max(set(one_pol), key=one_pol.count) stmt.subj.delta['polarity'] = subj_pol stmt.obj.delta['polarity'] = obj_pol # ADJECTIVES for attr, role in ((stmt.subj.delta, 'subj'), (stmt.obj.delta, 'obj')): all_adjectives = [] for adj in deltas[(role, 'adjectives')]: if isinstance(adj, list): all_adjectives += adj elif adj is not None: all_adjectives.append(adj) attr['adjectives'] = all_adjectives stmts_out.append(stmt) return stmts_out
python
def merge_deltas(stmts_in): """Gather and merge original Influence delta information from evidence. This function is only applicable to Influence Statements that have subj and obj deltas. All other statement types are passed through unchanged. Polarities and adjectives for subjects and objects respectively are collected and merged by traversing all evidences of a Statement. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of INDRA Statements whose influence deltas should be merged. These Statements are meant to have been preassembled and potentially have multiple pieces of evidence. Returns ------- stmts_out : list[indra.statements.Statement] The list of Statements now with deltas merged at the Statement level. """ stmts_out = [] for stmt in stmts_in: # This operation is only applicable to Influences if not isinstance(stmt, Influence): stmts_out.append(stmt) continue # At this point this is guaranteed to be an Influence deltas = {} for role in ('subj', 'obj'): for info in ('polarity', 'adjectives'): key = (role, info) deltas[key] = [] for ev in stmt.evidence: entry = ev.annotations.get('%s_%s' % key) deltas[key].append(entry if entry else None) # POLARITY # For polarity we need to work in pairs polarity_pairs = list(zip(deltas[('subj', 'polarity')], deltas[('obj', 'polarity')])) # If we have some fully defined pairs, we take the most common one both_pols = [pair for pair in polarity_pairs if pair[0] is not None and pair[1] is not None] if both_pols: subj_pol, obj_pol = max(set(both_pols), key=both_pols.count) stmt.subj.delta['polarity'] = subj_pol stmt.obj.delta['polarity'] = obj_pol # Otherwise we prefer the case when at least one entry of the # pair is given else: one_pol = [pair for pair in polarity_pairs if pair[0] is not None or pair[1] is not None] if one_pol: subj_pol, obj_pol = max(set(one_pol), key=one_pol.count) stmt.subj.delta['polarity'] = subj_pol stmt.obj.delta['polarity'] = obj_pol # ADJECTIVES for attr, role in ((stmt.subj.delta, 'subj'), (stmt.obj.delta, 'obj')): all_adjectives = [] for adj in deltas[(role, 'adjectives')]: if isinstance(adj, list): all_adjectives += adj elif adj is not None: all_adjectives.append(adj) attr['adjectives'] = all_adjectives stmts_out.append(stmt) return stmts_out
[ "def", "merge_deltas", "(", "stmts_in", ")", ":", "stmts_out", "=", "[", "]", "for", "stmt", "in", "stmts_in", ":", "# This operation is only applicable to Influences", "if", "not", "isinstance", "(", "stmt", ",", "Influence", ")", ":", "stmts_out", ".", "append", "(", "stmt", ")", "continue", "# At this point this is guaranteed to be an Influence", "deltas", "=", "{", "}", "for", "role", "in", "(", "'subj'", ",", "'obj'", ")", ":", "for", "info", "in", "(", "'polarity'", ",", "'adjectives'", ")", ":", "key", "=", "(", "role", ",", "info", ")", "deltas", "[", "key", "]", "=", "[", "]", "for", "ev", "in", "stmt", ".", "evidence", ":", "entry", "=", "ev", ".", "annotations", ".", "get", "(", "'%s_%s'", "%", "key", ")", "deltas", "[", "key", "]", ".", "append", "(", "entry", "if", "entry", "else", "None", ")", "# POLARITY", "# For polarity we need to work in pairs", "polarity_pairs", "=", "list", "(", "zip", "(", "deltas", "[", "(", "'subj'", ",", "'polarity'", ")", "]", ",", "deltas", "[", "(", "'obj'", ",", "'polarity'", ")", "]", ")", ")", "# If we have some fully defined pairs, we take the most common one", "both_pols", "=", "[", "pair", "for", "pair", "in", "polarity_pairs", "if", "pair", "[", "0", "]", "is", "not", "None", "and", "pair", "[", "1", "]", "is", "not", "None", "]", "if", "both_pols", ":", "subj_pol", ",", "obj_pol", "=", "max", "(", "set", "(", "both_pols", ")", ",", "key", "=", "both_pols", ".", "count", ")", "stmt", ".", "subj", ".", "delta", "[", "'polarity'", "]", "=", "subj_pol", "stmt", ".", "obj", ".", "delta", "[", "'polarity'", "]", "=", "obj_pol", "# Otherwise we prefer the case when at least one entry of the", "# pair is given", "else", ":", "one_pol", "=", "[", "pair", "for", "pair", "in", "polarity_pairs", "if", "pair", "[", "0", "]", "is", "not", "None", "or", "pair", "[", "1", "]", "is", "not", "None", "]", "if", "one_pol", ":", "subj_pol", ",", "obj_pol", "=", "max", "(", "set", "(", "one_pol", ")", ",", "key", "=", "one_pol", ".", "count", ")", "stmt", ".", "subj", ".", "delta", "[", "'polarity'", "]", "=", "subj_pol", "stmt", ".", "obj", ".", "delta", "[", "'polarity'", "]", "=", "obj_pol", "# ADJECTIVES", "for", "attr", ",", "role", "in", "(", "(", "stmt", ".", "subj", ".", "delta", ",", "'subj'", ")", ",", "(", "stmt", ".", "obj", ".", "delta", ",", "'obj'", ")", ")", ":", "all_adjectives", "=", "[", "]", "for", "adj", "in", "deltas", "[", "(", "role", ",", "'adjectives'", ")", "]", ":", "if", "isinstance", "(", "adj", ",", "list", ")", ":", "all_adjectives", "+=", "adj", "elif", "adj", "is", "not", "None", ":", "all_adjectives", ".", "append", "(", "adj", ")", "attr", "[", "'adjectives'", "]", "=", "all_adjectives", "stmts_out", ".", "append", "(", "stmt", ")", "return", "stmts_out" ]
Gather and merge original Influence delta information from evidence. This function is only applicable to Influence Statements that have subj and obj deltas. All other statement types are passed through unchanged. Polarities and adjectives for subjects and objects respectively are collected and merged by traversing all evidences of a Statement. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of INDRA Statements whose influence deltas should be merged. These Statements are meant to have been preassembled and potentially have multiple pieces of evidence. Returns ------- stmts_out : list[indra.statements.Statement] The list of Statements now with deltas merged at the Statement level.
[ "Gather", "and", "merge", "original", "Influence", "delta", "information", "from", "evidence", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L204-L272
train
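Usage sketch; per the code above, subj.delta and obj.delta are plain dicts at this revision:

    from indra.statements import Influence
    from indra.tools import assemble_corpus as ac

    stmts = ac.merge_deltas(ac.load_statements('influence_stmts.pkl'))
    for stmt in stmts:
        if isinstance(stmt, Influence):  # other statement types pass through unchanged
            print(stmt.subj.delta.get('polarity'), stmt.obj.delta.get('adjectives'))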
sorgerlab/indra
indra/tools/assemble_corpus.py
map_sequence
def map_sequence(stmts_in, **kwargs): """Map sequences using the SiteMapper. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to map. do_methionine_offset : boolean Whether to check for off-by-one errors in site position (possibly) attributable to site numbering from mature proteins after cleavage of the initial methionine. If True, checks the reference sequence for a known modification at 1 site position greater than the given one; if there exists such a site, creates the mapping. Default is True. do_orthology_mapping : boolean Whether to check sequence positions for known modification sites in mouse or rat sequences (based on PhosphoSitePlus data). If a mouse/rat site is found that is linked to a site in the human reference sequence, a mapping is created. Default is True. do_isoform_mapping : boolean Whether to check sequence positions for known modifications in other human isoforms of the protein (based on PhosphoSitePlus data). If a site is found that is linked to a site in the human reference sequence, a mapping is created. Default is True. use_cache : boolean If True, a cache will be created/used from the location specified by SITEMAPPER_CACHE_PATH, defined in your INDRA config or the environment. If False, no cache is used. For more details on the cache, see the SiteMapper class definition. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of mapped statements. """ from indra.preassembler.sitemapper import SiteMapper, default_site_map logger.info('Mapping sites on %d statements...' % len(stmts_in)) kwarg_list = ['do_methionine_offset', 'do_orthology_mapping', 'do_isoform_mapping'] sm = SiteMapper(default_site_map, use_cache=kwargs.pop('use_cache', False), **_filter(kwargs, kwarg_list)) valid, mapped = sm.map_sites(stmts_in) correctly_mapped_stmts = [] for ms in mapped: correctly_mapped = all([mm.has_mapping() for mm in ms.mapped_mods]) if correctly_mapped: correctly_mapped_stmts.append(ms.mapped_stmt) stmts_out = valid + correctly_mapped_stmts logger.info('%d statements with valid sites' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) del sm return stmts_out
python
def map_sequence(stmts_in, **kwargs): """Map sequences using the SiteMapper. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to map. do_methionine_offset : boolean Whether to check for off-by-one errors in site position (possibly) attributable to site numbering from mature proteins after cleavage of the initial methionine. If True, checks the reference sequence for a known modification at 1 site position greater than the given one; if there exists such a site, creates the mapping. Default is True. do_orthology_mapping : boolean Whether to check sequence positions for known modification sites in mouse or rat sequences (based on PhosphoSitePlus data). If a mouse/rat site is found that is linked to a site in the human reference sequence, a mapping is created. Default is True. do_isoform_mapping : boolean Whether to check sequence positions for known modifications in other human isoforms of the protein (based on PhosphoSitePlus data). If a site is found that is linked to a site in the human reference sequence, a mapping is created. Default is True. use_cache : boolean If True, a cache will be created/used from the location specified by SITEMAPPER_CACHE_PATH, defined in your INDRA config or the environment. If False, no cache is used. For more details on the cache, see the SiteMapper class definition. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of mapped statements. """ from indra.preassembler.sitemapper import SiteMapper, default_site_map logger.info('Mapping sites on %d statements...' % len(stmts_in)) kwarg_list = ['do_methionine_offset', 'do_orthology_mapping', 'do_isoform_mapping'] sm = SiteMapper(default_site_map, use_cache=kwargs.pop('use_cache', False), **_filter(kwargs, kwarg_list)) valid, mapped = sm.map_sites(stmts_in) correctly_mapped_stmts = [] for ms in mapped: correctly_mapped = all([mm.has_mapping() for mm in ms.mapped_mods]) if correctly_mapped: correctly_mapped_stmts.append(ms.mapped_stmt) stmts_out = valid + correctly_mapped_stmts logger.info('%d statements with valid sites' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) del sm return stmts_out
[ "def", "map_sequence", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "from", "indra", ".", "preassembler", ".", "sitemapper", "import", "SiteMapper", ",", "default_site_map", "logger", ".", "info", "(", "'Mapping sites on %d statements...'", "%", "len", "(", "stmts_in", ")", ")", "kwarg_list", "=", "[", "'do_methionine_offset'", ",", "'do_orthology_mapping'", ",", "'do_isoform_mapping'", "]", "sm", "=", "SiteMapper", "(", "default_site_map", ",", "use_cache", "=", "kwargs", ".", "pop", "(", "'use_cache'", ",", "False", ")", ",", "*", "*", "_filter", "(", "kwargs", ",", "kwarg_list", ")", ")", "valid", ",", "mapped", "=", "sm", ".", "map_sites", "(", "stmts_in", ")", "correctly_mapped_stmts", "=", "[", "]", "for", "ms", "in", "mapped", ":", "correctly_mapped", "=", "all", "(", "[", "mm", ".", "has_mapping", "(", ")", "for", "mm", "in", "ms", ".", "mapped_mods", "]", ")", "if", "correctly_mapped", ":", "correctly_mapped_stmts", ".", "append", "(", "ms", ".", "mapped_stmt", ")", "stmts_out", "=", "valid", "+", "correctly_mapped_stmts", "logger", ".", "info", "(", "'%d statements with valid sites'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "del", "sm", "return", "stmts_out" ]
Map sequences using the SiteMapper. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to map. do_methionine_offset : boolean Whether to check for off-by-one errors in site position (possibly) attributable to site numbering from mature proteins after cleavage of the initial methionine. If True, checks the reference sequence for a known modification at 1 site position greater than the given one; if there exists such a site, creates the mapping. Default is True. do_orthology_mapping : boolean Whether to check sequence positions for known modification sites in mouse or rat sequences (based on PhosphoSitePlus data). If a mouse/rat site is found that is linked to a site in the human reference sequence, a mapping is created. Default is True. do_isoform_mapping : boolean Whether to check sequence positions for known modifications in other human isoforms of the protein (based on PhosphoSitePlus data). If a site is found that is linked to a site in the human reference sequence, a mapping is created. Default is True. use_cache : boolean If True, a cache will be created/used from the location specified by SITEMAPPER_CACHE_PATH, defined in your INDRA config or the environment. If False, no cache is used. For more details on the cache, see the SiteMapper class definition. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of mapped statements.
[ "Map", "sequences", "using", "the", "SiteMapper", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L275-L331
train
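Usage sketch with the mapping options spelled out (all three mapping flags default to True):

    from indra.tools import assemble_corpus as ac

    stmts = ac.load_statements('grounded_stmts.pkl')
    valid_stmts = ac.map_sequence(stmts, do_methionine_offset=True, do_orthology_mapping=True, do_isoform_mapping=True, use_cache=False, save='site_mapped_stmts.pkl')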
sorgerlab/indra
indra/tools/assemble_corpus.py
run_preassembly
def run_preassembly(stmts_in, **kwargs): """Run preassembly on a list of statements. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to preassemble. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. belief_scorer : Optional[indra.belief.BeliefScorer] Instance of BeliefScorer class to use in calculating Statement probabilities. If None is provided (default), then the default scorer is used. hierarchies : Optional[dict] Dict of hierarchy managers to use for preassembly flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. save_unique : Optional[str] The name of a pickle file to save the unique statements into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements. """ dump_pkl_unique = kwargs.get('save_unique') belief_scorer = kwargs.get('belief_scorer') use_hierarchies = kwargs['hierarchies'] if 'hierarchies' in kwargs else \ hierarchies be = BeliefEngine(scorer=belief_scorer) pa = Preassembler(use_hierarchies, stmts_in) run_preassembly_duplicate(pa, be, save=dump_pkl_unique) dump_pkl = kwargs.get('save') return_toplevel = kwargs.get('return_toplevel', True) poolsize = kwargs.get('poolsize', None) size_cutoff = kwargs.get('size_cutoff', 100) options = {'save': dump_pkl, 'return_toplevel': return_toplevel, 'poolsize': poolsize, 'size_cutoff': size_cutoff, 'flatten_evidence': kwargs.get('flatten_evidence', False), 'flatten_evidence_collect_from': kwargs.get('flatten_evidence_collect_from', 'supported_by') } stmts_out = run_preassembly_related(pa, be, **options) return stmts_out
python
def run_preassembly(stmts_in, **kwargs): """Run preassembly on a list of statements. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to preassemble. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. belief_scorer : Optional[indra.belief.BeliefScorer] Instance of BeliefScorer class to use in calculating Statement probabilities. If None is provided (default), then the default scorer is used. hierarchies : Optional[dict] Dict of hierarchy managers to use for preassembly flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. save_unique : Optional[str] The name of a pickle file to save the unique statements into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements. """ dump_pkl_unique = kwargs.get('save_unique') belief_scorer = kwargs.get('belief_scorer') use_hierarchies = kwargs['hierarchies'] if 'hierarchies' in kwargs else \ hierarchies be = BeliefEngine(scorer=belief_scorer) pa = Preassembler(use_hierarchies, stmts_in) run_preassembly_duplicate(pa, be, save=dump_pkl_unique) dump_pkl = kwargs.get('save') return_toplevel = kwargs.get('return_toplevel', True) poolsize = kwargs.get('poolsize', None) size_cutoff = kwargs.get('size_cutoff', 100) options = {'save': dump_pkl, 'return_toplevel': return_toplevel, 'poolsize': poolsize, 'size_cutoff': size_cutoff, 'flatten_evidence': kwargs.get('flatten_evidence', False), 'flatten_evidence_collect_from': kwargs.get('flatten_evidence_collect_from', 'supported_by') } stmts_out = run_preassembly_related(pa, be, **options) return stmts_out
[ "def", "run_preassembly", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "dump_pkl_unique", "=", "kwargs", ".", "get", "(", "'save_unique'", ")", "belief_scorer", "=", "kwargs", ".", "get", "(", "'belief_scorer'", ")", "use_hierarchies", "=", "kwargs", "[", "'hierarchies'", "]", "if", "'hierarchies'", "in", "kwargs", "else", "hierarchies", "be", "=", "BeliefEngine", "(", "scorer", "=", "belief_scorer", ")", "pa", "=", "Preassembler", "(", "hierarchies", ",", "stmts_in", ")", "run_preassembly_duplicate", "(", "pa", ",", "be", ",", "save", "=", "dump_pkl_unique", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "return_toplevel", "=", "kwargs", ".", "get", "(", "'return_toplevel'", ",", "True", ")", "poolsize", "=", "kwargs", ".", "get", "(", "'poolsize'", ",", "None", ")", "size_cutoff", "=", "kwargs", ".", "get", "(", "'size_cutoff'", ",", "100", ")", "options", "=", "{", "'save'", ":", "dump_pkl", ",", "'return_toplevel'", ":", "return_toplevel", ",", "'poolsize'", ":", "poolsize", ",", "'size_cutoff'", ":", "size_cutoff", ",", "'flatten_evidence'", ":", "kwargs", ".", "get", "(", "'flatten_evidence'", ",", "False", ")", ",", "'flatten_evidence_collect_from'", ":", "kwargs", ".", "get", "(", "'flatten_evidence_collect_from'", ",", "'supported_by'", ")", "}", "stmts_out", "=", "run_preassembly_related", "(", "pa", ",", "be", ",", "*", "*", "options", ")", "return", "stmts_out" ]
Run preassembly on a list of statements. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to preassemble. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. belief_scorer : Optional[indra.belief.BeliefScorer] Instance of BeliefScorer class to use in calculating Statement probabilities. If None is provided (default), then the default scorer is used. hierarchies : Optional[dict] Dict of hierarchy managers to use for preassembly flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. save_unique : Optional[str] The name of a pickle file to save the unique statements into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements.
[ "Run", "preassembly", "on", "a", "list", "of", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L334-L398
train
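For illustration, a minimal usage sketch of run_preassembly; it assumes INDRA and its dependencies are installed, and the two phosphorylation statements are made-up inputs:

import indra.tools.assemble_corpus as ac
from indra.statements import Agent, Phosphorylation, Evidence

# Two overlapping statements: the second is a site-specific refinement of the first
stmts = [
    Phosphorylation(Agent('MAP2K1', db_refs={'HGNC': '6840'}),
                    Agent('MAPK1', db_refs={'HGNC': '6871'}),
                    evidence=[Evidence(source_api='reach')]),
    Phosphorylation(Agent('MAP2K1', db_refs={'HGNC': '6840'}),
                    Agent('MAPK1', db_refs={'HGNC': '6871'}),
                    'T', '185',
                    evidence=[Evidence(source_api='sparser')]),
]
# Deduplicates, links the generic statement as support for the specific one,
# and returns only the top-level (most specific) statements
top_level = ac.run_preassembly(stmts, return_toplevel=True)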
sorgerlab/indra
indra/tools/assemble_corpus.py
run_preassembly_duplicate
def run_preassembly_duplicate(preassembler, beliefengine, **kwargs): """Run deduplication stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of unique statements. """ logger.info('Combining duplicates on %d statements...' % len(preassembler.stmts)) dump_pkl = kwargs.get('save') stmts_out = preassembler.combine_duplicates() beliefengine.set_prior_probs(stmts_out) logger.info('%d unique statements' % len(stmts_out)) if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def run_preassembly_duplicate(preassembler, beliefengine, **kwargs): """Run deduplication stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of unique statements. """ logger.info('Combining duplicates on %d statements...' % len(preassembler.stmts)) dump_pkl = kwargs.get('save') stmts_out = preassembler.combine_duplicates() beliefengine.set_prior_probs(stmts_out) logger.info('%d unique statements' % len(stmts_out)) if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "run_preassembly_duplicate", "(", "preassembler", ",", "beliefengine", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Combining duplicates on %d statements...'", "%", "len", "(", "preassembler", ".", "stmts", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "stmts_out", "=", "preassembler", ".", "combine_duplicates", "(", ")", "beliefengine", ".", "set_prior_probs", "(", "stmts_out", ")", "logger", ".", "info", "(", "'%d unique statements'", "%", "len", "(", "stmts_out", ")", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Run deduplication stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of unique statements.
[ "Run", "deduplication", "stage", "of", "preassembly", "on", "a", "list", "of", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L401-L426
train
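A sketch of driving the deduplication stage directly; `stmts` is assumed to be a list of INDRA Statements:

from indra.belief import BeliefEngine
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
import indra.tools.assemble_corpus as ac

pa = Preassembler(hierarchies, stmts)
be = BeliefEngine()  # default belief scorer
# Combines exact duplicates, sets prior beliefs, and optionally pickles the result
unique_stmts = ac.run_preassembly_duplicate(pa, be, save='unique_stmts.pkl')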
sorgerlab/indra
indra/tools/assemble_corpus.py
run_preassembly_related
def run_preassembly_related(preassembler, beliefengine, **kwargs): """Run related stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance which already has a set of unique statements internally. beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements. """ logger.info('Combining related on %d statements...' % len(preassembler.unique_stmts)) return_toplevel = kwargs.get('return_toplevel', True) poolsize = kwargs.get('poolsize', None) size_cutoff = kwargs.get('size_cutoff', 100) stmts_out = preassembler.combine_related(return_toplevel=False, poolsize=poolsize, size_cutoff=size_cutoff) # Calculate beliefs beliefengine.set_hierarchy_probs(stmts_out) # Flatten evidence if needed do_flatten_evidence = kwargs.get('flatten_evidence', False) if do_flatten_evidence: flatten_evidences_collect_from = \ kwargs.get('flatten_evidence_collect_from', 'supported_by') stmts_out = flatten_evidence(stmts_out, flatten_evidences_collect_from) # Filter to top if needed stmts_top = filter_top_level(stmts_out) if return_toplevel: stmts_out = stmts_top logger.info('%d top-level statements' % len(stmts_out)) else: logger.info('%d statements out of which %d are top-level' % (len(stmts_out), len(stmts_top))) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def run_preassembly_related(preassembler, beliefengine, **kwargs): """Run related stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance which already has a set of unique statements internally. beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements. """ logger.info('Combining related on %d statements...' % len(preassembler.unique_stmts)) return_toplevel = kwargs.get('return_toplevel', True) poolsize = kwargs.get('poolsize', None) size_cutoff = kwargs.get('size_cutoff', 100) stmts_out = preassembler.combine_related(return_toplevel=False, poolsize=poolsize, size_cutoff=size_cutoff) # Calculate beliefs beliefengine.set_hierarchy_probs(stmts_out) # Flatten evidence if needed do_flatten_evidence = kwargs.get('flatten_evidence', False) if do_flatten_evidence: flatten_evidences_collect_from = \ kwargs.get('flatten_evidence_collect_from', 'supported_by') stmts_out = flatten_evidence(stmts_out, flatten_evidences_collect_from) # Filter to top if needed stmts_top = filter_top_level(stmts_out) if return_toplevel: stmts_out = stmts_top logger.info('%d top-level statements' % len(stmts_out)) else: logger.info('%d statements out of which %d are top-level' % (len(stmts_out), len(stmts_top))) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "run_preassembly_related", "(", "preassembler", ",", "beliefengine", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Combining related on %d statements...'", "%", "len", "(", "preassembler", ".", "unique_stmts", ")", ")", "return_toplevel", "=", "kwargs", ".", "get", "(", "'return_toplevel'", ",", "True", ")", "poolsize", "=", "kwargs", ".", "get", "(", "'poolsize'", ",", "None", ")", "size_cutoff", "=", "kwargs", ".", "get", "(", "'size_cutoff'", ",", "100", ")", "stmts_out", "=", "preassembler", ".", "combine_related", "(", "return_toplevel", "=", "False", ",", "poolsize", "=", "poolsize", ",", "size_cutoff", "=", "size_cutoff", ")", "# Calculate beliefs", "beliefengine", ".", "set_hierarchy_probs", "(", "stmts_out", ")", "# Flatten evidence if needed", "do_flatten_evidence", "=", "kwargs", ".", "get", "(", "'flatten_evidence'", ",", "False", ")", "if", "do_flatten_evidence", ":", "flatten_evidences_collect_from", "=", "kwargs", ".", "get", "(", "'flatten_evidence_collect_from'", ",", "'supported_by'", ")", "stmts_out", "=", "flatten_evidence", "(", "stmts_out", ",", "flatten_evidences_collect_from", ")", "# Filter to top if needed", "stmts_top", "=", "filter_top_level", "(", "stmts_out", ")", "if", "return_toplevel", ":", "stmts_out", "=", "stmts_top", "logger", ".", "info", "(", "'%d top-level statements'", "%", "len", "(", "stmts_out", ")", ")", "else", ":", "logger", ".", "info", "(", "'%d statements out of which %d are top-level'", "%", "(", "len", "(", "stmts_out", ")", ",", "len", "(", "stmts_top", ")", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Run related stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance which already has a set of unique statements internally. beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements.
[ "Run", "related", "stage", "of", "preassembly", "on", "a", "list", "of", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L429-L499
train
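Continuing the sketch above (pa and be as constructed for the deduplication stage, which must run first so that pa.unique_stmts is populated):

# Builds support links among unique statements, computes hierarchy-aware
# beliefs, optionally flattens evidence upward, and keeps top-level statements
stmts_out = ac.run_preassembly_related(pa, be,
                                       poolsize=4,        # parallelize large groups
                                       size_cutoff=100,
                                       flatten_evidence=True,
                                       return_toplevel=True)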
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_by_type
def filter_by_type(stmts_in, stmt_type, **kwargs): """Filter to a given statement type. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. stmt_type : indra.statements.Statement The class of the statement type to filter for. Example: indra.statements.Modification invert : Optional[bool] If True, the statements that are not of the given type are returned. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ invert = kwargs.get('invert', False) logger.info('Filtering %d statements for type %s%s...' % (len(stmts_in), 'not ' if invert else '', stmt_type.__name__)) if not invert: stmts_out = [st for st in stmts_in if isinstance(st, stmt_type)] else: stmts_out = [st for st in stmts_in if not isinstance(st, stmt_type)] logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def filter_by_type(stmts_in, stmt_type, **kwargs): """Filter to a given statement type. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. stmt_type : indra.statements.Statement The class of the statement type to filter for. Example: indra.statements.Modification invert : Optional[bool] If True, the statements that are not of the given type are returned. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ invert = kwargs.get('invert', False) logger.info('Filtering %d statements for type %s%s...' % (len(stmts_in), 'not ' if invert else '', stmt_type.__name__)) if not invert: stmts_out = [st for st in stmts_in if isinstance(st, stmt_type)] else: stmts_out = [st for st in stmts_in if not isinstance(st, stmt_type)] logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_by_type", "(", "stmts_in", ",", "stmt_type", ",", "*", "*", "kwargs", ")", ":", "invert", "=", "kwargs", ".", "get", "(", "'invert'", ",", "False", ")", "logger", ".", "info", "(", "'Filtering %d statements for type %s%s...'", "%", "(", "len", "(", "stmts_in", ")", ",", "'not '", "if", "invert", "else", "''", ",", "stmt_type", ".", "__name__", ")", ")", "if", "not", "invert", ":", "stmts_out", "=", "[", "st", "for", "st", "in", "stmts_in", "if", "isinstance", "(", "st", ",", "stmt_type", ")", "]", "else", ":", "stmts_out", "=", "[", "st", "for", "st", "in", "stmts_in", "if", "not", "isinstance", "(", "st", ",", "stmt_type", ")", "]", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to a given statement type. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. stmt_type : indra.statements.Statement The class of the statement type to filter for. Example: indra.statements.Modification invert : Optional[bool] If True, the statements that are not of the given type are returned. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Filter", "to", "a", "given", "statement", "type", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L502-L536
train
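A short usage sketch; `stmts` is an assumed list of INDRA Statements:

import indra.tools.assemble_corpus as ac
from indra.statements import Modification, Phosphorylation

phos_stmts = ac.filter_by_type(stmts, Phosphorylation)
# invert=True keeps everything that is *not* of the given type; since
# Phosphorylation subclasses Modification, filtering on the parent class
# removes all modification statements at once
non_mod_stmts = ac.filter_by_type(stmts, Modification, invert=True)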
sorgerlab/indra
indra/tools/assemble_corpus.py
_remove_bound_conditions
def _remove_bound_conditions(agent, keep_criterion): """Removes bound conditions of agent for which keep_criterion is False. Parameters ---------- agent: Agent The agent whose bound conditions we evaluate keep_criterion: function Evaluates keep_criterion(a) for each agent a in a bound condition and, if it evaluates to False, removes that bound condition from agent's bound_conditions """ new_bc = [] for ind in range(len(agent.bound_conditions)): if keep_criterion(agent.bound_conditions[ind].agent): new_bc.append(agent.bound_conditions[ind]) agent.bound_conditions = new_bc
python
def _remove_bound_conditions(agent, keep_criterion): """Removes bound conditions of agent for which keep_criterion is False. Parameters ---------- agent: Agent The agent whose bound conditions we evaluate keep_criterion: function Evaluates keep_criterion(a) for each agent a in a bound condition and, if it evaluates to False, removes that bound condition from agent's bound_conditions """ new_bc = [] for ind in range(len(agent.bound_conditions)): if keep_criterion(agent.bound_conditions[ind].agent): new_bc.append(agent.bound_conditions[ind]) agent.bound_conditions = new_bc
[ "def", "_remove_bound_conditions", "(", "agent", ",", "keep_criterion", ")", ":", "new_bc", "=", "[", "]", "for", "ind", "in", "range", "(", "len", "(", "agent", ".", "bound_conditions", ")", ")", ":", "if", "keep_criterion", "(", "agent", ".", "bound_conditions", "[", "ind", "]", ".", "agent", ")", ":", "new_bc", ".", "append", "(", "agent", ".", "bound_conditions", "[", "ind", "]", ")", "agent", ".", "bound_conditions", "=", "new_bc" ]
Removes bound conditions of agent for which keep_criterion is False. Parameters ---------- agent: Agent The agent whose bound conditions we evaluate keep_criterion: function Evaluates keep_criterion(a) for each agent a in a bound condition and, if it evaluates to False, removes that bound condition from agent's bound_conditions
[ "Removes", "bound", "conditions", "of", "agent", "such", "that", "keep_criterion", "is", "False", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L566-L581
train
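A small sketch of this helper on a hand-built agent (the leading underscore marks it as module-private, so the direct import is for illustration only):

from indra.statements import Agent, BoundCondition
from indra.tools.assemble_corpus import _remove_bound_conditions

egfr = Agent('EGFR', db_refs={'HGNC': '3236'},
             bound_conditions=[BoundCondition(Agent('GRB2', db_refs={'HGNC': '4566'})),
                               BoundCondition(Agent('XXX'))])  # ungrounded partner
# Keep only bound conditions whose agent has an HGNC grounding
_remove_bound_conditions(egfr, lambda a: 'HGNC' in a.db_refs)
# egfr.bound_conditions now contains only the GRB2 condition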
sorgerlab/indra
indra/tools/assemble_corpus.py
_any_bound_condition_fails_criterion
def _any_bound_condition_fails_criterion(agent, criterion): """Returns True if any bound condition fails to meet the specified criterion. Parameters ---------- agent: Agent The agent whose bound conditions we evaluate criterion: function Evaluates criterion(a) for each agent a in a bound condition and returns True if any agent fails to meet the criterion. Returns ------- any_fails: bool True if and only if any of the agents in a bound condition fails to match the specified criterion """ bc_agents = [bc.agent for bc in agent.bound_conditions] for b in bc_agents: if not criterion(b): return True return False
python
def _any_bound_condition_fails_criterion(agent, criterion): """Returns True if any bound condition fails to meet the specified criterion. Parameters ---------- agent: Agent The agent whose bound conditions we evaluate criterion: function Evaluates criterion(a) for each agent a in a bound condition and returns True if any agent fails to meet the criterion. Returns ------- any_fails: bool True if and only if any of the agents in a bound condition fails to match the specified criterion """ bc_agents = [bc.agent for bc in agent.bound_conditions] for b in bc_agents: if not criterion(b): return True return False
[ "def", "_any_bound_condition_fails_criterion", "(", "agent", ",", "criterion", ")", ":", "bc_agents", "=", "[", "bc", ".", "agent", "for", "bc", "in", "agent", ".", "bound_conditions", "]", "for", "b", "in", "bc_agents", ":", "if", "not", "criterion", "(", "b", ")", ":", "return", "True", "return", "False" ]
Returns True if any bound condition fails to meet the specified criterion. Parameters ---------- agent: Agent The agent whose bound conditions we evaluate criterion: function Evaluates criterion(a) for each agent a in a bound condition and returns True if any agent fails to meet the criterion. Returns ------- any_fails: bool True if and only if any of the agents in a bound condition fails to match the specified criterion
[ "Returns", "True", "if", "any", "bound", "condition", "fails", "to", "meet", "the", "specified", "criterion", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L584-L606
train
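The companion check, sketched on the same kind of hand-built agent (again a module-private helper, imported here only for illustration):

from indra.statements import Agent, BoundCondition
from indra.tools.assemble_corpus import _any_bound_condition_fails_criterion

braf = Agent('BRAF', db_refs={'HGNC': '1097'},
             bound_conditions=[BoundCondition(Agent('vemurafenib'))])  # no grounding
# True: the bound vemurafenib agent fails the HGNC-grounding criterion
_any_bound_condition_fails_criterion(braf, lambda a: 'HGNC' in a.db_refs)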
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_grounded_only
def filter_grounded_only(stmts_in, **kwargs): """Filter to statements that have grounded agents. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. score_threshold : Optional[float] If scored groundings are available in a list and the highest score is below this threshold, the Statement is filtered out. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes ungrounded bound conditions from a statement. If false (default), filters out statements with ungrounded bound conditions. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ remove_bound = kwargs.get('remove_bound', False) logger.info('Filtering %d statements for grounded agents...' % len(stmts_in)) stmts_out = [] score_threshold = kwargs.get('score_threshold') for st in stmts_in: grounded = True for agent in st.agent_list(): if agent is not None: criterion = lambda x: _agent_is_grounded(x, score_threshold) if not criterion(agent): grounded = False break if not isinstance(agent, Agent): continue if remove_bound: _remove_bound_conditions(agent, criterion) elif _any_bound_condition_fails_criterion(agent, criterion): grounded = False break if grounded: stmts_out.append(st) logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def filter_grounded_only(stmts_in, **kwargs): """Filter to statements that have grounded agents. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. score_threshold : Optional[float] If scored groundings are available in a list and the highest score is below this threshold, the Statement is filtered out. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes ungrounded bound conditions from a statement. If false (default), filters out statements with ungrounded bound conditions. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ remove_bound = kwargs.get('remove_bound', False) logger.info('Filtering %d statements for grounded agents...' % len(stmts_in)) stmts_out = [] score_threshold = kwargs.get('score_threshold') for st in stmts_in: grounded = True for agent in st.agent_list(): if agent is not None: criterion = lambda x: _agent_is_grounded(x, score_threshold) if not criterion(agent): grounded = False break if not isinstance(agent, Agent): continue if remove_bound: _remove_bound_conditions(agent, criterion) elif _any_bound_condition_fails_criterion(agent, criterion): grounded = False break if grounded: stmts_out.append(st) logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_grounded_only", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "remove_bound", "=", "kwargs", ".", "get", "(", "'remove_bound'", ",", "False", ")", "logger", ".", "info", "(", "'Filtering %d statements for grounded agents...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "]", "score_threshold", "=", "kwargs", ".", "get", "(", "'score_threshold'", ")", "for", "st", "in", "stmts_in", ":", "grounded", "=", "True", "for", "agent", "in", "st", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", ":", "criterion", "=", "lambda", "x", ":", "_agent_is_grounded", "(", "x", ",", "score_threshold", ")", "if", "not", "criterion", "(", "agent", ")", ":", "grounded", "=", "False", "break", "if", "not", "isinstance", "(", "agent", ",", "Agent", ")", ":", "continue", "if", "remove_bound", ":", "_remove_bound_conditions", "(", "agent", ",", "criterion", ")", "elif", "_any_bound_condition_fails_criterion", "(", "agent", ",", "criterion", ")", ":", "grounded", "=", "False", "break", "if", "grounded", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements that have grounded agents. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. score_threshold : Optional[float] If scored groundings are available in a list and the highest score is below this threshold, the Statement is filtered out. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes ungrounded bound conditions from a statement. If false (default), filters out statements with ungrounded bound conditions. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Filter", "to", "statements", "that", "have", "grounded", "agents", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L609-L658
train
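Typical invocations, assuming stmts is a list of INDRA Statements:

import indra.tools.assemble_corpus as ac

grounded = ac.filter_grounded_only(stmts)
# Stricter variant: require scored groundings to reach 0.7, and strip
# (rather than reject on) ungrounded bound conditions
grounded = ac.filter_grounded_only(stmts, score_threshold=0.7, remove_bound=True)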
sorgerlab/indra
indra/tools/assemble_corpus.py
_agent_is_gene
def _agent_is_gene(agent, specific_only): """Returns whether an agent is for a gene. Parameters ---------- agent: Agent The agent to evaluate specific_only : Optional[bool] If True, only elementary genes/proteins evaluate as genes and families will be filtered out. If False, families are also included. Returns ------- is_gene: bool Whether the agent is a gene """ if not specific_only: if not(agent.db_refs.get('HGNC') or \ agent.db_refs.get('UP') or \ agent.db_refs.get('FPLX')): return False else: if not(agent.db_refs.get('HGNC') or \ agent.db_refs.get('UP')): return False return True
python
def _agent_is_gene(agent, specific_only): """Returns whether an agent is for a gene. Parameters ---------- agent: Agent The agent to evaluate specific_only : Optional[bool] If True, only elementary genes/proteins evaluate as genes and families will be filtered out. If False, families are also included. Returns ------- is_gene: bool Whether the agent is a gene """ if not specific_only: if not(agent.db_refs.get('HGNC') or \ agent.db_refs.get('UP') or \ agent.db_refs.get('FPLX')): return False else: if not(agent.db_refs.get('HGNC') or \ agent.db_refs.get('UP')): return False return True
[ "def", "_agent_is_gene", "(", "agent", ",", "specific_only", ")", ":", "if", "not", "specific_only", ":", "if", "not", "(", "agent", ".", "db_refs", ".", "get", "(", "'HGNC'", ")", "or", "agent", ".", "db_refs", ".", "get", "(", "'UP'", ")", "or", "agent", ".", "db_refs", ".", "get", "(", "'FPLX'", ")", ")", ":", "return", "False", "else", ":", "if", "not", "(", "agent", ".", "db_refs", ".", "get", "(", "'HGNC'", ")", "or", "agent", ".", "db_refs", ".", "get", "(", "'UP'", ")", ")", ":", "return", "False", "return", "True" ]
Returns whether an agent is for a gene. Parameters ---------- agent: Agent The agent to evaluate specific_only : Optional[bool] If True, only elementary genes/proteins evaluate as genes and families will be filtered out. If False, families are also included. Returns ------- is_gene: bool Whether the agent is a gene
[ "Returns", "whether", "an", "agent", "is", "for", "a", "gene", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L661-L686
train
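The distinction the specific_only flag draws, sketched on hand-built agents (FPLX 'ERK' is a FamPlex family entry; the helper is module-private, imported only for illustration):

from indra.statements import Agent
from indra.tools.assemble_corpus import _agent_is_gene

mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
erk = Agent('ERK', db_refs={'FPLX': 'ERK'})
_agent_is_gene(mapk1, True)   # True: an elementary gene/protein
_agent_is_gene(erk, True)     # False: a family does not count as specific
_agent_is_gene(erk, False)    # True: families count when specific_only is False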
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_genes_only
def filter_genes_only(stmts_in, **kwargs): """Filter to statements containing genes only. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. specific_only : Optional[bool] If True, only elementary genes/proteins will be kept and families will be filtered out. If False, families are also included in the output. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes bound conditions that are not genes. If false (default), filters out statements with non-gene bound conditions. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ remove_bound = 'remove_bound' in kwargs and kwargs['remove_bound'] specific_only = kwargs.get('specific_only') logger.info('Filtering %d statements for ones containing genes only...' % len(stmts_in)) stmts_out = [] for st in stmts_in: genes_only = True for agent in st.agent_list(): if agent is not None: criterion = lambda a: _agent_is_gene(a, specific_only) if not criterion(agent): genes_only = False break if remove_bound: _remove_bound_conditions(agent, criterion) else: if _any_bound_condition_fails_criterion(agent, criterion): genes_only = False break if genes_only: stmts_out.append(st) logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def filter_genes_only(stmts_in, **kwargs): """Filter to statements containing genes only. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. specific_only : Optional[bool] If True, only elementary genes/proteins will be kept and families will be filtered out. If False, families are also included in the output. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes bound conditions that are not genes. If false (default), filters out statements with non-gene bound conditions. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ remove_bound = 'remove_bound' in kwargs and kwargs['remove_bound'] specific_only = kwargs.get('specific_only') logger.info('Filtering %d statements for ones containing genes only...' % len(stmts_in)) stmts_out = [] for st in stmts_in: genes_only = True for agent in st.agent_list(): if agent is not None: criterion = lambda a: _agent_is_gene(a, specific_only) if not criterion(agent): genes_only = False break if remove_bound: _remove_bound_conditions(agent, criterion) else: if _any_bound_condition_fails_criterion(agent, criterion): genes_only = False break if genes_only: stmts_out.append(st) logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_genes_only", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "remove_bound", "=", "'remove_bound'", "in", "kwargs", "and", "kwargs", "[", "'remove_bound'", "]", "specific_only", "=", "kwargs", ".", "get", "(", "'specific_only'", ")", "logger", ".", "info", "(", "'Filtering %d statements for ones containing genes only...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "genes_only", "=", "True", "for", "agent", "in", "st", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", ":", "criterion", "=", "lambda", "a", ":", "_agent_is_gene", "(", "a", ",", "specific_only", ")", "if", "not", "criterion", "(", "agent", ")", ":", "genes_only", "=", "False", "break", "if", "remove_bound", ":", "_remove_bound_conditions", "(", "agent", ",", "criterion", ")", "else", ":", "if", "_any_bound_condition_fails_criterion", "(", "agent", ",", "criterion", ")", ":", "genes_only", "=", "False", "break", "if", "genes_only", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements containing genes only. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. specific_only : Optional[bool] If True, only elementary genes/proteins will be kept and families will be filtered out. If False, families are also included in the output. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes bound conditions that are not genes. If false (default), filters out statements with non-gene bound conditions. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Filter", "to", "statements", "containing", "genes", "only", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L689-L739
train
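Usage sketch, with stmts an assumed list of INDRA Statements:

import indra.tools.assemble_corpus as ac

gene_stmts = ac.filter_genes_only(stmts)   # genes and FamPlex families
# Drop families too, and strip non-gene bound conditions instead of
# discarding the statements that carry them
specific = ac.filter_genes_only(stmts, specific_only=True, remove_bound=True)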
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_belief
def filter_belief(stmts_in, belief_cutoff, **kwargs): """Filter to statements with belief above a given cutoff. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. belief_cutoff : float Only statements with belief above the belief_cutoff will be returned. Here 0 < belief_cutoff < 1. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ dump_pkl = kwargs.get('save') logger.info('Filtering %d statements to above %f belief' % (len(stmts_in), belief_cutoff)) # The first round of filtering is in the top-level list stmts_out = [] # Now we eliminate supports/supported-by for stmt in stmts_in: if stmt.belief < belief_cutoff: continue stmts_out.append(stmt) supp_by = [] supp = [] for st in stmt.supports: if st.belief >= belief_cutoff: supp.append(st) for st in stmt.supported_by: if st.belief >= belief_cutoff: supp_by.append(st) stmt.supports = supp stmt.supported_by = supp_by logger.info('%d statements after filter...' % len(stmts_out)) if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def filter_belief(stmts_in, belief_cutoff, **kwargs): """Filter to statements with belief above a given cutoff. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. belief_cutoff : float Only statements with belief above the belief_cutoff will be returned. Here 0 < belief_cutoff < 1. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ dump_pkl = kwargs.get('save') logger.info('Filtering %d statements to above %f belief' % (len(stmts_in), belief_cutoff)) # The first round of filtering is in the top-level list stmts_out = [] # Now we eliminate supports/supported-by for stmt in stmts_in: if stmt.belief < belief_cutoff: continue stmts_out.append(stmt) supp_by = [] supp = [] for st in stmt.supports: if st.belief >= belief_cutoff: supp.append(st) for st in stmt.supported_by: if st.belief >= belief_cutoff: supp_by.append(st) stmt.supports = supp stmt.supported_by = supp_by logger.info('%d statements after filter...' % len(stmts_out)) if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_belief", "(", "stmts_in", ",", "belief_cutoff", ",", "*", "*", "kwargs", ")", ":", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "logger", ".", "info", "(", "'Filtering %d statements to above %f belief'", "%", "(", "len", "(", "stmts_in", ")", ",", "belief_cutoff", ")", ")", "# The first round of filtering is in the top-level list", "stmts_out", "=", "[", "]", "# Now we eliminate supports/supported-by", "for", "stmt", "in", "stmts_in", ":", "if", "stmt", ".", "belief", "<", "belief_cutoff", ":", "continue", "stmts_out", ".", "append", "(", "stmt", ")", "supp_by", "=", "[", "]", "supp", "=", "[", "]", "for", "st", "in", "stmt", ".", "supports", ":", "if", "st", ".", "belief", ">=", "belief_cutoff", ":", "supp", ".", "append", "(", "st", ")", "for", "st", "in", "stmt", ".", "supported_by", ":", "if", "st", ".", "belief", ">=", "belief_cutoff", ":", "supp_by", ".", "append", "(", "st", ")", "stmt", ".", "supports", "=", "supp", "stmt", ".", "supported_by", "=", "supp_by", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements with belief above a given cutoff. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. belief_cutoff : float Only statements with belief above the belief_cutoff will be returned. Here 0 < belief_cutoff < 1. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Filter", "to", "statements", "with", "belief", "above", "a", "given", "cutoff", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L742-L783
train
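Usage sketch; note that the supports/supported_by lists of the surviving statements are pruned in place as well:

import indra.tools.assemble_corpus as ac

# Keep only statements whose belief is at least 0.95
believable = ac.filter_belief(stmts, 0.95)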
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_gene_list
def filter_gene_list(stmts_in, gene_list, policy, allow_families=False, **kwargs): """Return statements that contain genes given in a list. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. gene_list : list[str] A list of gene symbols to filter for. policy : str The policy to apply when filtering for the list of genes. "one": keep statements that contain at least one of the list of genes and possibly others not in the list "all": keep statements that only contain genes given in the list allow_families : Optional[bool] Will include statements involving FamPlex families containing one of the genes in the gene list. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes bound conditions that are not genes in the list. If false (default), looks at agents in the bound conditions in addition to those participating in the statement directly when applying the specified policy. invert : Optional[bool] If True, the statements that do not match according to the policy are returned. Default: False Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ invert = kwargs.get('invert', False) remove_bound = kwargs.get('remove_bound', False) if policy not in ('one', 'all'): logger.error('Policy %s is invalid, not applying filter.' % policy) else: genes_str = ', '.join(gene_list) inv_str = 'not ' if invert else '' logger.info(('Filtering %d statements for ones %scontaining "%s" of: ' '%s...') % (len(stmts_in), inv_str, policy, genes_str)) # If we're allowing families, make a list of all FamPlex IDs that # contain members of the gene list, and add them to the filter list filter_list = copy(gene_list) if allow_families: for hgnc_name in gene_list: gene_uri = hierarchies['entity'].get_uri('HGNC', hgnc_name) parents = hierarchies['entity'].get_parents(gene_uri) for par_uri in parents: ns, id = hierarchies['entity'].ns_id_from_uri(par_uri) filter_list.append(id) stmts_out = [] if remove_bound: # If requested, remove agents whose names are not in the list from # all bound conditions if not invert: keep_criterion = lambda a: a.name in filter_list else: keep_criterion = lambda a: a.name not in filter_list for st in stmts_in: for agent in st.agent_list(): _remove_bound_conditions(agent, keep_criterion) if policy == 'one': for st in stmts_in: found_gene = False if not remove_bound: agent_list = st.agent_list_with_bound_condition_agents() else: agent_list = st.agent_list() for agent in agent_list: if agent is not None: if agent.name in filter_list: found_gene = True break if (found_gene and not invert) or (not found_gene and invert): stmts_out.append(st) elif policy == 'all': for st in stmts_in: found_genes = True if not remove_bound: agent_list = st.agent_list_with_bound_condition_agents() else: agent_list = st.agent_list() for agent in agent_list: if agent is not None: if agent.name not in filter_list: found_genes = False break if (found_genes and not invert) or (not found_genes and invert): stmts_out.append(st) else: stmts_out = stmts_in logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def filter_gene_list(stmts_in, gene_list, policy, allow_families=False, **kwargs): """Return statements that contain genes given in a list. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. gene_list : list[str] A list of gene symbols to filter for. policy : str The policy to apply when filtering for the list of genes. "one": keep statements that contain at least one of the list of genes and possibly others not in the list "all": keep statements that only contain genes given in the list allow_families : Optional[bool] Will include statements involving FamPlex families containing one of the genes in the gene list. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes bound conditions that are not genes in the list. If false (default), looks at agents in the bound conditions in addition to those participating in the statement directly when applying the specified policy. invert : Optional[bool] If True, the statements that do not match according to the policy are returned. Default: False Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ invert = kwargs.get('invert', False) remove_bound = kwargs.get('remove_bound', False) if policy not in ('one', 'all'): logger.error('Policy %s is invalid, not applying filter.' % policy) else: genes_str = ', '.join(gene_list) inv_str = 'not ' if invert else '' logger.info(('Filtering %d statements for ones %scontaining "%s" of: ' '%s...') % (len(stmts_in), inv_str, policy, genes_str)) # If we're allowing families, make a list of all FamPlex IDs that # contain members of the gene list, and add them to the filter list filter_list = copy(gene_list) if allow_families: for hgnc_name in gene_list: gene_uri = hierarchies['entity'].get_uri('HGNC', hgnc_name) parents = hierarchies['entity'].get_parents(gene_uri) for par_uri in parents: ns, id = hierarchies['entity'].ns_id_from_uri(par_uri) filter_list.append(id) stmts_out = [] if remove_bound: # If requested, remove agents whose names are not in the list from # all bound conditions if not invert: keep_criterion = lambda a: a.name in filter_list else: keep_criterion = lambda a: a.name not in filter_list for st in stmts_in: for agent in st.agent_list(): _remove_bound_conditions(agent, keep_criterion) if policy == 'one': for st in stmts_in: found_gene = False if not remove_bound: agent_list = st.agent_list_with_bound_condition_agents() else: agent_list = st.agent_list() for agent in agent_list: if agent is not None: if agent.name in filter_list: found_gene = True break if (found_gene and not invert) or (not found_gene and invert): stmts_out.append(st) elif policy == 'all': for st in stmts_in: found_genes = True if not remove_bound: agent_list = st.agent_list_with_bound_condition_agents() else: agent_list = st.agent_list() for agent in agent_list: if agent is not None: if agent.name not in filter_list: found_genes = False break if (found_genes and not invert) or (not found_genes and invert): stmts_out.append(st) else: stmts_out = stmts_in logger.info('%d statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_gene_list", "(", "stmts_in", ",", "gene_list", ",", "policy", ",", "allow_families", "=", "False", ",", "*", "*", "kwargs", ")", ":", "invert", "=", "kwargs", ".", "get", "(", "'invert'", ",", "False", ")", "remove_bound", "=", "kwargs", ".", "get", "(", "'remove_bound'", ",", "False", ")", "if", "policy", "not", "in", "(", "'one'", ",", "'all'", ")", ":", "logger", ".", "error", "(", "'Policy %s is invalid, not applying filter.'", "%", "policy", ")", "else", ":", "genes_str", "=", "', '", ".", "join", "(", "gene_list", ")", "inv_str", "=", "'not '", "if", "invert", "else", "''", "logger", ".", "info", "(", "(", "'Filtering %d statements for ones %scontaining \"%s\" of: '", "'%s...'", ")", "%", "(", "len", "(", "stmts_in", ")", ",", "inv_str", ",", "policy", ",", "genes_str", ")", ")", "# If we're allowing families, make a list of all FamPlex IDs that", "# contain members of the gene list, and add them to the filter list", "filter_list", "=", "copy", "(", "gene_list", ")", "if", "allow_families", ":", "for", "hgnc_name", "in", "gene_list", ":", "gene_uri", "=", "hierarchies", "[", "'entity'", "]", ".", "get_uri", "(", "'HGNC'", ",", "hgnc_name", ")", "parents", "=", "hierarchies", "[", "'entity'", "]", ".", "get_parents", "(", "gene_uri", ")", "for", "par_uri", "in", "parents", ":", "ns", ",", "id", "=", "hierarchies", "[", "'entity'", "]", ".", "ns_id_from_uri", "(", "par_uri", ")", "filter_list", ".", "append", "(", "id", ")", "stmts_out", "=", "[", "]", "if", "remove_bound", ":", "# If requested, remove agents whose names are not in the list from", "# all bound conditions", "if", "not", "invert", ":", "keep_criterion", "=", "lambda", "a", ":", "a", ".", "name", "in", "filter_list", "else", ":", "keep_criterion", "=", "lambda", "a", ":", "a", ".", "name", "not", "in", "filter_list", "for", "st", "in", "stmts_in", ":", "for", "agent", "in", "st", ".", "agent_list", "(", ")", ":", "_remove_bound_conditions", "(", "agent", ",", "keep_criterion", ")", "if", "policy", "==", "'one'", ":", "for", "st", "in", "stmts_in", ":", "found_gene", "=", "False", "if", "not", "remove_bound", ":", "agent_list", "=", "st", ".", "agent_list_with_bound_condition_agents", "(", ")", "else", ":", "agent_list", "=", "st", ".", "agent_list", "(", ")", "for", "agent", "in", "agent_list", ":", "if", "agent", "is", "not", "None", ":", "if", "agent", ".", "name", "in", "filter_list", ":", "found_gene", "=", "True", "break", "if", "(", "found_gene", "and", "not", "invert", ")", "or", "(", "not", "found_gene", "and", "invert", ")", ":", "stmts_out", ".", "append", "(", "st", ")", "elif", "policy", "==", "'all'", ":", "for", "st", "in", "stmts_in", ":", "found_genes", "=", "True", "if", "not", "remove_bound", ":", "agent_list", "=", "st", ".", "agent_list_with_bound_condition_agents", "(", ")", "else", ":", "agent_list", "=", "st", ".", "agent_list", "(", ")", "for", "agent", "in", "agent_list", ":", "if", "agent", "is", "not", "None", ":", "if", "agent", ".", "name", "not", "in", "filter_list", ":", "found_genes", "=", "False", "break", "if", "(", "found_genes", "and", "not", "invert", ")", "or", "(", "not", "found_genes", "and", "invert", ")", ":", "stmts_out", ".", "append", "(", "st", ")", "else", ":", "stmts_out", "=", "stmts_in", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Return statements that contain genes given in a list. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. gene_list : list[str] A list of gene symbols to filter for. policy : str The policy to apply when filtering for the list of genes. "one": keep statements that contain at least one of the list of genes and possibly others not in the list "all": keep statements that only contain genes given in the list allow_families : Optional[bool] Will include statements involving FamPlex families containing one of the genes in the gene list. Default: False save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes bound conditions that are not genes in the list. If false (default), looks at agents in the bound conditions in addition to those participating in the statement directly when applying the specified policy. invert : Optional[bool] If True, the statements that do not match according to the policy are returned. Default: False Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Return", "statements", "that", "contain", "genes", "given", "in", "a", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L786-L890
train
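Usage sketch with illustrative gene symbols:

import indra.tools.assemble_corpus as ac

# Keep statements mentioning at least one of the genes; allow_families also
# admits FamPlex families (e.g. ERK) that contain a listed gene
one = ac.filter_gene_list(stmts, ['MAPK1', 'MAPK3'], 'one', allow_families=True)
# Keep only statements in which every agent is on the list
both = ac.filter_gene_list(stmts, ['MAPK1', 'MAPK3'], 'all')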
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_by_db_refs
def filter_by_db_refs(stmts_in, namespace, values, policy, **kwargs): """Filter to Statements whose agents are grounded to a matching entry. Statements are filtered so that the db_refs entry (of the given namespace) of their Agent/Concept arguments takes a value in the given list of values. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of Statements to filter. namespace : str The namespace in db_refs to which the filter should apply. values : list[str] A list of values in the given namespace to which the filter should apply. policy : str The policy to apply when filtering for the db_refs. "one": keep Statements that contain at least one of the list of db_refs and possibly others not in the list "all": keep Statements that only contain db_refs given in the list save : Optional[str] The name of a pickle file to save the results (stmts_out) into. invert : Optional[bool] If True, the Statements that do not match according to the policy are returned. Default: False match_suffix : Optional[bool] If True, the suffix of the db_refs entry is matched against the list of entries Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered Statements. """ invert = kwargs.get('invert', False) match_suffix = kwargs.get('match_suffix', False) if policy not in ('one', 'all'): logger.error('Policy %s is invalid, not applying filter.' % policy) return else: name_str = ', '.join(values) rev_mod = 'not ' if invert else '' logger.info(('Filtering %d statements for those with %s agents %s' 'grounded to: %s in the %s namespace...') % (len(stmts_in), policy, rev_mod, name_str, namespace)) def meets_criterion(agent): if namespace not in agent.db_refs: return False entry = agent.db_refs[namespace] if isinstance(entry, list): entry = entry[0][0] ret = False # Match suffix or entire entry if match_suffix: if any([entry.endswith(e) for e in values]): ret = True else: if entry in values: ret = True # Invert if needed if invert: return not ret else: return ret enough = all if policy == 'all' else any stmts_out = [s for s in stmts_in if enough([meets_criterion(ag) for ag in s.agent_list() if ag is not None])] logger.info('%d Statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def filter_by_db_refs(stmts_in, namespace, values, policy, **kwargs): """Filter to Statements whose agents are grounded to a matching entry. Statements are filtered so that the db_refs entry (of the given namespace) of their Agent/Concept arguments takes a value in the given list of values. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of Statements to filter. namespace : str The namespace in db_refs to which the filter should apply. values : list[str] A list of values in the given namespace to which the filter should apply. policy : str The policy to apply when filtering for the db_refs. "one": keep Statements that contain at least one of the list of db_refs and possibly others not in the list "all": keep Statements that only contain db_refs given in the list save : Optional[str] The name of a pickle file to save the results (stmts_out) into. invert : Optional[bool] If True, the Statements that do not match according to the policy are returned. Default: False match_suffix : Optional[bool] If True, the suffix of the db_refs entry is matched against the list of entries Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered Statements. """ invert = kwargs.get('invert', False) match_suffix = kwargs.get('match_suffix', False) if policy not in ('one', 'all'): logger.error('Policy %s is invalid, not applying filter.' % policy) return else: name_str = ', '.join(values) rev_mod = 'not ' if invert else '' logger.info(('Filtering %d statements for those with %s agents %s' 'grounded to: %s in the %s namespace...') % (len(stmts_in), policy, rev_mod, name_str, namespace)) def meets_criterion(agent): if namespace not in agent.db_refs: return False entry = agent.db_refs[namespace] if isinstance(entry, list): entry = entry[0][0] ret = False # Match suffix or entire entry if match_suffix: if any([entry.endswith(e) for e in values]): ret = True else: if entry in values: ret = True # Invert if needed if invert: return not ret else: return ret enough = all if policy == 'all' else any stmts_out = [s for s in stmts_in if enough([meets_criterion(ag) for ag in s.agent_list() if ag is not None])] logger.info('%d Statements after filter...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_by_db_refs", "(", "stmts_in", ",", "namespace", ",", "values", ",", "policy", ",", "*", "*", "kwargs", ")", ":", "invert", "=", "kwargs", ".", "get", "(", "'invert'", ",", "False", ")", "match_suffix", "=", "kwargs", ".", "get", "(", "'match_suffix'", ",", "False", ")", "if", "policy", "not", "in", "(", "'one'", ",", "'all'", ")", ":", "logger", ".", "error", "(", "'Policy %s is invalid, not applying filter.'", "%", "policy", ")", "return", "else", ":", "name_str", "=", "', '", ".", "join", "(", "values", ")", "rev_mod", "=", "'not '", "if", "invert", "else", "''", "logger", ".", "info", "(", "(", "'Filtering %d statements for those with %s agents %s'", "'grounded to: %s in the %s namespace...'", ")", "%", "(", "len", "(", "stmts_in", ")", ",", "policy", ",", "rev_mod", ",", "name_str", ",", "namespace", ")", ")", "def", "meets_criterion", "(", "agent", ")", ":", "if", "namespace", "not", "in", "agent", ".", "db_refs", ":", "return", "False", "entry", "=", "agent", ".", "db_refs", "[", "namespace", "]", "if", "isinstance", "(", "entry", ",", "list", ")", ":", "entry", "=", "entry", "[", "0", "]", "[", "0", "]", "ret", "=", "False", "# Match suffix or entire entry", "if", "match_suffix", ":", "if", "any", "(", "[", "entry", ".", "endswith", "(", "e", ")", "for", "e", "in", "values", "]", ")", ":", "ret", "=", "True", "else", ":", "if", "entry", "in", "values", ":", "ret", "=", "True", "# Invert if needed", "if", "invert", ":", "return", "not", "ret", "else", ":", "return", "ret", "enough", "=", "all", "if", "policy", "==", "'all'", "else", "any", "stmts_out", "=", "[", "s", "for", "s", "in", "stmts_in", "if", "enough", "(", "[", "meets_criterion", "(", "ag", ")", "for", "ag", "in", "s", ".", "agent_list", "(", ")", "if", "ag", "is", "not", "None", "]", ")", "]", "logger", ".", "info", "(", "'%d Statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to Statements whose agents are grounded to a matching entry.

Statements are filtered so that the db_refs entry (of the given
namespace) of their Agent/Concept arguments takes a value in the given
list of values.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of Statements to filter.
namespace : str
    The namespace in db_refs to which the filter should apply.
values : list[str]
    A list of values in the given namespace to which the filter
    should apply.
policy : str
    The policy to apply when filtering for the db_refs.
    "one": keep Statements that contain at least one of the list of
    db_refs and possibly others not in the list.
    "all": keep Statements that only contain db_refs given in the list.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.
invert : Optional[bool]
    If True, the Statements that do not match according to the policy
    are returned. Default: False
match_suffix : Optional[bool]
    If True, the suffix of the db_refs entry is matched against the
    list of entries.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered Statements.
[ "Filter", "to", "Statements", "whose", "agents", "are", "grounded", "to", "a", "matching", "entry", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L962-L1039
train
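A minimal usage sketch for this filter, assuming the standard import path indra.tools.assemble_corpus; the agents and HGNC identifiers below are illustrative choices, not values taken from the source:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

# Statements whose agents carry HGNC groundings
braf = Agent('BRAF', db_refs={'HGNC': '1097'})
map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
stmts = [Phosphorylation(braf, map2k1), Phosphorylation(map2k1, mapk1)]

# 'all': keep statements where every agent's HGNC entry is in the list;
# only the BRAF -> MAP2K1 statement survives here
kept = ac.filter_by_db_refs(stmts, 'HGNC', ['1097', '6840'], 'all')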
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_human_only
def filter_human_only(stmts_in, **kwargs):
    """Filter out statements that are grounded, but not to a human gene.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    remove_bound : Optional[bool]
        If true, removes all bound conditions that are grounded but not
        to human genes. If false (default), filters out statements with
        bound conditions that are grounded to non-human genes.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    from indra.databases import uniprot_client
    if 'remove_bound' in kwargs and kwargs['remove_bound']:
        remove_bound = True
    else:
        remove_bound = False

    dump_pkl = kwargs.get('save')
    logger.info('Filtering %d statements for human genes only...' %
                len(stmts_in))
    stmts_out = []

    def criterion(agent):
        upid = agent.db_refs.get('UP')
        if upid and not uniprot_client.is_human(upid):
            return False
        else:
            return True

    for st in stmts_in:
        human_genes = True
        for agent in st.agent_list():
            if agent is not None:
                if not criterion(agent):
                    human_genes = False
                    break
                if remove_bound:
                    _remove_bound_conditions(agent, criterion)
                elif _any_bound_condition_fails_criterion(agent, criterion):
                    human_genes = False
                    break
        if human_genes:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_human_only(stmts_in, **kwargs):
    """Filter out statements that are grounded, but not to a human gene.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    remove_bound : Optional[bool]
        If true, removes all bound conditions that are grounded but not
        to human genes. If false (default), filters out statements with
        bound conditions that are grounded to non-human genes.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    from indra.databases import uniprot_client
    if 'remove_bound' in kwargs and kwargs['remove_bound']:
        remove_bound = True
    else:
        remove_bound = False

    dump_pkl = kwargs.get('save')
    logger.info('Filtering %d statements for human genes only...' %
                len(stmts_in))
    stmts_out = []

    def criterion(agent):
        upid = agent.db_refs.get('UP')
        if upid and not uniprot_client.is_human(upid):
            return False
        else:
            return True

    for st in stmts_in:
        human_genes = True
        for agent in st.agent_list():
            if agent is not None:
                if not criterion(agent):
                    human_genes = False
                    break
                if remove_bound:
                    _remove_bound_conditions(agent, criterion)
                elif _any_bound_condition_fails_criterion(agent, criterion):
                    human_genes = False
                    break
        if human_genes:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_human_only", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "from", "indra", ".", "databases", "import", "uniprot_client", "if", "'remove_bound'", "in", "kwargs", "and", "kwargs", "[", "'remove_bound'", "]", ":", "remove_bound", "=", "True", "else", ":", "remove_bound", "=", "False", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "logger", ".", "info", "(", "'Filtering %d statements for human genes only...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "]", "def", "criterion", "(", "agent", ")", ":", "upid", "=", "agent", ".", "db_refs", ".", "get", "(", "'UP'", ")", "if", "upid", "and", "not", "uniprot_client", ".", "is_human", "(", "upid", ")", ":", "return", "False", "else", ":", "return", "True", "for", "st", "in", "stmts_in", ":", "human_genes", "=", "True", "for", "agent", "in", "st", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", ":", "if", "not", "criterion", "(", "agent", ")", ":", "human_genes", "=", "False", "break", "if", "remove_bound", ":", "_remove_bound_conditions", "(", "agent", ",", "criterion", ")", "elif", "_any_bound_condition_fails_criterion", "(", "agent", ",", "criterion", ")", ":", "human_genes", "=", "False", "break", "if", "human_genes", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter out statements that are grounded, but not to a human gene.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.
remove_bound : Optional[bool]
    If true, removes all bound conditions that are grounded but not
    to human genes. If false (default), filters out statements with
    bound conditions that are grounded to non-human genes.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "out", "statements", "that", "are", "grounded", "but", "not", "to", "a", "human", "gene", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1042-L1097
train
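A short sketch of the human-only filter; the UniProt accessions below (P28482 for human MAPK1, P63085 for mouse Mapk1) are assumed correct, and the filter consults INDRA's UniProt client to decide:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

human_erk = Agent('MAPK1', db_refs={'UP': 'P28482'})   # human
mouse_erk = Agent('Mapk1', db_refs={'UP': 'P63085'})   # mouse
stmts = [Phosphorylation(None, human_erk),
         Phosphorylation(None, mouse_erk)]

# Only the statement with the human UniProt grounding survives
stmts_out = ac.filter_human_only(stmts)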
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_direct
def filter_direct(stmts_in, **kwargs):
    """Filter to statements that are direct interactions

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    def get_is_direct(stmt):
        """Returns true if there is evidence that the statement is a direct
        interaction.

        If any of the evidences associated with the statement
        indicates a direct interaction then we assume the interaction
        is direct. If there is no evidence for the interaction being indirect
        then we default to direct.
        """
        any_indirect = False
        for ev in stmt.evidence:
            if ev.epistemics.get('direct') is True:
                return True
            elif ev.epistemics.get('direct') is False:
                # This guarantees that we have seen at least
                # some evidence that the statement is indirect
                any_indirect = True
        if any_indirect:
            return False
        return True
    logger.info('Filtering %d statements to direct ones...' % len(stmts_in))
    stmts_out = []
    for st in stmts_in:
        if get_is_direct(st):
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_direct(stmts_in, **kwargs):
    """Filter to statements that are direct interactions

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    def get_is_direct(stmt):
        """Returns true if there is evidence that the statement is a direct
        interaction.

        If any of the evidences associated with the statement
        indicates a direct interaction then we assume the interaction
        is direct. If there is no evidence for the interaction being indirect
        then we default to direct.
        """
        any_indirect = False
        for ev in stmt.evidence:
            if ev.epistemics.get('direct') is True:
                return True
            elif ev.epistemics.get('direct') is False:
                # This guarantees that we have seen at least
                # some evidence that the statement is indirect
                any_indirect = True
        if any_indirect:
            return False
        return True
    logger.info('Filtering %d statements to direct ones...' % len(stmts_in))
    stmts_out = []
    for st in stmts_in:
        if get_is_direct(st):
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_direct", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "def", "get_is_direct", "(", "stmt", ")", ":", "\"\"\"Returns true if there is evidence that the statement is a direct\n interaction.\n\n If any of the evidences associated with the statement\n indicates a direct interatcion then we assume the interaction\n is direct. If there is no evidence for the interaction being indirect\n then we default to direct.\n \"\"\"", "any_indirect", "=", "False", "for", "ev", "in", "stmt", ".", "evidence", ":", "if", "ev", ".", "epistemics", ".", "get", "(", "'direct'", ")", "is", "True", ":", "return", "True", "elif", "ev", ".", "epistemics", ".", "get", "(", "'direct'", ")", "is", "False", ":", "# This guarantees that we have seen at least", "# some evidence that the statement is indirect", "any_indirect", "=", "True", "if", "any_indirect", ":", "return", "False", "return", "True", "logger", ".", "info", "(", "'Filtering %d statements to direct ones...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "if", "get_is_direct", "(", "st", ")", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements that are direct interactions

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "to", "statements", "that", "are", "direct", "interactions" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1100-L1144
train
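A hand-built sketch of the direct/indirect logic; in practice the epistemics flags are set by readers such as REACH, but here they are constructed explicitly for illustration:

from indra.statements import Agent, Evidence, Phosphorylation
from indra.tools import assemble_corpus as ac

ev_direct = Evidence(source_api='reach', epistemics={'direct': True})
ev_indirect = Evidence(source_api='reach', epistemics={'direct': False})
st_direct = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'),
                            evidence=[ev_direct])
st_indirect = Phosphorylation(Agent('BRAF'), Agent('MAPK1'),
                              evidence=[ev_indirect])

# Any evidence flagged direct wins; otherwise indirect evidence drops it
stmts_out = ac.filter_direct([st_direct, st_indirect])  # keeps st_direct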
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_no_hypothesis
def filter_no_hypothesis(stmts_in, **kwargs):
    """Filter to statements that are not marked as hypothesis in epistemics.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to no hypothesis...' % len(stmts_in))
    stmts_out = []
    for st in stmts_in:
        all_hypotheses = True
        ev = None
        for ev in st.evidence:
            if not ev.epistemics.get('hypothesis', False):
                all_hypotheses = False
                break
        if ev is None:
            all_hypotheses = False
        if not all_hypotheses:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_no_hypothesis(stmts_in, **kwargs):
    """Filter to statements that are not marked as hypothesis in epistemics.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to no hypothesis...' % len(stmts_in))
    stmts_out = []
    for st in stmts_in:
        all_hypotheses = True
        ev = None
        for ev in st.evidence:
            if not ev.epistemics.get('hypothesis', False):
                all_hypotheses = False
                break
        if ev is None:
            all_hypotheses = False
        if not all_hypotheses:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_no_hypothesis", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Filtering %d statements to no hypothesis...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "all_hypotheses", "=", "True", "ev", "=", "None", "for", "ev", "in", "st", ".", "evidence", ":", "if", "not", "ev", ".", "epistemics", ".", "get", "(", "'hypothesis'", ",", "False", ")", ":", "all_hypotheses", "=", "False", "break", "if", "ev", "is", "None", ":", "all_hypotheses", "=", "False", "if", "not", "all_hypotheses", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements that are not marked as hypothesis in epistemics.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "to", "statements", "that", "are", "not", "marked", "as", "hypothesis", "in", "epistemics", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1147-L1179
train
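A sketch showing that a statement is only dropped when every piece of evidence carries the hypothesis flag; the Evidence objects are hand-made stand-ins for reader output:

from indra.statements import Activation, Agent, Evidence
from indra.tools import assemble_corpus as ac

hyp = Evidence(epistemics={'hypothesis': True})
assertion = Evidence(epistemics={'hypothesis': False})

st_mixed = Activation(Agent('BRAF'), Agent('MAP2K1'),
                      evidence=[hyp, assertion])   # kept
st_hyp = Activation(Agent('BRAF'), Agent('MAPK1'),
                    evidence=[hyp])                # dropped

stmts_out = ac.filter_no_hypothesis([st_mixed, st_hyp])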
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_evidence_source
def filter_evidence_source(stmts_in, source_apis, policy='one', **kwargs):
    """Filter to statements that have evidence from a given set of sources.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    source_apis : list[str]
        A list of sources to filter for. Examples: biopax, bel, reach
    policy : Optional[str]
        If 'one', a statement that has evidence from any of the sources is
        kept. If 'all', only those statements are kept which have evidence
        from all the input sources specified in source_apis.
        If 'none', only those statements are kept that don't have evidence
        from any of the sources specified in source_apis.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to evidence source "%s" of: %s...' %
                (len(stmts_in), policy, ', '.join(source_apis)))
    stmts_out = []
    for st in stmts_in:
        sources = set([ev.source_api for ev in st.evidence])
        if policy == 'one':
            if sources.intersection(source_apis):
                stmts_out.append(st)
        if policy == 'all':
            if sources.intersection(source_apis) == set(source_apis):
                stmts_out.append(st)
        if policy == 'none':
            if not sources.intersection(source_apis):
                stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_evidence_source(stmts_in, source_apis, policy='one', **kwargs):
    """Filter to statements that have evidence from a given set of sources.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    source_apis : list[str]
        A list of sources to filter for. Examples: biopax, bel, reach
    policy : Optional[str]
        If 'one', a statement that has evidence from any of the sources is
        kept. If 'all', only those statements are kept which have evidence
        from all the input sources specified in source_apis.
        If 'none', only those statements are kept that don't have evidence
        from any of the sources specified in source_apis.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to evidence source "%s" of: %s...' %
                (len(stmts_in), policy, ', '.join(source_apis)))
    stmts_out = []
    for st in stmts_in:
        sources = set([ev.source_api for ev in st.evidence])
        if policy == 'one':
            if sources.intersection(source_apis):
                stmts_out.append(st)
        if policy == 'all':
            if sources.intersection(source_apis) == set(source_apis):
                stmts_out.append(st)
        if policy == 'none':
            if not sources.intersection(source_apis):
                stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_evidence_source", "(", "stmts_in", ",", "source_apis", ",", "policy", "=", "'one'", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Filtering %d statements to evidence source \"%s\" of: %s...'", "%", "(", "len", "(", "stmts_in", ")", ",", "policy", ",", "', '", ".", "join", "(", "source_apis", ")", ")", ")", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "sources", "=", "set", "(", "[", "ev", ".", "source_api", "for", "ev", "in", "st", ".", "evidence", "]", ")", "if", "policy", "==", "'one'", ":", "if", "sources", ".", "intersection", "(", "source_apis", ")", ":", "stmts_out", ".", "append", "(", "st", ")", "if", "policy", "==", "'all'", ":", "if", "sources", ".", "intersection", "(", "source_apis", ")", "==", "set", "(", "source_apis", ")", ":", "stmts_out", ".", "append", "(", "st", ")", "if", "policy", "==", "'none'", ":", "if", "not", "sources", ".", "intersection", "(", "source_apis", ")", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements that have evidence from a given set of sources.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
source_apis : list[str]
    A list of sources to filter for. Examples: biopax, bel, reach
policy : Optional[str]
    If 'one', a statement that has evidence from any of the sources is
    kept. If 'all', only those statements are kept which have evidence
    from all the input sources specified in source_apis.
    If 'none', only those statements are kept that don't have evidence
    from any of the sources specified in source_apis.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "to", "statements", "that", "have", "evidence", "from", "a", "given", "set", "of", "sources", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1217-L1258
train
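A usage sketch with hand-built Evidence objects; 'reach', 'sparser' and 'bel' mirror the source_api names INDRA readers and databases use, though any strings would work for this filter:

from indra.statements import Agent, Evidence, Phosphorylation
from indra.tools import assemble_corpus as ac

st = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'),
                     evidence=[Evidence(source_api='reach'),
                               Evidence(source_api='sparser')])

both = ac.filter_evidence_source([st], ['reach', 'sparser'], policy='all')
no_bel = ac.filter_evidence_source([st], ['bel'], policy='none')  # also kept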
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_top_level
def filter_top_level(stmts_in, **kwargs):
    """Filter to statements that are at the top-level of the hierarchy.

    Here, top-level statements correspond to the most specific ones.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements for top-level...' % len(stmts_in))
    stmts_out = [st for st in stmts_in if not st.supports]
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_top_level(stmts_in, **kwargs):
    """Filter to statements that are at the top-level of the hierarchy.

    Here, top-level statements correspond to the most specific ones.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements for top-level...' % len(stmts_in))
    stmts_out = [st for st in stmts_in if not st.supports]
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_top_level", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Filtering %d statements for top-level...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "st", "for", "st", "in", "stmts_in", "if", "not", "st", ".", "supports", "]", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements that are at the top-level of the hierarchy.

Here, top-level statements correspond to the most specific ones.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "to", "statements", "that", "are", "at", "the", "top", "-", "level", "of", "the", "hierarchy", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1261-L1284
train
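A sketch of the supports relation this filter reads; in a real pipeline ac.run_preassembly populates these links, and here the linkage is emulated by hand for illustration:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

generic = Phosphorylation(Agent('RAF'), Agent('MEK'))
specific = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'), 'S', '222')
# After preassembly, a general statement points to the more specific
# ones it supports; emulate that link manually here
generic.supports = [specific]

top_level = ac.filter_top_level([generic, specific])  # keeps specific only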
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_inconsequential_mods
def filter_inconsequential_mods(stmts_in, whitelist=None, **kwargs):
    """Filter out Modifications that modify inconsequential sites

    Inconsequential here means that the site is not mentioned / tested
    in any other statement. In some cases specific sites should be
    preserved, for instance, to be used as readouts in a model.
    In this case, the given sites can be passed in a whitelist.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    whitelist : Optional[dict]
        A whitelist containing agent modification sites whose
        modifications should be preserved even if no other statement
        refers to them. The whitelist parameter is a dictionary in which
        the key is a gene name and the value is a list of tuples of
        (modification_type, residue, position). Example:
        whitelist = {'MAP2K1': [('phosphorylation', 'S', '222')]}
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    if whitelist is None:
        whitelist = {}
    logger.info('Filtering %d statements to remove' % len(stmts_in) +
                ' inconsequential modifications...')
    states_used = whitelist
    for stmt in stmts_in:
        for agent in stmt.agent_list():
            if agent is not None:
                if agent.mods:
                    for mc in agent.mods:
                        mod = (mc.mod_type, mc.residue, mc.position)
                        try:
                            states_used[agent.name].append(mod)
                        except KeyError:
                            states_used[agent.name] = [mod]
    for k, v in states_used.items():
        states_used[k] = list(set(v))
    stmts_out = []
    for stmt in stmts_in:
        skip = False
        if isinstance(stmt, Modification):
            mod_type = modclass_to_modtype[stmt.__class__]
            if isinstance(stmt, RemoveModification):
                mod_type = modtype_to_inverse[mod_type]
            mod = (mod_type, stmt.residue, stmt.position)
            used = states_used.get(stmt.sub.name, [])
            if mod not in used:
                skip = True
        if not skip:
            stmts_out.append(stmt)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_inconsequential_mods(stmts_in, whitelist=None, **kwargs):
    """Filter out Modifications that modify inconsequential sites

    Inconsequential here means that the site is not mentioned / tested
    in any other statement. In some cases specific sites should be
    preserved, for instance, to be used as readouts in a model.
    In this case, the given sites can be passed in a whitelist.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    whitelist : Optional[dict]
        A whitelist containing agent modification sites whose
        modifications should be preserved even if no other statement
        refers to them. The whitelist parameter is a dictionary in which
        the key is a gene name and the value is a list of tuples of
        (modification_type, residue, position). Example:
        whitelist = {'MAP2K1': [('phosphorylation', 'S', '222')]}
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    if whitelist is None:
        whitelist = {}
    logger.info('Filtering %d statements to remove' % len(stmts_in) +
                ' inconsequential modifications...')
    states_used = whitelist
    for stmt in stmts_in:
        for agent in stmt.agent_list():
            if agent is not None:
                if agent.mods:
                    for mc in agent.mods:
                        mod = (mc.mod_type, mc.residue, mc.position)
                        try:
                            states_used[agent.name].append(mod)
                        except KeyError:
                            states_used[agent.name] = [mod]
    for k, v in states_used.items():
        states_used[k] = list(set(v))
    stmts_out = []
    for stmt in stmts_in:
        skip = False
        if isinstance(stmt, Modification):
            mod_type = modclass_to_modtype[stmt.__class__]
            if isinstance(stmt, RemoveModification):
                mod_type = modtype_to_inverse[mod_type]
            mod = (mod_type, stmt.residue, stmt.position)
            used = states_used.get(stmt.sub.name, [])
            if mod not in used:
                skip = True
        if not skip:
            stmts_out.append(stmt)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_inconsequential_mods", "(", "stmts_in", ",", "whitelist", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "whitelist", "is", "None", ":", "whitelist", "=", "{", "}", "logger", ".", "info", "(", "'Filtering %d statements to remove'", "%", "len", "(", "stmts_in", ")", "+", "' inconsequential modifications...'", ")", "states_used", "=", "whitelist", "for", "stmt", "in", "stmts_in", ":", "for", "agent", "in", "stmt", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", ":", "if", "agent", ".", "mods", ":", "for", "mc", "in", "agent", ".", "mods", ":", "mod", "=", "(", "mc", ".", "mod_type", ",", "mc", ".", "residue", ",", "mc", ".", "position", ")", "try", ":", "states_used", "[", "agent", ".", "name", "]", ".", "append", "(", "mod", ")", "except", "KeyError", ":", "states_used", "[", "agent", ".", "name", "]", "=", "[", "mod", "]", "for", "k", ",", "v", "in", "states_used", ".", "items", "(", ")", ":", "states_used", "[", "k", "]", "=", "list", "(", "set", "(", "v", ")", ")", "stmts_out", "=", "[", "]", "for", "stmt", "in", "stmts_in", ":", "skip", "=", "False", "if", "isinstance", "(", "stmt", ",", "Modification", ")", ":", "mod_type", "=", "modclass_to_modtype", "[", "stmt", ".", "__class__", "]", "if", "isinstance", "(", "stmt", ",", "RemoveModification", ")", ":", "mod_type", "=", "modtype_to_inverse", "[", "mod_type", "]", "mod", "=", "(", "mod_type", ",", "stmt", ".", "residue", ",", "stmt", ".", "position", ")", "used", "=", "states_used", ".", "get", "(", "stmt", ".", "sub", ".", "name", ",", "[", "]", ")", "if", "mod", "not", "in", "used", ":", "skip", "=", "True", "if", "not", "skip", ":", "stmts_out", ".", "append", "(", "stmt", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter out Modifications that modify inconsequential sites

Inconsequential here means that the site is not mentioned / tested
in any other statement. In some cases specific sites should be
preserved, for instance, to be used as readouts in a model.
In this case, the given sites can be passed in a whitelist.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
whitelist : Optional[dict]
    A whitelist containing agent modification sites whose modifications
    should be preserved even if no other statement refers to them.
    The whitelist parameter is a dictionary in which the key is a gene
    name and the value is a list of tuples of
    (modification_type, residue, position). Example:
    whitelist = {'MAP2K1': [('phosphorylation', 'S', '222')]}
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "out", "Modifications", "that", "modify", "inconsequential", "sites" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1287-L1348
train
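A sketch of the consequential/inconsequential distinction; because the second statement refers to the S222 phosphosite on MAP2K1, the Phosphorylation producing that site is kept. Without the downstream use it would be dropped:

from indra.statements import (Activation, Agent, ModCondition,
                              Phosphorylation)
from indra.tools import assemble_corpus as ac

# The phosphosite is "used" because a downstream statement refers to it
mek_p = Agent('MAP2K1',
              mods=[ModCondition('phosphorylation', 'S', '222')])
produce_site = Phosphorylation(Agent('RAF1'), Agent('MAP2K1'), 'S', '222')
use_site = Activation(mek_p, Agent('MAPK1'))

stmts_out = ac.filter_inconsequential_mods([produce_site, use_site])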
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_inconsequential_acts
def filter_inconsequential_acts(stmts_in, whitelist=None, **kwargs):
    """Filter out Activations that modify inconsequential activities

    Inconsequential here means that the activity is not mentioned / tested
    in any other statement. In some cases specific activity types should
    be preserved, for instance, to be used as readouts in a model.
    In this case, the given activities can be passed in a whitelist.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    whitelist : Optional[dict]
        A whitelist containing agent activity types which should be
        preserved even if no other statement refers to them.
        The whitelist parameter is a dictionary in which the key is a
        gene name and the value is a list of activity types. Example:
        whitelist = {'MAP2K1': ['kinase']}
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    if whitelist is None:
        whitelist = {}
    logger.info('Filtering %d statements to remove' % len(stmts_in) +
                ' inconsequential activations...')
    states_used = whitelist
    for stmt in stmts_in:
        for agent in stmt.agent_list():
            if agent is not None:
                if agent.activity:
                    act = agent.activity.activity_type
                    try:
                        states_used[agent.name].append(act)
                    except KeyError:
                        states_used[agent.name] = [act]
    for k, v in states_used.items():
        states_used[k] = list(set(v))
    stmts_out = []
    for stmt in stmts_in:
        skip = False
        if isinstance(stmt, RegulateActivity):
            used = states_used.get(stmt.obj.name, [])
            if stmt.obj_activity not in used:
                skip = True
        if not skip:
            stmts_out.append(stmt)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_inconsequential_acts(stmts_in, whitelist=None, **kwargs):
    """Filter out Activations that modify inconsequential activities

    Inconsequential here means that the activity is not mentioned / tested
    in any other statement. In some cases specific activity types should
    be preserved, for instance, to be used as readouts in a model.
    In this case, the given activities can be passed in a whitelist.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    whitelist : Optional[dict]
        A whitelist containing agent activity types which should be
        preserved even if no other statement refers to them.
        The whitelist parameter is a dictionary in which the key is a
        gene name and the value is a list of activity types. Example:
        whitelist = {'MAP2K1': ['kinase']}
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    if whitelist is None:
        whitelist = {}
    logger.info('Filtering %d statements to remove' % len(stmts_in) +
                ' inconsequential activations...')
    states_used = whitelist
    for stmt in stmts_in:
        for agent in stmt.agent_list():
            if agent is not None:
                if agent.activity:
                    act = agent.activity.activity_type
                    try:
                        states_used[agent.name].append(act)
                    except KeyError:
                        states_used[agent.name] = [act]
    for k, v in states_used.items():
        states_used[k] = list(set(v))
    stmts_out = []
    for stmt in stmts_in:
        skip = False
        if isinstance(stmt, RegulateActivity):
            used = states_used.get(stmt.obj.name, [])
            if stmt.obj_activity not in used:
                skip = True
        if not skip:
            stmts_out.append(stmt)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_inconsequential_acts", "(", "stmts_in", ",", "whitelist", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "whitelist", "is", "None", ":", "whitelist", "=", "{", "}", "logger", ".", "info", "(", "'Filtering %d statements to remove'", "%", "len", "(", "stmts_in", ")", "+", "' inconsequential activations...'", ")", "states_used", "=", "whitelist", "for", "stmt", "in", "stmts_in", ":", "for", "agent", "in", "stmt", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", ":", "if", "agent", ".", "activity", ":", "act", "=", "agent", ".", "activity", ".", "activity_type", "try", ":", "states_used", "[", "agent", ".", "name", "]", ".", "append", "(", "act", ")", "except", "KeyError", ":", "states_used", "[", "agent", ".", "name", "]", "=", "[", "act", "]", "for", "k", ",", "v", "in", "states_used", ".", "items", "(", ")", ":", "states_used", "[", "k", "]", "=", "list", "(", "set", "(", "v", ")", ")", "stmts_out", "=", "[", "]", "for", "stmt", "in", "stmts_in", ":", "skip", "=", "False", "if", "isinstance", "(", "stmt", ",", "RegulateActivity", ")", ":", "used", "=", "states_used", ".", "get", "(", "stmt", ".", "obj", ".", "name", ",", "[", "]", ")", "if", "stmt", ".", "obj_activity", "not", "in", "used", ":", "skip", "=", "True", "if", "not", "skip", ":", "stmts_out", ".", "append", "(", "stmt", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter out Activations that modify inconsequential activities

Inconsequential here means that the activity is not mentioned / tested
in any other statement. In some cases specific activity types should
be preserved, for instance, to be used as readouts in a model.
In this case, the given activities can be passed in a whitelist.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
whitelist : Optional[dict]
    A whitelist containing agent activity types which should be
    preserved even if no other statement refers to them.
    The whitelist parameter is a dictionary in which the key is a
    gene name and the value is a list of activity types. Example:
    whitelist = {'MAP2K1': ['kinase']}
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "out", "Activations", "that", "modify", "inconsequential", "activities" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1351-L1406
train
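The analogous sketch for activities; the 'kinase' activity of MAP2K1 is consumed by the second statement through an ActivityCondition, so the Activation setting it is kept:

from indra.statements import (Activation, ActivityCondition, Agent,
                              Phosphorylation)
from indra.tools import assemble_corpus as ac

activate_mek = Activation(Agent('BRAF'), Agent('MAP2K1'), 'kinase')
active_mek = Agent('MAP2K1', activity=ActivityCondition('kinase', True))
use_activity = Phosphorylation(active_mek, Agent('MAPK1'))

stmts_out = ac.filter_inconsequential_acts([activate_mek, use_activity])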
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_enzyme_kinase
def filter_enzyme_kinase(stmts_in, **kwargs):
    """Filter Phosphorylations to ones where the enzyme is a known kinase.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to remove ' % len(stmts_in) +
                'phosphorylation by non-kinases...')
    path = os.path.dirname(os.path.abspath(__file__))
    kinase_table = read_unicode_csv(path + '/../resources/kinases.tsv',
                                    delimiter='\t')
    gene_names = [lin[1] for lin in list(kinase_table)[1:]]
    stmts_out = []
    for st in stmts_in:
        if isinstance(st, Phosphorylation):
            if st.enz is not None:
                if st.enz.name in gene_names:
                    stmts_out.append(st)
        else:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_enzyme_kinase(stmts_in, **kwargs):
    """Filter Phosphorylations to ones where the enzyme is a known kinase.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to remove ' % len(stmts_in) +
                'phosphorylation by non-kinases...')
    path = os.path.dirname(os.path.abspath(__file__))
    kinase_table = read_unicode_csv(path + '/../resources/kinases.tsv',
                                    delimiter='\t')
    gene_names = [lin[1] for lin in list(kinase_table)[1:]]
    stmts_out = []
    for st in stmts_in:
        if isinstance(st, Phosphorylation):
            if st.enz is not None:
                if st.enz.name in gene_names:
                    stmts_out.append(st)
        else:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_enzyme_kinase", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Filtering %d statements to remove '", "%", "len", "(", "stmts_in", ")", "+", "'phosphorylation by non-kinases...'", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "kinase_table", "=", "read_unicode_csv", "(", "path", "+", "'/../resources/kinases.tsv'", ",", "delimiter", "=", "'\\t'", ")", "gene_names", "=", "[", "lin", "[", "1", "]", "for", "lin", "in", "list", "(", "kinase_table", ")", "[", "1", ":", "]", "]", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "if", "isinstance", "(", "st", ",", "Phosphorylation", ")", ":", "if", "st", ".", "enz", "is", "not", "None", ":", "if", "st", ".", "enz", ".", "name", "in", "gene_names", ":", "stmts_out", ".", "append", "(", "st", ")", "else", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter Phosphorylations to ones where the enzyme is a known kinase.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "Phosphorylations", "to", "ones", "where", "the", "enzyme", "is", "a", "known", "kinase", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1506-L1539
train
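A sketch assuming BRAF appears in INDRA's bundled kinases.tsv resource (it is a canonical kinase) while the hypothetical name 'GFP1' does not:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

by_kinase = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))
by_other = Phosphorylation(Agent('GFP1'), Agent('MAP2K1'))  # hypothetical

stmts_out = ac.filter_enzyme_kinase([by_kinase, by_other])  # keeps by_kinase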
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_transcription_factor
def filter_transcription_factor(stmts_in, **kwargs):
    """Filter out RegulateAmounts where subject is not a transcription factor.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to remove ' % len(stmts_in) +
                'amount regulations by non-transcription-factors...')
    path = os.path.dirname(os.path.abspath(__file__))
    tf_table = \
        read_unicode_csv(path + '/../resources/transcription_factors.csv')
    gene_names = [lin[1] for lin in list(tf_table)[1:]]
    stmts_out = []
    for st in stmts_in:
        if isinstance(st, RegulateAmount):
            if st.subj is not None:
                if st.subj.name in gene_names:
                    stmts_out.append(st)
        else:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_transcription_factor(stmts_in, **kwargs):
    """Filter out RegulateAmounts where subject is not a transcription factor.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to remove ' % len(stmts_in) +
                'amount regulations by non-transcription-factors...')
    path = os.path.dirname(os.path.abspath(__file__))
    tf_table = \
        read_unicode_csv(path + '/../resources/transcription_factors.csv')
    gene_names = [lin[1] for lin in list(tf_table)[1:]]
    stmts_out = []
    for st in stmts_in:
        if isinstance(st, RegulateAmount):
            if st.subj is not None:
                if st.subj.name in gene_names:
                    stmts_out.append(st)
        else:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_transcription_factor", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Filtering %d statements to remove '", "%", "len", "(", "stmts_in", ")", "+", "'amount regulations by non-transcription-factors...'", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "tf_table", "=", "read_unicode_csv", "(", "path", "+", "'/../resources/transcription_factors.csv'", ")", "gene_names", "=", "[", "lin", "[", "1", "]", "for", "lin", "in", "list", "(", "tf_table", ")", "[", "1", ":", "]", "]", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "if", "isinstance", "(", "st", ",", "RegulateAmount", ")", ":", "if", "st", ".", "subj", "is", "not", "None", ":", "if", "st", ".", "subj", ".", "name", "in", "gene_names", ":", "stmts_out", ".", "append", "(", "st", ")", "else", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter out RegulateAmounts where subject is not a transcription factor.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "out", "RegulateAmounts", "where", "subject", "is", "not", "a", "transcription", "factor", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1579-L1612
train
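A sketch assuming TP53 is listed in the bundled transcription_factors.csv resource (it is a well-known transcription factor) and GAPDH is not:

from indra.statements import Agent, IncreaseAmount
from indra.tools import assemble_corpus as ac

by_tf = IncreaseAmount(Agent('TP53'), Agent('MDM2'))
by_other = IncreaseAmount(Agent('GAPDH'), Agent('MDM2'))

stmts_out = ac.filter_transcription_factor([by_tf, by_other])  # keeps by_tf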
sorgerlab/indra
indra/tools/assemble_corpus.py
filter_uuid_list
def filter_uuid_list(stmts_in, uuids, **kwargs):
    """Filter to Statements corresponding to given UUIDs

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    uuids : list[str]
        A list of UUIDs to filter for.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    invert : Optional[bool]
        Invert the filter to remove the Statements corresponding to the
        given UUIDs.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    invert = kwargs.get('invert', False)
    logger.info('Filtering %d statements for %d UUID%s...' %
                (len(stmts_in), len(uuids), 's' if len(uuids) > 1 else ''))
    stmts_out = []
    for st in stmts_in:
        if not invert:
            if st.uuid in uuids:
                stmts_out.append(st)
        else:
            if st.uuid not in uuids:
                stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def filter_uuid_list(stmts_in, uuids, **kwargs):
    """Filter to Statements corresponding to given UUIDs

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    uuids : list[str]
        A list of UUIDs to filter for.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    invert : Optional[bool]
        Invert the filter to remove the Statements corresponding to the
        given UUIDs.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    invert = kwargs.get('invert', False)
    logger.info('Filtering %d statements for %d UUID%s...' %
                (len(stmts_in), len(uuids), 's' if len(uuids) > 1 else ''))
    stmts_out = []
    for st in stmts_in:
        if not invert:
            if st.uuid in uuids:
                stmts_out.append(st)
        else:
            if st.uuid not in uuids:
                stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "filter_uuid_list", "(", "stmts_in", ",", "uuids", ",", "*", "*", "kwargs", ")", ":", "invert", "=", "kwargs", ".", "get", "(", "'invert'", ",", "False", ")", "logger", ".", "info", "(", "'Filtering %d statements for %d UUID%s...'", "%", "(", "len", "(", "stmts_in", ")", ",", "len", "(", "uuids", ")", ",", "'s'", "if", "len", "(", "uuids", ")", ">", "1", "else", "''", ")", ")", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "if", "not", "invert", ":", "if", "st", ".", "uuid", "in", "uuids", ":", "stmts_out", ".", "append", "(", "st", ")", "else", ":", "if", "st", ".", "uuid", "not", "in", "uuids", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to Statements corresponding to given UUIDs

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to filter.
uuids : list[str]
    A list of UUIDs to filter for.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.
invert : Optional[bool]
    Invert the filter to remove the Statements corresponding to the
    given UUIDs.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of filtered statements.
[ "Filter", "to", "Statements", "corresponding", "to", "given", "UUIDs" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1615-L1651
train
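Every Statement is assigned a uuid at construction, so the filter can be driven directly from existing statements; a short sketch:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

stmts = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1')),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]

keep_ids = [stmts[0].uuid]
subset = ac.filter_uuid_list(stmts, keep_ids)             # first only
rest = ac.filter_uuid_list(stmts, keep_ids, invert=True)  # second only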
sorgerlab/indra
indra/tools/assemble_corpus.py
expand_families
def expand_families(stmts_in, **kwargs):
    """Expand FamPlex Agents to individual genes.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to expand.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of expanded statements.
    """
    from indra.tools.expand_families import Expander
    logger.info('Expanding families on %d statements...' % len(stmts_in))
    expander = Expander(hierarchies)
    stmts_out = expander.expand_families(stmts_in)
    logger.info('%d statements after expanding families...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def expand_families(stmts_in, **kwargs):
    """Expand FamPlex Agents to individual genes.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to expand.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of expanded statements.
    """
    from indra.tools.expand_families import Expander
    logger.info('Expanding families on %d statements...' % len(stmts_in))
    expander = Expander(hierarchies)
    stmts_out = expander.expand_families(stmts_in)
    logger.info('%d statements after expanding families...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "expand_families", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "from", "indra", ".", "tools", ".", "expand_families", "import", "Expander", "logger", ".", "info", "(", "'Expanding families on %d statements...'", "%", "len", "(", "stmts_in", ")", ")", "expander", "=", "Expander", "(", "hierarchies", ")", "stmts_out", "=", "expander", ".", "expand_families", "(", "stmts_in", ")", "logger", ".", "info", "(", "'%d statements after expanding families...'", "%", "len", "(", "stmts_out", ")", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Expand FamPlex Agents to individual genes.

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to expand.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of expanded statements.
[ "Expand", "FamPlex", "Agents", "to", "individual", "genes", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1654-L1677
train
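A sketch assuming family-level groundings under the 'FPLX' (FamPlex) namespace; expansion replaces each family Agent with its member genes, yielding the cross product of member combinations:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

raf = Agent('RAF', db_refs={'FPLX': 'RAF'})
mek = Agent('MEK', db_refs={'FPLX': 'MEK'})
family_stmt = Phosphorylation(raf, mek)

# Expect gene-level statements such as BRAF -> MAP2K1, RAF1 -> MAP2K2, etc.
expanded = ac.expand_families([family_stmt])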
sorgerlab/indra
indra/tools/assemble_corpus.py
reduce_activities
def reduce_activities(stmts_in, **kwargs):
    """Reduce the activity types in a list of statements

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to reduce activity types in.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of reduced activity statements.
    """
    logger.info('Reducing activities on %d statements...' % len(stmts_in))
    stmts_out = [deepcopy(st) for st in stmts_in]
    ml = MechLinker(stmts_out)
    ml.gather_explicit_activities()
    ml.reduce_activities()
    stmts_out = ml.statements
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def reduce_activities(stmts_in, **kwargs):
    """Reduce the activity types in a list of statements

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to reduce activity types in.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of reduced activity statements.
    """
    logger.info('Reducing activities on %d statements...' % len(stmts_in))
    stmts_out = [deepcopy(st) for st in stmts_in]
    ml = MechLinker(stmts_out)
    ml.gather_explicit_activities()
    ml.reduce_activities()
    stmts_out = ml.statements
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
[ "def", "reduce_activities", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Reducing activities on %d statements...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "deepcopy", "(", "st", ")", "for", "st", "in", "stmts_in", "]", "ml", "=", "MechLinker", "(", "stmts_out", ")", "ml", ".", "gather_explicit_activities", "(", ")", "ml", ".", "reduce_activities", "(", ")", "stmts_out", "=", "ml", ".", "statements", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Reduce the activity types in a list of statements

Parameters
----------
stmts_in : list[indra.statements.Statement]
    A list of statements to reduce activity types in.
save : Optional[str]
    The name of a pickle file to save the results (stmts_out) into.

Returns
-------
stmts_out : list[indra.statements.Statement]
    A list of reduced activity statements.
[ "Reduce", "the", "activity", "types", "in", "a", "list", "of", "statements" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1680-L1704
train
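A heavily hedged sketch: when other statements establish a specific activity type for an agent (here 'kinase' via an ActiveForm), the MechLinker can rewrite generic 'activity' references to the more specific type; the exact reductions depend on INDRA's activity hierarchy:

from indra.statements import (ActiveForm, Activation, Agent,
                              ModCondition)
from indra.tools import assemble_corpus as ac

# An explicit 'kinase' ActiveForm plus a generic Activation of MAP2K1
af = ActiveForm(Agent('MAP2K1', mods=[ModCondition('phosphorylation')]),
                'kinase', True)
act = Activation(Agent('BRAF'), Agent('MAP2K1'))  # generic 'activity'

reduced = ac.reduce_activities([af, act])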
sorgerlab/indra
indra/tools/assemble_corpus.py
strip_agent_context
def strip_agent_context(stmts_in, **kwargs):
    """Strip any context on agents within each statement.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements whose agent context should be stripped.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of stripped statements.
    """
    logger.info('Stripping agent context on %d statements...' % len(stmts_in))
    stmts_out = []
    for st in stmts_in:
        new_st = deepcopy(st)
        for agent in new_st.agent_list():
            if agent is None:
                continue
            agent.mods = []
            agent.mutations = []
            agent.activity = None
            agent.location = None
            agent.bound_conditions = []
        stmts_out.append(new_st)
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
python
def strip_agent_context(stmts_in, **kwargs): """Strip any context on agents within each statement. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements whose agent context should be stripped. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of stripped statements. """ logger.info('Stripping agent context on %d statements...' % len(stmts_in)) stmts_out = [] for st in stmts_in: new_st = deepcopy(st) for agent in new_st.agent_list(): if agent is None: continue agent.mods = [] agent.mutations = [] agent.activity = None agent.location = None agent.bound_conditions = [] stmts_out.append(new_st) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "strip_agent_context", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Stripping agent context on %d statements...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "]", "for", "st", "in", "stmts_in", ":", "new_st", "=", "deepcopy", "(", "st", ")", "for", "agent", "in", "new_st", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "None", ":", "continue", "agent", ".", "mods", "=", "[", "]", "agent", ".", "mutations", "=", "[", "]", "agent", ".", "activity", "=", "None", "agent", ".", "location", "=", "None", "agent", ".", "bound_conditions", "=", "[", "]", "stmts_out", ".", "append", "(", "new_st", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Strip any context on agents within each statement. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements whose agent context should be stripped. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of stripped statements.
[ "Strip", "any", "context", "on", "agents", "within", "each", "statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1707-L1738
train
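A usage sketch (illustrative values) showing that agent context is removed on a deep copy, leaving the input untouched:

from indra.statements import Agent, ModCondition, Phosphorylation
from indra.tools import assemble_corpus as ac

braf = Agent('BRAF', mods=[ModCondition('phosphorylation')])
stmts_out = ac.strip_agent_context([Phosphorylation(braf, Agent('MAP2K1'))])
# The enzyme in stmts_out[0] now has empty mods, mutations, activity,
# location and bound_conditions; braf itself is unchanged.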
sorgerlab/indra
indra/tools/assemble_corpus.py
standardize_names_groundings
def standardize_names_groundings(stmts): """Standardize the names of Concepts with respect to an ontology. NOTE: this function is currently optimized for Influence Statements obtained from Eidos, Hume, Sofia and CWMS. It will possibly yield unexpected results for biology-specific Statements. """ print('Standardize names to groundings') for stmt in stmts: for concept in stmt.agent_list(): db_ns, db_id = concept.get_grounding() if db_id is not None: if isinstance(db_id, list): db_id = db_id[0][0].split('/')[-1] else: db_id = db_id.split('/')[-1] db_id = db_id.replace('|', ' ') db_id = db_id.replace('_', ' ') db_id = db_id.replace('ONT::', '') db_id = db_id.capitalize() concept.name = db_id return stmts
python
def standardize_names_groundings(stmts): """Standardize the names of Concepts with respect to an ontology. NOTE: this function is currently optimized for Influence Statements obtained from Eidos, Hume, Sofia and CWMS. It will possibly yield unexpected results for biology-specific Statements. """ print('Standardize names to groundings') for stmt in stmts: for concept in stmt.agent_list(): db_ns, db_id = concept.get_grounding() if db_id is not None: if isinstance(db_id, list): db_id = db_id[0][0].split('/')[-1] else: db_id = db_id.split('/')[-1] db_id = db_id.replace('|', ' ') db_id = db_id.replace('_', ' ') db_id = db_id.replace('ONT::', '') db_id = db_id.capitalize() concept.name = db_id return stmts
[ "def", "standardize_names_groundings", "(", "stmts", ")", ":", "print", "(", "'Standardize names to groundings'", ")", "for", "stmt", "in", "stmts", ":", "for", "concept", "in", "stmt", ".", "agent_list", "(", ")", ":", "db_ns", ",", "db_id", "=", "concept", ".", "get_grounding", "(", ")", "if", "db_id", "is", "not", "None", ":", "if", "isinstance", "(", "db_id", ",", "list", ")", ":", "db_id", "=", "db_id", "[", "0", "]", "[", "0", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "else", ":", "db_id", "=", "db_id", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "db_id", "=", "db_id", ".", "replace", "(", "'|'", ",", "' '", ")", "db_id", "=", "db_id", ".", "replace", "(", "'_'", ",", "' '", ")", "db_id", "=", "db_id", ".", "replace", "(", "'ONT::'", ",", "''", ")", "db_id", "=", "db_id", ".", "capitalize", "(", ")", "concept", ".", "name", "=", "db_id", "return", "stmts" ]
Standardize the names of Concepts with respect to an ontology. NOTE: this function is currently optimized for Influence Statements obtained from Eidos, Hume, Sofia and CWMS. It will possibly yield unexpected results for biology-specific Statements.
[ "Standardize", "the", "names", "of", "Concepts", "with", "respect", "to", "an", "ontology", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1741-L1762
train
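A sketch under the assumption of Eidos-style groundings, where db_refs values are lists of (URI, score) pairs; the grounding strings below are made up for illustration:

from indra.statements import Concept, Influence
from indra.tools import assemble_corpus as ac

rain = Concept('rainfall', db_refs={'UN': [('UN/events/weather/rainfall', 0.9)]})
flood = Concept('flooding', db_refs={'UN': [('UN/events/crisis/flooding', 0.8)]})
stmts = ac.standardize_names_groundings([Influence(rain, flood)])
# The Concept names become 'Rainfall' and 'Flooding', derived from the grounding.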
sorgerlab/indra
indra/tools/assemble_corpus.py
dump_stmt_strings
def dump_stmt_strings(stmts, fname): """Save printed statements in a file. Parameters ---------- stmts : list[indra.statements.Statement] A list of statements to save in a text file. fname : str The name of a text file to save the printed statements into. """ with open(fname, 'wb') as fh: for st in stmts: fh.write(('%s\n' % st).encode('utf-8'))
python
def dump_stmt_strings(stmts, fname): """Save printed statements in a file. Parameters ---------- stmts : list[indra.statements.Statement] A list of statements to save in a text file. fname : str The name of a text file to save the printed statements into. """ with open(fname, 'wb') as fh: for st in stmts: fh.write(('%s\n' % st).encode('utf-8'))
[ "def", "dump_stmt_strings", "(", "stmts", ",", "fname", ")", ":", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "fh", ":", "for", "st", "in", "stmts", ":", "fh", ".", "write", "(", "(", "'%s\\n'", "%", "st", ")", ".", "encode", "(", "'utf-8'", ")", ")" ]
Save printed statements in a file. Parameters ---------- stmts : list[indra.statements.Statement] A list of statements to save in a text file. fname : str The name of a text file to save the printed statements into.
[ "Save", "printed", "statements", "in", "a", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1765-L1777
train
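A minimal usage sketch; the output filename is a placeholder:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

stmts = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))]
ac.dump_stmt_strings(stmts, 'stmts.txt')  # one UTF-8-encoded line per statement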
sorgerlab/indra
indra/tools/assemble_corpus.py
rename_db_ref
def rename_db_ref(stmts_in, ns_from, ns_to, **kwargs): """Rename an entry in the db_refs of each Agent. This is particularly useful when old Statements in pickle files need to be updated after a namespace was changed such as 'BE' to 'FPLX'. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements whose Agents' db_refs need to be changed. ns_from : str The namespace identifier to replace. ns_to : str The namespace identifier to replace it with. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of Statements with Agents' db_refs changed. """ logger.info('Remapping "%s" to "%s" in db_refs on %d statements...' % (ns_from, ns_to, len(stmts_in))) stmts_out = [deepcopy(st) for st in stmts_in] for stmt in stmts_out: for agent in stmt.agent_list(): if agent is not None and ns_from in agent.db_refs: agent.db_refs[ns_to] = agent.db_refs.pop(ns_from) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
python
def rename_db_ref(stmts_in, ns_from, ns_to, **kwargs): """Rename an entry in the db_refs of each Agent. This is particularly useful when old Statements in pickle files need to be updated after a namespace was changed such as 'BE' to 'FPLX'. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements whose Agents' db_refs need to be changed. ns_from : str The namespace identifier to replace. ns_to : str The namespace identifier to replace it with. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of Statements with Agents' db_refs changed. """ logger.info('Remapping "%s" to "%s" in db_refs on %d statements...' % (ns_from, ns_to, len(stmts_in))) stmts_out = [deepcopy(st) for st in stmts_in] for stmt in stmts_out: for agent in stmt.agent_list(): if agent is not None and ns_from in agent.db_refs: agent.db_refs[ns_to] = agent.db_refs.pop(ns_from) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "rename_db_ref", "(", "stmts_in", ",", "ns_from", ",", "ns_to", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Remapping \"%s\" to \"%s\" in db_refs on %d statements...'", "%", "(", "ns_from", ",", "ns_to", ",", "len", "(", "stmts_in", ")", ")", ")", "stmts_out", "=", "[", "deepcopy", "(", "st", ")", "for", "st", "in", "stmts_in", "]", "for", "stmt", "in", "stmts_out", ":", "for", "agent", "in", "stmt", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", "and", "ns_from", "in", "agent", ".", "db_refs", ":", "agent", ".", "db_refs", "[", "ns_to", "]", "=", "agent", ".", "db_refs", ".", "pop", "(", "ns_from", ")", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Rename an entry in the db_refs of each Agent. This is particularly useful when old Statements in pickle files need to be updated after a namespace was changed such as 'BE' to 'FPLX'. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements whose Agents' db_refs need to be changed. ns_from : str The namespace identifier to replace. ns_to : str The namespace identifier to replace it with. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of Statements with Agents' db_refs changed.
[ "Rename", "an", "entry", "in", "the", "db_refs", "of", "each", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1780-L1813
train
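A usage sketch mirroring the 'BE' to 'FPLX' migration mentioned in the docstring (the statement is illustrative):

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

mek = Agent('MEK', db_refs={'BE': 'MEK'})
stmts_out = ac.rename_db_ref([Phosphorylation(None, mek)], 'BE', 'FPLX')
# The substrate's copy in stmts_out now carries db_refs == {'FPLX': 'MEK'}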
sorgerlab/indra
indra/tools/assemble_corpus.py
align_statements
def align_statements(stmts1, stmts2, keyfun=None): """Return alignment of two lists of statements by key. Parameters ---------- stmts1 : list[indra.statements.Statement] A list of INDRA Statements to align stmts2 : list[indra.statements.Statement] A list of INDRA Statements to align keyfun : Optional[function] A function that takes a Statement as an argument and returns a key to align by. If not given, the default key function is a tuple of the names of the Agents in the Statement. Returns ------- matches : list(tuple) A list of tuples where each tuple has two elements, the first corresponding to an element of the stmts1 list and the second corresponding to an element of the stmts2 list. If a given element is not matched, its corresponding pair in the tuple is None. """ def name_keyfun(stmt): return tuple(a.name if a is not None else None for a in stmt.agent_list()) if not keyfun: keyfun = name_keyfun matches = [] keys1 = [keyfun(s) for s in stmts1] keys2 = [keyfun(s) for s in stmts2] for stmt, key in zip(stmts1, keys1): try: match_idx = keys2.index(key) match_stmt = stmts2[match_idx] matches.append((stmt, match_stmt)) except ValueError: matches.append((stmt, None)) for stmt, key in zip(stmts2, keys2): try: match_idx = keys1.index(key) except ValueError: matches.append((None, stmt)) return matches
python
def align_statements(stmts1, stmts2, keyfun=None): """Return alignment of two lists of statements by key. Parameters ---------- stmts1 : list[indra.statements.Statement] A list of INDRA Statements to align stmts2 : list[indra.statements.Statement] A list of INDRA Statements to align keyfun : Optional[function] A function that takes a Statement as an argument and returns a key to align by. If not given, the default key function is a tuple of the names of the Agents in the Statement. Returns ------- matches : list(tuple) A list of tuples where each tuple has two elements, the first corresponding to an element of the stmts1 list and the second corresponding to an element of the stmts2 list. If a given element is not matched, its corresponding pair in the tuple is None. """ def name_keyfun(stmt): return tuple(a.name if a is not None else None for a in stmt.agent_list()) if not keyfun: keyfun = name_keyfun matches = [] keys1 = [keyfun(s) for s in stmts1] keys2 = [keyfun(s) for s in stmts2] for stmt, key in zip(stmts1, keys1): try: match_idx = keys2.index(key) match_stmt = stmts2[match_idx] matches.append((stmt, match_stmt)) except ValueError: matches.append((stmt, None)) for stmt, key in zip(stmts2, keys2): try: match_idx = keys1.index(key) except ValueError: matches.append((None, stmt)) return matches
[ "def", "align_statements", "(", "stmts1", ",", "stmts2", ",", "keyfun", "=", "None", ")", ":", "def", "name_keyfun", "(", "stmt", ")", ":", "return", "tuple", "(", "a", ".", "name", "if", "a", "is", "not", "None", "else", "None", "for", "a", "in", "stmt", ".", "agent_list", "(", ")", ")", "if", "not", "keyfun", ":", "keyfun", "=", "name_keyfun", "matches", "=", "[", "]", "keys1", "=", "[", "keyfun", "(", "s", ")", "for", "s", "in", "stmts1", "]", "keys2", "=", "[", "keyfun", "(", "s", ")", "for", "s", "in", "stmts2", "]", "for", "stmt", ",", "key", "in", "zip", "(", "stmts1", ",", "keys1", ")", ":", "try", ":", "match_idx", "=", "keys2", ".", "index", "(", "key", ")", "match_stmt", "=", "stmts2", "[", "match_idx", "]", "matches", ".", "append", "(", "(", "stmt", ",", "match_stmt", ")", ")", "except", "ValueError", ":", "matches", ".", "append", "(", "(", "stmt", ",", "None", ")", ")", "for", "stmt", ",", "key", "in", "zip", "(", "stmts2", ",", "keys2", ")", ":", "try", ":", "match_idx", "=", "keys1", ".", "index", "(", "key", ")", "except", "ValueError", ":", "matches", ".", "append", "(", "(", "None", ",", "stmt", ")", ")", "return", "matches" ]
Return alignment of two lists of statements by key. Parameters ---------- stmts1 : list[indra.statements.Statement] A list of INDRA Statements to align stmts2 : list[indra.statements.Statement] A list of INDRA Statements to align keyfun : Optional[function] A function that takes a Statement as an argument and returns a key to align by. If not given, the default key function is a tuple of the names of the Agents in the Statement. Returns ------- matches : list(tuple) A list of tuples where each tuple has two elements, the first corresponding to an element of the stmts1 list and the second corresponding to an element of the stmts2 list. If a given element is not matched, its corresponding pair in the tuple is None.
[ "Return", "alignment", "of", "two", "lists", "of", "statements", "by", "key", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1816-L1860
train
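A sketch of the default name-based alignment (illustrative statements):

from indra.statements import Activation, Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

stmts1 = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))]
stmts2 = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1')),
          Activation(Agent('EGFR'), Agent('KRAS'))]
matches = ac.align_statements(stmts1, stmts2)
# The BRAF/MAP2K1 statements pair up by agent names; the Activation has no
# counterpart in stmts1, so it appears as (None, stmt).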
sorgerlab/indra
indra/sources/indra_db_rest/util.py
submit_query_request
def submit_query_request(end_point, *args, **kwargs): """Low level function to format the query string.""" ev_limit = kwargs.pop('ev_limit', 10) best_first = kwargs.pop('best_first', True) tries = kwargs.pop('tries', 2) # This isn't handled by requests because of the multiple identical agent # keys, e.g. {'agent': 'MEK', 'agent': 'ERK'} which is not supported in # python, but is allowed and necessary in these query strings. # TODO because we use the API Gateway, this feature is no longer needed. # We should just use the requests parameters dict. query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items() if v is not None] + list(args)) return submit_statement_request('get', end_point, query_str, ev_limit=ev_limit, best_first=best_first, tries=tries)
python
def submit_query_request(end_point, *args, **kwargs): """Low level function to format the query string.""" ev_limit = kwargs.pop('ev_limit', 10) best_first = kwargs.pop('best_first', True) tries = kwargs.pop('tries', 2) # This isn't handled by requests because of the multiple identical agent # keys, e.g. {'agent': 'MEK', 'agent': 'ERK'} which is not supported in # python, but is allowed and necessary in these query strings. # TODO because we use the API Gateway, this feature is no longer needed. # We should just use the requests parameters dict. query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items() if v is not None] + list(args)) return submit_statement_request('get', end_point, query_str, ev_limit=ev_limit, best_first=best_first, tries=tries)
[ "def", "submit_query_request", "(", "end_point", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ev_limit", "=", "kwargs", ".", "pop", "(", "'ev_limit'", ",", "10", ")", "best_first", "=", "kwargs", ".", "pop", "(", "'best_first'", ",", "True", ")", "tries", "=", "kwargs", ".", "pop", "(", "'tries'", ",", "2", ")", "# This isn't handled by requests because of the multiple identical agent", "# keys, e.g. {'agent': 'MEK', 'agent': 'ERK'} which is not supported in", "# python, but is allowed and necessary in these query strings.", "# TODO because we use the API Gateway, this feature is no longer needed.", "# We should just use the requests parameters dict.", "query_str", "=", "'?'", "+", "'&'", ".", "join", "(", "[", "'%s=%s'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "v", "is", "not", "None", "]", "+", "list", "(", "args", ")", ")", "return", "submit_statement_request", "(", "'get'", ",", "end_point", ",", "query_str", ",", "ev_limit", "=", "ev_limit", ",", "best_first", "=", "best_first", ",", "tries", "=", "tries", ")" ]
Low level function to format the query string.
[ "Low", "level", "function", "to", "format", "the", "query", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/util.py#L11-L26
train
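A hypothetical call against the statements/from_agents endpoint; the parameter names follow the web service's query-string conventions and are assumptions here, not part of the record:

from indra.sources.indra_db_rest.util import submit_query_request

# 'subject' and 'type' become query-string parameters; evidence is capped
# at 5 entries per statement via ev_limit.
resp = submit_query_request('from_agents', subject='MEK',
                            type='Phosphorylation', ev_limit=5)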
sorgerlab/indra
indra/sources/indra_db_rest/util.py
submit_statement_request
def submit_statement_request(meth, end_point, query_str='', data=None, tries=2, **params): """Even lower level function to make the request.""" full_end_point = 'statements/' + end_point.lstrip('/') return make_db_rest_request(meth, full_end_point, query_str, data, params, tries)
python
def submit_statement_request(meth, end_point, query_str='', data=None, tries=2, **params): """Even lower level function to make the request.""" full_end_point = 'statements/' + end_point.lstrip('/') return make_db_rest_request(meth, full_end_point, query_str, data, params, tries)
[ "def", "submit_statement_request", "(", "meth", ",", "end_point", ",", "query_str", "=", "''", ",", "data", "=", "None", ",", "tries", "=", "2", ",", "*", "*", "params", ")", ":", "full_end_point", "=", "'statements/'", "+", "end_point", ".", "lstrip", "(", "'/'", ")", "return", "make_db_rest_request", "(", "meth", ",", "full_end_point", ",", "query_str", ",", "data", ",", "params", ",", "tries", ")" ]
Even lower level function to make the request.
[ "Even", "lower", "level", "function", "to", "make", "the", "request", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/util.py#L29-L33
train
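The same request expressed at this lower level, with a hand-built query string (illustrative values):

from indra.sources.indra_db_rest.util import submit_statement_request

resp = submit_statement_request('get', 'from_agents',
                                query_str='?subject=MEK&type=Phosphorylation')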
sorgerlab/indra
indra/preassembler/__init__.py
render_stmt_graph
def render_stmt_graph(statements, reduce=True, english=False, rankdir=None, agent_style=None): """Render the statement hierarchy as a pygraphviz graph. Parameters ---------- statements : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. reduce : bool Whether to perform a transitive reduction of the edges in the graph. Default is True. english : bool If True, the statements in the graph are represented by their English-assembled equivalent; otherwise they are represented as text-formatted Statements. rankdir : str or None Argument to pass through to the pygraphviz `AGraph` constructor specifying graph layout direction. In particular, a value of 'LR' specifies a left-to-right direction. If None, the pygraphviz default is used. agent_style : dict or None Dict of attributes specifying the visual properties of nodes. If None, the following default attributes are used:: agent_style = {'color': 'lightgray', 'style': 'filled', 'fontname': 'arial'} Returns ------- pygraphviz.AGraph Pygraphviz graph with nodes representing statements and edges pointing from supported statements to supported_by statements. Examples -------- Pattern for getting statements and rendering as a Graphviz graph: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> graph = render_stmt_graph(pa.related_stmts) >>> graph.write('example_graph.dot') # To make the DOT file >>> graph.draw('example_graph.png', prog='dot') # To make an image Resulting graph: .. image:: /images/example_graph.png :align: center :alt: Example statement graph rendered by Graphviz """ from indra.assemblers.english import EnglishAssembler # Set the default agent formatting properties if agent_style is None: agent_style = {'color': 'lightgray', 'style': 'filled', 'fontname': 'arial'} # Sets to store all of the nodes and edges as we recursively process all # of the statements nodes = set([]) edges = set([]) stmt_dict = {} # Recursive function for processing all statements def process_stmt(stmt): nodes.add(str(stmt.matches_key())) stmt_dict[str(stmt.matches_key())] = stmt for sby_ix, sby_stmt in enumerate(stmt.supported_by): edges.add((str(stmt.matches_key()), str(sby_stmt.matches_key()))) process_stmt(sby_stmt) # Process all of the top-level statements, getting the supporting statements # recursively for stmt in statements: process_stmt(stmt) # Create a networkx graph from the nodes nx_graph = nx.DiGraph() nx_graph.add_edges_from(edges) # Perform transitive reduction if desired if reduce: nx_graph = nx.algorithms.dag.transitive_reduction(nx_graph) # Create a pygraphviz graph from the nx graph try: pgv_graph = pgv.AGraph(name='statements', directed=True, rankdir=rankdir) except NameError: logger.error('Cannot generate graph because ' 'pygraphviz could not be imported.') return None for node in nx_graph.nodes(): stmt = stmt_dict[node] if english: ea = EnglishAssembler([stmt]) stmt_str = ea.make_model() else: stmt_str = str(stmt) pgv_graph.add_node(node, label='%s (%d)' % (stmt_str, len(stmt.evidence)), **agent_style) pgv_graph.add_edges_from(nx_graph.edges()) return pgv_graph
python
def render_stmt_graph(statements, reduce=True, english=False, rankdir=None, agent_style=None): """Render the statement hierarchy as a pygraphviz graph. Parameters ---------- statements : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. reduce : bool Whether to perform a transitive reduction of the edges in the graph. Default is True. english : bool If True, the statements in the graph are represented by their English-assembled equivalent; otherwise they are represented as text-formatted Statements. rankdir : str or None Argument to pass through to the pygraphviz `AGraph` constructor specifying graph layout direction. In particular, a value of 'LR' specifies a left-to-right direction. If None, the pygraphviz default is used. agent_style : dict or None Dict of attributes specifying the visual properties of nodes. If None, the following default attributes are used:: agent_style = {'color': 'lightgray', 'style': 'filled', 'fontname': 'arial'} Returns ------- pygraphviz.AGraph Pygraphviz graph with nodes representing statements and edges pointing from supported statements to supported_by statements. Examples -------- Pattern for getting statements and rendering as a Graphviz graph: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> graph = render_stmt_graph(pa.related_stmts) >>> graph.write('example_graph.dot') # To make the DOT file >>> graph.draw('example_graph.png', prog='dot') # To make an image Resulting graph: .. image:: /images/example_graph.png :align: center :alt: Example statement graph rendered by Graphviz """ from indra.assemblers.english import EnglishAssembler # Set the default agent formatting properties if agent_style is None: agent_style = {'color': 'lightgray', 'style': 'filled', 'fontname': 'arial'} # Sets to store all of the nodes and edges as we recursively process all # of the statements nodes = set([]) edges = set([]) stmt_dict = {} # Recursive function for processing all statements def process_stmt(stmt): nodes.add(str(stmt.matches_key())) stmt_dict[str(stmt.matches_key())] = stmt for sby_ix, sby_stmt in enumerate(stmt.supported_by): edges.add((str(stmt.matches_key()), str(sby_stmt.matches_key()))) process_stmt(sby_stmt) # Process all of the top-level statements, getting the supporting statements # recursively for stmt in statements: process_stmt(stmt) # Create a networkx graph from the nodes nx_graph = nx.DiGraph() nx_graph.add_edges_from(edges) # Perform transitive reduction if desired if reduce: nx_graph = nx.algorithms.dag.transitive_reduction(nx_graph) # Create a pygraphviz graph from the nx graph try: pgv_graph = pgv.AGraph(name='statements', directed=True, rankdir=rankdir) except NameError: logger.error('Cannot generate graph because ' 'pygraphviz could not be imported.') return None for node in nx_graph.nodes(): stmt = stmt_dict[node] if english: ea = EnglishAssembler([stmt]) stmt_str = ea.make_model() else: stmt_str = str(stmt) pgv_graph.add_node(node, label='%s (%d)' % (stmt_str, len(stmt.evidence)), **agent_style) pgv_graph.add_edges_from(nx_graph.edges()) return pgv_graph
[ "def", "render_stmt_graph", "(", "statements", ",", "reduce", "=", "True", ",", "english", "=", "False", ",", "rankdir", "=", "None", ",", "agent_style", "=", "None", ")", ":", "from", "indra", ".", "assemblers", ".", "english", "import", "EnglishAssembler", "# Set the default agent formatting properties", "if", "agent_style", "is", "None", ":", "agent_style", "=", "{", "'color'", ":", "'lightgray'", ",", "'style'", ":", "'filled'", ",", "'fontname'", ":", "'arial'", "}", "# Sets to store all of the nodes and edges as we recursively process all", "# of the statements", "nodes", "=", "set", "(", "[", "]", ")", "edges", "=", "set", "(", "[", "]", ")", "stmt_dict", "=", "{", "}", "# Recursive function for processing all statements", "def", "process_stmt", "(", "stmt", ")", ":", "nodes", ".", "add", "(", "str", "(", "stmt", ".", "matches_key", "(", ")", ")", ")", "stmt_dict", "[", "str", "(", "stmt", ".", "matches_key", "(", ")", ")", "]", "=", "stmt", "for", "sby_ix", ",", "sby_stmt", "in", "enumerate", "(", "stmt", ".", "supported_by", ")", ":", "edges", ".", "add", "(", "(", "str", "(", "stmt", ".", "matches_key", "(", ")", ")", ",", "str", "(", "sby_stmt", ".", "matches_key", "(", ")", ")", ")", ")", "process_stmt", "(", "sby_stmt", ")", "# Process all of the top-level statements, getting the supporting statements", "# recursively", "for", "stmt", "in", "statements", ":", "process_stmt", "(", "stmt", ")", "# Create a networkx graph from the nodes", "nx_graph", "=", "nx", ".", "DiGraph", "(", ")", "nx_graph", ".", "add_edges_from", "(", "edges", ")", "# Perform transitive reduction if desired", "if", "reduce", ":", "nx_graph", "=", "nx", ".", "algorithms", ".", "dag", ".", "transitive_reduction", "(", "nx_graph", ")", "# Create a pygraphviz graph from the nx graph", "try", ":", "pgv_graph", "=", "pgv", ".", "AGraph", "(", "name", "=", "'statements'", ",", "directed", "=", "True", ",", "rankdir", "=", "rankdir", ")", "except", "NameError", ":", "logger", ".", "error", "(", "'Cannot generate graph because '", "'pygraphviz could not be imported.'", ")", "return", "None", "for", "node", "in", "nx_graph", ".", "nodes", "(", ")", ":", "stmt", "=", "stmt_dict", "[", "node", "]", "if", "english", ":", "ea", "=", "EnglishAssembler", "(", "[", "stmt", "]", ")", "stmt_str", "=", "ea", ".", "make_model", "(", ")", "else", ":", "stmt_str", "=", "str", "(", "stmt", ")", "pgv_graph", ".", "add_node", "(", "node", ",", "label", "=", "'%s (%d)'", "%", "(", "stmt_str", ",", "len", "(", "stmt", ".", "evidence", ")", ")", ",", "*", "*", "agent_style", ")", "pgv_graph", ".", "add_edges_from", "(", "nx_graph", ".", "edges", "(", ")", ")", "return", "pgv_graph" ]
Render the statement hierarchy as a pygraphviz graph. Parameters ---------- statements : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. reduce : bool Whether to perform a transitive reduction of the edges in the graph. Default is True. english : bool If True, the statements in the graph are represented by their English-assembled equivalent; otherwise they are represented as text-formatted Statements. rankdir : str or None Argument to pass through to the pygraphviz `AGraph` constructor specifying graph layout direction. In particular, a value of 'LR' specifies a left-to-right direction. If None, the pygraphviz default is used. agent_style : dict or None Dict of attributes specifying the visual properties of nodes. If None, the following default attributes are used:: agent_style = {'color': 'lightgray', 'style': 'filled', 'fontname': 'arial'} Returns ------- pygraphviz.AGraph Pygraphviz graph with nodes representing statements and edges pointing from supported statements to supported_by statements. Examples -------- Pattern for getting statements and rendering as a Graphviz graph: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> graph = render_stmt_graph(pa.related_stmts) >>> graph.write('example_graph.dot') # To make the DOT file >>> graph.draw('example_graph.png', prog='dot') # To make an image Resulting graph: .. image:: /images/example_graph.png :align: center :alt: Example statement graph rendered by Graphviz
[ "Render", "the", "statement", "hierarchy", "as", "a", "pygraphviz", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L646-L752
train
sorgerlab/indra
indra/preassembler/__init__.py
flatten_stmts
def flatten_stmts(stmts): """Return the full set of unique stmts in a pre-assembled stmt graph. The flattened list of statements returned by this function can be compared to the original set of unique statements to make sure no statements have been lost during the preassembly process. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. Returns ------- stmts : list of :py:class:`indra.statements.Statement` List of all statements contained in the hierarchical statement graph. Examples -------- Calling :py:meth:`combine_related` on two statements results in one top-level statement; calling :py:func:`flatten_stmts` recovers both: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> flattened = flatten_stmts(pa.related_stmts) >>> flattened.sort(key=lambda x: x.matches_key()) >>> flattened [Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)] """ total_stmts = set(stmts) for stmt in stmts: if stmt.supported_by: children = flatten_stmts(stmt.supported_by) total_stmts = total_stmts.union(children) return list(total_stmts)
python
def flatten_stmts(stmts): """Return the full set of unique stmts in a pre-assembled stmt graph. The flattened list of statements returned by this function can be compared to the original set of unique statements to make sure no statements have been lost during the preassembly process. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. Returns ------- stmts : list of :py:class:`indra.statements.Statement` List of all statements contained in the hierarchical statement graph. Examples -------- Calling :py:meth:`combine_related` on two statements results in one top-level statement; calling :py:func:`flatten_stmts` recovers both: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> flattened = flatten_stmts(pa.related_stmts) >>> flattened.sort(key=lambda x: x.matches_key()) >>> flattened [Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)] """ total_stmts = set(stmts) for stmt in stmts: if stmt.supported_by: children = flatten_stmts(stmt.supported_by) total_stmts = total_stmts.union(children) return list(total_stmts)
[ "def", "flatten_stmts", "(", "stmts", ")", ":", "total_stmts", "=", "set", "(", "stmts", ")", "for", "stmt", "in", "stmts", ":", "if", "stmt", ".", "supported_by", ":", "children", "=", "flatten_stmts", "(", "stmt", ".", "supported_by", ")", "total_stmts", "=", "total_stmts", ".", "union", "(", "children", ")", "return", "list", "(", "total_stmts", ")" ]
Return the full set of unique stmts in a pre-assembled stmt graph. The flattened list of statements returned by this function can be compared to the original set of unique statements to make sure no statements have been lost during the preassembly process. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. Returns ------- stmts : list of :py:class:`indra.statements.Statement` List of all statements contained in the hierarchical statement graph. Examples -------- Calling :py:meth:`combine_related` on two statements results in one top-level statement; calling :py:func:`flatten_stmts` recovers both: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> flattened = flatten_stmts(pa.related_stmts) >>> flattened.sort(key=lambda x: x.matches_key()) >>> flattened [Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)]
[ "Return", "the", "full", "set", "of", "unique", "stmts", "in", "a", "pre", "-", "assembled", "stmt", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L755-L797
train
sorgerlab/indra
indra/preassembler/__init__.py
Preassembler.combine_duplicates
def combine_duplicates(self): """Combine duplicates among `stmts` and save result in `unique_stmts`. A wrapper around the static method :py:meth:`combine_duplicate_stmts`. """ if self.unique_stmts is None: self.unique_stmts = self.combine_duplicate_stmts(self.stmts) return self.unique_stmts
python
def combine_duplicates(self): """Combine duplicates among `stmts` and save result in `unique_stmts`. A wrapper around the static method :py:meth:`combine_duplicate_stmts`. """ if self.unique_stmts is None: self.unique_stmts = self.combine_duplicate_stmts(self.stmts) return self.unique_stmts
[ "def", "combine_duplicates", "(", "self", ")", ":", "if", "self", ".", "unique_stmts", "is", "None", ":", "self", ".", "unique_stmts", "=", "self", ".", "combine_duplicate_stmts", "(", "self", ".", "stmts", ")", "return", "self", ".", "unique_stmts" ]
Combine duplicates among `stmts` and save result in `unique_stmts`. A wrapper around the static method :py:meth:`combine_duplicate_stmts`.
[ "Combine", "duplicates", "among", "stmts", "and", "save", "result", "in", "unique_stmts", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L68-L75
train
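A usage sketch of the wrapper, following the pattern of the combine_duplicate_stmts doctest further below:

from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
from indra.statements import Agent, Evidence, Phosphorylation

st1 = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'),
                      evidence=[Evidence(text='evidence 1')])
st2 = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'),
                      evidence=[Evidence(text='evidence 2')])
pa = Preassembler(hierarchies, [st1, st2])
unique = pa.combine_duplicates()  # one statement carrying both evidences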
sorgerlab/indra
indra/preassembler/__init__.py
Preassembler._get_stmt_matching_groups
def _get_stmt_matching_groups(stmts): """Use the matches_key method to get sets of matching statements.""" def match_func(x): return x.matches_key() # Remove exact duplicates using a set() call: logger.debug('%d statements before removing object duplicates.' % len(stmts)) st = list(set(stmts)) logger.debug('%d statements after removing object duplicates.' % len(st)) # Group statements according to whether they are matches (differing # only in their evidence). # Sort the statements in place by matches_key() st.sort(key=match_func) return itertools.groupby(st, key=match_func)
python
def _get_stmt_matching_groups(stmts): """Use the matches_key method to get sets of matching statements.""" def match_func(x): return x.matches_key() # Remove exact duplicates using a set() call: logger.debug('%d statements before removing object duplicates.' % len(stmts)) st = list(set(stmts)) logger.debug('%d statements after removing object duplicates.' % len(st)) # Group statements according to whether they are matches (differing # only in their evidence). # Sort the statements in place by matches_key() st.sort(key=match_func) return itertools.groupby(st, key=match_func)
[ "def", "_get_stmt_matching_groups", "(", "stmts", ")", ":", "def", "match_func", "(", "x", ")", ":", "return", "x", ".", "matches_key", "(", ")", "# Remove exact duplicates using a set() call:", "logger", ".", "debug", "(", "'%d statements before removing object duplicates.'", "%", "len", "(", "stmts", ")", ")", "st", "=", "list", "(", "set", "(", "stmts", ")", ")", "logger", ".", "debug", "(", "'%d statements after removing object duplicates.'", "%", "len", "(", "st", ")", ")", "# Group statements according to whether they are matches (differing", "# only in their evidence).", "# Sort the statements in place by matches_key()", "st", ".", "sort", "(", "key", "=", "match_func", ")", "return", "itertools", ".", "groupby", "(", "st", ",", "key", "=", "match_func", ")" ]
Use the matches_key method to get sets of matching statements.
[ "Use", "the", "matches_key", "method", "to", "get", "sets", "of", "matching", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L78-L93
train
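A sketch of how the grouping helper is consumed; `stmts` is assumed to be a list of INDRA Statements defined elsewhere:

from indra.preassembler import Preassembler

for key, group in Preassembler._get_stmt_matching_groups(stmts):
    duplicates = list(group)  # statements identical up to their Evidence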
sorgerlab/indra
indra/preassembler/__init__.py
Preassembler.combine_duplicate_stmts
def combine_duplicate_stmts(stmts): """Combine evidence from duplicate Statements. Statements are deemed to be duplicates if they have the same key returned by the `matches_key()` method of the Statement class. This generally means that statements must be identical in terms of their arguments and can differ only in their associated `Evidence` objects. This function keeps the first instance of each set of duplicate statements and merges the lists of Evidence from all of the other statements. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Set of statements to de-duplicate. Returns ------- list of :py:class:`indra.statements.Statement` Unique statements with accumulated evidence across duplicates. Examples -------- De-duplicate and combine evidence for two statements differing only in their evidence lists: >>> map2k1 = Agent('MAP2K1') >>> mapk1 = Agent('MAPK1') >>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 1')]) >>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 2')]) >>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2]) >>> uniq_stmts [Phosphorylation(MAP2K1(), MAPK1(), T, 185)] >>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE ['evidence 1', 'evidence 2'] """ # Helper function to get a list of evidence matches keys def _ev_keys(sts): ev_keys = [] for stmt in sts: for ev in stmt.evidence: ev_keys.append(ev.matches_key()) return ev_keys # Iterate over groups of duplicate statements unique_stmts = [] for _, duplicates in Preassembler._get_stmt_matching_groups(stmts): ev_keys = set() # Get the first statement and add the evidence of all subsequent # Statements to it duplicates = list(duplicates) start_ev_keys = _ev_keys(duplicates) for stmt_ix, stmt in enumerate(duplicates): if stmt_ix == 0: new_stmt = stmt.make_generic_copy() if len(duplicates) == 1: new_stmt.uuid = stmt.uuid raw_text = [None if ag is None else ag.db_refs.get('TEXT') for ag in stmt.agent_list(deep_sorted=True)] raw_grounding = [None if ag is None else ag.db_refs for ag in stmt.agent_list(deep_sorted=True)] for ev in stmt.evidence: ev_key = ev.matches_key() + str(raw_text) + \ str(raw_grounding) if ev_key not in ev_keys: # In case there are already agents annotations, we # just add a new key for raw_text, otherwise create # a new key if 'agents' in ev.annotations: ev.annotations['agents']['raw_text'] = raw_text ev.annotations['agents']['raw_grounding'] = \ raw_grounding else: ev.annotations['agents'] = \ {'raw_text': raw_text, 'raw_grounding': raw_grounding} if 'prior_uuids' not in ev.annotations: ev.annotations['prior_uuids'] = [] ev.annotations['prior_uuids'].append(stmt.uuid) new_stmt.evidence.append(ev) ev_keys.add(ev_key) end_ev_keys = _ev_keys([new_stmt]) if len(end_ev_keys) != len(start_ev_keys): logger.debug('%d redundant evidences eliminated.' % (len(start_ev_keys) - len(end_ev_keys))) # This should never be None or anything else assert isinstance(new_stmt, Statement) unique_stmts.append(new_stmt) return unique_stmts
python
def combine_duplicate_stmts(stmts): """Combine evidence from duplicate Statements. Statements are deemed to be duplicates if they have the same key returned by the `matches_key()` method of the Statement class. This generally means that statements must be identical in terms of their arguments and can differ only in their associated `Evidence` objects. This function keeps the first instance of each set of duplicate statements and merges the lists of Evidence from all of the other statements. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Set of statements to de-duplicate. Returns ------- list of :py:class:`indra.statements.Statement` Unique statements with accumulated evidence across duplicates. Examples -------- De-duplicate and combine evidence for two statements differing only in their evidence lists: >>> map2k1 = Agent('MAP2K1') >>> mapk1 = Agent('MAPK1') >>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 1')]) >>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 2')]) >>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2]) >>> uniq_stmts [Phosphorylation(MAP2K1(), MAPK1(), T, 185)] >>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE ['evidence 1', 'evidence 2'] """ # Helper function to get a list of evidence matches keys def _ev_keys(sts): ev_keys = [] for stmt in sts: for ev in stmt.evidence: ev_keys.append(ev.matches_key()) return ev_keys # Iterate over groups of duplicate statements unique_stmts = [] for _, duplicates in Preassembler._get_stmt_matching_groups(stmts): ev_keys = set() # Get the first statement and add the evidence of all subsequent # Statements to it duplicates = list(duplicates) start_ev_keys = _ev_keys(duplicates) for stmt_ix, stmt in enumerate(duplicates): if stmt_ix == 0: new_stmt = stmt.make_generic_copy() if len(duplicates) == 1: new_stmt.uuid = stmt.uuid raw_text = [None if ag is None else ag.db_refs.get('TEXT') for ag in stmt.agent_list(deep_sorted=True)] raw_grounding = [None if ag is None else ag.db_refs for ag in stmt.agent_list(deep_sorted=True)] for ev in stmt.evidence: ev_key = ev.matches_key() + str(raw_text) + \ str(raw_grounding) if ev_key not in ev_keys: # In case there are already agents annotations, we # just add a new key for raw_text, otherwise create # a new key if 'agents' in ev.annotations: ev.annotations['agents']['raw_text'] = raw_text ev.annotations['agents']['raw_grounding'] = \ raw_grounding else: ev.annotations['agents'] = \ {'raw_text': raw_text, 'raw_grounding': raw_grounding} if 'prior_uuids' not in ev.annotations: ev.annotations['prior_uuids'] = [] ev.annotations['prior_uuids'].append(stmt.uuid) new_stmt.evidence.append(ev) ev_keys.add(ev_key) end_ev_keys = _ev_keys([new_stmt]) if len(end_ev_keys) != len(start_ev_keys): logger.debug('%d redundant evidences eliminated.' % (len(start_ev_keys) - len(end_ev_keys))) # This should never be None or anything else assert isinstance(new_stmt, Statement) unique_stmts.append(new_stmt) return unique_stmts
[ "def", "combine_duplicate_stmts", "(", "stmts", ")", ":", "# Helper function to get a list of evidence matches keys", "def", "_ev_keys", "(", "sts", ")", ":", "ev_keys", "=", "[", "]", "for", "stmt", "in", "sts", ":", "for", "ev", "in", "stmt", ".", "evidence", ":", "ev_keys", ".", "append", "(", "ev", ".", "matches_key", "(", ")", ")", "return", "ev_keys", "# Iterate over groups of duplicate statements", "unique_stmts", "=", "[", "]", "for", "_", ",", "duplicates", "in", "Preassembler", ".", "_get_stmt_matching_groups", "(", "stmts", ")", ":", "ev_keys", "=", "set", "(", ")", "# Get the first statement and add the evidence of all subsequent", "# Statements to it", "duplicates", "=", "list", "(", "duplicates", ")", "start_ev_keys", "=", "_ev_keys", "(", "duplicates", ")", "for", "stmt_ix", ",", "stmt", "in", "enumerate", "(", "duplicates", ")", ":", "if", "stmt_ix", "==", "0", ":", "new_stmt", "=", "stmt", ".", "make_generic_copy", "(", ")", "if", "len", "(", "duplicates", ")", "==", "1", ":", "new_stmt", ".", "uuid", "=", "stmt", ".", "uuid", "raw_text", "=", "[", "None", "if", "ag", "is", "None", "else", "ag", ".", "db_refs", ".", "get", "(", "'TEXT'", ")", "for", "ag", "in", "stmt", ".", "agent_list", "(", "deep_sorted", "=", "True", ")", "]", "raw_grounding", "=", "[", "None", "if", "ag", "is", "None", "else", "ag", ".", "db_refs", "for", "ag", "in", "stmt", ".", "agent_list", "(", "deep_sorted", "=", "True", ")", "]", "for", "ev", "in", "stmt", ".", "evidence", ":", "ev_key", "=", "ev", ".", "matches_key", "(", ")", "+", "str", "(", "raw_text", ")", "+", "str", "(", "raw_grounding", ")", "if", "ev_key", "not", "in", "ev_keys", ":", "# In case there are already agents annotations, we", "# just add a new key for raw_text, otherwise create", "# a new key", "if", "'agents'", "in", "ev", ".", "annotations", ":", "ev", ".", "annotations", "[", "'agents'", "]", "[", "'raw_text'", "]", "=", "raw_text", "ev", ".", "annotations", "[", "'agents'", "]", "[", "'raw_grounding'", "]", "=", "raw_grounding", "else", ":", "ev", ".", "annotations", "[", "'agents'", "]", "=", "{", "'raw_text'", ":", "raw_text", ",", "'raw_grounding'", ":", "raw_grounding", "}", "if", "'prior_uuids'", "not", "in", "ev", ".", "annotations", ":", "ev", ".", "annotations", "[", "'prior_uuids'", "]", "=", "[", "]", "ev", ".", "annotations", "[", "'prior_uuids'", "]", ".", "append", "(", "stmt", ".", "uuid", ")", "new_stmt", ".", "evidence", ".", "append", "(", "ev", ")", "ev_keys", ".", "add", "(", "ev_key", ")", "end_ev_keys", "=", "_ev_keys", "(", "[", "new_stmt", "]", ")", "if", "len", "(", "end_ev_keys", ")", "!=", "len", "(", "start_ev_keys", ")", ":", "logger", ".", "debug", "(", "'%d redundant evidences eliminated.'", "%", "(", "len", "(", "start_ev_keys", ")", "-", "len", "(", "end_ev_keys", ")", ")", ")", "# This should never be None or anything else", "assert", "isinstance", "(", "new_stmt", ",", "Statement", ")", "unique_stmts", ".", "append", "(", "new_stmt", ")", "return", "unique_stmts" ]
Combine evidence from duplicate Statements. Statements are deemed to be duplicates if they have the same key returned by the `matches_key()` method of the Statement class. This generally means that statements must be identical in terms of their arguments and can differ only in their associated `Evidence` objects. This function keeps the first instance of each set of duplicate statements and merges the lists of Evidence from all of the other statements. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Set of statements to de-duplicate. Returns ------- list of :py:class:`indra.statements.Statement` Unique statements with accumulated evidence across duplicates. Examples -------- De-duplicate and combine evidence for two statements differing only in their evidence lists: >>> map2k1 = Agent('MAP2K1') >>> mapk1 = Agent('MAPK1') >>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 1')]) >>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 2')]) >>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2]) >>> uniq_stmts [Phosphorylation(MAP2K1(), MAPK1(), T, 185)] >>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE ['evidence 1', 'evidence 2']
[ "Combine", "evidence", "from", "duplicate", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L96-L186
train
sorgerlab/indra
indra/preassembler/__init__.py
Preassembler._get_stmt_by_group
def _get_stmt_by_group(self, stmt_type, stmts_this_type, eh): """Group Statements of `stmt_type` by their hierarchical relations.""" # Dict of stmt group key tuples, indexed by their first Agent stmt_by_first = collections.defaultdict(lambda: []) # Dict of stmt group key tuples, indexed by their second Agent stmt_by_second = collections.defaultdict(lambda: []) # Dict of statements with None first, with second Agent as keys none_first = collections.defaultdict(lambda: []) # Dict of statements with None second, with first Agent as keys none_second = collections.defaultdict(lambda: []) # The dict of all statement groups, with tuples of components # or entity_matches_keys as keys stmt_by_group = collections.defaultdict(lambda: []) # Here we group Statements according to the hierarchy graph # components that their agents are part of for stmt_tuple in stmts_this_type: _, stmt = stmt_tuple entities = self._get_entities(stmt, stmt_type, eh) # At this point we have an entity list # If we're dealing with Complexes, sort the entities and use # as dict key if stmt_type == Complex: # There shouldn't be any statements of the type # e.g., Complex([Foo, None, Bar]) assert None not in entities assert len(entities) > 0 entities.sort() key = tuple(entities) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) elif stmt_type == Conversion: assert len(entities) > 0 key = (entities[0], tuple(sorted(entities[1:len(stmt.obj_from)+1])), tuple(sorted(entities[-len(stmt.obj_to):]))) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) # Now look at all other statement types # All other statements will have one or two entities elif len(entities) == 1: # If only one entity, we only need the one key # It should not be None! assert None not in entities key = tuple(entities) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) else: # Make sure we only have two entities, and they are not both # None key = tuple(entities) assert len(key) == 2 assert key != (None, None) # First agent is None; add in the statements, indexed by # 2nd if key[0] is None and stmt_tuple not in none_first[key[1]]: none_first[key[1]].append(stmt_tuple) # Second agent is None; add in the statements, indexed by # 1st elif key[1] is None and stmt_tuple not in none_second[key[0]]: none_second[key[0]].append(stmt_tuple) # Neither entity is None! elif None not in key: if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) if key not in stmt_by_first[key[0]]: stmt_by_first[key[0]].append(key) if key not in stmt_by_second[key[1]]: stmt_by_second[key[1]].append(key) # When we've gotten here, we should have stmt_by_group entries, and # we may or may not have stmt_by_first/second dicts filled out # (depending on the statement type). if none_first: # Get the keys associated with stmts having a None first # argument for second_arg, stmts in none_first.items(): # Look for any statements with this second arg second_arg_keys = stmt_by_second[second_arg] # If there are no more specific statements matching this # set of statements with a None first arg, then the # statements with the None first arg deserve to be in # their own group. if not second_arg_keys: stmt_by_group[(None, second_arg)] = stmts # On the other hand, if there are statements with a matching # second arg component, we need to add the None first # statements to all groups with the matching second arg for second_arg_key in second_arg_keys: stmt_by_group[second_arg_key] += stmts # Now do the corresponding steps for the statements with None as the # second argument: if none_second: for first_arg, stmts in none_second.items(): # Look for any statements with this first arg first_arg_keys = stmt_by_first[first_arg] # If there are no more specific statements matching this # set of statements with a None second arg, then the # statements with the None second arg deserve to be in # their own group. if not first_arg_keys: stmt_by_group[(first_arg, None)] = stmts # On the other hand, if there are statements with a matching # first arg component, we need to add the None second # statements to all groups with the matching first arg for first_arg_key in first_arg_keys: stmt_by_group[first_arg_key] += stmts return stmt_by_group
python
def _get_stmt_by_group(self, stmt_type, stmts_this_type, eh): """Group Statements of `stmt_type` by their hierarchical relations.""" # Dict of stmt group key tuples, indexed by their first Agent stmt_by_first = collections.defaultdict(lambda: []) # Dict of stmt group key tuples, indexed by their second Agent stmt_by_second = collections.defaultdict(lambda: []) # Dict of statements with None first, with second Agent as keys none_first = collections.defaultdict(lambda: []) # Dict of statements with None second, with first Agent as keys none_second = collections.defaultdict(lambda: []) # The dict of all statement groups, with tuples of components # or entity_matches_keys as keys stmt_by_group = collections.defaultdict(lambda: []) # Here we group Statements according to the hierarchy graph # components that their agents are part of for stmt_tuple in stmts_this_type: _, stmt = stmt_tuple entities = self._get_entities(stmt, stmt_type, eh) # At this point we have an entity list # If we're dealing with Complexes, sort the entities and use # as dict key if stmt_type == Complex: # There shouldn't be any statements of the type # e.g., Complex([Foo, None, Bar]) assert None not in entities assert len(entities) > 0 entities.sort() key = tuple(entities) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) elif stmt_type == Conversion: assert len(entities) > 0 key = (entities[0], tuple(sorted(entities[1:len(stmt.obj_from)+1])), tuple(sorted(entities[-len(stmt.obj_to):]))) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) # Now look at all other statement types # All other statements will have one or two entities elif len(entities) == 1: # If only one entity, we only need the one key # It should not be None! assert None not in entities key = tuple(entities) if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) else: # Make sure we only have two entities, and they are not both # None key = tuple(entities) assert len(key) == 2 assert key != (None, None) # First agent is None; add in the statements, indexed by # 2nd if key[0] is None and stmt_tuple not in none_first[key[1]]: none_first[key[1]].append(stmt_tuple) # Second agent is None; add in the statements, indexed by # 1st elif key[1] is None and stmt_tuple not in none_second[key[0]]: none_second[key[0]].append(stmt_tuple) # Neither entity is None! elif None not in key: if stmt_tuple not in stmt_by_group[key]: stmt_by_group[key].append(stmt_tuple) if key not in stmt_by_first[key[0]]: stmt_by_first[key[0]].append(key) if key not in stmt_by_second[key[1]]: stmt_by_second[key[1]].append(key) # When we've gotten here, we should have stmt_by_group entries, and # we may or may not have stmt_by_first/second dicts filled out # (depending on the statement type). if none_first: # Get the keys associated with stmts having a None first # argument for second_arg, stmts in none_first.items(): # Look for any statements with this second arg second_arg_keys = stmt_by_second[second_arg] # If there are no more specific statements matching this # set of statements with a None first arg, then the # statements with the None first arg deserve to be in # their own group. if not second_arg_keys: stmt_by_group[(None, second_arg)] = stmts # On the other hand, if there are statements with a matching # second arg component, we need to add the None first # statements to all groups with the matching second arg for second_arg_key in second_arg_keys: stmt_by_group[second_arg_key] += stmts # Now do the corresponding steps for the statements with None as the # second argument: if none_second: for first_arg, stmts in none_second.items(): # Look for any statements with this first arg first_arg_keys = stmt_by_first[first_arg] # If there are no more specific statements matching this # set of statements with a None second arg, then the # statements with the None second arg deserve to be in # their own group. if not first_arg_keys: stmt_by_group[(first_arg, None)] = stmts # On the other hand, if there are statements with a matching # first arg component, we need to add the None second # statements to all groups with the matching first arg for first_arg_key in first_arg_keys: stmt_by_group[first_arg_key] += stmts return stmt_by_group
[ "def", "_get_stmt_by_group", "(", "self", ",", "stmt_type", ",", "stmts_this_type", ",", "eh", ")", ":", "# Dict of stmt group key tuples, indexed by their first Agent", "stmt_by_first", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "# Dict of stmt group key tuples, indexed by their second Agent", "stmt_by_second", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "# Dict of statements with None first, with second Agent as keys", "none_first", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "# Dict of statements with None second, with first Agent as keys", "none_second", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "# The dict of all statement groups, with tuples of components", "# or entity_matches_keys as keys", "stmt_by_group", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "# Here we group Statements according to the hierarchy graph", "# components that their agents are part of", "for", "stmt_tuple", "in", "stmts_this_type", ":", "_", ",", "stmt", "=", "stmt_tuple", "entities", "=", "self", ".", "_get_entities", "(", "stmt", ",", "stmt_type", ",", "eh", ")", "# At this point we have an entity list", "# If we're dealing with Complexes, sort the entities and use", "# as dict key", "if", "stmt_type", "==", "Complex", ":", "# There shouldn't be any statements of the type", "# e.g., Complex([Foo, None, Bar])", "assert", "None", "not", "in", "entities", "assert", "len", "(", "entities", ")", ">", "0", "entities", ".", "sort", "(", ")", "key", "=", "tuple", "(", "entities", ")", "if", "stmt_tuple", "not", "in", "stmt_by_group", "[", "key", "]", ":", "stmt_by_group", "[", "key", "]", ".", "append", "(", "stmt_tuple", ")", "elif", "stmt_type", "==", "Conversion", ":", "assert", "len", "(", "entities", ")", ">", "0", "key", "=", "(", "entities", "[", "0", "]", ",", "tuple", "(", "sorted", "(", "entities", "[", "1", ":", "len", "(", "stmt", ".", "obj_from", ")", "+", "1", "]", ")", ")", ",", "tuple", "(", "sorted", "(", "entities", "[", "-", "len", "(", "stmt", ".", "obj_to", ")", ":", "]", ")", ")", ")", "if", "stmt_tuple", "not", "in", "stmt_by_group", "[", "key", "]", ":", "stmt_by_group", "[", "key", "]", ".", "append", "(", "stmt_tuple", ")", "# Now look at all other statement types", "# All other statements will have one or two entities", "elif", "len", "(", "entities", ")", "==", "1", ":", "# If only one entity, we only need the one key", "# It should not be None!", "assert", "None", "not", "in", "entities", "key", "=", "tuple", "(", "entities", ")", "if", "stmt_tuple", "not", "in", "stmt_by_group", "[", "key", "]", ":", "stmt_by_group", "[", "key", "]", ".", "append", "(", "stmt_tuple", ")", "else", ":", "# Make sure we only have two entities, and they are not both", "# None", "key", "=", "tuple", "(", "entities", ")", "assert", "len", "(", "key", ")", "==", "2", "assert", "key", "!=", "(", "None", ",", "None", ")", "# First agent is None; add in the statements, indexed by", "# 2nd", "if", "key", "[", "0", "]", "is", "None", "and", "stmt_tuple", "not", "in", "none_first", "[", "key", "[", "1", "]", "]", ":", "none_first", "[", "key", "[", "1", "]", "]", ".", "append", "(", "stmt_tuple", ")", "# Second agent is None; add in the statements, indexed by", "# 1st", "elif", "key", "[", "1", "]", "is", "None", "and", "stmt_tuple", "not", "in", "none_second", "[", "key", "[", "0", "]", "]", ":", "none_second", "[", "key", "[", "0", "]", "]", ".", "append", 
"(", "stmt_tuple", ")", "# Neither entity is None!", "elif", "None", "not", "in", "key", ":", "if", "stmt_tuple", "not", "in", "stmt_by_group", "[", "key", "]", ":", "stmt_by_group", "[", "key", "]", ".", "append", "(", "stmt_tuple", ")", "if", "key", "not", "in", "stmt_by_first", "[", "key", "[", "0", "]", "]", ":", "stmt_by_first", "[", "key", "[", "0", "]", "]", ".", "append", "(", "key", ")", "if", "key", "not", "in", "stmt_by_second", "[", "key", "[", "1", "]", "]", ":", "stmt_by_second", "[", "key", "[", "1", "]", "]", ".", "append", "(", "key", ")", "# When we've gotten here, we should have stmt_by_group entries, and", "# we may or may not have stmt_by_first/second dicts filled out", "# (depending on the statement type).", "if", "none_first", ":", "# Get the keys associated with stmts having a None first", "# argument", "for", "second_arg", ",", "stmts", "in", "none_first", ".", "items", "(", ")", ":", "# Look for any statements with this second arg", "second_arg_keys", "=", "stmt_by_second", "[", "second_arg", "]", "# If there are no more specific statements matching this", "# set of statements with a None first arg, then the", "# statements with the None first arg deserve to be in", "# their own group.", "if", "not", "second_arg_keys", ":", "stmt_by_group", "[", "(", "None", ",", "second_arg", ")", "]", "=", "stmts", "# On the other hand, if there are statements with a matching", "# second arg component, we need to add the None first", "# statements to all groups with the matching second arg", "for", "second_arg_key", "in", "second_arg_keys", ":", "stmt_by_group", "[", "second_arg_key", "]", "+=", "stmts", "# Now do the corresponding steps for the statements with None as the", "# second argument:", "if", "none_second", ":", "for", "first_arg", ",", "stmts", "in", "none_second", ".", "items", "(", ")", ":", "# Look for any statements with this first arg", "first_arg_keys", "=", "stmt_by_first", "[", "first_arg", "]", "# If there are no more specific statements matching this", "# set of statements with a None second arg, then the", "# statements with the None second arg deserve to be in", "# their own group.", "if", "not", "first_arg_keys", ":", "stmt_by_group", "[", "(", "first_arg", ",", "None", ")", "]", "=", "stmts", "# On the other hand, if there are statements with a matching", "# first arg component, we need to add the None second", "# statements to all groups with the matching first arg", "for", "first_arg_key", "in", "first_arg_keys", ":", "stmt_by_group", "[", "first_arg_key", "]", "+=", "stmts", "return", "stmt_by_group" ]
Group Statements of `stmt_type` by their hierarchical relations.
[ "Group", "Statements", "of", "stmt_type", "by", "their", "hierarchical", "relations", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L220-L326
train
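Usage note: a minimal sketch of how this grouping folds underspecified statements into groups, assuming the INDRA snapshot pinned by the sha above. `_get_stmt_by_group` is a private helper whose signature may change; the call below mirrors how `find_contradicts` (a later record) invokes it, and the agent names are arbitrary examples.

from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
from indra.statements import Agent, Phosphorylation

st1 = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))
st2 = Phosphorylation(None, Agent('MAP2K1'))  # underspecified: no enzyme
pa = Preassembler(hierarchies, [st1, st2])
# stmts_this_type is a list of (index, Statement) tuples
groups = pa._get_stmt_by_group(Phosphorylation,
                               list(enumerate(pa.stmts)),
                               hierarchies['entity'])
# st2 should be folded into the (BRAF, MAP2K1) group rather than
# forming its own (None, MAP2K1) group
for key, members in groups.items():
    print(key, [s for _, s in members])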
sorgerlab/indra
indra/preassembler/__init__.py
Preassembler.combine_related
def combine_related(self, return_toplevel=True, poolsize=None,
                        size_cutoff=100):
        """Connect related statements based on their refinement relationships.

        This function takes as a starting point the unique statements (with
        duplicates removed) and returns a modified flat list of statements
        containing only those statements which do not represent a refinement
        of other existing statements. In other words, the more general
        versions of a given statement do not appear at the top level, but
        instead are listed in the `supports` field of the top-level
        statements.

        If :py:attr:`unique_stmts` has not been initialized with the
        de-duplicated statements, :py:meth:`combine_duplicates` is called
        internally.

        After this function is called the attribute :py:attr:`related_stmts`
        is set as a side-effect.

        The procedure for combining statements in this way involves a series
        of steps:

        1. The statements are grouped by type (e.g., Phosphorylation) and
           each type is iterated over independently.
        2. Statements of the same type are then grouped according to their
           Agents' entity hierarchy component identifiers. For instance,
           ERK, MAPK1 and MAPK3 are all in the same connected component in
           the entity hierarchy and therefore all Statements of the same
           type referencing these entities will be grouped. This grouping
           assures that relations are only possible within Statement groups
           and not among groups. For two Statements to be in the same group
           at this step, the Statements must be the same type and the Agents
           at each position in the Agent lists must either be in the same
           hierarchy component, or if they are not in the hierarchy, must
           have identical entity_matches_keys. Statements with None in one
           of the Agent list positions are collected separately at this
           stage.
        3. Statements with None at either the first or second position are
           iterated over. For a statement with a None as the first Agent,
           the second Agent is examined; then the Statement with None is
           added to all Statement groups with a corresponding component or
           entity_matches_key in the second position. The same procedure is
           performed for Statements with None at the second Agent position.
        4. The statements within each group are then compared; if one
           statement represents a refinement of the other (as defined by the
           `refinement_of()` method implemented for the Statement), then the
           more refined statement is added to the `supports` field of the
           more general statement, and the more general statement is added
           to the `supported_by` field of the more refined statement.
        5. A new flat list of statements is created that contains only those
           statements that have no `supports` entries (statements containing
           such entries are not eliminated, because they will be retrievable
           from the `supported_by` fields of other statements). This list
           is returned to the caller.

        On multi-core machines, the algorithm can be parallelized by setting
        the poolsize argument to the desired number of worker processes.
        This feature is only available in Python > 3.4.

        .. note:: Subfamily relationships must be consistent across arguments

            For now, we require that merges can only occur if the *isa*
            relationships are all in the *same direction for all the agents*
            in a Statement. For example, the two statement groups:
            `RAF_family -> MEK1` and `BRAF -> MEK_family` would not be
            merged, since BRAF *isa* RAF_family, but MEK_family is not a
            MEK1. In the future this restriction could be revisited.

        Parameters
        ----------
        return_toplevel : Optional[bool]
            If True only the top level statements are returned.
            If False, all statements are returned. Default: True
        poolsize : Optional[int]
            The number of worker processes to use to parallelize the
            comparisons performed by the function. If None (default), no
            parallelization is performed. NOTE: Parallelization is only
            available on Python 3.4 and above.
        size_cutoff : Optional[int]
            Groups with size_cutoff or more statements are sent to worker
            processes, while smaller groups are compared in the parent
            process. Default value is 100. Not relevant when parallelization
            is not used.

        Returns
        -------
        list of :py:class:`indra.statement.Statement`
            The returned list contains Statements representing the more
            concrete/refined versions of the Statements involving particular
            entities. The attribute :py:attr:`related_stmts` is also set to
            this list. However, if return_toplevel is False then all
            statements are returned, irrespective of level of specificity.
            In this case the relationships between statements can be
            accessed via the supports/supported_by attributes.

        Examples
        --------
        A more general statement with no information about a Phosphorylation
        site is identified as supporting a more specific statement:

        >>> from indra.preassembler.hierarchy_manager import hierarchies
        >>> braf = Agent('BRAF')
        >>> map2k1 = Agent('MAP2K1')
        >>> st1 = Phosphorylation(braf, map2k1)
        >>> st2 = Phosphorylation(braf, map2k1, residue='S')
        >>> pa = Preassembler(hierarchies, [st1, st2])
        >>> combined_stmts = pa.combine_related() # doctest:+ELLIPSIS
        >>> combined_stmts
        [Phosphorylation(BRAF(), MAP2K1(), S)]
        >>> combined_stmts[0].supported_by
        [Phosphorylation(BRAF(), MAP2K1())]
        >>> combined_stmts[0].supported_by[0].supports
        [Phosphorylation(BRAF(), MAP2K1(), S)]
        """
        if self.related_stmts is not None:
            if return_toplevel:
                return self.related_stmts
            else:
                assert self.unique_stmts is not None
                return self.unique_stmts

        # Call combine_duplicates, which lazily initializes self.unique_stmts
        unique_stmts = self.combine_duplicates()

        # Generate the index map, linking related statements.
        idx_map = self._generate_id_maps(unique_stmts, poolsize, size_cutoff)

        # Now iterate over all indices and set supports/supported by
        for ix1, ix2 in idx_map:
            unique_stmts[ix1].supported_by.append(unique_stmts[ix2])
            unique_stmts[ix2].supports.append(unique_stmts[ix1])
        # Get the top level statements
        self.related_stmts = [st for st in unique_stmts if not st.supports]
        logger.debug('%d top level' % len(self.related_stmts))
        if return_toplevel:
            return self.related_stmts
        else:
            return unique_stmts
python
def combine_related(self, return_toplevel=True, poolsize=None,
                        size_cutoff=100):
        """Connect related statements based on their refinement relationships.

        This function takes as a starting point the unique statements (with
        duplicates removed) and returns a modified flat list of statements
        containing only those statements which do not represent a refinement
        of other existing statements. In other words, the more general
        versions of a given statement do not appear at the top level, but
        instead are listed in the `supports` field of the top-level
        statements.

        If :py:attr:`unique_stmts` has not been initialized with the
        de-duplicated statements, :py:meth:`combine_duplicates` is called
        internally.

        After this function is called the attribute :py:attr:`related_stmts`
        is set as a side-effect.

        The procedure for combining statements in this way involves a series
        of steps:

        1. The statements are grouped by type (e.g., Phosphorylation) and
           each type is iterated over independently.
        2. Statements of the same type are then grouped according to their
           Agents' entity hierarchy component identifiers. For instance,
           ERK, MAPK1 and MAPK3 are all in the same connected component in
           the entity hierarchy and therefore all Statements of the same
           type referencing these entities will be grouped. This grouping
           assures that relations are only possible within Statement groups
           and not among groups. For two Statements to be in the same group
           at this step, the Statements must be the same type and the Agents
           at each position in the Agent lists must either be in the same
           hierarchy component, or if they are not in the hierarchy, must
           have identical entity_matches_keys. Statements with None in one
           of the Agent list positions are collected separately at this
           stage.
        3. Statements with None at either the first or second position are
           iterated over. For a statement with a None as the first Agent,
           the second Agent is examined; then the Statement with None is
           added to all Statement groups with a corresponding component or
           entity_matches_key in the second position. The same procedure is
           performed for Statements with None at the second Agent position.
        4. The statements within each group are then compared; if one
           statement represents a refinement of the other (as defined by the
           `refinement_of()` method implemented for the Statement), then the
           more refined statement is added to the `supports` field of the
           more general statement, and the more general statement is added
           to the `supported_by` field of the more refined statement.
        5. A new flat list of statements is created that contains only those
           statements that have no `supports` entries (statements containing
           such entries are not eliminated, because they will be retrievable
           from the `supported_by` fields of other statements). This list
           is returned to the caller.

        On multi-core machines, the algorithm can be parallelized by setting
        the poolsize argument to the desired number of worker processes.
        This feature is only available in Python > 3.4.

        .. note:: Subfamily relationships must be consistent across arguments

            For now, we require that merges can only occur if the *isa*
            relationships are all in the *same direction for all the agents*
            in a Statement. For example, the two statement groups:
            `RAF_family -> MEK1` and `BRAF -> MEK_family` would not be
            merged, since BRAF *isa* RAF_family, but MEK_family is not a
            MEK1. In the future this restriction could be revisited.

        Parameters
        ----------
        return_toplevel : Optional[bool]
            If True only the top level statements are returned.
            If False, all statements are returned. Default: True
        poolsize : Optional[int]
            The number of worker processes to use to parallelize the
            comparisons performed by the function. If None (default), no
            parallelization is performed. NOTE: Parallelization is only
            available on Python 3.4 and above.
        size_cutoff : Optional[int]
            Groups with size_cutoff or more statements are sent to worker
            processes, while smaller groups are compared in the parent
            process. Default value is 100. Not relevant when parallelization
            is not used.

        Returns
        -------
        list of :py:class:`indra.statement.Statement`
            The returned list contains Statements representing the more
            concrete/refined versions of the Statements involving particular
            entities. The attribute :py:attr:`related_stmts` is also set to
            this list. However, if return_toplevel is False then all
            statements are returned, irrespective of level of specificity.
            In this case the relationships between statements can be
            accessed via the supports/supported_by attributes.

        Examples
        --------
        A more general statement with no information about a Phosphorylation
        site is identified as supporting a more specific statement:

        >>> from indra.preassembler.hierarchy_manager import hierarchies
        >>> braf = Agent('BRAF')
        >>> map2k1 = Agent('MAP2K1')
        >>> st1 = Phosphorylation(braf, map2k1)
        >>> st2 = Phosphorylation(braf, map2k1, residue='S')
        >>> pa = Preassembler(hierarchies, [st1, st2])
        >>> combined_stmts = pa.combine_related() # doctest:+ELLIPSIS
        >>> combined_stmts
        [Phosphorylation(BRAF(), MAP2K1(), S)]
        >>> combined_stmts[0].supported_by
        [Phosphorylation(BRAF(), MAP2K1())]
        >>> combined_stmts[0].supported_by[0].supports
        [Phosphorylation(BRAF(), MAP2K1(), S)]
        """
        if self.related_stmts is not None:
            if return_toplevel:
                return self.related_stmts
            else:
                assert self.unique_stmts is not None
                return self.unique_stmts

        # Call combine_duplicates, which lazily initializes self.unique_stmts
        unique_stmts = self.combine_duplicates()

        # Generate the index map, linking related statements.
        idx_map = self._generate_id_maps(unique_stmts, poolsize, size_cutoff)

        # Now iterate over all indices and set supports/supported by
        for ix1, ix2 in idx_map:
            unique_stmts[ix1].supported_by.append(unique_stmts[ix2])
            unique_stmts[ix2].supports.append(unique_stmts[ix1])
        # Get the top level statements
        self.related_stmts = [st for st in unique_stmts if not st.supports]
        logger.debug('%d top level' % len(self.related_stmts))
        if return_toplevel:
            return self.related_stmts
        else:
            return unique_stmts
[ "def", "combine_related", "(", "self", ",", "return_toplevel", "=", "True", ",", "poolsize", "=", "None", ",", "size_cutoff", "=", "100", ")", ":", "if", "self", ".", "related_stmts", "is", "not", "None", ":", "if", "return_toplevel", ":", "return", "self", ".", "related_stmts", "else", ":", "assert", "self", ".", "unique_stmts", "is", "not", "None", "return", "self", ".", "unique_stmts", "# Call combine_duplicates, which lazily initializes self.unique_stmts", "unique_stmts", "=", "self", ".", "combine_duplicates", "(", ")", "# Generate the index map, linking related statements.", "idx_map", "=", "self", ".", "_generate_id_maps", "(", "unique_stmts", ",", "poolsize", ",", "size_cutoff", ")", "# Now iterate over all indices and set supports/supported by", "for", "ix1", ",", "ix2", "in", "idx_map", ":", "unique_stmts", "[", "ix1", "]", ".", "supported_by", ".", "append", "(", "unique_stmts", "[", "ix2", "]", ")", "unique_stmts", "[", "ix2", "]", ".", "supports", ".", "append", "(", "unique_stmts", "[", "ix1", "]", ")", "# Get the top level statements", "self", ".", "related_stmts", "=", "[", "st", "for", "st", "in", "unique_stmts", "if", "not", "st", ".", "supports", "]", "logger", ".", "debug", "(", "'%d top level'", "%", "len", "(", "self", ".", "related_stmts", ")", ")", "if", "return_toplevel", ":", "return", "self", ".", "related_stmts", "else", ":", "return", "unique_stmts" ]
Connect related statements based on their refinement relationships.

This function takes as a starting point the unique statements (with
duplicates removed) and returns a modified flat list of statements
containing only those statements which do not represent a refinement of
other existing statements. In other words, the more general versions of
a given statement do not appear at the top level, but instead are
listed in the `supports` field of the top-level statements.

If :py:attr:`unique_stmts` has not been initialized with the
de-duplicated statements, :py:meth:`combine_duplicates` is called
internally.

After this function is called the attribute :py:attr:`related_stmts` is
set as a side-effect.

The procedure for combining statements in this way involves a series of
steps:

1. The statements are grouped by type (e.g., Phosphorylation) and each
   type is iterated over independently.
2. Statements of the same type are then grouped according to their
   Agents' entity hierarchy component identifiers. For instance, ERK,
   MAPK1 and MAPK3 are all in the same connected component in the
   entity hierarchy and therefore all Statements of the same type
   referencing these entities will be grouped. This grouping assures
   that relations are only possible within Statement groups and not
   among groups. For two Statements to be in the same group at this
   step, the Statements must be the same type and the Agents at each
   position in the Agent lists must either be in the same hierarchy
   component, or if they are not in the hierarchy, must have identical
   entity_matches_keys. Statements with None in one of the Agent list
   positions are collected separately at this stage.
3. Statements with None at either the first or second position are
   iterated over. For a statement with a None as the first Agent, the
   second Agent is examined; then the Statement with None is added to
   all Statement groups with a corresponding component or
   entity_matches_key in the second position. The same procedure is
   performed for Statements with None at the second Agent position.
4. The statements within each group are then compared; if one statement
   represents a refinement of the other (as defined by the
   `refinement_of()` method implemented for the Statement), then the
   more refined statement is added to the `supports` field of the more
   general statement, and the more general statement is added to the
   `supported_by` field of the more refined statement.
5. A new flat list of statements is created that contains only those
   statements that have no `supports` entries (statements containing
   such entries are not eliminated, because they will be retrievable
   from the `supported_by` fields of other statements). This list is
   returned to the caller.

On multi-core machines, the algorithm can be parallelized by setting
the poolsize argument to the desired number of worker processes.
This feature is only available in Python > 3.4.

.. note:: Subfamily relationships must be consistent across arguments

    For now, we require that merges can only occur if the *isa*
    relationships are all in the *same direction for all the agents* in
    a Statement. For example, the two statement groups: `RAF_family ->
    MEK1` and `BRAF -> MEK_family` would not be merged, since BRAF
    *isa* RAF_family, but MEK_family is not a MEK1. In the future this
    restriction could be revisited.

Parameters
----------
return_toplevel : Optional[bool]
    If True only the top level statements are returned.
    If False, all statements are returned. Default: True
poolsize : Optional[int]
    The number of worker processes to use to parallelize the
    comparisons performed by the function. If None (default), no
    parallelization is performed. NOTE: Parallelization is only
    available on Python 3.4 and above.
size_cutoff : Optional[int]
    Groups with size_cutoff or more statements are sent to worker
    processes, while smaller groups are compared in the parent process.
    Default value is 100. Not relevant when parallelization is not
    used.

Returns
-------
list of :py:class:`indra.statement.Statement`
    The returned list contains Statements representing the more
    concrete/refined versions of the Statements involving particular
    entities. The attribute :py:attr:`related_stmts` is also set to
    this list. However, if return_toplevel is False then all statements
    are returned, irrespective of level of specificity. In this case
    the relationships between statements can be accessed via the
    supports/supported_by attributes.

Examples
--------
A more general statement with no information about a Phosphorylation
site is identified as supporting a more specific statement:

>>> from indra.preassembler.hierarchy_manager import hierarchies
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(hierarchies, [st1, st2])
>>> combined_stmts = pa.combine_related() # doctest:+ELLIPSIS
>>> combined_stmts
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> combined_stmts[0].supported_by
[Phosphorylation(BRAF(), MAP2K1())]
>>> combined_stmts[0].supported_by[0].supports
[Phosphorylation(BRAF(), MAP2K1(), S)]
[ "Connect", "related", "statements", "based", "on", "their", "refinement", "relationships", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L428-L563
train
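Usage note: the doctest above shows the top-level view; this sketch shows the return_toplevel=False variant under the same assumptions (a working INDRA install with its default hierarchies).

from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
from indra.statements import Agent, Phosphorylation

st1 = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))
st2 = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'), residue='S')
pa = Preassembler(hierarchies, [st1, st2])
all_stmts = pa.combine_related(return_toplevel=False)
for st in all_stmts:
    # top-level statements have an empty supports list
    print(st, '| supports:', st.supports,
          '| supported_by:', st.supported_by)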
sorgerlab/indra
indra/preassembler/__init__.py
Preassembler.find_contradicts
def find_contradicts(self):
        """Return pairs of contradicting Statements.

        Returns
        -------
        contradicts : list(tuple(Statement, Statement))
            A list of Statement pairs that are contradicting.
        """
        eh = self.hierarchies['entity']
        # Make a dict of Statement by type
        stmts_by_type = collections.defaultdict(lambda: [])
        for idx, stmt in enumerate(self.stmts):
            stmts_by_type[indra_stmt_type(stmt)].append((idx, stmt))

        # Handle Statements with polarity first
        pos_stmts = AddModification.__subclasses__()
        neg_stmts = [modclass_to_inverse[c] for c in pos_stmts]

        pos_stmts += [Activation, IncreaseAmount]
        neg_stmts += [Inhibition, DecreaseAmount]

        contradicts = []
        for pst, nst in zip(pos_stmts, neg_stmts):
            poss = stmts_by_type.get(pst, [])
            negs = stmts_by_type.get(nst, [])

            pos_stmt_by_group = self._get_stmt_by_group(pst, poss, eh)
            neg_stmt_by_group = self._get_stmt_by_group(nst, negs, eh)
            for key, pg in pos_stmt_by_group.items():
                ng = neg_stmt_by_group.get(key, [])
                for (_, st1), (_, st2) in itertools.product(pg, ng):
                    if st1.contradicts(st2, self.hierarchies):
                        contradicts.append((st1, st2))

        # Handle neutral Statements next
        neu_stmts = [Influence, ActiveForm]
        for stt in neu_stmts:
            stmts = stmts_by_type.get(stt, [])
            for (_, st1), (_, st2) in itertools.combinations(stmts, 2):
                if st1.contradicts(st2, self.hierarchies):
                    contradicts.append((st1, st2))

        return contradicts
python
def find_contradicts(self):
        """Return pairs of contradicting Statements.

        Returns
        -------
        contradicts : list(tuple(Statement, Statement))
            A list of Statement pairs that are contradicting.
        """
        eh = self.hierarchies['entity']
        # Make a dict of Statement by type
        stmts_by_type = collections.defaultdict(lambda: [])
        for idx, stmt in enumerate(self.stmts):
            stmts_by_type[indra_stmt_type(stmt)].append((idx, stmt))

        # Handle Statements with polarity first
        pos_stmts = AddModification.__subclasses__()
        neg_stmts = [modclass_to_inverse[c] for c in pos_stmts]

        pos_stmts += [Activation, IncreaseAmount]
        neg_stmts += [Inhibition, DecreaseAmount]

        contradicts = []
        for pst, nst in zip(pos_stmts, neg_stmts):
            poss = stmts_by_type.get(pst, [])
            negs = stmts_by_type.get(nst, [])

            pos_stmt_by_group = self._get_stmt_by_group(pst, poss, eh)
            neg_stmt_by_group = self._get_stmt_by_group(nst, negs, eh)
            for key, pg in pos_stmt_by_group.items():
                ng = neg_stmt_by_group.get(key, [])
                for (_, st1), (_, st2) in itertools.product(pg, ng):
                    if st1.contradicts(st2, self.hierarchies):
                        contradicts.append((st1, st2))

        # Handle neutral Statements next
        neu_stmts = [Influence, ActiveForm]
        for stt in neu_stmts:
            stmts = stmts_by_type.get(stt, [])
            for (_, st1), (_, st2) in itertools.combinations(stmts, 2):
                if st1.contradicts(st2, self.hierarchies):
                    contradicts.append((st1, st2))

        return contradicts
[ "def", "find_contradicts", "(", "self", ")", ":", "eh", "=", "self", ".", "hierarchies", "[", "'entity'", "]", "# Make a dict of Statement by type", "stmts_by_type", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "for", "idx", ",", "stmt", "in", "enumerate", "(", "self", ".", "stmts", ")", ":", "stmts_by_type", "[", "indra_stmt_type", "(", "stmt", ")", "]", ".", "append", "(", "(", "idx", ",", "stmt", ")", ")", "# Handle Statements with polarity first", "pos_stmts", "=", "AddModification", ".", "__subclasses__", "(", ")", "neg_stmts", "=", "[", "modclass_to_inverse", "[", "c", "]", "for", "c", "in", "pos_stmts", "]", "pos_stmts", "+=", "[", "Activation", ",", "IncreaseAmount", "]", "neg_stmts", "+=", "[", "Inhibition", ",", "DecreaseAmount", "]", "contradicts", "=", "[", "]", "for", "pst", ",", "nst", "in", "zip", "(", "pos_stmts", ",", "neg_stmts", ")", ":", "poss", "=", "stmts_by_type", ".", "get", "(", "pst", ",", "[", "]", ")", "negs", "=", "stmts_by_type", ".", "get", "(", "nst", ",", "[", "]", ")", "pos_stmt_by_group", "=", "self", ".", "_get_stmt_by_group", "(", "pst", ",", "poss", ",", "eh", ")", "neg_stmt_by_group", "=", "self", ".", "_get_stmt_by_group", "(", "nst", ",", "negs", ",", "eh", ")", "for", "key", ",", "pg", "in", "pos_stmt_by_group", ".", "items", "(", ")", ":", "ng", "=", "neg_stmt_by_group", ".", "get", "(", "key", ",", "[", "]", ")", "for", "(", "_", ",", "st1", ")", ",", "(", "_", ",", "st2", ")", "in", "itertools", ".", "product", "(", "pg", ",", "ng", ")", ":", "if", "st1", ".", "contradicts", "(", "st2", ",", "self", ".", "hierarchies", ")", ":", "contradicts", ".", "append", "(", "(", "st1", ",", "st2", ")", ")", "# Handle neutral Statements next", "neu_stmts", "=", "[", "Influence", ",", "ActiveForm", "]", "for", "stt", "in", "neu_stmts", ":", "stmts", "=", "stmts_by_type", ".", "get", "(", "stt", ",", "[", "]", ")", "for", "(", "_", ",", "st1", ")", ",", "(", "_", ",", "st2", ")", "in", "itertools", ".", "combinations", "(", "stmts", ",", "2", ")", ":", "if", "st1", ".", "contradicts", "(", "st2", ",", "self", ".", "hierarchies", ")", ":", "contradicts", ".", "append", "(", "(", "st1", ",", "st2", ")", ")", "return", "contradicts" ]
Return pairs of contradicting Statements.

Returns
-------
contradicts : list(tuple(Statement, Statement))
    A list of Statement pairs that are contradicting.
[ "Return", "pairs", "of", "contradicting", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L565-L608
train
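Usage note: a minimal sketch against the same INDRA snapshot; an Activation and an Inhibition between the same agent pair should come back as one contradicting pair.

from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
from indra.statements import Agent, Activation, Inhibition

st1 = Activation(Agent('BRAF'), Agent('MAP2K1'))
st2 = Inhibition(Agent('BRAF'), Agent('MAP2K1'))
pa = Preassembler(hierarchies, [st1, st2])
for st_a, st_b in pa.find_contradicts():
    print(st_a, 'contradicts', st_b)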
sorgerlab/indra
indra/literature/deft_tools.py
get_text_content_for_pmids
def get_text_content_for_pmids(pmids):
    """Get text content for articles given a list of their pmids

    Parameters
    ----------
    pmids : list of str

    Returns
    -------
    text_content : list of str
    """
    pmc_pmids = set(pmc_client.filter_pmids(pmids, source_type='fulltext'))

    pmc_ids = []
    for pmid in pmc_pmids:
        pmc_id = pmc_client.id_lookup(pmid, idtype='pmid')['pmcid']
        if pmc_id:
            pmc_ids.append(pmc_id)
        else:
            pmc_pmids.discard(pmid)

    pmc_xmls = []
    for pmc_id in pmc_ids:
        pmc_xmls.append(pmc_client.get_xml(pmc_id))
        time.sleep(0.5)

    remaining_pmids = set(pmids) - pmc_pmids
    abstracts = []
    for pmid in remaining_pmids:
        abstract = pubmed_client.get_abstract(pmid)
        abstracts.append(abstract)
        time.sleep(0.5)

    return [text_content for source in (pmc_xmls, abstracts)
            for text_content in source if text_content is not None]
python
def get_text_content_for_pmids(pmids):
    """Get text content for articles given a list of their pmids

    Parameters
    ----------
    pmids : list of str

    Returns
    -------
    text_content : list of str
    """
    pmc_pmids = set(pmc_client.filter_pmids(pmids, source_type='fulltext'))

    pmc_ids = []
    for pmid in pmc_pmids:
        pmc_id = pmc_client.id_lookup(pmid, idtype='pmid')['pmcid']
        if pmc_id:
            pmc_ids.append(pmc_id)
        else:
            pmc_pmids.discard(pmid)

    pmc_xmls = []
    for pmc_id in pmc_ids:
        pmc_xmls.append(pmc_client.get_xml(pmc_id))
        time.sleep(0.5)

    remaining_pmids = set(pmids) - pmc_pmids
    abstracts = []
    for pmid in remaining_pmids:
        abstract = pubmed_client.get_abstract(pmid)
        abstracts.append(abstract)
        time.sleep(0.5)

    return [text_content for source in (pmc_xmls, abstracts)
            for text_content in source if text_content is not None]
[ "def", "get_text_content_for_pmids", "(", "pmids", ")", ":", "pmc_pmids", "=", "set", "(", "pmc_client", ".", "filter_pmids", "(", "pmids", ",", "source_type", "=", "'fulltext'", ")", ")", "pmc_ids", "=", "[", "]", "for", "pmid", "in", "pmc_pmids", ":", "pmc_id", "=", "pmc_client", ".", "id_lookup", "(", "pmid", ",", "idtype", "=", "'pmid'", ")", "[", "'pmcid'", "]", "if", "pmc_id", ":", "pmc_ids", ".", "append", "(", "pmc_id", ")", "else", ":", "pmc_pmids", ".", "discard", "(", "pmid", ")", "pmc_xmls", "=", "[", "]", "failed", "=", "set", "(", ")", "for", "pmc_id", "in", "pmc_ids", ":", "if", "pmc_id", "is", "not", "None", ":", "pmc_xmls", ".", "append", "(", "pmc_client", ".", "get_xml", "(", "pmc_id", ")", ")", "else", ":", "failed", ".", "append", "(", "pmid", ")", "time", ".", "sleep", "(", "0.5", ")", "remaining_pmids", "=", "set", "(", "pmids", ")", "-", "pmc_pmids", "|", "failed", "abstracts", "=", "[", "]", "for", "pmid", "in", "remaining_pmids", ":", "abstract", "=", "pubmed_client", ".", "get_abstract", "(", "pmid", ")", "abstracts", ".", "append", "(", "abstract", ")", "time", ".", "sleep", "(", "0.5", ")", "return", "[", "text_content", "for", "source", "in", "(", "pmc_xmls", ",", "abstracts", ")", "for", "text_content", "in", "source", "if", "text_content", "is", "not", "None", "]" ]
Get text content for articles given a list of their pmids

Parameters
----------
pmids : list of str

Returns
-------
text_content : list of str
[ "Get", "text", "content", "for", "articles", "given", "a", "list", "of", "their", "pmids" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/deft_tools.py#L46-L84
train
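Usage note: a sketch of the intended call; the PMIDs are arbitrary examples and the function needs network access to PubMed/PMC. It also sleeps 0.5 s per request, so long input lists are slow.

from indra.literature.deft_tools import get_text_content_for_pmids

texts = get_text_content_for_pmids(['27168024', '28546431'])
print('%d texts retrieved' % len(texts))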
sorgerlab/indra
indra/literature/deft_tools.py
universal_extract_paragraphs
def universal_extract_paragraphs(xml):
    """Extract paragraphs from xml that could be from different sources

    First try to parse the xml as if it came from Elsevier. If we do not
    have valid Elsevier xml this will throw an exception. The text
    extraction function in the pmc client may not throw an exception when
    parsing Elsevier xml, silently processing the xml incorrectly.

    Parameters
    ----------
    xml : str
        Either an NLM xml, Elsevier xml or plaintext

    Returns
    -------
    paragraphs : list of str
        Extracted plaintext paragraphs from NLM or Elsevier XML
    """
    try:
        paragraphs = elsevier_client.extract_paragraphs(xml)
    except Exception:
        paragraphs = None
    if paragraphs is None:
        try:
            paragraphs = pmc_client.extract_paragraphs(xml)
        except Exception:
            paragraphs = [xml]
    return paragraphs
python
def universal_extract_paragraphs(xml):
    """Extract paragraphs from xml that could be from different sources

    First try to parse the xml as if it came from Elsevier. If we do not
    have valid Elsevier xml this will throw an exception. The text
    extraction function in the pmc client may not throw an exception when
    parsing Elsevier xml, silently processing the xml incorrectly.

    Parameters
    ----------
    xml : str
        Either an NLM xml, Elsevier xml or plaintext

    Returns
    -------
    paragraphs : list of str
        Extracted plaintext paragraphs from NLM or Elsevier XML
    """
    try:
        paragraphs = elsevier_client.extract_paragraphs(xml)
    except Exception:
        paragraphs = None
    if paragraphs is None:
        try:
            paragraphs = pmc_client.extract_paragraphs(xml)
        except Exception:
            paragraphs = [xml]
    return paragraphs
[ "def", "universal_extract_paragraphs", "(", "xml", ")", ":", "try", ":", "paragraphs", "=", "elsevier_client", ".", "extract_paragraphs", "(", "xml", ")", "except", "Exception", ":", "paragraphs", "=", "None", "if", "paragraphs", "is", "None", ":", "try", ":", "paragraphs", "=", "pmc_client", ".", "extract_paragraphs", "(", "xml", ")", "except", "Exception", ":", "paragraphs", "=", "[", "xml", "]", "return", "paragraphs" ]
Extract paragraphs from xml that could be from different sources

First try to parse the xml as if it came from Elsevier. If we do not
have valid Elsevier xml this will throw an exception. The text
extraction function in the pmc client may not throw an exception when
parsing Elsevier xml, silently processing the xml incorrectly.

Parameters
----------
xml : str
    Either an NLM xml, Elsevier xml or plaintext

Returns
-------
paragraphs : list of str
    Extracted plaintext paragraphs from NLM or Elsevier XML
[ "Extract", "paragraphs", "from", "xml", "that", "could", "be", "from", "different", "sources" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/deft_tools.py#L87-L114
train
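Usage note: a sketch assuming a PMC full text is available; the PMCID is an arbitrary example and the call requires network access.

from indra.literature import pmc_client
from indra.literature.deft_tools import universal_extract_paragraphs

xml = pmc_client.get_xml('PMC4455820')  # example PMCID; may be unavailable
if xml is not None:
    paragraphs = universal_extract_paragraphs(xml)
    print('%d paragraphs extracted' % len(paragraphs))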
sorgerlab/indra
indra/literature/deft_tools.py
filter_paragraphs
def filter_paragraphs(paragraphs, contains=None):
    """Filter paragraphs to only those containing one of a list of strings

    Parameters
    ----------
    paragraphs : list of str
        List of plaintext paragraphs from an article
    contains : str or list of str
        Exclude paragraphs not containing this string as a token, or
        at least one of the strings in contains if it is a list

    Returns
    -------
    str
        Plaintext consisting of all input paragraphs containing at least
        one of the supplied tokens.
    """
    if contains is None:
        pattern = ''
    else:
        if isinstance(contains, str):
            contains = [contains]
        pattern = '|'.join(r'[^\w]%s[^\w]' % shortform
                           for shortform in contains)
    paragraphs = [p for p in paragraphs if re.search(pattern, p)]
    return '\n'.join(paragraphs) + '\n'
python
def filter_paragraphs(paragraphs, contains=None):
    """Filter paragraphs to only those containing one of a list of strings

    Parameters
    ----------
    paragraphs : list of str
        List of plaintext paragraphs from an article
    contains : str or list of str
        Exclude paragraphs not containing this string as a token, or
        at least one of the strings in contains if it is a list

    Returns
    -------
    str
        Plaintext consisting of all input paragraphs containing at least
        one of the supplied tokens.
    """
    if contains is None:
        pattern = ''
    else:
        if isinstance(contains, str):
            contains = [contains]
        pattern = '|'.join(r'[^\w]%s[^\w]' % shortform
                           for shortform in contains)
    paragraphs = [p for p in paragraphs if re.search(pattern, p)]
    return '\n'.join(paragraphs) + '\n'
[ "def", "filter_paragraphs", "(", "paragraphs", ",", "contains", "=", "None", ")", ":", "if", "contains", "is", "None", ":", "pattern", "=", "''", "else", ":", "if", "isinstance", "(", "contains", ",", "str", ")", ":", "contains", "=", "[", "contains", "]", "pattern", "=", "'|'", ".", "join", "(", "r'[^\\w]%s[^\\w]'", "%", "shortform", "for", "shortform", "in", "contains", ")", "paragraphs", "=", "[", "p", "for", "p", "in", "paragraphs", "if", "re", ".", "search", "(", "pattern", ",", "p", ")", "]", "return", "'\\n'", ".", "join", "(", "paragraphs", ")", "+", "'\\n'" ]
Filter paragraphs to only those containing one of a list of strings

Parameters
----------
paragraphs : list of str
    List of plaintext paragraphs from an article
contains : str or list of str
    Exclude paragraphs not containing this string as a token, or
    at least one of the strings in contains if it is a list

Returns
-------
str
    Plaintext consisting of all input paragraphs containing at least
    one of the supplied tokens.
[ "Filter", "paragraphs", "to", "only", "those", "containing", "one", "of", "a", "list", "of", "strings" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/deft_tools.py#L117-L143
train
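Usage note: a self-contained sketch. Because the pattern requires a non-word character on both sides of the shortform, a shortform at the very start or end of a paragraph is not matched.

from indra.literature.deft_tools import filter_paragraphs

paragraphs = ['Binding of IR to insulin was observed.',
              'An unrelated paragraph.',
              'The IR receptor was phosphorylated.']
# keeps the first and third paragraphs, drops the second
print(filter_paragraphs(paragraphs, contains='IR'))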
sorgerlab/indra
indra/statements/resources.py
get_valid_residue
def get_valid_residue(residue):
    """Check if the given string represents a valid amino acid residue."""
    if residue is not None and amino_acids.get(residue) is None:
        res = amino_acids_reverse.get(residue.lower())
        if res is None:
            raise InvalidResidueError(residue)
        else:
            return res
    return residue
python
def get_valid_residue(residue):
    """Check if the given string represents a valid amino acid residue."""
    if residue is not None and amino_acids.get(residue) is None:
        res = amino_acids_reverse.get(residue.lower())
        if res is None:
            raise InvalidResidueError(residue)
        else:
            return res
    return residue
[ "def", "get_valid_residue", "(", "residue", ")", ":", "if", "residue", "is", "not", "None", "and", "amino_acids", ".", "get", "(", "residue", ")", "is", "None", ":", "res", "=", "amino_acids_reverse", ".", "get", "(", "residue", ".", "lower", "(", ")", ")", "if", "res", "is", "None", ":", "raise", "InvalidResidueError", "(", "residue", ")", "else", ":", "return", "res", "return", "residue" ]
Check if the given string represents a valid amino acid residue.
[ "Check", "if", "the", "given", "string", "represents", "a", "valid", "amino", "acid", "residue", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/resources.py#L15-L23
train
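Usage note: a sketch of the two code paths. The exception is assumed to be importable from the same module where it is raised, and the 'serine' lookup assumes the full names in amino_acids.tsv match after lowercasing.

from indra.statements.resources import (get_valid_residue,
                                        InvalidResidueError)

print(get_valid_residue('S'))       # canonical one-letter code, as-is
print(get_valid_residue('serine'))  # assumed to map back to 'S'
try:
    get_valid_residue('Xyz')
except InvalidResidueError as err:
    print('rejected:', err)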
sorgerlab/indra
indra/statements/resources.py
get_valid_location
def get_valid_location(location):
    """Check if the given location represents a valid cellular component."""
    # If we're given None, return None
    if location is not None and cellular_components.get(location) is None:
        loc = cellular_components_reverse.get(location)
        if loc is None:
            raise InvalidLocationError(location)
        else:
            return loc
    return location
python
def get_valid_location(location):
    """Check if the given location represents a valid cellular component."""
    # If we're given None, return None
    if location is not None and cellular_components.get(location) is None:
        loc = cellular_components_reverse.get(location)
        if loc is None:
            raise InvalidLocationError(location)
        else:
            return loc
    return location
[ "def", "get_valid_location", "(", "location", ")", ":", "# If we're given None, return None", "if", "location", "is", "not", "None", "and", "cellular_components", ".", "get", "(", "location", ")", "is", "None", ":", "loc", "=", "cellular_components_reverse", ".", "get", "(", "location", ")", "if", "loc", "is", "None", ":", "raise", "InvalidLocationError", "(", "location", ")", "else", ":", "return", "loc", "return", "location" ]
Check if the given location represents a valid cellular component.
[ "Check", "if", "the", "given", "location", "represents", "a", "valid", "cellular", "component", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/resources.py#L26-L35
train
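Usage note: a sketch of both lookup directions, assuming 'cytoplasm' and GO:0005737 appear in the bundled cellular_components.tsv.

from indra.statements.resources import get_valid_location

print(get_valid_location('cytoplasm'))   # known name, returned unchanged
print(get_valid_location('GO:0005737'))  # GO ID, mapped to its name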
sorgerlab/indra
indra/statements/resources.py
_read_activity_types
def _read_activity_types():
    """Read types of valid activities from a resource file."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    ac_file = os.path.join(this_dir, os.pardir, 'resources',
                           'activity_hierarchy.rdf')
    g = rdflib.Graph()
    with open(ac_file, 'r'):
        g.parse(ac_file, format='nt')
    act_types = set()
    for s, _, o in g:
        subj = s.rpartition('/')[-1]
        obj = o.rpartition('/')[-1]
        act_types.add(subj)
        act_types.add(obj)
    return sorted(list(act_types))
python
def _read_activity_types():
    """Read types of valid activities from a resource file."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    ac_file = os.path.join(this_dir, os.pardir, 'resources',
                           'activity_hierarchy.rdf')
    g = rdflib.Graph()
    with open(ac_file, 'r'):
        g.parse(ac_file, format='nt')
    act_types = set()
    for s, _, o in g:
        subj = s.rpartition('/')[-1]
        obj = o.rpartition('/')[-1]
        act_types.add(subj)
        act_types.add(obj)
    return sorted(list(act_types))
[ "def", "_read_activity_types", "(", ")", ":", "this_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "ac_file", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "os", ".", "pardir", ",", "'resources'", ",", "'activity_hierarchy.rdf'", ")", "g", "=", "rdflib", ".", "Graph", "(", ")", "with", "open", "(", "ac_file", ",", "'r'", ")", ":", "g", ".", "parse", "(", "ac_file", ",", "format", "=", "'nt'", ")", "act_types", "=", "set", "(", ")", "for", "s", ",", "_", ",", "o", "in", "g", ":", "subj", "=", "s", ".", "rpartition", "(", "'/'", ")", "[", "-", "1", "]", "obj", "=", "o", ".", "rpartition", "(", "'/'", ")", "[", "-", "1", "]", "act_types", ".", "add", "(", "subj", ")", "act_types", ".", "add", "(", "obj", ")", "return", "sorted", "(", "list", "(", "act_types", ")", ")" ]
Read types of valid activities from a resource file.
[ "Read", "types", "of", "valid", "activities", "from", "a", "resource", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/resources.py#L38-L52
train
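Usage note: this loader runs at import time to populate a module-level list; the name activity_types is an assumption, mirroring how the other _read_* helpers in this module are bound.

from indra.statements import resources

print(len(resources.activity_types))   # module-level name assumed
print(resources.activity_types[:5])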
sorgerlab/indra
indra/statements/resources.py
_read_cellular_components
def _read_cellular_components():
    """Read cellular components from a resource file."""
    # Here we load a patch file in addition to the current cellular components
    # file to make sure we don't error with InvalidLocationError with some
    # deprecated cellular location names
    this_dir = os.path.dirname(os.path.abspath(__file__))
    cc_file = os.path.join(this_dir, os.pardir, 'resources',
                           'cellular_components.tsv')
    cc_patch_file = os.path.join(this_dir, os.pardir, 'resources',
                                 'cellular_components_patch.tsv')
    cellular_components = {}
    cellular_components_reverse = {}
    with open(cc_file, 'rt') as fh:
        lines = list(fh.readlines())
    # We add the patch to the end of the lines list
    with open(cc_patch_file, 'rt') as fh:
        lines += list(fh.readlines())
    for lin in lines[1:]:
        terms = lin.strip().split('\t')
        cellular_components[terms[1]] = terms[0]
        # If the GO -> name mapping doesn't exist yet, we add a mapping
        # but if it already exists (i.e. the try doesn't error) then
        # we don't add the GO -> name mapping. This ensures that names from
        # the patch file aren't mapped to in the reverse list.
        try:
            cellular_components_reverse[terms[0]]
        except KeyError:
            cellular_components_reverse[terms[0]] = terms[1]
    return cellular_components, cellular_components_reverse
python
def _read_cellular_components():
    """Read cellular components from a resource file."""
    # Here we load a patch file in addition to the current cellular components
    # file to make sure we don't error with InvalidLocationError with some
    # deprecated cellular location names
    this_dir = os.path.dirname(os.path.abspath(__file__))
    cc_file = os.path.join(this_dir, os.pardir, 'resources',
                           'cellular_components.tsv')
    cc_patch_file = os.path.join(this_dir, os.pardir, 'resources',
                                 'cellular_components_patch.tsv')
    cellular_components = {}
    cellular_components_reverse = {}
    with open(cc_file, 'rt') as fh:
        lines = list(fh.readlines())
    # We add the patch to the end of the lines list
    with open(cc_patch_file, 'rt') as fh:
        lines += list(fh.readlines())
    for lin in lines[1:]:
        terms = lin.strip().split('\t')
        cellular_components[terms[1]] = terms[0]
        # If the GO -> name mapping doesn't exist yet, we add a mapping
        # but if it already exists (i.e. the try doesn't error) then
        # we don't add the GO -> name mapping. This ensures that names from
        # the patch file aren't mapped to in the reverse list.
        try:
            cellular_components_reverse[terms[0]]
        except KeyError:
            cellular_components_reverse[terms[0]] = terms[1]
    return cellular_components, cellular_components_reverse
[ "def", "_read_cellular_components", "(", ")", ":", "# Here we load a patch file in addition to the current cellular components", "# file to make sure we don't error with InvalidLocationError with some", "# deprecated cellular location names", "this_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "cc_file", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "os", ".", "pardir", ",", "'resources'", ",", "'cellular_components.tsv'", ")", "cc_patch_file", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "os", ".", "pardir", ",", "'resources'", ",", "'cellular_components_patch.tsv'", ")", "cellular_components", "=", "{", "}", "cellular_components_reverse", "=", "{", "}", "with", "open", "(", "cc_file", ",", "'rt'", ")", "as", "fh", ":", "lines", "=", "list", "(", "fh", ".", "readlines", "(", ")", ")", "# We add the patch to the end of the lines list", "with", "open", "(", "cc_patch_file", ",", "'rt'", ")", "as", "fh", ":", "lines", "+=", "list", "(", "fh", ".", "readlines", "(", ")", ")", "for", "lin", "in", "lines", "[", "1", ":", "]", ":", "terms", "=", "lin", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "cellular_components", "[", "terms", "[", "1", "]", "]", "=", "terms", "[", "0", "]", "# If the GO -> name mapping doesn't exist yet, we add a mapping", "# but if it already exists (i.e. the try doesn't error) then", "# we don't add the GO -> name mapping. This ensures that names from", "# the patch file aren't mapped to in the reverse list.", "try", ":", "cellular_components_reverse", "[", "terms", "[", "0", "]", "]", "except", "KeyError", ":", "cellular_components_reverse", "[", "terms", "[", "0", "]", "]", "=", "terms", "[", "1", "]", "return", "cellular_components", ",", "cellular_components_reverse" ]
Read cellular components from a resource file.
[ "Read", "cellular", "components", "from", "a", "resource", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/resources.py#L58-L86
train
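Usage note: a sketch of the two tables this builds; the module-level binding cellular_components, cellular_components_reverse = _read_cellular_components() is an assumption, and the 'cytoplasm' key depends on the bundled resource file.

from indra.statements.resources import (cellular_components,
                                        cellular_components_reverse)

go_id = cellular_components['cytoplasm']          # name -> GO ID
print(go_id, cellular_components_reverse[go_id])  # GO ID -> canonical name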
sorgerlab/indra
indra/statements/resources.py
_read_amino_acids
def _read_amino_acids():
    """Read the amino acid information from a resource file."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    aa_file = os.path.join(this_dir, os.pardir, 'resources',
                           'amino_acids.tsv')
    amino_acids = {}
    amino_acids_reverse = {}
    with open(aa_file, 'rt') as fh:
        lines = fh.readlines()
    for lin in lines[1:]:
        terms = lin.strip().split('\t')
        key = terms[2]
        val = {'full_name': terms[0],
               'short_name': terms[1],
               'indra_name': terms[3]}
        amino_acids[key] = val
        for v in val.values():
            amino_acids_reverse[v] = key
    return amino_acids, amino_acids_reverse
python
def _read_amino_acids():
    """Read the amino acid information from a resource file."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    aa_file = os.path.join(this_dir, os.pardir, 'resources',
                           'amino_acids.tsv')
    amino_acids = {}
    amino_acids_reverse = {}
    with open(aa_file, 'rt') as fh:
        lines = fh.readlines()
    for lin in lines[1:]:
        terms = lin.strip().split('\t')
        key = terms[2]
        val = {'full_name': terms[0],
               'short_name': terms[1],
               'indra_name': terms[3]}
        amino_acids[key] = val
        for v in val.values():
            amino_acids_reverse[v] = key
    return amino_acids, amino_acids_reverse
[ "def", "_read_amino_acids", "(", ")", ":", "this_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "aa_file", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "os", ".", "pardir", ",", "'resources'", ",", "'amino_acids.tsv'", ")", "amino_acids", "=", "{", "}", "amino_acids_reverse", "=", "{", "}", "with", "open", "(", "aa_file", ",", "'rt'", ")", "as", "fh", ":", "lines", "=", "fh", ".", "readlines", "(", ")", "for", "lin", "in", "lines", "[", "1", ":", "]", ":", "terms", "=", "lin", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "key", "=", "terms", "[", "2", "]", "val", "=", "{", "'full_name'", ":", "terms", "[", "0", "]", ",", "'short_name'", ":", "terms", "[", "1", "]", ",", "'indra_name'", ":", "terms", "[", "3", "]", "}", "amino_acids", "[", "key", "]", "=", "val", "for", "v", "in", "val", ".", "values", "(", ")", ":", "amino_acids_reverse", "[", "v", "]", "=", "key", "return", "amino_acids", ",", "amino_acids_reverse" ]
Read the amino acid information from a resource file.
[ "Read", "the", "amino", "acid", "information", "from", "a", "resource", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/resources.py#L92-L109
train
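Usage note: a sketch of the resulting lookup tables, with the module-level binding assumed as in the get_valid_residue record above.

from indra.statements.resources import amino_acids, amino_acids_reverse

entry = amino_acids['S']
print(entry['full_name'], entry['short_name'], entry['indra_name'])
print(amino_acids_reverse[entry['full_name']])  # maps back to 'S'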
sorgerlab/indra
indra/assemblers/pysb/export.py
export_sbgn
def export_sbgn(model):
    """Return an SBGN model string corresponding to the PySB model.

    This function first calls generate_equations on the PySB model to
    obtain a reaction network (i.e. individual species, reactions). It then
    iterates over each reaction and instantiates its reactants, products,
    and the process itself as SBGN glyphs and arcs.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into SBGN

    Returns
    -------
    sbgn_str : str
        An SBGN model as string
    """
    import lxml.etree
    import lxml.builder
    from pysb.bng import generate_equations
    from indra.assemblers.sbgn import SBGNAssembler
    logger.info('Generating reaction network with BNG for SBGN export. ' +
                'This could take a long time.')
    generate_equations(model)

    sa = SBGNAssembler()

    glyphs = {}
    for idx, species in enumerate(model.species):
        glyph = sa._glyph_for_complex_pattern(species)
        if glyph is None:
            continue
        sa._map.append(glyph)
        glyphs[idx] = glyph
    for reaction in model.reactions:
        # Get all the reactants / products / controllers of the reaction
        reactants = set(reaction['reactants']) - set(reaction['products'])
        products = set(reaction['products']) - set(reaction['reactants'])
        controllers = set(reaction['reactants']) & set(reaction['products'])

        # Add glyph for reaction
        process_glyph = sa._process_glyph('process')

        # Connect reactants with arcs
        if not reactants:
            glyph_id = sa._none_glyph()
            sa._arc('consumption', glyph_id, process_glyph)
        else:
            for r in reactants:
                glyph = glyphs.get(r)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('consumption', glyph_id, process_glyph)

        # Connect products with arcs
        if not products:
            glyph_id = sa._none_glyph()
            sa._arc('production', process_glyph, glyph_id)
        else:
            for p in products:
                glyph = glyphs.get(p)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('production', process_glyph, glyph_id)

        # Connect controllers with arcs
        for c in controllers:
            glyph = glyphs[c]
            sa._arc('catalysis', glyph.attrib['id'], process_glyph)
    sbgn_str = sa.print_model().decode('utf-8')
    return sbgn_str
python
def export_sbgn(model):
    """Return an SBGN model string corresponding to the PySB model.

    This function first calls generate_equations on the PySB model to
    obtain a reaction network (i.e. individual species, reactions). It then
    iterates over each reaction and instantiates its reactants, products,
    and the process itself as SBGN glyphs and arcs.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into SBGN

    Returns
    -------
    sbgn_str : str
        An SBGN model as string
    """
    import lxml.etree
    import lxml.builder
    from pysb.bng import generate_equations
    from indra.assemblers.sbgn import SBGNAssembler
    logger.info('Generating reaction network with BNG for SBGN export. ' +
                'This could take a long time.')
    generate_equations(model)

    sa = SBGNAssembler()

    glyphs = {}
    for idx, species in enumerate(model.species):
        glyph = sa._glyph_for_complex_pattern(species)
        if glyph is None:
            continue
        sa._map.append(glyph)
        glyphs[idx] = glyph
    for reaction in model.reactions:
        # Get all the reactants / products / controllers of the reaction
        reactants = set(reaction['reactants']) - set(reaction['products'])
        products = set(reaction['products']) - set(reaction['reactants'])
        controllers = set(reaction['reactants']) & set(reaction['products'])

        # Add glyph for reaction
        process_glyph = sa._process_glyph('process')

        # Connect reactants with arcs
        if not reactants:
            glyph_id = sa._none_glyph()
            sa._arc('consumption', glyph_id, process_glyph)
        else:
            for r in reactants:
                glyph = glyphs.get(r)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('consumption', glyph_id, process_glyph)

        # Connect products with arcs
        if not products:
            glyph_id = sa._none_glyph()
            sa._arc('production', process_glyph, glyph_id)
        else:
            for p in products:
                glyph = glyphs.get(p)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('production', process_glyph, glyph_id)

        # Connect controllers with arcs
        for c in controllers:
            glyph = glyphs[c]
            sa._arc('catalysis', glyph.attrib['id'], process_glyph)
    sbgn_str = sa.print_model().decode('utf-8')
    return sbgn_str
[ "def", "export_sbgn", "(", "model", ")", ":", "import", "lxml", ".", "etree", "import", "lxml", ".", "builder", "from", "pysb", ".", "bng", "import", "generate_equations", "from", "indra", ".", "assemblers", ".", "sbgn", "import", "SBGNAssembler", "logger", ".", "info", "(", "'Generating reaction network with BNG for SBGN export. '", "+", "'This could take a long time.'", ")", "generate_equations", "(", "model", ")", "sa", "=", "SBGNAssembler", "(", ")", "glyphs", "=", "{", "}", "for", "idx", ",", "species", "in", "enumerate", "(", "model", ".", "species", ")", ":", "glyph", "=", "sa", ".", "_glyph_for_complex_pattern", "(", "species", ")", "if", "glyph", "is", "None", ":", "continue", "sa", ".", "_map", ".", "append", "(", "glyph", ")", "glyphs", "[", "idx", "]", "=", "glyph", "for", "reaction", "in", "model", ".", "reactions", ":", "# Get all the reactants / products / controllers of the reaction", "reactants", "=", "set", "(", "reaction", "[", "'reactants'", "]", ")", "-", "set", "(", "reaction", "[", "'products'", "]", ")", "products", "=", "set", "(", "reaction", "[", "'products'", "]", ")", "-", "set", "(", "reaction", "[", "'reactants'", "]", ")", "controllers", "=", "set", "(", "reaction", "[", "'reactants'", "]", ")", "&", "set", "(", "reaction", "[", "'products'", "]", ")", "# Add glyph for reaction", "process_glyph", "=", "sa", ".", "_process_glyph", "(", "'process'", ")", "# Connect reactants with arcs", "if", "not", "reactants", ":", "glyph_id", "=", "sa", ".", "_none_glyph", "(", ")", "sa", ".", "_arc", "(", "'consumption'", ",", "glyph_id", ",", "process_glyph", ")", "else", ":", "for", "r", "in", "reactants", ":", "glyph", "=", "glyphs", ".", "get", "(", "r", ")", "if", "glyph", "is", "None", ":", "glyph_id", "=", "sa", ".", "_none_glyph", "(", ")", "else", ":", "glyph_id", "=", "glyph", ".", "attrib", "[", "'id'", "]", "sa", ".", "_arc", "(", "'consumption'", ",", "glyph_id", ",", "process_glyph", ")", "# Connect products with arcs", "if", "not", "products", ":", "glyph_id", "=", "sa", ".", "_none_glyph", "(", ")", "sa", ".", "_arc", "(", "'production'", ",", "process_glyph", ",", "glyph_id", ")", "else", ":", "for", "p", "in", "products", ":", "glyph", "=", "glyphs", ".", "get", "(", "p", ")", "if", "glyph", "is", "None", ":", "glyph_id", "=", "sa", ".", "_none_glyph", "(", ")", "else", ":", "glyph_id", "=", "glyph", ".", "attrib", "[", "'id'", "]", "sa", ".", "_arc", "(", "'production'", ",", "process_glyph", ",", "glyph_id", ")", "# Connect controllers with arcs", "for", "c", "in", "controllers", ":", "glyph", "=", "glyphs", "[", "c", "]", "sa", ".", "_arc", "(", "'catalysis'", ",", "glyph", ".", "attrib", "[", "'id'", "]", ",", "process_glyph", ")", "sbgn_str", "=", "sa", ".", "print_model", "(", ")", ".", "decode", "(", "'utf-8'", ")", "return", "sbgn_str" ]
Return an SBGN model string corresponding to the PySB model.

    This function first calls generate_equations on the PySB model to obtain
    a reaction network (i.e. individual species, reactions). It then iterates
    over each reaction and instantiates its reactants, products, and the
    process itself as SBGN glyphs and arcs.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into SBGN

    Returns
    -------
    sbgn_str : str
        An SBGN model as a string
[ "Return", "an", "SBGN", "model", "string", "corresponding", "to", "the", "PySB", "model", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/export.py#L9-L82
train
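Usage sketch for the export_sbgn record above (an illustration, not part of the dataset record; the toy model, parameter values, and output file name are assumptions). It builds a one-rule PySB model using PySB's self-exporting component style and writes the returned SBGN XML to disk; pysb, BioNetGen, and INDRA's SBGN assembler are assumed to be installed.

from pysb import Model, Monomer, Parameter, Rule, Initial

Model()
Monomer('A')
Monomer('B')
Parameter('k_conv', 1e-3)
Parameter('A_0', 100)
# A single irreversible conversion of A into B
Rule('A_to_B', A() >> B(), k_conv)
Initial(A(), A_0)

# generate_equations is called inside export_sbgn and can be slow
sbgn_str = export_sbgn(model)
with open('model.sbgn', 'w') as fh:
    fh.write(sbgn_str)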
sorgerlab/indra
indra/assemblers/pysb/export.py
export_kappa_im
def export_kappa_im(model, fname=None):
    """Return a networkx graph representing the model's Kappa influence map.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into a Kappa IM.
    fname : Optional[str]
        A file name, typically with .png or .pdf extension in which
        the IM is rendered using pygraphviz.

    Returns
    -------
    networkx.MultiDiGraph
        A graph object representing the influence map.
    """
    from .kappa_util import im_json_to_graph
    kappa = _prepare_kappa(model)
    imap = kappa.analyses_influence_map()
    im = im_json_to_graph(imap)
    # Rate parameters appear as nodes in the influence map; remove them
    # so that only rule nodes remain
    for param in model.parameters:
        try:
            im.remove_node(param.name)
        except networkx.NetworkXError:
            # The parameter does not appear as a node in the map
            pass
    if fname:
        agraph = networkx.nx_agraph.to_agraph(im)
        agraph.draw(fname, prog='dot')
    return im
python
def export_kappa_im(model, fname=None):
    """Return a networkx graph representing the model's Kappa influence map.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into a Kappa IM.
    fname : Optional[str]
        A file name, typically with .png or .pdf extension in which
        the IM is rendered using pygraphviz.

    Returns
    -------
    networkx.MultiDiGraph
        A graph object representing the influence map.
    """
    from .kappa_util import im_json_to_graph
    kappa = _prepare_kappa(model)
    imap = kappa.analyses_influence_map()
    im = im_json_to_graph(imap)
    # Rate parameters appear as nodes in the influence map; remove them
    # so that only rule nodes remain
    for param in model.parameters:
        try:
            im.remove_node(param.name)
        except networkx.NetworkXError:
            # The parameter does not appear as a node in the map
            pass
    if fname:
        agraph = networkx.nx_agraph.to_agraph(im)
        agraph.draw(fname, prog='dot')
    return im
[ "def", "export_kappa_im", "(", "model", ",", "fname", "=", "None", ")", ":", "from", ".", "kappa_util", "import", "im_json_to_graph", "kappa", "=", "_prepare_kappa", "(", "model", ")", "imap", "=", "kappa", ".", "analyses_influence_map", "(", ")", "im", "=", "im_json_to_graph", "(", "imap", ")", "# Rate parameters appear as nodes in the influence map; remove them", "# so that only rule nodes remain", "for", "param", "in", "model", ".", "parameters", ":", "try", ":", "im", ".", "remove_node", "(", "param", ".", "name", ")", "except", "networkx", ".", "NetworkXError", ":", "# The parameter does not appear as a node in the map", "pass", "if", "fname", ":", "agraph", "=", "networkx", ".", "nx_agraph", ".", "to_agraph", "(", "im", ")", "agraph", ".", "draw", "(", "fname", ",", "prog", "=", "'dot'", ")", "return", "im" ]
Return a networkx graph representing the model's Kappa influence map. Parameters ---------- model : pysb.core.Model A PySB model to be exported into a Kappa IM. fname : Optional[str] A file name, typically with .png or .pdf extension in which the IM is rendered using pygraphviz. Returns ------- networkx.MultiDiGraph A graph object representing the influence map.
[ "Return", "a", "networkx", "graph", "representing", "the", "model", "s", "Kappa", "influence", "map", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/export.py#L85-L113
train
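A brief usage sketch for export_kappa_im (an illustration, not part of the dataset record; it assumes a PySB model object named model is already defined, and that kappy and pygraphviz are installed for the Kappa analysis and rendering). The returned networkx MultiDiGraph can also be inspected programmatically:

# Render the influence map to a PNG and list its edges
im = export_kappa_im(model, fname='influence_map.png')
for source, target in im.edges():
    print('%s -> %s' % (source, target))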
sorgerlab/indra
indra/assemblers/pysb/export.py
export_kappa_cm
def export_kappa_cm(model, fname=None):
    """Return a pygraphviz graph representing the model's Kappa contact map.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into a Kappa CM.
    fname : Optional[str]
        A file name, typically with .png or .pdf extension in which
        the CM is rendered using pygraphviz.

    Returns
    -------
    pygraphviz.AGraph
        A graph object representing the contact map.
    """
    from .kappa_util import cm_json_to_graph
    kappa = _prepare_kappa(model)
    cmap = kappa.analyses_contact_map()
    cm = cm_json_to_graph(cmap)
    if fname:
        cm.draw(fname, prog='dot')
    return cm
python
def export_kappa_cm(model, fname=None):
    """Return a pygraphviz graph representing the model's Kappa contact map.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into a Kappa CM.
    fname : Optional[str]
        A file name, typically with .png or .pdf extension in which
        the CM is rendered using pygraphviz.

    Returns
    -------
    pygraphviz.AGraph
        A graph object representing the contact map.
    """
    from .kappa_util import cm_json_to_graph
    kappa = _prepare_kappa(model)
    cmap = kappa.analyses_contact_map()
    cm = cm_json_to_graph(cmap)
    if fname:
        cm.draw(fname, prog='dot')
    return cm
[ "def", "export_kappa_cm", "(", "model", ",", "fname", "=", "None", ")", ":", "from", ".", "kappa_util", "import", "cm_json_to_graph", "kappa", "=", "_prepare_kappa", "(", "model", ")", "cmap", "=", "kappa", ".", "analyses_contact_map", "(", ")", "cm", "=", "cm_json_to_graph", "(", "cmap", ")", "if", "fname", ":", "cm", ".", "draw", "(", "fname", ",", "prog", "=", "'dot'", ")", "return", "cm" ]
Return a pygraphviz graph representing the model's Kappa contact map.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into a Kappa CM.
    fname : Optional[str]
        A file name, typically with .png or .pdf extension in which
        the CM is rendered using pygraphviz.

    Returns
    -------
    pygraphviz.AGraph
        A graph object representing the contact map.
[ "Return", "a", "pygraphviz", "graph", "representing", "the", "model", "s", "Kappa", "contact", "map", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/export.py#L116-L138
train
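A matching usage sketch for export_kappa_cm under the same assumptions (an existing PySB model plus installed kappy and pygraphviz). Because the return value is a pygraphviz AGraph, the contact map can be re-rendered with a different Graphviz layout engine without recomputing it:

# Draw the contact map with dot, then re-render with the circo layout
cm = export_kappa_cm(model, fname='contact_map.pdf')
cm.draw('contact_map_circo.pdf', prog='circo')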