| field | type |
|---|---|
| repo | string (7–55 chars) |
| path | string (4–127 chars) |
| func_name | string (1–88 chars) |
| original_string | string (75–19.8k chars) |
| language | string (1 class) |
| code | string (75–19.8k chars) |
| code_tokens | list |
| docstring | string (3–17.3k chars) |
| docstring_tokens | list |
| sha | string (40 chars) |
| url | string (87–242 chars) |
| partition | string (1 class) |
sorgerlab/indra | indra/literature/pmc_client.py | filter_pmids | python | train

```python
def filter_pmids(pmid_list, source_type):
    """Filter a list of PMIDs for ones with full text from PMC.

    Parameters
    ----------
    pmid_list : list of str
        List of PMIDs to filter.
    source_type : string
        One of 'fulltext', 'oa_xml', 'oa_txt', or 'auth_xml'.

    Returns
    -------
    list of str
        PMIDs available in the specified source/format type.
    """
    global pmids_fulltext_dict
    # Check args
    if source_type not in ('fulltext', 'oa_xml', 'oa_txt', 'auth_xml'):
        raise ValueError("source_type must be one of: 'fulltext', 'oa_xml', "
                         "'oa_txt', or 'auth_xml'.")
    # Check if we've loaded this type, and lazily initialize
    if pmids_fulltext_dict.get(source_type) is None:
        fulltext_list_path = os.path.join(os.path.dirname(__file__),
                                          'pmids_%s.txt' % source_type)
        with open(fulltext_list_path, 'rb') as f:
            fulltext_list = set([line.strip().decode('utf-8')
                                 for line in f.readlines()])
            pmids_fulltext_dict[source_type] = fulltext_list
    return list(set(pmid_list).intersection(
        pmids_fulltext_dict.get(source_type)))
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pmc_client.py#L159-L188
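A minimal usage sketch for the record above, assuming the module is importable as `indra.literature.pmc_client` (as its path suggests) and that the bundled `pmids_*.txt` lists are present in the package; the PMIDs are placeholders.

```python
from indra.literature import pmc_client

# Placeholder PMIDs, for illustration only
pmids = ['27214725', '28235547', '23456789']
# Keep only the PMIDs that have open-access XML full text in PMC
oa_pmids = pmc_client.filter_pmids(pmids, 'oa_xml')
print(oa_pmids)
```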
sorgerlab/indra | indra/sources/cwms/util.py | get_example_extractions | python | train

```python
def get_example_extractions(fname):
    "Get extractions from one of the examples in `cag_examples`."
    with open(fname, 'r') as f:
        sentences = f.read().splitlines()
    rdf_xml_dict = {}
    for sentence in sentences:
        logger.info("Reading \"%s\"..." % sentence)
        html = tc.send_query(sentence, 'cwms')
        try:
            rdf_xml_dict[sentence] = tc.get_xml(html, 'rdf:RDF',
                                                fail_if_empty=True)
        except AssertionError as e:
            logger.error("Got error for %s." % sentence)
            logger.exception(e)
    return rdf_xml_dict
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/cwms/util.py#L63-L77
sorgerlab/indra | indra/sources/cwms/util.py | make_example_graphs | python | train

```python
def make_example_graphs():
    "Make graphs from all the examples in cag_examples."
    cag_example_rdfs = {}
    for i, fname in enumerate(os.listdir('cag_examples')):
        cag_example_rdfs[i+1] = get_example_extractions(fname)
    return make_cag_graphs(cag_example_rdfs)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/cwms/util.py#L80-L85
sorgerlab/indra | indra/assemblers/english/assembler.py | _join_list | python | train

```python
def _join_list(lst, oxford=False):
    """Join a list of words in a gramatically correct way."""
    if len(lst) > 2:
        s = ', '.join(lst[:-1])
        if oxford:
            s += ','
        s += ' and ' + lst[-1]
    elif len(lst) == 2:
        s = lst[0] + ' and ' + lst[1]
    elif len(lst) == 1:
        s = lst[0]
    else:
        s = ''
    return s
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L184-L197
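From the code above, the helper behaves as follows (derived directly from the record, shown for clarity):

```python
_join_list(['BRAF', 'RAF1'])                # 'BRAF and RAF1'
_join_list(['BRAF', 'RAF1', 'ARAF'])        # 'BRAF, RAF1 and ARAF'
_join_list(['BRAF', 'RAF1', 'ARAF'], True)  # 'BRAF, RAF1, and ARAF' (Oxford comma)
_join_list([])                              # ''
```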
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_activeform | python | train

```python
def _assemble_activeform(stmt):
    """Assemble ActiveForm statements into text."""
    subj_str = _assemble_agent_str(stmt.agent)
    if stmt.is_active:
        is_active_str = 'active'
    else:
        is_active_str = 'inactive'
    if stmt.activity == 'activity':
        stmt_str = subj_str + ' is ' + is_active_str
    elif stmt.activity == 'kinase':
        stmt_str = subj_str + ' is kinase-' + is_active_str
    elif stmt.activity == 'phosphatase':
        stmt_str = subj_str + ' is phosphatase-' + is_active_str
    elif stmt.activity == 'catalytic':
        stmt_str = subj_str + ' is catalytically ' + is_active_str
    elif stmt.activity == 'transcription':
        stmt_str = subj_str + ' is transcriptionally ' + is_active_str
    elif stmt.activity == 'gtpbound':
        stmt_str = subj_str + ' is GTP-bound ' + is_active_str
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L200-L219
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_modification | python | train

```python
def _assemble_modification(stmt):
    """Assemble Modification statements into text."""
    sub_str = _assemble_agent_str(stmt.sub)
    if stmt.enz is not None:
        enz_str = _assemble_agent_str(stmt.enz)
        if _get_is_direct(stmt):
            mod_str = ' ' + _mod_process_verb(stmt) + ' '
        else:
            mod_str = ' leads to the ' + _mod_process_noun(stmt) + ' of '
        stmt_str = enz_str + mod_str + sub_str
    else:
        stmt_str = sub_str + ' is ' + _mod_state_stmt(stmt)
    if stmt.residue is not None:
        if stmt.position is None:
            mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
        else:
            mod_str = 'on ' + stmt.residue + stmt.position
    else:
        mod_str = ''
    stmt_str += ' ' + mod_str
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L222-L243
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_association | python | train

```python
def _assemble_association(stmt):
    """Assemble Association statements into text."""
    member_strs = [_assemble_agent_str(m.concept) for m in stmt.members]
    stmt_str = member_strs[0] + ' is associated with ' + \
        _join_list(member_strs[1:])
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L246-L251
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_complex | python | train

```python
def _assemble_complex(stmt):
    """Assemble Complex statements into text."""
    member_strs = [_assemble_agent_str(m) for m in stmt.members]
    stmt_str = member_strs[0] + ' binds ' + _join_list(member_strs[1:])
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L254-L258
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_autophosphorylation | python | train

```python
def _assemble_autophosphorylation(stmt):
    """Assemble Autophosphorylation statements into text."""
    enz_str = _assemble_agent_str(stmt.enz)
    stmt_str = enz_str + ' phosphorylates itself'
    if stmt.residue is not None:
        if stmt.position is None:
            mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
        else:
            mod_str = 'on ' + stmt.residue + stmt.position
    else:
        mod_str = ''
    stmt_str += ' ' + mod_str
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L261-L273
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_regulate_activity | python | train

```python
def _assemble_regulate_activity(stmt):
    """Assemble RegulateActivity statements into text."""
    subj_str = _assemble_agent_str(stmt.subj)
    obj_str = _assemble_agent_str(stmt.obj)
    if stmt.is_activation:
        rel_str = ' activates '
    else:
        rel_str = ' inhibits '
    stmt_str = subj_str + rel_str + obj_str
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L276-L285
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_regulate_amount | python | train

```python
def _assemble_regulate_amount(stmt):
    """Assemble RegulateAmount statements into text."""
    obj_str = _assemble_agent_str(stmt.obj)
    if stmt.subj is not None:
        subj_str = _assemble_agent_str(stmt.subj)
        if isinstance(stmt, ist.IncreaseAmount):
            rel_str = ' increases the amount of '
        elif isinstance(stmt, ist.DecreaseAmount):
            rel_str = ' decreases the amount of '
        stmt_str = subj_str + rel_str + obj_str
    else:
        if isinstance(stmt, ist.IncreaseAmount):
            stmt_str = obj_str + ' is produced'
        elif isinstance(stmt, ist.DecreaseAmount):
            stmt_str = obj_str + ' is degraded'
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L288-L303
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_translocation | python | train

```python
def _assemble_translocation(stmt):
    """Assemble Translocation statements into text."""
    agent_str = _assemble_agent_str(stmt.agent)
    stmt_str = agent_str + ' translocates'
    if stmt.from_location is not None:
        stmt_str += ' from the ' + stmt.from_location
    if stmt.to_location is not None:
        stmt_str += ' to the ' + stmt.to_location
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L306-L314
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_gap | python | train

```python
def _assemble_gap(stmt):
    """Assemble Gap statements into text."""
    subj_str = _assemble_agent_str(stmt.gap)
    obj_str = _assemble_agent_str(stmt.ras)
    stmt_str = subj_str + ' is a GAP for ' + obj_str
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L317-L322
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_gef | python | train

```python
def _assemble_gef(stmt):
    """Assemble Gef statements into text."""
    subj_str = _assemble_agent_str(stmt.gef)
    obj_str = _assemble_agent_str(stmt.ras)
    stmt_str = subj_str + ' is a GEF for ' + obj_str
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L325-L330
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_conversion | python | train

```python
def _assemble_conversion(stmt):
    """Assemble a Conversion statement into text."""
    reactants = _join_list([_assemble_agent_str(r) for r in stmt.obj_from])
    products = _join_list([_assemble_agent_str(r) for r in stmt.obj_to])
    if stmt.subj is not None:
        subj_str = _assemble_agent_str(stmt.subj)
        stmt_str = '%s catalyzes the conversion of %s into %s' % \
            (subj_str, reactants, products)
    else:
        stmt_str = '%s is converted into %s' % (reactants, products)
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L333-L344
sorgerlab/indra | indra/assemblers/english/assembler.py | _assemble_influence | python | train

```python
def _assemble_influence(stmt):
    """Assemble an Influence statement into text."""
    subj_str = _assemble_agent_str(stmt.subj.concept)
    obj_str = _assemble_agent_str(stmt.obj.concept)
    # Note that n is prepended to increase to make it "an increase"
    if stmt.subj.delta['polarity'] is not None:
        subj_delta_str = ' decrease' if stmt.subj.delta['polarity'] == -1 \
            else 'n increase'
        subj_str = 'a%s in %s' % (subj_delta_str, subj_str)
    if stmt.obj.delta['polarity'] is not None:
        obj_delta_str = ' decrease' if stmt.obj.delta['polarity'] == -1 \
            else 'n increase'
        obj_str = 'a%s in %s' % (obj_delta_str, obj_str)
    stmt_str = '%s causes %s' % (subj_str, obj_str)
    return _make_sentence(stmt_str)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L347-L364
sorgerlab/indra | indra/assemblers/english/assembler.py | _make_sentence | python | train

```python
def _make_sentence(txt):
    """Make a sentence from a piece of text."""
    #Make sure first letter is capitalized
    txt = txt.strip(' ')
    txt = txt[0].upper() + txt[1:] + '.'
    return txt
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L367-L372
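A quick illustration of the helper above, derived from the code in the record:

```python
_make_sentence('BRAF binds RAF1 ')   # 'BRAF binds RAF1.'
_make_sentence('p53 is degraded')    # 'P53 is degraded.'
```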
sorgerlab/indra | indra/assemblers/english/assembler.py | _get_is_hypothesis | python | train

```python
def _get_is_hypothesis(stmt):
    '''Returns true if there is evidence that the statement is only
    hypothetical. If all of the evidences associated with the statement
    indicate a hypothetical interaction then we assume the interaction
    is hypothetical.'''
    for ev in stmt.evidence:
        if not ev.epistemics.get('hypothesis') is True:
            return True
    return False
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L394-L402
sorgerlab/indra | indra/assemblers/english/assembler.py | EnglishAssembler.make_model | python | train

```python
def make_model(self):
    """Assemble text from the set of collected INDRA Statements.

    Returns
    -------
    stmt_strs : str
        Return the assembled text as unicode string. By default, the text
        is a single string consisting of one or more sentences with
        periods at the end.
    """
    stmt_strs = []
    for stmt in self.statements:
        if isinstance(stmt, ist.Modification):
            stmt_strs.append(_assemble_modification(stmt))
        elif isinstance(stmt, ist.Autophosphorylation):
            stmt_strs.append(_assemble_autophosphorylation(stmt))
        elif isinstance(stmt, ist.Association):
            stmt_strs.append(_assemble_association(stmt))
        elif isinstance(stmt, ist.Complex):
            stmt_strs.append(_assemble_complex(stmt))
        elif isinstance(stmt, ist.Influence):
            stmt_strs.append(_assemble_influence(stmt))
        elif isinstance(stmt, ist.RegulateActivity):
            stmt_strs.append(_assemble_regulate_activity(stmt))
        elif isinstance(stmt, ist.RegulateAmount):
            stmt_strs.append(_assemble_regulate_amount(stmt))
        elif isinstance(stmt, ist.ActiveForm):
            stmt_strs.append(_assemble_activeform(stmt))
        elif isinstance(stmt, ist.Translocation):
            stmt_strs.append(_assemble_translocation(stmt))
        elif isinstance(stmt, ist.Gef):
            stmt_strs.append(_assemble_gef(stmt))
        elif isinstance(stmt, ist.Gap):
            stmt_strs.append(_assemble_gap(stmt))
        elif isinstance(stmt, ist.Conversion):
            stmt_strs.append(_assemble_conversion(stmt))
        else:
            logger.warning('Unhandled statement type: %s.' % type(stmt))
    if stmt_strs:
        return ' '.join(stmt_strs)
    else:
        return ''
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L41-L82
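A sketch of driving the assembler above end to end; the statement, the agent names, and the `indra.assemblers.english` import path are illustrative assumptions rather than part of the record.

```python
from indra.statements import Agent, Phosphorylation
from indra.assemblers.english import EnglishAssembler

# A made-up Statement: MAP2K1 phosphorylates MAPK1
stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))
ea = EnglishAssembler([stmt])
print(ea.make_model())  # expected along the lines of "MAP2K1 phosphorylates MAPK1."
```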
sorgerlab/indra | indra/assemblers/sbgn/assembler.py | SBGNAssembler.add_statements | python | train

```python
def add_statements(self, stmts):
    """Add INDRA Statements to the assembler's list of statements.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        A list of :py:class:`indra.statements.Statement`
        to be added to the statement list of the assembler.
    """
    for stmt in stmts:
        if not self.statement_exists(stmt):
            self.statements.append(stmt)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L58-L69
sorgerlab/indra | indra/assemblers/sbgn/assembler.py | SBGNAssembler.make_model | python | train

```python
def make_model(self):
    """Assemble the SBGN model from the collected INDRA Statements.

    This method assembles an SBGN model from the set of INDRA Statements.
    The assembled model is set as the assembler's sbgn attribute (it is
    represented as an XML ElementTree internally). The model is returned
    as a serialized XML string.

    Returns
    -------
    sbgn_str : str
        The XML serialized SBGN model.
    """
    ppa = PysbPreassembler(self.statements)
    ppa.replace_activities()
    self.statements = ppa.statements
    self.sbgn = emaker.sbgn()
    self._map = emaker.map()
    self.sbgn.append(self._map)
    for stmt in self.statements:
        if isinstance(stmt, Modification):
            self._assemble_modification(stmt)
        elif isinstance(stmt, RegulateActivity):
            self._assemble_regulateactivity(stmt)
        elif isinstance(stmt, RegulateAmount):
            self._assemble_regulateamount(stmt)
        elif isinstance(stmt, Complex):
            self._assemble_complex(stmt)
        elif isinstance(stmt, ActiveForm):
            #self._assemble_activeform(stmt)
            pass
        else:
            logger.warning("Unhandled Statement type %s" % type(stmt))
            continue
    sbgn_str = self.print_model()
    return sbgn_str
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L71-L106
sorgerlab/indra | indra/assemblers/sbgn/assembler.py | SBGNAssembler.print_model | python | train

```python
def print_model(self, pretty=True, encoding='utf8'):
    """Return the assembled SBGN model as an XML string.

    Parameters
    ----------
    pretty : Optional[bool]
        If True, the SBGN string is formatted with indentation (for human
        viewing) otherwise no indentation is used. Default: True

    Returns
    -------
    sbgn_str : bytes (str in Python 2)
        An XML string representation of the SBGN model.
    """
    return lxml.etree.tostring(self.sbgn, pretty_print=pretty,
                               encoding=encoding, xml_declaration=True)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L108-L123
sorgerlab/indra | indra/assemblers/sbgn/assembler.py | SBGNAssembler.save_model | python | train

```python
def save_model(self, file_name='model.sbgn'):
    """Save the assembled SBGN model in a file.

    Parameters
    ----------
    file_name : Optional[str]
        The name of the file to save the SBGN network to.
        Default: model.sbgn
    """
    model = self.print_model()
    with open(file_name, 'wb') as fh:
        fh.write(model)
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L125-L136
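The `SBGNAssembler` records above fit together roughly as sketched below; the `indra.assemblers.sbgn` import path and the constructor taking a list of statements are assumptions based on typical usage of the class, and the example statement is made up.

```python
from indra.statements import Agent, Phosphorylation
from indra.assemblers.sbgn import SBGNAssembler

stmts = [Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]
sa = SBGNAssembler(stmts)
sbgn_xml = sa.make_model()   # assembles the model and returns serialized SBGN-ML
sa.save_model('model.sbgn')  # writes the same XML to a file via print_model()
```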
sorgerlab/indra | indra/assemblers/sbgn/assembler.py | SBGNAssembler._glyph_for_complex_pattern | python | train

```python
def _glyph_for_complex_pattern(self, pattern):
    """Add glyph and member glyphs for a PySB ComplexPattern."""
    # Make the main glyph for the agent
    monomer_glyphs = []
    for monomer_pattern in pattern.monomer_patterns:
        glyph = self._glyph_for_monomer_pattern(monomer_pattern)
        monomer_glyphs.append(glyph)
    if len(monomer_glyphs) > 1:
        pattern.matches_key = lambda: str(pattern)
        agent_id = self._make_agent_id(pattern)
        complex_glyph = \
            emaker.glyph(emaker.bbox(**self.complex_style),
                         class_('complex'), id=agent_id)
        for glyph in monomer_glyphs:
            glyph.attrib['id'] = agent_id + glyph.attrib['id']
            complex_glyph.append(glyph)
        return complex_glyph
    return monomer_glyphs[0]
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L317-L335
sorgerlab/indra | indra/assemblers/sbgn/assembler.py | SBGNAssembler._glyph_for_monomer_pattern | python | train

```python
def _glyph_for_monomer_pattern(self, pattern):
    """Add glyph for a PySB MonomerPattern."""
    pattern.matches_key = lambda: str(pattern)
    agent_id = self._make_agent_id(pattern)
    # Handle sources and sinks
    if pattern.monomer.name in ('__source', '__sink'):
        return None
    # Handle molecules
    glyph = emaker.glyph(emaker.label(text=pattern.monomer.name),
                         emaker.bbox(**self.monomer_style),
                         class_('macromolecule'), id=agent_id)
    # Temporarily remove this
    # Add a glyph for type
    #type_glyph = emaker.glyph(emaker.label(text='mt:prot'),
    #                          class_('unit of information'),
    #                          emaker.bbox(**self.entity_type_style),
    #                          id=self._make_id())
    #glyph.append(type_glyph)
    for site, value in pattern.site_conditions.items():
        if value is None or isinstance(value, int):
            continue
        # Make some common abbreviations
        if site == 'phospho':
            site = 'p'
        elif site == 'activity':
            site = 'act'
        if value == 'active':
            value = 'a'
        elif value == 'inactive':
            value = 'i'
        state = emaker.state(variable=site, value=value)
        state_glyph = \
            emaker.glyph(state, emaker.bbox(**self.entity_state_style),
                         class_('state variable'), id=self._make_id())
        glyph.append(state_glyph)
    return glyph
```

sha: 79a70415832c5702d7a820c7c9ccc8e25010124b | url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L337-L372
sorgerlab/indra | indra/databases/go_client.py | load_go_graph | def load_go_graph(go_fname):
"""Load the GO data from an OWL file and parse into an RDF graph.
Parameters
----------
go_fname : str
Path to the GO OWL file. Can be downloaded from
http://geneontology.org/ontology/go.owl.
Returns
-------
rdflib.Graph
RDF graph containing GO data.
"""
global _go_graph
if _go_graph is None:
_go_graph = rdflib.Graph()
logger.info("Parsing GO OWL file")
_go_graph.parse(os.path.abspath(go_fname))
return _go_graph | python | def load_go_graph(go_fname):
"""Load the GO data from an OWL file and parse into an RDF graph.
Parameters
----------
go_fname : str
Path to the GO OWL file. Can be downloaded from
http://geneontology.org/ontology/go.owl.
Returns
-------
rdflib.Graph
RDF graph containing GO data.
"""
global _go_graph
if _go_graph is None:
_go_graph = rdflib.Graph()
logger.info("Parsing GO OWL file")
_go_graph.parse(os.path.abspath(go_fname))
return _go_graph | [
"def",
"load_go_graph",
"(",
"go_fname",
")",
":",
"global",
"_go_graph",
"if",
"_go_graph",
"is",
"None",
":",
"_go_graph",
"=",
"rdflib",
".",
"Graph",
"(",
")",
"logger",
".",
"info",
"(",
"\"Parsing GO OWL file\"",
")",
"_go_graph",
".",
"parse",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"go_fname",
")",
")",
"return",
"_go_graph"
]
| Load the GO data from an OWL file and parse into an RDF graph.
Parameters
----------
go_fname : str
Path to the GO OWL file. Can be downloaded from
http://geneontology.org/ontology/go.owl.
Returns
-------
rdflib.Graph
RDF graph containing GO data. | [
"Load",
"the",
"GO",
"data",
"from",
"an",
"OWL",
"file",
"and",
"parse",
"into",
"an",
"RDF",
"graph",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/go_client.py#L41-L60 | train |
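A brief usage sketch for the load_go_graph record above. The import path is inferred from the record's file path (indra/databases/go_client.py); the local file name go.owl is a placeholder for a copy of http://geneontology.org/ontology/go.owl and is not part of the record.

# Hypothetical usage; assumes go.owl has been downloaded locally (assumption, not from the record).
from indra.databases import go_client

g = go_client.load_go_graph('go.owl')        # parses the OWL file and caches the RDF graph
print(len(g))                                 # number of RDF triples in the graph
# Subsequent calls return the cached graph without re-parsing.
assert go_client.load_go_graph('go.owl') is g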
sorgerlab/indra | indra/databases/go_client.py | update_id_mappings | def update_id_mappings(g):
"""Compile all ID->label mappings and save to a TSV file.
Parameters
----------
g : rdflib.Graph
RDF graph containing GO data.
"""
g = load_go_graph(go_owl_path)
query = _prefixes + """
SELECT ?id ?label
WHERE {
?class oboInOwl:id ?id .
?class rdfs:label ?label
}
"""
logger.info("Querying for GO ID mappings")
res = g.query(query)
mappings = []
for id_lit, label_lit in sorted(res, key=lambda x: x[0]):
mappings.append((id_lit.value, label_lit.value))
# Write to file
write_unicode_csv(go_mappings_file, mappings, delimiter='\t') | python | def update_id_mappings(g):
"""Compile all ID->label mappings and save to a TSV file.
Parameters
----------
g : rdflib.Graph
RDF graph containing GO data.
"""
g = load_go_graph(go_owl_path)
query = _prefixes + """
SELECT ?id ?label
WHERE {
?class oboInOwl:id ?id .
?class rdfs:label ?label
}
"""
logger.info("Querying for GO ID mappings")
res = g.query(query)
mappings = []
for id_lit, label_lit in sorted(res, key=lambda x: x[0]):
mappings.append((id_lit.value, label_lit.value))
# Write to file
write_unicode_csv(go_mappings_file, mappings, delimiter='\t') | [
"def",
"update_id_mappings",
"(",
"g",
")",
":",
"g",
"=",
"load_go_graph",
"(",
"go_owl_path",
")",
"query",
"=",
"_prefixes",
"+",
"\"\"\"\n SELECT ?id ?label\n WHERE {\n ?class oboInOwl:id ?id .\n ?class rdfs:label ?label\n }\n \"\"\"",
"logger",
".",
"info",
"(",
"\"Querying for GO ID mappings\"",
")",
"res",
"=",
"g",
".",
"query",
"(",
"query",
")",
"mappings",
"=",
"[",
"]",
"for",
"id_lit",
",",
"label_lit",
"in",
"sorted",
"(",
"res",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
":",
"mappings",
".",
"append",
"(",
"(",
"id_lit",
".",
"value",
",",
"label_lit",
".",
"value",
")",
")",
"# Write to file",
"write_unicode_csv",
"(",
"go_mappings_file",
",",
"mappings",
",",
"delimiter",
"=",
"'\\t'",
")"
]
| Compile all ID->label mappings and save to a TSV file.
Parameters
----------
g : rdflib.Graph
RDF graph containing GO data. | [
"Compile",
"all",
"ID",
"-",
">",
"label",
"mappings",
"and",
"save",
"to",
"a",
"TSV",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/go_client.py#L80-L103 | train |
sorgerlab/indra | indra/databases/ndex_client.py | get_default_ndex_cred | def get_default_ndex_cred(ndex_cred):
"""Gets the NDEx credentials from the dict, or tries the environment if None"""
if ndex_cred:
username = ndex_cred.get('user')
password = ndex_cred.get('password')
if username is not None and password is not None:
return username, password
username = get_config('NDEX_USERNAME')
password = get_config('NDEX_PASSWORD')
return username, password | python | def get_default_ndex_cred(ndex_cred):
"""Gets the NDEx credentials from the dict, or tries the environment if None"""
if ndex_cred:
username = ndex_cred.get('user')
password = ndex_cred.get('password')
if username is not None and password is not None:
return username, password
username = get_config('NDEX_USERNAME')
password = get_config('NDEX_PASSWORD')
return username, password | [
"def",
"get_default_ndex_cred",
"(",
"ndex_cred",
")",
":",
"if",
"ndex_cred",
":",
"username",
"=",
"ndex_cred",
".",
"get",
"(",
"'user'",
")",
"password",
"=",
"ndex_cred",
".",
"get",
"(",
"'password'",
")",
"if",
"username",
"is",
"not",
"None",
"and",
"password",
"is",
"not",
"None",
":",
"return",
"username",
",",
"password",
"username",
"=",
"get_config",
"(",
"'NDEX_USERNAME'",
")",
"password",
"=",
"get_config",
"(",
"'NDEX_PASSWORD'",
")",
"return",
"username",
",",
"password"
]
| Gets the NDEx credentials from the dict, or tries the environment if None | [
"Gets",
"the",
"NDEx",
"credentials",
"from",
"the",
"dict",
"or",
"tries",
"the",
"environment",
"if",
"None"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/ndex_client.py#L17-L29 | train |
sorgerlab/indra | indra/databases/ndex_client.py | send_request | def send_request(ndex_service_url, params, is_json=True, use_get=False):
"""Send a request to the NDEx server.
Parameters
----------
ndex_service_url : str
The URL of the service to use for the request.
params : dict
A dictionary of parameters to send with the request. Parameter keys
differ based on the type of request.
is_json : bool
True if the response is in json format, otherwise it is assumed to be
text. Default: False
use_get : bool
True if the request needs to use GET instead of POST.
Returns
-------
res : str
Depending on the type of service and the is_json parameter, this
function either returns a text string or a json dict.
"""
if use_get:
res = requests.get(ndex_service_url, json=params)
else:
res = requests.post(ndex_service_url, json=params)
status = res.status_code
# If response is immediate, we get 200
if status == 200:
if is_json:
return res.json()
else:
return res.text
# If there is a continuation of the message we get status 300, handled below.
# Otherwise we return None.
elif status != 300:
logger.error('Request returned with code %d' % status)
return None
# In case the response is not immediate, a task ID can be used to get
# the result.
task_id = res.json().get('task_id')
logger.info('NDEx task submitted...')
time_used = 0
try:
while status != 200:
res = requests.get(ndex_base_url + '/task/' + task_id)
status = res.status_code
if status != 200:
time.sleep(5)
time_used += 5
except KeyError:
next
return None
logger.info('NDEx task complete.')
if is_json:
return res.json()
else:
return res.text | python | def send_request(ndex_service_url, params, is_json=True, use_get=False):
"""Send a request to the NDEx server.
Parameters
----------
ndex_service_url : str
The URL of the service to use for the request.
params : dict
A dictionary of parameters to send with the request. Parameter keys
differ based on the type of request.
is_json : bool
True if the response is in json format, otherwise it is assumed to be
text. Default: False
use_get : bool
True if the request needs to use GET instead of POST.
Returns
-------
res : str
Depending on the type of service and the is_json parameter, this
function either returns a text string or a json dict.
"""
if use_get:
res = requests.get(ndex_service_url, json=params)
else:
res = requests.post(ndex_service_url, json=params)
status = res.status_code
# If response is immediate, we get 200
if status == 200:
if is_json:
return res.json()
else:
return res.text
# If there is a continuation of the message we get status 300, handled below.
# Otherwise we return None.
elif status != 300:
logger.error('Request returned with code %d' % status)
return None
# In case the response is not immediate, a task ID can be used to get
# the result.
task_id = res.json().get('task_id')
logger.info('NDEx task submitted...')
time_used = 0
try:
while status != 200:
res = requests.get(ndex_base_url + '/task/' + task_id)
status = res.status_code
if status != 200:
time.sleep(5)
time_used += 5
except KeyError:
next
return None
logger.info('NDEx task complete.')
if is_json:
return res.json()
else:
return res.text | [
"def",
"send_request",
"(",
"ndex_service_url",
",",
"params",
",",
"is_json",
"=",
"True",
",",
"use_get",
"=",
"False",
")",
":",
"if",
"use_get",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"ndex_service_url",
",",
"json",
"=",
"params",
")",
"else",
":",
"res",
"=",
"requests",
".",
"post",
"(",
"ndex_service_url",
",",
"json",
"=",
"params",
")",
"status",
"=",
"res",
".",
"status_code",
"# If response is immediate, we get 200",
"if",
"status",
"==",
"200",
":",
"if",
"is_json",
":",
"return",
"res",
".",
"json",
"(",
")",
"else",
":",
"return",
"res",
".",
"text",
"# If there is a continuation of the message we get status 300, handled below.",
"# Otherwise we return None.",
"elif",
"status",
"!=",
"300",
":",
"logger",
".",
"error",
"(",
"'Request returned with code %d'",
"%",
"status",
")",
"return",
"None",
"# In case the response is not immediate, a task ID can be used to get",
"# the result.",
"task_id",
"=",
"res",
".",
"json",
"(",
")",
".",
"get",
"(",
"'task_id'",
")",
"logger",
".",
"info",
"(",
"'NDEx task submitted...'",
")",
"time_used",
"=",
"0",
"try",
":",
"while",
"status",
"!=",
"200",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"ndex_base_url",
"+",
"'/task/'",
"+",
"task_id",
")",
"status",
"=",
"res",
".",
"status_code",
"if",
"status",
"!=",
"200",
":",
"time",
".",
"sleep",
"(",
"5",
")",
"time_used",
"+=",
"5",
"except",
"KeyError",
":",
"next",
"return",
"None",
"logger",
".",
"info",
"(",
"'NDEx task complete.'",
")",
"if",
"is_json",
":",
"return",
"res",
".",
"json",
"(",
")",
"else",
":",
"return",
"res",
".",
"text"
]
| Send a request to the NDEx server.
Parameters
----------
ndex_service_url : str
The URL of the service to use for the request.
params : dict
A dictionary of parameters to send with the request. Parameter keys
differ based on the type of request.
is_json : bool
True if the response is in json format, otherwise it is assumed to be
text. Default: False
use_get : bool
True if the request needs to use GET instead of POST.
Returns
-------
res : str
Depending on the type of service and the is_json parameter, this
function either returns a text string or a json dict. | [
"Send",
"a",
"request",
"to",
"the",
"NDEx",
"server",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/ndex_client.py#L32-L89 | train |
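A sketch of how send_request might be called, based only on the signature and docstring in the record above. The endpoint URL and parameter names are illustrative placeholders, not values taken from the record or guaranteed by the NDEx API.

# Hypothetical call; the endpoint and params below are placeholders for illustration.
from indra.databases import ndex_client

url = 'http://public.ndexbio.org/v2/search/network'
params = {'searchString': 'BRAF', 'size': 5}
res = ndex_client.send_request(url, params, is_json=True, use_get=False)
if res is None:
    print('Request failed or no content was returned.')
else:
    print(type(res))  # a dict when is_json=True, otherwise a str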
sorgerlab/indra | indra/databases/ndex_client.py | update_network | def update_network(cx_str, network_id, ndex_cred=None):
"""Update an existing CX network on NDEx with new CX content.
Parameters
----------
cx_str : str
String containing the CX content.
network_id : str
UUID of the network on NDEx.
ndex_cred : dict
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password
"""
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2(server, username, password)
try:
logger.info('Getting network summary...')
summary = nd.get_network_summary(network_id)
except Exception as e:
logger.error('Could not get NDEx network summary.')
logger.error(e)
return
# Update network content
try:
logger.info('Updating network...')
cx_stream = io.BytesIO(cx_str.encode('utf-8'))
nd.update_cx_network(cx_stream, network_id)
except Exception as e:
logger.error('Could not update NDEx network.')
logger.error(e)
return
# Update network profile
ver_str = summary.get('version')
new_ver = _increment_ndex_ver(ver_str)
profile = {'name': summary.get('name'),
'description': summary.get('description'),
'version': new_ver,
}
logger.info('Updating NDEx network (%s) profile to %s',
network_id, profile)
profile_retries = 5
for _ in range(profile_retries):
try:
time.sleep(5)
nd.update_network_profile(network_id, profile)
break
except Exception as e:
logger.error('Could not update NDEx network profile.')
logger.error(e)
set_style(network_id, ndex_cred) | python | def update_network(cx_str, network_id, ndex_cred=None):
"""Update an existing CX network on NDEx with new CX content.
Parameters
----------
cx_str : str
String containing the CX content.
network_id : str
UUID of the network on NDEx.
ndex_cred : dict
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password
"""
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2(server, username, password)
try:
logger.info('Getting network summary...')
summary = nd.get_network_summary(network_id)
except Exception as e:
logger.error('Could not get NDEx network summary.')
logger.error(e)
return
# Update network content
try:
logger.info('Updating network...')
cx_stream = io.BytesIO(cx_str.encode('utf-8'))
nd.update_cx_network(cx_stream, network_id)
except Exception as e:
logger.error('Could not update NDEx network.')
logger.error(e)
return
# Update network profile
ver_str = summary.get('version')
new_ver = _increment_ndex_ver(ver_str)
profile = {'name': summary.get('name'),
'description': summary.get('description'),
'version': new_ver,
}
logger.info('Updating NDEx network (%s) profile to %s',
network_id, profile)
profile_retries = 5
for _ in range(profile_retries):
try:
time.sleep(5)
nd.update_network_profile(network_id, profile)
break
except Exception as e:
logger.error('Could not update NDEx network profile.')
logger.error(e)
set_style(network_id, ndex_cred) | [
"def",
"update_network",
"(",
"cx_str",
",",
"network_id",
",",
"ndex_cred",
"=",
"None",
")",
":",
"server",
"=",
"'http://public.ndexbio.org'",
"username",
",",
"password",
"=",
"get_default_ndex_cred",
"(",
"ndex_cred",
")",
"nd",
"=",
"ndex2",
".",
"client",
".",
"Ndex2",
"(",
"server",
",",
"username",
",",
"password",
")",
"try",
":",
"logger",
".",
"info",
"(",
"'Getting network summary...'",
")",
"summary",
"=",
"nd",
".",
"get_network_summary",
"(",
"network_id",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not get NDEx network summary.'",
")",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"# Update network content",
"try",
":",
"logger",
".",
"info",
"(",
"'Updating network...'",
")",
"cx_stream",
"=",
"io",
".",
"BytesIO",
"(",
"cx_str",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"nd",
".",
"update_cx_network",
"(",
"cx_stream",
",",
"network_id",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not update NDEx network.'",
")",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"# Update network profile",
"ver_str",
"=",
"summary",
".",
"get",
"(",
"'version'",
")",
"new_ver",
"=",
"_increment_ndex_ver",
"(",
"ver_str",
")",
"profile",
"=",
"{",
"'name'",
":",
"summary",
".",
"get",
"(",
"'name'",
")",
",",
"'description'",
":",
"summary",
".",
"get",
"(",
"'description'",
")",
",",
"'version'",
":",
"new_ver",
",",
"}",
"logger",
".",
"info",
"(",
"'Updating NDEx network (%s) profile to %s'",
",",
"network_id",
",",
"profile",
")",
"profile_retries",
"=",
"5",
"for",
"_",
"in",
"range",
"(",
"profile_retries",
")",
":",
"try",
":",
"time",
".",
"sleep",
"(",
"5",
")",
"nd",
".",
"update_network_profile",
"(",
"network_id",
",",
"profile",
")",
"break",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not update NDEx network profile.'",
")",
"logger",
".",
"error",
"(",
"e",
")",
"set_style",
"(",
"network_id",
",",
"ndex_cred",
")"
]
| Update an existing CX network on NDEx with new CX content.
Parameters
----------
cx_str : str
String containing the CX content.
network_id : str
UUID of the network on NDEx.
ndex_cred : dict
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password | [
"Update",
"an",
"existing",
"CX",
"network",
"on",
"NDEx",
"with",
"new",
"CX",
"content",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/ndex_client.py#L134-L189 | train |
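A sketch of a call to update_network based on the record above. The network UUID and credentials are placeholders; the CX string would typically come from an assembler that produces CX content, which is outside the scope of this record.

# Hypothetical call; all values below are placeholders.
from indra.databases import ndex_client

cx_str = '...'                                         # serialized CX content (placeholder)
network_id = '00000000-0000-0000-0000-000000000000'    # NDEx network UUID (placeholder)
ndex_cred = {'user': 'my_user', 'password': 'my_password'}
# If ndex_cred is omitted, the NDEX_USERNAME/NDEX_PASSWORD configuration is used instead,
# as shown in the get_default_ndex_cred record above.
ndex_client.update_network(cx_str, network_id, ndex_cred=ndex_cred)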
sorgerlab/indra | indra/databases/ndex_client.py | set_style | def set_style(network_id, ndex_cred=None, template_id=None):
"""Set the style of the network to a given template network's style
Parameters
----------
network_id : str
The UUID of the NDEx network whose style is to be changed.
ndex_cred : dict
A dictionary of NDEx credentials.
template_id : Optional[str]
The UUID of the NDEx network whose style is used on the
network specified in the first argument.
"""
if not template_id:
template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf"
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
source_network = ndex2.create_nice_cx_from_server(username=username,
password=password,
uuid=network_id,
server=server)
source_network.apply_template(server, template_id)
source_network.update_to(network_id, server=server, username=username,
password=password) | python | def set_style(network_id, ndex_cred=None, template_id=None):
"""Set the style of the network to a given template network's style
Parameters
----------
network_id : str
The UUID of the NDEx network whose style is to be changed.
ndex_cred : dict
A dictionary of NDEx credentials.
template_id : Optional[str]
The UUID of the NDEx network whose style is used on the
network specified in the first argument.
"""
if not template_id:
template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf"
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
source_network = ndex2.create_nice_cx_from_server(username=username,
password=password,
uuid=network_id,
server=server)
source_network.apply_template(server, template_id)
source_network.update_to(network_id, server=server, username=username,
password=password) | [
"def",
"set_style",
"(",
"network_id",
",",
"ndex_cred",
"=",
"None",
",",
"template_id",
"=",
"None",
")",
":",
"if",
"not",
"template_id",
":",
"template_id",
"=",
"\"ea4ea3b7-6903-11e7-961c-0ac135e8bacf\"",
"server",
"=",
"'http://public.ndexbio.org'",
"username",
",",
"password",
"=",
"get_default_ndex_cred",
"(",
"ndex_cred",
")",
"source_network",
"=",
"ndex2",
".",
"create_nice_cx_from_server",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"uuid",
"=",
"network_id",
",",
"server",
"=",
"server",
")",
"source_network",
".",
"apply_template",
"(",
"server",
",",
"template_id",
")",
"source_network",
".",
"update_to",
"(",
"network_id",
",",
"server",
"=",
"server",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")"
]
| Set the style of the network to a given template network's style
Parameters
----------
network_id : str
The UUID of the NDEx network whose style is to be changed.
ndex_cred : dict
A dictionary of NDEx credentials.
template_id : Optional[str]
The UUID of the NDEx network whose style is used on the
network specified in the first argument. | [
"Set",
"the",
"style",
"of",
"the",
"network",
"to",
"a",
"given",
"template",
"network",
"s",
"style"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/ndex_client.py#L192-L219 | train |
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel.initialize | def initialize(self, cfg_file=None, mode=None):
"""Initialize the model for simulation, possibly given a config file.
Parameters
----------
cfg_file : Optional[str]
The name of the configuration file to load, optional.
"""
self.sim = ScipyOdeSimulator(self.model)
self.state = numpy.array(copy.copy(self.sim.initials)[0])
self.time = numpy.array(0.0)
self.status = 'initialized' | python | def initialize(self, cfg_file=None, mode=None):
"""Initialize the model for simulation, possibly given a config file.
Parameters
----------
cfg_file : Optional[str]
The name of the configuration file to load, optional.
"""
self.sim = ScipyOdeSimulator(self.model)
self.state = numpy.array(copy.copy(self.sim.initials)[0])
self.time = numpy.array(0.0)
self.status = 'initialized' | [
"def",
"initialize",
"(",
"self",
",",
"cfg_file",
"=",
"None",
",",
"mode",
"=",
"None",
")",
":",
"self",
".",
"sim",
"=",
"ScipyOdeSimulator",
"(",
"self",
".",
"model",
")",
"self",
".",
"state",
"=",
"numpy",
".",
"array",
"(",
"copy",
".",
"copy",
"(",
"self",
".",
"sim",
".",
"initials",
")",
"[",
"0",
"]",
")",
"self",
".",
"time",
"=",
"numpy",
".",
"array",
"(",
"0.0",
")",
"self",
".",
"status",
"=",
"'initialized'"
]
| Initialize the model for simulation, possibly given a config file.
Parameters
----------
cfg_file : Optional[str]
The name of the configuration file to load, optional. | [
"Initialize",
"the",
"model",
"for",
"simulation",
"possibly",
"given",
"a",
"config",
"file",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L74-L85 | train |
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel.update | def update(self, dt=None):
"""Simulate the model for a given time interval.
Parameters
----------
dt : Optional[float]
The time step to simulate, if None, the default built-in time step
is used.
"""
# EMELI passes dt = -1 so we need to handle that here
dt = dt if (dt is not None and dt > 0) else self.dt
tspan = [0, dt]
# Run simulation with initials set to current state
res = self.sim.run(tspan=tspan, initials=self.state)
# Set the state based on the result here
self.state = res.species[-1]
self.time += dt
if self.time > self.stop_time:
self.DONE = True
print((self.time, self.state))
self.time_course.append((self.time.copy(), self.state.copy())) | python | def update(self, dt=None):
"""Simulate the model for a given time interval.
Parameters
----------
dt : Optional[float]
The time step to simulate, if None, the default built-in time step
is used.
"""
# EMELI passes dt = -1 so we need to handle that here
dt = dt if (dt is not None and dt > 0) else self.dt
tspan = [0, dt]
# Run simulation with initials set to current state
res = self.sim.run(tspan=tspan, initials=self.state)
# Set the state based on the result here
self.state = res.species[-1]
self.time += dt
if self.time > self.stop_time:
self.DONE = True
print((self.time, self.state))
self.time_course.append((self.time.copy(), self.state.copy())) | [
"def",
"update",
"(",
"self",
",",
"dt",
"=",
"None",
")",
":",
"# EMELI passes dt = -1 so we need to handle that here",
"dt",
"=",
"dt",
"if",
"(",
"dt",
"is",
"not",
"None",
"and",
"dt",
">",
"0",
")",
"else",
"self",
".",
"dt",
"tspan",
"=",
"[",
"0",
",",
"dt",
"]",
"# Run simulaton with initials set to current state",
"res",
"=",
"self",
".",
"sim",
".",
"run",
"(",
"tspan",
"=",
"tspan",
",",
"initials",
"=",
"self",
".",
"state",
")",
"# Set the state based on the result here",
"self",
".",
"state",
"=",
"res",
".",
"species",
"[",
"-",
"1",
"]",
"self",
".",
"time",
"+=",
"dt",
"if",
"self",
".",
"time",
">",
"self",
".",
"stop_time",
":",
"self",
".",
"DONE",
"=",
"True",
"print",
"(",
"(",
"self",
".",
"time",
",",
"self",
".",
"state",
")",
")",
"self",
".",
"time_course",
".",
"append",
"(",
"(",
"self",
".",
"time",
".",
"copy",
"(",
")",
",",
"self",
".",
"state",
".",
"copy",
"(",
")",
")",
")"
]
| Simulate the model for a given time interval.
Parameters
----------
dt : Optional[float]
The time step to simulate, if None, the default built-in time step
is used. | [
"Simulate",
"the",
"model",
"for",
"a",
"given",
"time",
"interval",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L87-L107 | train |
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel.set_value | def set_value(self, var_name, value):
"""Set the value of a given variable to a given value.
Parameters
----------
var_name : str
The name of the variable in the model whose value should be set.
value : float
The value the variable should be set to
"""
if var_name in self.outside_name_map:
var_name = self.outside_name_map[var_name]
print('%s=%.5f' % (var_name, 1e9*value))
if var_name == 'Precipitation':
value = 1e9*value
species_idx = self.species_name_map[var_name]
self.state[species_idx] = value | python | def set_value(self, var_name, value):
"""Set the value of a given variable to a given value.
Parameters
----------
var_name : str
The name of the variable in the model whose value should be set.
value : float
The value the variable should be set to
"""
if var_name in self.outside_name_map:
var_name = self.outside_name_map[var_name]
print('%s=%.5f' % (var_name, 1e9*value))
if var_name == 'Precipitation':
value = 1e9*value
species_idx = self.species_name_map[var_name]
self.state[species_idx] = value | [
"def",
"set_value",
"(",
"self",
",",
"var_name",
",",
"value",
")",
":",
"if",
"var_name",
"in",
"self",
".",
"outside_name_map",
":",
"var_name",
"=",
"self",
".",
"outside_name_map",
"[",
"var_name",
"]",
"print",
"(",
"'%s=%.5f'",
"%",
"(",
"var_name",
",",
"1e9",
"*",
"value",
")",
")",
"if",
"var_name",
"==",
"'Precipitation'",
":",
"value",
"=",
"1e9",
"*",
"value",
"species_idx",
"=",
"self",
".",
"species_name_map",
"[",
"var_name",
"]",
"self",
".",
"state",
"[",
"species_idx",
"]",
"=",
"value"
]
| Set the value of a given variable to a given value.
Parameters
----------
var_name : str
The name of the variable in the model whose value should be set.
value : float
The value the variable should be set to | [
"Set",
"the",
"value",
"of",
"a",
"given",
"variable",
"to",
"a",
"given",
"value",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L114-L131 | train |
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel.get_value | def get_value(self, var_name):
"""Return the value of a given variable.
Parameters
----------
var_name : str
The name of the variable whose value should be returned
Returns
-------
value : float
The value of the given variable in the current state
"""
if var_name in self.outside_name_map:
var_name = self.outside_name_map[var_name]
species_idx = self.species_name_map[var_name]
return self.state[species_idx] | python | def get_value(self, var_name):
"""Return the value of a given variable.
Parameters
----------
var_name : str
The name of the variable whose value should be returned
Returns
-------
value : float
The value of the given variable in the current state
"""
if var_name in self.outside_name_map:
var_name = self.outside_name_map[var_name]
species_idx = self.species_name_map[var_name]
return self.state[species_idx] | [
"def",
"get_value",
"(",
"self",
",",
"var_name",
")",
":",
"if",
"var_name",
"in",
"self",
".",
"outside_name_map",
":",
"var_name",
"=",
"self",
".",
"outside_name_map",
"[",
"var_name",
"]",
"species_idx",
"=",
"self",
".",
"species_name_map",
"[",
"var_name",
"]",
"return",
"self",
".",
"state",
"[",
"species_idx",
"]"
]
| Return the value of a given variable.
Parameters
----------
var_name : str
The name of the variable whose value should be returned
Returns
-------
value : float
The value of the given variable in the current state | [
"Return",
"the",
"value",
"of",
"a",
"given",
"variable",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L147-L163 | train |
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel.get_input_var_names | def get_input_var_names(self):
"""Return a list of variables names that can be set as input.
Returns
-------
var_names : list[str]
A list of variable names that can be set from the outside
"""
in_vars = copy.copy(self.input_vars)
for idx, var in enumerate(in_vars):
if self._map_in_out(var) is not None:
in_vars[idx] = self._map_in_out(var)
return in_vars | python | def get_input_var_names(self):
"""Return a list of variables names that can be set as input.
Returns
-------
var_names : list[str]
A list of variable names that can be set from the outside
"""
in_vars = copy.copy(self.input_vars)
for idx, var in enumerate(in_vars):
if self._map_in_out(var) is not None:
in_vars[idx] = self._map_in_out(var)
return in_vars | [
"def",
"get_input_var_names",
"(",
"self",
")",
":",
"in_vars",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"input_vars",
")",
"for",
"idx",
",",
"var",
"in",
"enumerate",
"(",
"in_vars",
")",
":",
"if",
"self",
".",
"_map_in_out",
"(",
"var",
")",
"is",
"not",
"None",
":",
"in_vars",
"[",
"idx",
"]",
"=",
"self",
".",
"_map_in_out",
"(",
"var",
")",
"return",
"in_vars"
]
| Return a list of variable names that can be set as input.
Returns
-------
var_names : list[str]
A list of variable names that can be set from the outside | [
"Return",
"a",
"list",
"of",
"variables",
"names",
"that",
"can",
"be",
"set",
"as",
"input",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L203-L215 | train |
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel.get_output_var_names | def get_output_var_names(self):
"""Return a list of variables names that can be read as output.
Returns
-------
var_names : list[str]
A list of variable names that can be read from the outside
"""
# Return all the variables that aren't input variables
all_vars = list(self.species_name_map.keys())
output_vars = list(set(all_vars) - set(self.input_vars))
# Re-map to outside var names if needed
for idx, var in enumerate(output_vars):
if self._map_in_out(var) is not None:
output_vars[idx] = self._map_in_out(var)
return output_vars | python | def get_output_var_names(self):
"""Return a list of variables names that can be read as output.
Returns
-------
var_names : list[str]
A list of variable names that can be read from the outside
"""
# Return all the variables that aren't input variables
all_vars = list(self.species_name_map.keys())
output_vars = list(set(all_vars) - set(self.input_vars))
# Re-map to outside var names if needed
for idx, var in enumerate(output_vars):
if self._map_in_out(var) is not None:
output_vars[idx] = self._map_in_out(var)
return output_vars | [
"def",
"get_output_var_names",
"(",
"self",
")",
":",
"# Return all the variables that aren't input variables",
"all_vars",
"=",
"list",
"(",
"self",
".",
"species_name_map",
".",
"keys",
"(",
")",
")",
"output_vars",
"=",
"list",
"(",
"set",
"(",
"all_vars",
")",
"-",
"set",
"(",
"self",
".",
"input_vars",
")",
")",
"# Re-map to outside var names if needed",
"for",
"idx",
",",
"var",
"in",
"enumerate",
"(",
"output_vars",
")",
":",
"if",
"self",
".",
"_map_in_out",
"(",
"var",
")",
"is",
"not",
"None",
":",
"output_vars",
"[",
"idx",
"]",
"=",
"self",
".",
"_map_in_out",
"(",
"var",
")",
"return",
"output_vars"
]
| Return a list of variable names that can be read as output.
Returns
-------
var_names : list[str]
A list of variable names that can be read from the outside | [
"Return",
"a",
"list",
"of",
"variables",
"names",
"that",
"can",
"be",
"read",
"as",
"output",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L217-L232 | train |
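The BMIModel records above (initialize, update, set_value, get_value, get_input_var_names, get_output_var_names) together describe the basic BMI lifecycle. The sketch below strings those calls together. How the BMIModel instance is constructed is not part of these records, so bmi_model is assumed to exist already, and 'Precipitation' is used as a variable name only because set_value refers to it.

# Hypothetical BMI lifecycle; bmi_model is an already-constructed BMIModel wrapping
# a PySB model (assumption), and the variable name is an assumption as well.
bmi_model.initialize()                        # builds the ScipyOdeSimulator and initial state
print(bmi_model.get_input_var_names())        # variables that can be set from the outside
print(bmi_model.get_output_var_names())       # variables that can be read from the outside
bmi_model.set_value('Precipitation', 1e-9)    # set an input variable
bmi_model.update(dt=100.0)                    # advance the simulation by 100 time units
print(bmi_model.get_value('Precipitation'))   # read the variable back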
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel.make_repository_component | def make_repository_component(self):
"""Return an XML string representing this BMI in a workflow.
This description is required by EMELI to discover and load models.
Returns
-------
xml : str
String serialized XML representation of the component in the
model repository.
"""
component = etree.Element('component')
comp_name = etree.Element('comp_name')
comp_name.text = self.model.name
component.append(comp_name)
mod_path = etree.Element('module_path')
mod_path.text = os.getcwd()
component.append(mod_path)
mod_name = etree.Element('module_name')
mod_name.text = self.model.name
component.append(mod_name)
class_name = etree.Element('class_name')
class_name.text = 'model_class'
component.append(class_name)
model_name = etree.Element('model_name')
model_name.text = self.model.name
component.append(model_name)
lang = etree.Element('language')
lang.text = 'python'
component.append(lang)
ver = etree.Element('version')
ver.text = self.get_attribute('version')
component.append(ver)
au = etree.Element('author')
au.text = self.get_attribute('author_name')
component.append(au)
hu = etree.Element('help_url')
hu.text = 'http://github.com/sorgerlab/indra'
component.append(hu)
for tag in ('cfg_template', 'time_step_type', 'time_units',
'grid_type', 'description', 'comp_type', 'uses_types'):
elem = etree.Element(tag)
elem.text = tag
component.append(elem)
return etree.tounicode(component, pretty_print=True) | python | def make_repository_component(self):
"""Return an XML string representing this BMI in a workflow.
This description is required by EMELI to discover and load models.
Returns
-------
xml : str
String serialized XML representation of the component in the
model repository.
"""
component = etree.Element('component')
comp_name = etree.Element('comp_name')
comp_name.text = self.model.name
component.append(comp_name)
mod_path = etree.Element('module_path')
mod_path.text = os.getcwd()
component.append(mod_path)
mod_name = etree.Element('module_name')
mod_name.text = self.model.name
component.append(mod_name)
class_name = etree.Element('class_name')
class_name.text = 'model_class'
component.append(class_name)
model_name = etree.Element('model_name')
model_name.text = self.model.name
component.append(model_name)
lang = etree.Element('language')
lang.text = 'python'
component.append(lang)
ver = etree.Element('version')
ver.text = self.get_attribute('version')
component.append(ver)
au = etree.Element('author')
au.text = self.get_attribute('author_name')
component.append(au)
hu = etree.Element('help_url')
hu.text = 'http://github.com/sorgerlab/indra'
component.append(hu)
for tag in ('cfg_template', 'time_step_type', 'time_units',
'grid_type', 'description', 'comp_type', 'uses_types'):
elem = etree.Element(tag)
elem.text = tag
component.append(elem)
return etree.tounicode(component, pretty_print=True) | [
"def",
"make_repository_component",
"(",
"self",
")",
":",
"component",
"=",
"etree",
".",
"Element",
"(",
"'component'",
")",
"comp_name",
"=",
"etree",
".",
"Element",
"(",
"'comp_name'",
")",
"comp_name",
".",
"text",
"=",
"self",
".",
"model",
".",
"name",
"component",
".",
"append",
"(",
"comp_name",
")",
"mod_path",
"=",
"etree",
".",
"Element",
"(",
"'module_path'",
")",
"mod_path",
".",
"text",
"=",
"os",
".",
"getcwd",
"(",
")",
"component",
".",
"append",
"(",
"mod_path",
")",
"mod_name",
"=",
"etree",
".",
"Element",
"(",
"'module_name'",
")",
"mod_name",
".",
"text",
"=",
"self",
".",
"model",
".",
"name",
"component",
".",
"append",
"(",
"mod_name",
")",
"class_name",
"=",
"etree",
".",
"Element",
"(",
"'class_name'",
")",
"class_name",
".",
"text",
"=",
"'model_class'",
"component",
".",
"append",
"(",
"class_name",
")",
"model_name",
"=",
"etree",
".",
"Element",
"(",
"'model_name'",
")",
"model_name",
".",
"text",
"=",
"self",
".",
"model",
".",
"name",
"component",
".",
"append",
"(",
"model_name",
")",
"lang",
"=",
"etree",
".",
"Element",
"(",
"'language'",
")",
"lang",
".",
"text",
"=",
"'python'",
"component",
".",
"append",
"(",
"lang",
")",
"ver",
"=",
"etree",
".",
"Element",
"(",
"'version'",
")",
"ver",
".",
"text",
"=",
"self",
".",
"get_attribute",
"(",
"'version'",
")",
"component",
".",
"append",
"(",
"ver",
")",
"au",
"=",
"etree",
".",
"Element",
"(",
"'author'",
")",
"au",
".",
"text",
"=",
"self",
".",
"get_attribute",
"(",
"'author_name'",
")",
"component",
".",
"append",
"(",
"au",
")",
"hu",
"=",
"etree",
".",
"Element",
"(",
"'help_url'",
")",
"hu",
".",
"text",
"=",
"'http://github.com/sorgerlab/indra'",
"component",
".",
"append",
"(",
"hu",
")",
"for",
"tag",
"in",
"(",
"'cfg_template'",
",",
"'time_step_type'",
",",
"'time_units'",
",",
"'grid_type'",
",",
"'description'",
",",
"'comp_type'",
",",
"'uses_types'",
")",
":",
"elem",
"=",
"etree",
".",
"Element",
"(",
"tag",
")",
"elem",
".",
"text",
"=",
"tag",
"component",
".",
"append",
"(",
"elem",
")",
"return",
"etree",
".",
"tounicode",
"(",
"component",
",",
"pretty_print",
"=",
"True",
")"
]
| Return an XML string representing this BMI in a workflow.
This description is required by EMELI to discover and load models.
Returns
-------
xml : str
String serialized XML representation of the component in the
model repository. | [
"Return",
"an",
"XML",
"string",
"representing",
"this",
"BMI",
"in",
"a",
"workflow",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L336-L391 | train |
sorgerlab/indra | indra/assemblers/pysb/bmi_wrapper.py | BMIModel._map_in_out | def _map_in_out(self, inside_var_name):
"""Return the external name of a variable mapped from inside."""
for out_name, in_name in self.outside_name_map.items():
if inside_var_name == in_name:
return out_name
return None | python | def _map_in_out(self, inside_var_name):
"""Return the external name of a variable mapped from inside."""
for out_name, in_name in self.outside_name_map.items():
if inside_var_name == in_name:
return out_name
return None | [
"def",
"_map_in_out",
"(",
"self",
",",
"inside_var_name",
")",
":",
"for",
"out_name",
",",
"in_name",
"in",
"self",
".",
"outside_name_map",
".",
"items",
"(",
")",
":",
"if",
"inside_var_name",
"==",
"in_name",
":",
"return",
"out_name",
"return",
"None"
]
| Return the external name of a variable mapped from inside. | [
"Return",
"the",
"external",
"name",
"of",
"a",
"variable",
"mapped",
"from",
"inside",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/bmi_wrapper.py#L417-L422 | train |
sorgerlab/indra | indra/tools/reading/pmid_reading/read_pmids.py | read_pmid | def read_pmid(pmid, source, cont_path, sparser_version, outbuf=None,
cleanup=True):
"Run sparser on a single pmid."
signal.signal(signal.SIGALRM, _timeout_handler)
signal.alarm(60)
try:
if (source is 'content_not_found'
or source.startswith('unhandled_content_type')
or source.endswith('failure')):
logger.info('No content read for %s.' % pmid)
return # No real content here.
if cont_path.endswith('.nxml') and source.startswith('pmc'):
new_fname = 'PMC%s%d.nxml' % (pmid, mp.current_process().pid)
os.rename(cont_path, new_fname)
try:
sp = sparser.process_nxml_file(
new_fname,
outbuf=outbuf,
cleanup=cleanup
)
finally:
if cleanup and os.path.exists(new_fname):
os.remove(new_fname)
elif cont_path.endswith('.txt'):
content_str = ''
with open(cont_path, 'r') as f:
content_str = f.read()
sp = sparser.process_text(
content_str,
outbuf=outbuf,
cleanup=cleanup
)
signal.alarm(0)
except Exception as e:
logger.error('Failed to process data for %s.' % pmid)
logger.exception(e)
signal.alarm(0)
return
if sp is None:
logger.error('Failed to run sparser on pmid: %s.' % pmid)
return
# At this point, we rewrite the PMID in the Evidence of Sparser
# Statements according to the actual PMID that was read.
sp.set_statements_pmid(pmid)
s3_client.put_reader_output('sparser', sp.json_stmts, pmid,
sparser_version, source)
return sp.statements | python | def read_pmid(pmid, source, cont_path, sparser_version, outbuf=None,
cleanup=True):
"Run sparser on a single pmid."
signal.signal(signal.SIGALRM, _timeout_handler)
signal.alarm(60)
try:
if (source is 'content_not_found'
or source.startswith('unhandled_content_type')
or source.endswith('failure')):
logger.info('No content read for %s.' % pmid)
return # No real content here.
if cont_path.endswith('.nxml') and source.startswith('pmc'):
new_fname = 'PMC%s%d.nxml' % (pmid, mp.current_process().pid)
os.rename(cont_path, new_fname)
try:
sp = sparser.process_nxml_file(
new_fname,
outbuf=outbuf,
cleanup=cleanup
)
finally:
if cleanup and os.path.exists(new_fname):
os.remove(new_fname)
elif cont_path.endswith('.txt'):
content_str = ''
with open(cont_path, 'r') as f:
content_str = f.read()
sp = sparser.process_text(
content_str,
outbuf=outbuf,
cleanup=cleanup
)
signal.alarm(0)
except Exception as e:
logger.error('Failed to process data for %s.' % pmid)
logger.exception(e)
signal.alarm(0)
return
if sp is None:
logger.error('Failed to run sparser on pmid: %s.' % pmid)
return
# At this point, we rewrite the PMID in the Evidence of Sparser
# Statements according to the actual PMID that was read.
sp.set_statements_pmid(pmid)
s3_client.put_reader_output('sparser', sp.json_stmts, pmid,
sparser_version, source)
return sp.statements | [
"def",
"read_pmid",
"(",
"pmid",
",",
"source",
",",
"cont_path",
",",
"sparser_version",
",",
"outbuf",
"=",
"None",
",",
"cleanup",
"=",
"True",
")",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGALRM",
",",
"_timeout_handler",
")",
"signal",
".",
"alarm",
"(",
"60",
")",
"try",
":",
"if",
"(",
"source",
"is",
"'content_not_found'",
"or",
"source",
".",
"startswith",
"(",
"'unhandled_content_type'",
")",
"or",
"source",
".",
"endswith",
"(",
"'failure'",
")",
")",
":",
"logger",
".",
"info",
"(",
"'No content read for %s.'",
"%",
"pmid",
")",
"return",
"# No real content here.",
"if",
"cont_path",
".",
"endswith",
"(",
"'.nxml'",
")",
"and",
"source",
".",
"startswith",
"(",
"'pmc'",
")",
":",
"new_fname",
"=",
"'PMC%s%d.nxml'",
"%",
"(",
"pmid",
",",
"mp",
".",
"current_process",
"(",
")",
".",
"pid",
")",
"os",
".",
"rename",
"(",
"cont_path",
",",
"new_fname",
")",
"try",
":",
"sp",
"=",
"sparser",
".",
"process_nxml_file",
"(",
"new_fname",
",",
"outbuf",
"=",
"outbuf",
",",
"cleanup",
"=",
"cleanup",
")",
"finally",
":",
"if",
"cleanup",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"new_fname",
")",
":",
"os",
".",
"remove",
"(",
"new_fname",
")",
"elif",
"cont_path",
".",
"endswith",
"(",
"'.txt'",
")",
":",
"content_str",
"=",
"''",
"with",
"open",
"(",
"cont_path",
",",
"'r'",
")",
"as",
"f",
":",
"content_str",
"=",
"f",
".",
"read",
"(",
")",
"sp",
"=",
"sparser",
".",
"process_text",
"(",
"content_str",
",",
"outbuf",
"=",
"outbuf",
",",
"cleanup",
"=",
"cleanup",
")",
"signal",
".",
"alarm",
"(",
"0",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Failed to process data for %s.'",
"%",
"pmid",
")",
"logger",
".",
"exception",
"(",
"e",
")",
"signal",
".",
"alarm",
"(",
"0",
")",
"return",
"if",
"sp",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'Failed to run sparser on pmid: %s.'",
"%",
"pmid",
")",
"return",
"# At this point, we rewrite the PMID in the Evidence of Sparser",
"# Statements according to the actual PMID that was read.",
"sp",
".",
"set_statements_pmid",
"(",
"pmid",
")",
"s3_client",
".",
"put_reader_output",
"(",
"'sparser'",
",",
"sp",
".",
"json_stmts",
",",
"pmid",
",",
"sparser_version",
",",
"source",
")",
"return",
"sp",
".",
"statements"
]
| Run sparser on a single pmid. | [
"Run",
"sparser",
"on",
"a",
"single",
"pmid",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/pmid_reading/read_pmids.py#L353-L402 | train |
sorgerlab/indra | indra/tools/reading/pmid_reading/read_pmids.py | get_stmts | def get_stmts(pmids_unread, cleanup=True, sparser_version=None):
"Run sparser on the pmids in pmids_unread."
if sparser_version is None:
sparser_version = sparser.get_version()
stmts = {}
now = datetime.now()
outbuf_fname = 'sparser_%s_%s.log' % (
now.strftime('%Y%m%d-%H%M%S'),
mp.current_process().pid,
)
outbuf = open(outbuf_fname, 'wb')
try:
for pmid, result in pmids_unread.items():
logger.info('Reading %s' % pmid)
source = result['content_source']
cont_path = result['content_path']
outbuf.write(('\nReading pmid %s from %s located at %s.\n' % (
pmid,
source,
cont_path
)).encode('utf-8'))
outbuf.flush()
some_stmts = read_pmid(pmid, source, cont_path, sparser_version,
outbuf, cleanup)
if some_stmts is not None:
stmts[pmid] = some_stmts
else:
continue # We didn't get any new statements.
except KeyboardInterrupt as e:
logger.exception(e)
logger.info('Caught keyboard interrupt...stopping. \n'
'Results so far will be pickled unless '
'Keyboard interrupt is hit again.')
finally:
outbuf.close()
print("Sparser logs may be found in %s" % outbuf_fname)
return stmts | python | def get_stmts(pmids_unread, cleanup=True, sparser_version=None):
"Run sparser on the pmids in pmids_unread."
if sparser_version is None:
sparser_version = sparser.get_version()
stmts = {}
now = datetime.now()
outbuf_fname = 'sparser_%s_%s.log' % (
now.strftime('%Y%m%d-%H%M%S'),
mp.current_process().pid,
)
outbuf = open(outbuf_fname, 'wb')
try:
for pmid, result in pmids_unread.items():
logger.info('Reading %s' % pmid)
source = result['content_source']
cont_path = result['content_path']
outbuf.write(('\nReading pmid %s from %s located at %s.\n' % (
pmid,
source,
cont_path
)).encode('utf-8'))
outbuf.flush()
some_stmts = read_pmid(pmid, source, cont_path, sparser_version,
outbuf, cleanup)
if some_stmts is not None:
stmts[pmid] = some_stmts
else:
continue # We didn't get any new statements.
except KeyboardInterrupt as e:
logger.exception(e)
logger.info('Caught keyboard interrupt...stopping. \n'
'Results so far will be pickled unless '
'Keyboard interrupt is hit again.')
finally:
outbuf.close()
print("Sparser logs may be found in %s" % outbuf_fname)
return stmts | [
"def",
"get_stmts",
"(",
"pmids_unread",
",",
"cleanup",
"=",
"True",
",",
"sparser_version",
"=",
"None",
")",
":",
"if",
"sparser_version",
"is",
"None",
":",
"sparser_version",
"=",
"sparser",
".",
"get_version",
"(",
")",
"stmts",
"=",
"{",
"}",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"outbuf_fname",
"=",
"'sparser_%s_%s.log'",
"%",
"(",
"now",
".",
"strftime",
"(",
"'%Y%m%d-%H%M%S'",
")",
",",
"mp",
".",
"current_process",
"(",
")",
".",
"pid",
",",
")",
"outbuf",
"=",
"open",
"(",
"outbuf_fname",
",",
"'wb'",
")",
"try",
":",
"for",
"pmid",
",",
"result",
"in",
"pmids_unread",
".",
"items",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'Reading %s'",
"%",
"pmid",
")",
"source",
"=",
"result",
"[",
"'content_source'",
"]",
"cont_path",
"=",
"result",
"[",
"'content_path'",
"]",
"outbuf",
".",
"write",
"(",
"(",
"'\\nReading pmid %s from %s located at %s.\\n'",
"%",
"(",
"pmid",
",",
"source",
",",
"cont_path",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"outbuf",
".",
"flush",
"(",
")",
"some_stmts",
"=",
"read_pmid",
"(",
"pmid",
",",
"source",
",",
"cont_path",
",",
"sparser_version",
",",
"outbuf",
",",
"cleanup",
")",
"if",
"some_stmts",
"is",
"not",
"None",
":",
"stmts",
"[",
"pmid",
"]",
"=",
"some_stmts",
"else",
":",
"continue",
"# We didn't get any new statements.",
"except",
"KeyboardInterrupt",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"logger",
".",
"info",
"(",
"'Caught keyboard interrupt...stopping. \\n'",
"'Results so far will be pickled unless '",
"'Keyboard interupt is hit again.'",
")",
"finally",
":",
"outbuf",
".",
"close",
"(",
")",
"print",
"(",
"\"Sparser logs may be found in %s\"",
"%",
"outbuf_fname",
")",
"return",
"stmts"
]
| Run sparser on the pmids in pmids_unread. | [
"Run",
"sparser",
"on",
"the",
"pmids",
"in",
"pmids_unread",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/pmid_reading/read_pmids.py#L405-L441 | train |
sorgerlab/indra | indra/tools/reading/pmid_reading/read_pmids.py | run_sparser | def run_sparser(pmid_list, tmp_dir, num_cores, start_index, end_index,
force_read, force_fulltext, cleanup=True, verbose=True):
'Run the sparser reader on the pmids in pmid_list.'
reader_version = sparser.get_version()
_, _, _, pmids_read, pmids_unread, _ =\
get_content_to_read(
pmid_list, start_index, end_index, tmp_dir, num_cores,
force_fulltext, force_read, 'sparser', reader_version
)
logger.info('Adjusting num cores to length of pmid_list.')
num_cores = min(len(pmid_list), num_cores)
logger.info('Adjusted...')
if num_cores is 1:
stmts = get_stmts(pmids_unread, cleanup=cleanup)
stmts.update({pmid: get_stmts_from_cache(pmid)[pmid]
for pmid in pmids_read.keys()})
elif num_cores > 1:
logger.info("Starting a pool with %d cores." % num_cores)
pool = mp.Pool(num_cores)
pmids_to_read = list(pmids_unread.keys())
N = len(pmids_unread)
dn = int(N/num_cores)
logger.info("Breaking pmids into batches.")
batches = []
for i in range(num_cores):
batches.append({
k: pmids_unread[k]
for k in pmids_to_read[i*dn:min((i+1)*dn, N)]
})
get_stmts_func = functools.partial(
get_stmts,
cleanup=cleanup,
sparser_version=reader_version
)
logger.info("Mapping get_stmts onto pool.")
unread_res = pool.map(get_stmts_func, batches)
logger.info('len(unread_res)=%d' % len(unread_res))
read_res = pool.map(get_stmts_from_cache, pmids_read.keys())
logger.info('len(read_res)=%d' % len(read_res))
pool.close()
logger.info('Multiprocessing pool closed.')
pool.join()
logger.info('Multiprocessing pool joined.')
stmts = {
pmid: stmt_list for res_dict in unread_res + read_res
for pmid, stmt_list in res_dict.items()
}
logger.info('len(stmts)=%d' % len(stmts))
return (stmts, pmids_unread) | python | def run_sparser(pmid_list, tmp_dir, num_cores, start_index, end_index,
force_read, force_fulltext, cleanup=True, verbose=True):
'Run the sparser reader on the pmids in pmid_list.'
reader_version = sparser.get_version()
_, _, _, pmids_read, pmids_unread, _ =\
get_content_to_read(
pmid_list, start_index, end_index, tmp_dir, num_cores,
force_fulltext, force_read, 'sparser', reader_version
)
logger.info('Adjusting num cores to length of pmid_list.')
num_cores = min(len(pmid_list), num_cores)
logger.info('Adjusted...')
if num_cores is 1:
stmts = get_stmts(pmids_unread, cleanup=cleanup)
stmts.update({pmid: get_stmts_from_cache(pmid)[pmid]
for pmid in pmids_read.keys()})
elif num_cores > 1:
logger.info("Starting a pool with %d cores." % num_cores)
pool = mp.Pool(num_cores)
pmids_to_read = list(pmids_unread.keys())
N = len(pmids_unread)
dn = int(N/num_cores)
logger.info("Breaking pmids into batches.")
batches = []
for i in range(num_cores):
batches.append({
k: pmids_unread[k]
for k in pmids_to_read[i*dn:min((i+1)*dn, N)]
})
get_stmts_func = functools.partial(
get_stmts,
cleanup=cleanup,
sparser_version=reader_version
)
logger.info("Mapping get_stmts onto pool.")
unread_res = pool.map(get_stmts_func, batches)
logger.info('len(unread_res)=%d' % len(unread_res))
read_res = pool.map(get_stmts_from_cache, pmids_read.keys())
logger.info('len(read_res)=%d' % len(read_res))
pool.close()
logger.info('Multiprocessing pool closed.')
pool.join()
logger.info('Multiprocessing pool joined.')
stmts = {
pmid: stmt_list for res_dict in unread_res + read_res
for pmid, stmt_list in res_dict.items()
}
logger.info('len(stmts)=%d' % len(stmts))
return (stmts, pmids_unread) | [
"def",
"run_sparser",
"(",
"pmid_list",
",",
"tmp_dir",
",",
"num_cores",
",",
"start_index",
",",
"end_index",
",",
"force_read",
",",
"force_fulltext",
",",
"cleanup",
"=",
"True",
",",
"verbose",
"=",
"True",
")",
":",
"reader_version",
"=",
"sparser",
".",
"get_version",
"(",
")",
"_",
",",
"_",
",",
"_",
",",
"pmids_read",
",",
"pmids_unread",
",",
"_",
"=",
"get_content_to_read",
"(",
"pmid_list",
",",
"start_index",
",",
"end_index",
",",
"tmp_dir",
",",
"num_cores",
",",
"force_fulltext",
",",
"force_read",
",",
"'sparser'",
",",
"reader_version",
")",
"logger",
".",
"info",
"(",
"'Adjusting num cores to length of pmid_list.'",
")",
"num_cores",
"=",
"min",
"(",
"len",
"(",
"pmid_list",
")",
",",
"num_cores",
")",
"logger",
".",
"info",
"(",
"'Adjusted...'",
")",
"if",
"num_cores",
"is",
"1",
":",
"stmts",
"=",
"get_stmts",
"(",
"pmids_unread",
",",
"cleanup",
"=",
"cleanup",
")",
"stmts",
".",
"update",
"(",
"{",
"pmid",
":",
"get_stmts_from_cache",
"(",
"pmid",
")",
"[",
"pmid",
"]",
"for",
"pmid",
"in",
"pmids_read",
".",
"keys",
"(",
")",
"}",
")",
"elif",
"num_cores",
">",
"1",
":",
"logger",
".",
"info",
"(",
"\"Starting a pool with %d cores.\"",
"%",
"num_cores",
")",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"num_cores",
")",
"pmids_to_read",
"=",
"list",
"(",
"pmids_unread",
".",
"keys",
"(",
")",
")",
"N",
"=",
"len",
"(",
"pmids_unread",
")",
"dn",
"=",
"int",
"(",
"N",
"/",
"num_cores",
")",
"logger",
".",
"info",
"(",
"\"Breaking pmids into batches.\"",
")",
"batches",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"num_cores",
")",
":",
"batches",
".",
"append",
"(",
"{",
"k",
":",
"pmids_unread",
"[",
"k",
"]",
"for",
"k",
"in",
"pmids_to_read",
"[",
"i",
"*",
"dn",
":",
"min",
"(",
"(",
"i",
"+",
"1",
")",
"*",
"dn",
",",
"N",
")",
"]",
"}",
")",
"get_stmts_func",
"=",
"functools",
".",
"partial",
"(",
"get_stmts",
",",
"cleanup",
"=",
"cleanup",
",",
"sparser_version",
"=",
"reader_version",
")",
"logger",
".",
"info",
"(",
"\"Mapping get_stmts onto pool.\"",
")",
"unread_res",
"=",
"pool",
".",
"map",
"(",
"get_stmts_func",
",",
"batches",
")",
"logger",
".",
"info",
"(",
"'len(unread_res)=%d'",
"%",
"len",
"(",
"unread_res",
")",
")",
"read_res",
"=",
"pool",
".",
"map",
"(",
"get_stmts_from_cache",
",",
"pmids_read",
".",
"keys",
"(",
")",
")",
"logger",
".",
"info",
"(",
"'len(read_res)=%d'",
"%",
"len",
"(",
"read_res",
")",
")",
"pool",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'Multiprocessing pool closed.'",
")",
"pool",
".",
"join",
"(",
")",
"logger",
".",
"info",
"(",
"'Multiprocessing pool joined.'",
")",
"stmts",
"=",
"{",
"pmid",
":",
"stmt_list",
"for",
"res_dict",
"in",
"unread_res",
"+",
"read_res",
"for",
"pmid",
",",
"stmt_list",
"in",
"res_dict",
".",
"items",
"(",
")",
"}",
"logger",
".",
"info",
"(",
"'len(stmts)=%d'",
"%",
"len",
"(",
"stmts",
")",
")",
"return",
"(",
"stmts",
",",
"pmids_unread",
")"
]
| Run the sparser reader on the pmids in pmid_list. | [
"Run",
"the",
"sparser",
"reader",
"on",
"the",
"pmids",
"in",
"pmid_list",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/pmid_reading/read_pmids.py#L452-L502 | train |
sorgerlab/indra | indra/statements/statements.py | get_all_descendants | def get_all_descendants(parent):
"""Get all the descendants of a parent class, recursively."""
children = parent.__subclasses__()
descendants = children[:]
for child in children:
descendants += get_all_descendants(child)
return descendants | python | def get_all_descendants(parent):
"""Get all the descendants of a parent class, recursively."""
children = parent.__subclasses__()
descendants = children[:]
for child in children:
descendants += get_all_descendants(child)
return descendants | [
"def",
"get_all_descendants",
"(",
"parent",
")",
":",
"children",
"=",
"parent",
".",
"__subclasses__",
"(",
")",
"descendants",
"=",
"children",
"[",
":",
"]",
"for",
"child",
"in",
"children",
":",
"descendants",
"+=",
"get_all_descendants",
"(",
"child",
")",
"return",
"descendants"
]
| Get all the descendants of a parent class, recursively. | [
"Get",
"all",
"the",
"descendants",
"of",
"a",
"parent",
"class",
"recursively",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2454-L2460 | train |
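A self-contained sketch of get_all_descendants using a toy class hierarchy. The toy classes A, B, C and D are made up purely for illustration; only the function itself comes from the record above, imported via the module path shown in the record.

# Toy hierarchy to illustrate get_all_descendants; the classes below are illustrative only.
from indra.statements.statements import get_all_descendants

class A: pass
class B(A): pass
class C(A): pass
class D(B): pass

print(get_all_descendants(A))   # [B, C, D]: direct children first, then their descendants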
sorgerlab/indra | indra/statements/statements.py | get_type_hierarchy | def get_type_hierarchy(s):
"""Get the sequence of parents from `s` to Statement.
Parameters
----------
s : a class or instance of a child of Statement
For example the statement `Phosphorylation(MEK(), ERK())` or just the
class `Phosphorylation`.
Returns
-------
parent_list : list[types]
A list of the types leading up to Statement.
Examples
--------
>> s = Phosphorylation(MAPK1(), Elk1())
>> get_type_hierarchy(s)
[Phosphorylation, AddModification, Modification, Statement]
>> get_type_hierarchy(AddModification)
[AddModification, Modification, Statement]
"""
tp = type(s) if not isinstance(s, type) else s
p_list = [tp]
for p in tp.__bases__:
if p is not Statement:
p_list.extend(get_type_hierarchy(p))
else:
p_list.append(p)
return p_list | python | def get_type_hierarchy(s):
"""Get the sequence of parents from `s` to Statement.
Parameters
----------
s : a class or instance of a child of Statement
For example the statement `Phosphorylation(MEK(), ERK())` or just the
class `Phosphorylation`.
Returns
-------
parent_list : list[types]
A list of the types leading up to Statement.
Examples
--------
>> s = Phosphorylation(MAPK1(), Elk1())
>> get_type_hierarchy(s)
[Phosphorylation, AddModification, Modification, Statement]
>> get_type_hierarchy(AddModification)
[AddModification, Modification, Statement]
"""
tp = type(s) if not isinstance(s, type) else s
p_list = [tp]
for p in tp.__bases__:
if p is not Statement:
p_list.extend(get_type_hierarchy(p))
else:
p_list.append(p)
return p_list | [
"def",
"get_type_hierarchy",
"(",
"s",
")",
":",
"tp",
"=",
"type",
"(",
"s",
")",
"if",
"not",
"isinstance",
"(",
"s",
",",
"type",
")",
"else",
"s",
"p_list",
"=",
"[",
"tp",
"]",
"for",
"p",
"in",
"tp",
".",
"__bases__",
":",
"if",
"p",
"is",
"not",
"Statement",
":",
"p_list",
".",
"extend",
"(",
"get_type_hierarchy",
"(",
"p",
")",
")",
"else",
":",
"p_list",
".",
"append",
"(",
"p",
")",
"return",
"p_list"
]
| Get the sequence of parents from `s` to Statement.
Parameters
----------
s : a class or instance of a child of Statement
For example the statement `Phosphorylation(MEK(), ERK())` or just the
class `Phosphorylation`.
Returns
-------
parent_list : list[types]
A list of the types leading up to Statement.
Examples
--------
>> s = Phosphorylation(MAPK1(), Elk1())
>> get_type_hierarchy(s)
[Phosphorylation, AddModification, Modification, Statement]
>> get_type_hierarchy(AddModification)
[AddModification, Modification, Statement] | [
"Get",
"the",
"sequence",
"of",
"parents",
"from",
"s",
"to",
"Statement",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2465-L2494 | train |
sorgerlab/indra | indra/statements/statements.py | get_statement_by_name | def get_statement_by_name(stmt_name):
"""Get a statement class given the name of the statement class."""
stmt_classes = get_all_descendants(Statement)
for stmt_class in stmt_classes:
if stmt_class.__name__.lower() == stmt_name.lower():
return stmt_class
raise NotAStatementName('\"%s\" is not recognized as a statement type!'
% stmt_name) | python | def get_statement_by_name(stmt_name):
"""Get a statement class given the name of the statement class."""
stmt_classes = get_all_descendants(Statement)
for stmt_class in stmt_classes:
if stmt_class.__name__.lower() == stmt_name.lower():
return stmt_class
raise NotAStatementName('\"%s\" is not recognized as a statement type!'
% stmt_name) | [
"def",
"get_statement_by_name",
"(",
"stmt_name",
")",
":",
"stmt_classes",
"=",
"get_all_descendants",
"(",
"Statement",
")",
"for",
"stmt_class",
"in",
"stmt_classes",
":",
"if",
"stmt_class",
".",
"__name__",
".",
"lower",
"(",
")",
"==",
"stmt_name",
".",
"lower",
"(",
")",
":",
"return",
"stmt_class",
"raise",
"NotAStatementName",
"(",
"'\\\"%s\\\" is not recognized as a statement type!'",
"%",
"stmt_name",
")"
]
| Get a statement class given the name of the statement class. | [
"Get",
"a",
"statement",
"class",
"given",
"the",
"name",
"of",
"the",
"statement",
"class",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2501-L2508 | train |
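A short usage sketch for the name-based lookup above, assuming get_statement_by_name is re-exported at the indra.statements package level (if not, it can be imported from indra.statements.statements, the module shown in the record). The lookup is case-insensitive and returns the class itself, not an instance:

from indra.statements import get_statement_by_name

stmt_cls = get_statement_by_name('phosphorylation')
print(stmt_cls.__name__)  # expected: Phosphorylation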
sorgerlab/indra | indra/statements/statements.py | get_unresolved_support_uuids | def get_unresolved_support_uuids(stmts):
"""Get uuids unresolved in support from stmts from stmts_from_json."""
return {s.uuid for stmt in stmts for s in stmt.supports + stmt.supported_by
if isinstance(s, Unresolved)} | python | def get_unresolved_support_uuids(stmts):
"""Get uuids unresolved in support from stmts from stmts_from_json."""
return {s.uuid for stmt in stmts for s in stmt.supports + stmt.supported_by
if isinstance(s, Unresolved)} | [
"def",
"get_unresolved_support_uuids",
"(",
"stmts",
")",
":",
"return",
"{",
"s",
".",
"uuid",
"for",
"stmt",
"in",
"stmts",
"for",
"s",
"in",
"stmt",
".",
"supports",
"+",
"stmt",
".",
"supported_by",
"if",
"isinstance",
"(",
"s",
",",
"Unresolved",
")",
"}"
]
| Get uuids unresolved in support from stmts from stmts_from_json. | [
"Get",
"uuids",
"unresolved",
"in",
"support",
"from",
"stmts",
"from",
"stmts_from_json",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2516-L2519 | train |
sorgerlab/indra | indra/statements/statements.py | stmt_type | def stmt_type(obj, mk=True):
"""Return standardized, backwards compatible object type String.
This is a temporary solution to make sure type comparisons and
matches keys of Statements and related classes are backwards
compatible.
"""
if isinstance(obj, Statement) and mk:
return type(obj)
else:
return type(obj).__name__ | python | def stmt_type(obj, mk=True):
"""Return standardized, backwards compatible object type String.
This is a temporary solution to make sure type comparisons and
matches keys of Statements and related classes are backwards
compatible.
"""
if isinstance(obj, Statement) and mk:
return type(obj)
else:
return type(obj).__name__ | [
"def",
"stmt_type",
"(",
"obj",
",",
"mk",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Statement",
")",
"and",
"mk",
":",
"return",
"type",
"(",
"obj",
")",
"else",
":",
"return",
"type",
"(",
"obj",
")",
".",
"__name__"
]
| Return standardized, backwards compatible object type String.
This is a temporary solution to make sure type comparisons and
matches keys of Statements and related classes are backwards
compatible. | [
"Return",
"standardized",
"backwards",
"compatible",
"object",
"type",
"String",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L2522-L2532 | train |
sorgerlab/indra | indra/statements/statements.py | Statement.get_hash | def get_hash(self, shallow=True, refresh=False):
"""Get a hash for this Statement.
There are two types of hash, "shallow" and "full". A shallow hash is
as unique as the information carried by the statement, i.e. it is a hash
of the `matches_key`. This means that differences in source, evidence,
and so on are not included. As such, it is a shorter hash (14 nibbles).
The odds of a collision among all the statements we expect to encounter
(well under 10^8) is ~10^-9 (1 in a billion). Checks for collisions can
be done by using the matches keys.
A full hash includes, in addition to the matches key, information from
the evidence of the statement. These hashes will be equal if the two
Statements came from the same sentences, extracted by the same reader,
from the same source. These hashes are correspondingly longer (16
nibbles). The odds of a collision for an expected less than 10^10
extractions is ~10^-9 (1 in a billion).
Note that a hash of the Python object will also include the `uuid`, so
it will always be unique for every object.
Parameters
----------
shallow : bool
Choose between the shallow and full hashes described above. Default
is true (e.g. a shallow hash).
refresh : bool
Used to get a new copy of the hash. Default is false, so the hash,
if it has been already created, will be read from the attribute.
This is primarily used for speed testing.
Returns
-------
hash : int
A long integer hash.
"""
if shallow:
if not hasattr(self, '_shallow_hash') or self._shallow_hash is None\
or refresh:
self._shallow_hash = make_hash(self.matches_key(), 14)
ret = self._shallow_hash
else:
if not hasattr(self, '_full_hash') or self._full_hash is None \
or refresh:
ev_mk_list = sorted([ev.matches_key() for ev in self.evidence])
self._full_hash = \
make_hash(self.matches_key() + str(ev_mk_list), 16)
ret = self._full_hash
return ret | python | def get_hash(self, shallow=True, refresh=False):
"""Get a hash for this Statement.
There are two types of hash, "shallow" and "full". A shallow hash is
as unique as the information carried by the statement, i.e. it is a hash
of the `matches_key`. This means that differences in source, evidence,
and so on are not included. As such, it is a shorter hash (14 nibbles).
The odds of a collision among all the statements we expect to encounter
(well under 10^8) is ~10^-9 (1 in a billion). Checks for collisions can
be done by using the matches keys.
A full hash includes, in addition to the matches key, information from
the evidence of the statement. These hashes will be equal if the two
Statements came from the same sentences, extracted by the same reader,
from the same source. These hashes are correspondingly longer (16
nibbles). The odds of a collision for an expected less than 10^10
extractions is ~10^-9 (1 in a billion).
Note that a hash of the Python object will also include the `uuid`, so
it will always be unique for every object.
Parameters
----------
shallow : bool
Choose between the shallow and full hashes described above. Default
is true (e.g. a shallow hash).
refresh : bool
Used to get a new copy of the hash. Default is false, so the hash,
if it has been already created, will be read from the attribute.
This is primarily used for speed testing.
Returns
-------
hash : int
A long integer hash.
"""
if shallow:
if not hasattr(self, '_shallow_hash') or self._shallow_hash is None\
or refresh:
self._shallow_hash = make_hash(self.matches_key(), 14)
ret = self._shallow_hash
else:
if not hasattr(self, '_full_hash') or self._full_hash is None \
or refresh:
ev_mk_list = sorted([ev.matches_key() for ev in self.evidence])
self._full_hash = \
make_hash(self.matches_key() + str(ev_mk_list), 16)
ret = self._full_hash
return ret | [
"def",
"get_hash",
"(",
"self",
",",
"shallow",
"=",
"True",
",",
"refresh",
"=",
"False",
")",
":",
"if",
"shallow",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_shallow_hash'",
")",
"or",
"self",
".",
"_shallow_hash",
"is",
"None",
"or",
"refresh",
":",
"self",
".",
"_shallow_hash",
"=",
"make_hash",
"(",
"self",
".",
"matches_key",
"(",
")",
",",
"14",
")",
"ret",
"=",
"self",
".",
"_shallow_hash",
"else",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_full_hash'",
")",
"or",
"self",
".",
"_full_hash",
"is",
"None",
"or",
"refresh",
":",
"ev_mk_list",
"=",
"sorted",
"(",
"[",
"ev",
".",
"matches_key",
"(",
")",
"for",
"ev",
"in",
"self",
".",
"evidence",
"]",
")",
"self",
".",
"_full_hash",
"=",
"make_hash",
"(",
"self",
".",
"matches_key",
"(",
")",
"+",
"str",
"(",
"ev_mk_list",
")",
",",
"16",
")",
"ret",
"=",
"self",
".",
"_full_hash",
"return",
"ret"
]
| Get a hash for this Statement.
There are two types of hash, "shallow" and "full". A shallow hash is
as unique as the information carried by the statement, i.e. it is a hash
of the `matches_key`. This means that differences in source, evidence,
and so on are not included. As such, it is a shorter hash (14 nibbles).
The odds of a collision among all the statements we expect to encounter
(well under 10^8) is ~10^-9 (1 in a billion). Checks for collisions can
be done by using the matches keys.
A full hash includes, in addition to the matches key, information from
the evidence of the statement. These hashes will be equal if the two
Statements came from the same sentences, extracted by the same reader,
from the same source. These hashes are correspondingly longer (16
nibbles). The odds of a collision for an expected less than 10^10
extractions is ~10^-9 (1 in a billion).
Note that a hash of the Python object will also include the `uuid`, so
it will always be unique for every object.
Parameters
----------
shallow : bool
Choose between the shallow and full hashes described above. Default
is true (e.g. a shallow hash).
refresh : bool
Used to get a new copy of the hash. Default is false, so the hash,
if it has been already created, will be read from the attribute.
This is primarily used for speed testing.
Returns
-------
hash : int
A long integer hash. | [
"Get",
"a",
"hash",
"for",
"this",
"Statement",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L269-L317 | train |
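A hedged usage sketch of the shallow/full hash distinction described above, assuming the standard Agent, Phosphorylation and Evidence constructors from indra.statements. The concrete hash values are not meaningful, but the two relationships in the asserts should hold:

from indra.statements import Agent, Phosphorylation, Evidence

ev = Evidence(source_api='reach', text='MAP2K1 phosphorylates MAPK1.')
s1 = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'), evidence=[ev])
s2 = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))  # same content, no evidence

# Shallow hashes depend only on the matches key, so they agree.
assert s1.get_hash(shallow=True) == s2.get_hash(shallow=True)
# Full hashes also fold in the evidence matches keys, so they differ here.
assert s1.get_hash(shallow=False) != s2.get_hash(shallow=False)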
sorgerlab/indra | indra/statements/statements.py | Statement._tag_evidence | def _tag_evidence(self):
"""Set all the Evidence stmt_tag to my deep matches-key hash."""
h = self.get_hash(shallow=False)
for ev in self.evidence:
ev.stmt_tag = h
return | python | def _tag_evidence(self):
"""Set all the Evidence stmt_tag to my deep matches-key hash."""
h = self.get_hash(shallow=False)
for ev in self.evidence:
ev.stmt_tag = h
return | [
"def",
"_tag_evidence",
"(",
"self",
")",
":",
"h",
"=",
"self",
".",
"get_hash",
"(",
"shallow",
"=",
"False",
")",
"for",
"ev",
"in",
"self",
".",
"evidence",
":",
"ev",
".",
"stmt_tag",
"=",
"h",
"return"
]
| Set all the Evidence stmt_tag to my deep matches-key hash. | [
"Set",
"all",
"the",
"Evidence",
"stmt_tag",
"to",
"my",
"deep",
"matches",
"-",
"key",
"hash",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L319-L324 | train |
sorgerlab/indra | indra/statements/statements.py | Statement.agent_list | def agent_list(self, deep_sorted=False):
"""Get the canonicallized agent list."""
ag_list = []
for ag_name in self._agent_order:
ag_attr = getattr(self, ag_name)
if isinstance(ag_attr, Concept) or ag_attr is None:
ag_list.append(ag_attr)
elif isinstance(ag_attr, list):
if not all([isinstance(ag, Concept) for ag in ag_attr]):
raise TypeError("Expected all elements of list to be Agent "
"and/or Concept, but got: %s"
% {type(ag) for ag in ag_attr})
if deep_sorted:
ag_attr = sorted_agents(ag_attr)
ag_list.extend(ag_attr)
else:
raise TypeError("Expected type Agent, Concept, or list, got "
"type %s." % type(ag_attr))
return ag_list | python | def agent_list(self, deep_sorted=False):
"""Get the canonicallized agent list."""
ag_list = []
for ag_name in self._agent_order:
ag_attr = getattr(self, ag_name)
if isinstance(ag_attr, Concept) or ag_attr is None:
ag_list.append(ag_attr)
elif isinstance(ag_attr, list):
if not all([isinstance(ag, Concept) for ag in ag_attr]):
raise TypeError("Expected all elements of list to be Agent "
"and/or Concept, but got: %s"
% {type(ag) for ag in ag_attr})
if deep_sorted:
ag_attr = sorted_agents(ag_attr)
ag_list.extend(ag_attr)
else:
raise TypeError("Expected type Agent, Concept, or list, got "
"type %s." % type(ag_attr))
return ag_list | [
"def",
"agent_list",
"(",
"self",
",",
"deep_sorted",
"=",
"False",
")",
":",
"ag_list",
"=",
"[",
"]",
"for",
"ag_name",
"in",
"self",
".",
"_agent_order",
":",
"ag_attr",
"=",
"getattr",
"(",
"self",
",",
"ag_name",
")",
"if",
"isinstance",
"(",
"ag_attr",
",",
"Concept",
")",
"or",
"ag_attr",
"is",
"None",
":",
"ag_list",
".",
"append",
"(",
"ag_attr",
")",
"elif",
"isinstance",
"(",
"ag_attr",
",",
"list",
")",
":",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"ag",
",",
"Concept",
")",
"for",
"ag",
"in",
"ag_attr",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected all elements of list to be Agent \"",
"\"and/or Concept, but got: %s\"",
"%",
"{",
"type",
"(",
"ag",
")",
"for",
"ag",
"in",
"ag_attr",
"}",
")",
"if",
"deep_sorted",
":",
"ag_attr",
"=",
"sorted_agents",
"(",
"ag_attr",
")",
"ag_list",
".",
"extend",
"(",
"ag_attr",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected type Agent, Concept, or list, got \"",
"\"type %s.\"",
"%",
"type",
"(",
"ag_attr",
")",
")",
"return",
"ag_list"
]
| Get the canonicalized agent list. | [
"Get",
"the",
"canonicallized",
"agent",
"list",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L336-L354 | train |
sorgerlab/indra | indra/statements/statements.py | Statement.to_json | def to_json(self, use_sbo=False):
"""Return serialized Statement as a JSON dict.
Parameters
----------
use_sbo : Optional[bool]
If True, SBO annotations are added to each applicable element of
the JSON. Default: False
Returns
-------
json_dict : dict
The JSON-serialized INDRA Statement.
"""
stmt_type = type(self).__name__
# Original comment: For backwards compatibility, could be removed later
all_stmts = [self] + self.supports + self.supported_by
for st in all_stmts:
if not hasattr(st, 'uuid'):
st.uuid = '%s' % uuid.uuid4()
##################
json_dict = _o(type=stmt_type)
json_dict['belief'] = self.belief
if self.evidence:
evidence = [ev.to_json() for ev in self.evidence]
json_dict['evidence'] = evidence
json_dict['id'] = '%s' % self.uuid
if self.supports:
json_dict['supports'] = \
['%s' % st.uuid for st in self.supports]
if self.supported_by:
json_dict['supported_by'] = \
['%s' % st.uuid for st in self.supported_by]
def get_sbo_term(cls):
sbo_term = stmt_sbo_map.get(cls.__name__.lower())
while not sbo_term:
cls = cls.__bases__[0]
sbo_term = stmt_sbo_map.get(cls.__name__.lower())
return sbo_term
if use_sbo:
sbo_term = get_sbo_term(self.__class__)
json_dict['sbo'] = \
'http://identifiers.org/sbo/SBO:%s' % sbo_term
return json_dict | python | def to_json(self, use_sbo=False):
"""Return serialized Statement as a JSON dict.
Parameters
----------
use_sbo : Optional[bool]
If True, SBO annotations are added to each applicable element of
the JSON. Default: False
Returns
-------
json_dict : dict
The JSON-serialized INDRA Statement.
"""
stmt_type = type(self).__name__
# Original comment: For backwards compatibility, could be removed later
all_stmts = [self] + self.supports + self.supported_by
for st in all_stmts:
if not hasattr(st, 'uuid'):
st.uuid = '%s' % uuid.uuid4()
##################
json_dict = _o(type=stmt_type)
json_dict['belief'] = self.belief
if self.evidence:
evidence = [ev.to_json() for ev in self.evidence]
json_dict['evidence'] = evidence
json_dict['id'] = '%s' % self.uuid
if self.supports:
json_dict['supports'] = \
['%s' % st.uuid for st in self.supports]
if self.supported_by:
json_dict['supported_by'] = \
['%s' % st.uuid for st in self.supported_by]
def get_sbo_term(cls):
sbo_term = stmt_sbo_map.get(cls.__name__.lower())
while not sbo_term:
cls = cls.__bases__[0]
sbo_term = stmt_sbo_map.get(cls.__name__.lower())
return sbo_term
if use_sbo:
sbo_term = get_sbo_term(self.__class__)
json_dict['sbo'] = \
'http://identifiers.org/sbo/SBO:%s' % sbo_term
return json_dict | [
"def",
"to_json",
"(",
"self",
",",
"use_sbo",
"=",
"False",
")",
":",
"stmt_type",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
"# Original comment: For backwards compatibility, could be removed later",
"all_stmts",
"=",
"[",
"self",
"]",
"+",
"self",
".",
"supports",
"+",
"self",
".",
"supported_by",
"for",
"st",
"in",
"all_stmts",
":",
"if",
"not",
"hasattr",
"(",
"st",
",",
"'uuid'",
")",
":",
"st",
".",
"uuid",
"=",
"'%s'",
"%",
"uuid",
".",
"uuid4",
"(",
")",
"##################",
"json_dict",
"=",
"_o",
"(",
"type",
"=",
"stmt_type",
")",
"json_dict",
"[",
"'belief'",
"]",
"=",
"self",
".",
"belief",
"if",
"self",
".",
"evidence",
":",
"evidence",
"=",
"[",
"ev",
".",
"to_json",
"(",
")",
"for",
"ev",
"in",
"self",
".",
"evidence",
"]",
"json_dict",
"[",
"'evidence'",
"]",
"=",
"evidence",
"json_dict",
"[",
"'id'",
"]",
"=",
"'%s'",
"%",
"self",
".",
"uuid",
"if",
"self",
".",
"supports",
":",
"json_dict",
"[",
"'supports'",
"]",
"=",
"[",
"'%s'",
"%",
"st",
".",
"uuid",
"for",
"st",
"in",
"self",
".",
"supports",
"]",
"if",
"self",
".",
"supported_by",
":",
"json_dict",
"[",
"'supported_by'",
"]",
"=",
"[",
"'%s'",
"%",
"st",
".",
"uuid",
"for",
"st",
"in",
"self",
".",
"supported_by",
"]",
"def",
"get_sbo_term",
"(",
"cls",
")",
":",
"sbo_term",
"=",
"stmt_sbo_map",
".",
"get",
"(",
"cls",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"while",
"not",
"sbo_term",
":",
"cls",
"=",
"cls",
".",
"__bases__",
"[",
"0",
"]",
"sbo_term",
"=",
"stmt_sbo_map",
".",
"get",
"(",
"cls",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"return",
"sbo_term",
"if",
"use_sbo",
":",
"sbo_term",
"=",
"get_sbo_term",
"(",
"self",
".",
"__class__",
")",
"json_dict",
"[",
"'sbo'",
"]",
"=",
"'http://identifiers.org/sbo/SBO:%s'",
"%",
"sbo_term",
"return",
"json_dict"
]
| Return serialized Statement as a JSON dict.
Parameters
----------
use_sbo : Optional[bool]
If True, SBO annotations are added to each applicable element of
the JSON. Default: False
Returns
-------
json_dict : dict
The JSON-serialized INDRA Statement. | [
"Return",
"serialized",
"Statement",
"as",
"a",
"JSON",
"dict",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L421-L466 | train |
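A sketch of a JSON round trip built on to_json, assuming the companion stmts_to_json/stmts_from_json helpers exported by indra.statements behave as their names suggest:

import json
from indra.statements import Agent, Phosphorylation, stmts_to_json, stmts_from_json

stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))

# Serialize a list of Statements to JSON-compatible dicts and back.
js = stmts_to_json([stmt])
print(json.dumps(js, indent=1)[:200])

recovered = stmts_from_json(js)
assert recovered[0].matches(stmt)  # content survives the round trip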
sorgerlab/indra | indra/statements/statements.py | Statement.to_graph | def to_graph(self):
"""Return Statement as a networkx graph."""
def json_node(graph, element, prefix):
if not element:
return None
node_id = '|'.join(prefix)
if isinstance(element, list):
graph.add_node(node_id, label='')
# Enumerate children and add nodes and connect to anchor node
for i, sub_element in enumerate(element):
sub_id = json_node(graph, sub_element, prefix + ['%s' % i])
if sub_id:
graph.add_edge(node_id, sub_id, label='')
elif isinstance(element, dict):
graph.add_node(node_id, label='')
# Add node recursively for each element
# Connect to this node with edge label according to key
for k, v in element.items():
if k == 'id':
continue
elif k == 'name':
graph.node[node_id]['label'] = v
continue
elif k == 'type':
graph.node[node_id]['label'] = v
continue
sub_id = json_node(graph, v, prefix + ['%s' % k])
if sub_id:
graph.add_edge(node_id, sub_id, label=('%s' % k))
else:
if isinstance(element, basestring) and \
element.startswith('http'):
element = element.split('/')[-1]
graph.add_node(node_id, label=('%s' % str(element)))
return node_id
jd = self.to_json()
graph = networkx.DiGraph()
json_node(graph, jd, ['%s' % self.uuid])
return graph | python | def to_graph(self):
"""Return Statement as a networkx graph."""
def json_node(graph, element, prefix):
if not element:
return None
node_id = '|'.join(prefix)
if isinstance(element, list):
graph.add_node(node_id, label='')
# Enumerate children and add nodes and connect to anchor node
for i, sub_element in enumerate(element):
sub_id = json_node(graph, sub_element, prefix + ['%s' % i])
if sub_id:
graph.add_edge(node_id, sub_id, label='')
elif isinstance(element, dict):
graph.add_node(node_id, label='')
# Add node recursively for each element
# Connect to this node with edge label according to key
for k, v in element.items():
if k == 'id':
continue
elif k == 'name':
graph.node[node_id]['label'] = v
continue
elif k == 'type':
graph.node[node_id]['label'] = v
continue
sub_id = json_node(graph, v, prefix + ['%s' % k])
if sub_id:
graph.add_edge(node_id, sub_id, label=('%s' % k))
else:
if isinstance(element, basestring) and \
element.startswith('http'):
element = element.split('/')[-1]
graph.add_node(node_id, label=('%s' % str(element)))
return node_id
jd = self.to_json()
graph = networkx.DiGraph()
json_node(graph, jd, ['%s' % self.uuid])
return graph | [
"def",
"to_graph",
"(",
"self",
")",
":",
"def",
"json_node",
"(",
"graph",
",",
"element",
",",
"prefix",
")",
":",
"if",
"not",
"element",
":",
"return",
"None",
"node_id",
"=",
"'|'",
".",
"join",
"(",
"prefix",
")",
"if",
"isinstance",
"(",
"element",
",",
"list",
")",
":",
"graph",
".",
"add_node",
"(",
"node_id",
",",
"label",
"=",
"''",
")",
"# Enumerate children and add nodes and connect to anchor node",
"for",
"i",
",",
"sub_element",
"in",
"enumerate",
"(",
"element",
")",
":",
"sub_id",
"=",
"json_node",
"(",
"graph",
",",
"sub_element",
",",
"prefix",
"+",
"[",
"'%s'",
"%",
"i",
"]",
")",
"if",
"sub_id",
":",
"graph",
".",
"add_edge",
"(",
"node_id",
",",
"sub_id",
",",
"label",
"=",
"''",
")",
"elif",
"isinstance",
"(",
"element",
",",
"dict",
")",
":",
"graph",
".",
"add_node",
"(",
"node_id",
",",
"label",
"=",
"''",
")",
"# Add node recursively for each element",
"# Connect to this node with edge label according to key",
"for",
"k",
",",
"v",
"in",
"element",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'id'",
":",
"continue",
"elif",
"k",
"==",
"'name'",
":",
"graph",
".",
"node",
"[",
"node_id",
"]",
"[",
"'label'",
"]",
"=",
"v",
"continue",
"elif",
"k",
"==",
"'type'",
":",
"graph",
".",
"node",
"[",
"node_id",
"]",
"[",
"'label'",
"]",
"=",
"v",
"continue",
"sub_id",
"=",
"json_node",
"(",
"graph",
",",
"v",
",",
"prefix",
"+",
"[",
"'%s'",
"%",
"k",
"]",
")",
"if",
"sub_id",
":",
"graph",
".",
"add_edge",
"(",
"node_id",
",",
"sub_id",
",",
"label",
"=",
"(",
"'%s'",
"%",
"k",
")",
")",
"else",
":",
"if",
"isinstance",
"(",
"element",
",",
"basestring",
")",
"and",
"element",
".",
"startswith",
"(",
"'http'",
")",
":",
"element",
"=",
"element",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"graph",
".",
"add_node",
"(",
"node_id",
",",
"label",
"=",
"(",
"'%s'",
"%",
"str",
"(",
"element",
")",
")",
")",
"return",
"node_id",
"jd",
"=",
"self",
".",
"to_json",
"(",
")",
"graph",
"=",
"networkx",
".",
"DiGraph",
"(",
")",
"json_node",
"(",
"graph",
",",
"jd",
",",
"[",
"'%s'",
"%",
"self",
".",
"uuid",
"]",
")",
"return",
"graph"
]
| Return Statement as a networkx graph. | [
"Return",
"Statement",
"as",
"a",
"networkx",
"graph",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L484-L523 | train |
sorgerlab/indra | indra/statements/statements.py | Statement.make_generic_copy | def make_generic_copy(self, deeply=False):
"""Make a new matching Statement with no provenance.
All agents and other attributes besides evidence, belief, supports, and
supported_by will be copied over, and a new uuid will be assigned.
Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`.
If `deeply` is set to True, all the attributes will be deep-copied,
which is comparatively slow. Otherwise, attributes of this statement
may be altered by changes to the new matching statement.
"""
if deeply:
kwargs = deepcopy(self.__dict__)
else:
kwargs = self.__dict__.copy()
for attr in ['evidence', 'belief', 'uuid', 'supports', 'supported_by',
'is_activation']:
kwargs.pop(attr, None)
for attr in ['_full_hash', '_shallow_hash']:
my_hash = kwargs.pop(attr, None)
my_shallow_hash = kwargs.pop(attr, None)
for attr in self._agent_order:
attr_value = kwargs.get(attr)
if isinstance(attr_value, list):
kwargs[attr] = sorted_agents(attr_value)
new_instance = self.__class__(**kwargs)
new_instance._full_hash = my_hash
new_instance._shallow_hash = my_shallow_hash
return new_instance | python | def make_generic_copy(self, deeply=False):
"""Make a new matching Statement with no provenance.
All agents and other attributes besides evidence, belief, supports, and
supported_by will be copied over, and a new uuid will be assigned.
Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`.
If `deeply` is set to True, all the attributes will be deep-copied,
which is comparatively slow. Otherwise, attributes of this statement
may be altered by changes to the new matching statement.
"""
if deeply:
kwargs = deepcopy(self.__dict__)
else:
kwargs = self.__dict__.copy()
for attr in ['evidence', 'belief', 'uuid', 'supports', 'supported_by',
'is_activation']:
kwargs.pop(attr, None)
for attr in ['_full_hash', '_shallow_hash']:
my_hash = kwargs.pop(attr, None)
my_shallow_hash = kwargs.pop(attr, None)
for attr in self._agent_order:
attr_value = kwargs.get(attr)
if isinstance(attr_value, list):
kwargs[attr] = sorted_agents(attr_value)
new_instance = self.__class__(**kwargs)
new_instance._full_hash = my_hash
new_instance._shallow_hash = my_shallow_hash
return new_instance | [
"def",
"make_generic_copy",
"(",
"self",
",",
"deeply",
"=",
"False",
")",
":",
"if",
"deeply",
":",
"kwargs",
"=",
"deepcopy",
"(",
"self",
".",
"__dict__",
")",
"else",
":",
"kwargs",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"for",
"attr",
"in",
"[",
"'evidence'",
",",
"'belief'",
",",
"'uuid'",
",",
"'supports'",
",",
"'supported_by'",
",",
"'is_activation'",
"]",
":",
"kwargs",
".",
"pop",
"(",
"attr",
",",
"None",
")",
"for",
"attr",
"in",
"[",
"'_full_hash'",
",",
"'_shallow_hash'",
"]",
":",
"my_hash",
"=",
"kwargs",
".",
"pop",
"(",
"attr",
",",
"None",
")",
"my_shallow_hash",
"=",
"kwargs",
".",
"pop",
"(",
"attr",
",",
"None",
")",
"for",
"attr",
"in",
"self",
".",
"_agent_order",
":",
"attr_value",
"=",
"kwargs",
".",
"get",
"(",
"attr",
")",
"if",
"isinstance",
"(",
"attr_value",
",",
"list",
")",
":",
"kwargs",
"[",
"attr",
"]",
"=",
"sorted_agents",
"(",
"attr_value",
")",
"new_instance",
"=",
"self",
".",
"__class__",
"(",
"*",
"*",
"kwargs",
")",
"new_instance",
".",
"_full_hash",
"=",
"my_hash",
"new_instance",
".",
"_shallow_hash",
"=",
"my_shallow_hash",
"return",
"new_instance"
]
| Make a new matching Statement with no provenance.
All agents and other attributes besides evidence, belief, supports, and
supported_by will be copied over, and a new uuid will be assigned.
Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`.
If `deeply` is set to True, all the attributes will be deep-copied,
which is comparatively slow. Otherwise, attributes of this statement
may be altered by changes to the new matching statement. | [
"Make",
"a",
"new",
"matching",
"Statement",
"with",
"no",
"provenance",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/statements.py#L525-L553 | train |
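The contract spelled out in the make_generic_copy docstring, namely that the copy matches the original but drops provenance, can be exercised directly. A sketch under the same indra.statements constructor assumptions as the earlier examples:

from indra.statements import Agent, Phosphorylation, Evidence

orig = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'),
                       evidence=[Evidence(source_api='trips', text='...')])
copy = orig.make_generic_copy()

assert copy.matches(orig)        # same matches key
assert not copy.evidence         # provenance is stripped
assert copy.uuid != orig.uuid    # a fresh uuid is assigned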
sorgerlab/indra | indra/databases/lincs_client.py | load_lincs_csv | def load_lincs_csv(url):
"""Helper function to turn csv rows into dicts."""
resp = requests.get(url, params={'output_type': '.csv'}, timeout=120)
resp.raise_for_status()
if sys.version_info[0] < 3:
csv_io = BytesIO(resp.content)
else:
csv_io = StringIO(resp.text)
data_rows = list(read_unicode_csv_fileobj(csv_io, delimiter=','))
headers = data_rows[0]
return [{header: val for header, val in zip(headers, line_elements)}
for line_elements in data_rows[1:]] | python | def load_lincs_csv(url):
"""Helper function to turn csv rows into dicts."""
resp = requests.get(url, params={'output_type': '.csv'}, timeout=120)
resp.raise_for_status()
if sys.version_info[0] < 3:
csv_io = BytesIO(resp.content)
else:
csv_io = StringIO(resp.text)
data_rows = list(read_unicode_csv_fileobj(csv_io, delimiter=','))
headers = data_rows[0]
return [{header: val for header, val in zip(headers, line_elements)}
for line_elements in data_rows[1:]] | [
"def",
"load_lincs_csv",
"(",
"url",
")",
":",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"{",
"'output_type'",
":",
"'.csv'",
"}",
",",
"timeout",
"=",
"120",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"csv_io",
"=",
"BytesIO",
"(",
"resp",
".",
"content",
")",
"else",
":",
"csv_io",
"=",
"StringIO",
"(",
"resp",
".",
"text",
")",
"data_rows",
"=",
"list",
"(",
"read_unicode_csv_fileobj",
"(",
"csv_io",
",",
"delimiter",
"=",
"','",
")",
")",
"headers",
"=",
"data_rows",
"[",
"0",
"]",
"return",
"[",
"{",
"header",
":",
"val",
"for",
"header",
",",
"val",
"in",
"zip",
"(",
"headers",
",",
"line_elements",
")",
"}",
"for",
"line_elements",
"in",
"data_rows",
"[",
"1",
":",
"]",
"]"
]
| Helper function to turn csv rows into dicts. | [
"Helper",
"function",
"to",
"turn",
"csv",
"rows",
"into",
"dicts",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L146-L157 | train |
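The row-to-dict conversion in load_lincs_csv is a zip of the header row against each data row. A self-contained sketch of the same pattern using only the standard library; the column names and values here are made up for illustration, and no network request is involved:

import csv
from io import StringIO

raw = 'HMS LINCS ID,Name\n10001,Selumetinib\n10002,Vemurafenib\n'

rows = list(csv.reader(StringIO(raw)))
headers, data_rows = rows[0], rows[1:]

records = [{h: v for h, v in zip(headers, row)} for row in data_rows]
print(records)  # [{'HMS LINCS ID': '10001', 'Name': 'Selumetinib'}, ...]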
sorgerlab/indra | indra/databases/lincs_client.py | LincsClient.get_small_molecule_name | def get_small_molecule_name(self, hms_lincs_id):
"""Get the name of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
str
The name of the small molecule.
"""
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
if not entry:
return None
name = entry['Name']
return name | python | def get_small_molecule_name(self, hms_lincs_id):
"""Get the name of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
str
The name of the small molecule.
"""
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
if not entry:
return None
name = entry['Name']
return name | [
"def",
"get_small_molecule_name",
"(",
"self",
",",
"hms_lincs_id",
")",
":",
"entry",
"=",
"self",
".",
"_get_entry_by_id",
"(",
"self",
".",
"_sm_data",
",",
"hms_lincs_id",
")",
"if",
"not",
"entry",
":",
"return",
"None",
"name",
"=",
"entry",
"[",
"'Name'",
"]",
"return",
"name"
]
| Get the name of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
str
The name of the small molecule. | [
"Get",
"the",
"name",
"of",
"a",
"small",
"molecule",
"from",
"the",
"LINCS",
"sm",
"metadata",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L35-L52 | train |
sorgerlab/indra | indra/databases/lincs_client.py | LincsClient.get_small_molecule_refs | def get_small_molecule_refs(self, hms_lincs_id):
"""Get the id refs of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
dict
A dictionary of references.
"""
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
# If there is an entry then fill up the refs with existing values
mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID',
pubchem='PubChem CID', lincs='LINCS ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs | python | def get_small_molecule_refs(self, hms_lincs_id):
"""Get the id refs of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
dict
A dictionary of references.
"""
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
# If there is an entry then fill up the refs with existing values
mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID',
pubchem='PubChem CID', lincs='LINCS ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs | [
"def",
"get_small_molecule_refs",
"(",
"self",
",",
"hms_lincs_id",
")",
":",
"refs",
"=",
"{",
"'HMS-LINCS'",
":",
"hms_lincs_id",
"}",
"entry",
"=",
"self",
".",
"_get_entry_by_id",
"(",
"self",
".",
"_sm_data",
",",
"hms_lincs_id",
")",
"# If there is no entry for this ID",
"if",
"not",
"entry",
":",
"return",
"refs",
"# If there is an entry then fill up the refs with existing values",
"mappings",
"=",
"dict",
"(",
"chembl",
"=",
"'ChEMBL ID'",
",",
"chebi",
"=",
"'ChEBI ID'",
",",
"pubchem",
"=",
"'PubChem CID'",
",",
"lincs",
"=",
"'LINCS ID'",
")",
"for",
"k",
",",
"v",
"in",
"mappings",
".",
"items",
"(",
")",
":",
"if",
"entry",
".",
"get",
"(",
"v",
")",
":",
"refs",
"[",
"k",
".",
"upper",
"(",
")",
"]",
"=",
"entry",
".",
"get",
"(",
"v",
")",
"return",
"refs"
]
| Get the id refs of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
dict
A dictionary of references. | [
"Get",
"the",
"id",
"refs",
"of",
"a",
"small",
"molecule",
"from",
"the",
"LINCS",
"sm",
"metadata",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L54-L80 | train |
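The reference-collection pattern above (seed the dict with the known HMS LINCS ID, then copy over whichever optional columns have values) is easy to reproduce on a plain dict. The entry below is a made-up example rather than real LINCS data:

entry = {'ChEMBL ID': 'CHEMBL0000001', 'PubChem CID': '123456', 'ChEBI ID': ''}

refs = {'HMS-LINCS': '10001'}
mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID',
                pubchem='PubChem CID', lincs='LINCS ID')
for k, v in mappings.items():
    # Keep only the identifiers that actually have a value in the entry.
    if entry.get(v):
        refs[k.upper()] = entry[v]

print(refs)  # {'HMS-LINCS': '10001', 'CHEMBL': 'CHEMBL0000001', 'PUBCHEM': '123456'}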
sorgerlab/indra | indra/databases/lincs_client.py | LincsClient.get_protein_refs | def get_protein_refs(self, hms_lincs_id):
"""Get the refs for a protein from the LINCs protein metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID for the protein
Returns
-------
dict
A dictionary of protein references.
"""
# TODO: We could get phosphorylation states from the protein data.
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._prot_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
mappings = dict(egid='Gene ID', up='UniProt ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs | python | def get_protein_refs(self, hms_lincs_id):
"""Get the refs for a protein from the LINCs protein metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID for the protein
Returns
-------
dict
A dictionary of protein references.
"""
# TODO: We could get phosphorylation states from the protein data.
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._prot_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
mappings = dict(egid='Gene ID', up='UniProt ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs | [
"def",
"get_protein_refs",
"(",
"self",
",",
"hms_lincs_id",
")",
":",
"# TODO: We could get phosphorylation states from the protein data.",
"refs",
"=",
"{",
"'HMS-LINCS'",
":",
"hms_lincs_id",
"}",
"entry",
"=",
"self",
".",
"_get_entry_by_id",
"(",
"self",
".",
"_prot_data",
",",
"hms_lincs_id",
")",
"# If there is no entry for this ID",
"if",
"not",
"entry",
":",
"return",
"refs",
"mappings",
"=",
"dict",
"(",
"egid",
"=",
"'Gene ID'",
",",
"up",
"=",
"'UniProt ID'",
")",
"for",
"k",
",",
"v",
"in",
"mappings",
".",
"items",
"(",
")",
":",
"if",
"entry",
".",
"get",
"(",
"v",
")",
":",
"refs",
"[",
"k",
".",
"upper",
"(",
")",
"]",
"=",
"entry",
".",
"get",
"(",
"v",
")",
"return",
"refs"
]
| Get the refs for a protein from the LINCS protein metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID for the protein
Returns
-------
dict
A dictionary of protein references. | [
"Get",
"the",
"refs",
"for",
"a",
"protein",
"from",
"the",
"LINCs",
"protein",
"metadata",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/lincs_client.py#L82-L106 | train |
sorgerlab/indra | indra/tools/gene_network.py | GeneNetwork.get_bel_stmts | def get_bel_stmts(self, filter=False):
"""Get relevant statements from the BEL large corpus.
Performs a series of neighborhood queries and then takes the union of
all the statements. Because the query process can take a long time for
large gene lists, the resulting list of statements is cached in a
pickle file with the filename `<basename>_bel_stmts.pkl`. If the
pickle file is present, it is used by default; if not present, the
queries are performed and the results are cached.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False. Note that the
full (unfiltered) set of statements is cached.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus.
"""
if self.basename is not None:
bel_stmt_path = '%s_bel_stmts.pkl' % self.basename
# Check for cached BEL stmt file
if self.basename is not None and os.path.isfile(bel_stmt_path):
logger.info("Loading BEL statements from %s" % bel_stmt_path)
with open(bel_stmt_path, 'rb') as f:
bel_statements = pickle.load(f)
# No cache, so perform the queries
else:
bel_proc = bel.process_pybel_neighborhood(self.gene_list,
network_file=self.bel_corpus)
bel_statements = bel_proc.statements
# Save to pickle file if we're caching
if self.basename is not None:
with open(bel_stmt_path, 'wb') as f:
pickle.dump(bel_statements, f)
# Optionally filter out statements not involving only our gene set
if filter:
if len(self.gene_list) > 1:
bel_statements = ac.filter_gene_list(bel_statements,
self.gene_list, 'all')
return bel_statements | python | def get_bel_stmts(self, filter=False):
"""Get relevant statements from the BEL large corpus.
Performs a series of neighborhood queries and then takes the union of
all the statements. Because the query process can take a long time for
large gene lists, the resulting list of statements is cached in a
pickle file with the filename `<basename>_bel_stmts.pkl`. If the
pickle file is present, it is used by default; if not present, the
queries are performed and the results are cached.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False. Note that the
full (unfiltered) set of statements is cached.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus.
"""
if self.basename is not None:
bel_stmt_path = '%s_bel_stmts.pkl' % self.basename
# Check for cached BEL stmt file
if self.basename is not None and os.path.isfile(bel_stmt_path):
logger.info("Loading BEL statements from %s" % bel_stmt_path)
with open(bel_stmt_path, 'rb') as f:
bel_statements = pickle.load(f)
# No cache, so perform the queries
else:
bel_proc = bel.process_pybel_neighborhood(self.gene_list,
network_file=self.bel_corpus)
bel_statements = bel_proc.statements
# Save to pickle file if we're caching
if self.basename is not None:
with open(bel_stmt_path, 'wb') as f:
pickle.dump(bel_statements, f)
# Optionally filter out statements not involving only our gene set
if filter:
if len(self.gene_list) > 1:
bel_statements = ac.filter_gene_list(bel_statements,
self.gene_list, 'all')
return bel_statements | [
"def",
"get_bel_stmts",
"(",
"self",
",",
"filter",
"=",
"False",
")",
":",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
":",
"bel_stmt_path",
"=",
"'%s_bel_stmts.pkl'",
"%",
"self",
".",
"basename",
"# Check for cached BEL stmt file",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"bel_stmt_path",
")",
":",
"logger",
".",
"info",
"(",
"\"Loading BEL statements from %s\"",
"%",
"bel_stmt_path",
")",
"with",
"open",
"(",
"bel_stmt_path",
",",
"'rb'",
")",
"as",
"f",
":",
"bel_statements",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"# No cache, so perform the queries",
"else",
":",
"bel_proc",
"=",
"bel",
".",
"process_pybel_neighborhood",
"(",
"self",
".",
"gene_list",
",",
"network_file",
"=",
"self",
".",
"bel_corpus",
")",
"bel_statements",
"=",
"bel_proc",
".",
"statements",
"# Save to pickle file if we're caching",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"bel_stmt_path",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"bel_statements",
",",
"f",
")",
"# Optionally filter out statements not involving only our gene set",
"if",
"filter",
":",
"if",
"len",
"(",
"self",
".",
"gene_list",
")",
">",
"1",
":",
"bel_statements",
"=",
"ac",
".",
"filter_gene_list",
"(",
"bel_statements",
",",
"self",
".",
"gene_list",
",",
"'all'",
")",
"return",
"bel_statements"
]
| Get relevant statements from the BEL large corpus.
Performs a series of neighborhood queries and then takes the union of
all the statements. Because the query process can take a long time for
large gene lists, the resulting list of statements is cached in a
pickle file with the filename `<basename>_bel_stmts.pkl`. If the
pickle file is present, it is used by default; if not present, the
queries are performed and the results are cached.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False. Note that the
full (unfiltered) set of statements is cached.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus. | [
"Get",
"relevant",
"statements",
"from",
"the",
"BEL",
"large",
"corpus",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L51-L94 | train |
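The cache-or-query logic in get_bel_stmts (and in get_biopax_stmts below) is a generic pickle-backed memoization pattern. A minimal standalone sketch with a placeholder callable standing in for the real BEL neighborhood query:

import os
import pickle

def cached_query(basename, suffix, query_func):
    # Load results from <basename>_<suffix>.pkl if present, otherwise run and cache.
    path = None if basename is None else '%s_%s.pkl' % (basename, suffix)
    if path is not None and os.path.isfile(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    results = query_func()
    if path is not None:
        with open(path, 'wb') as f:
            pickle.dump(results, f)
    return results

stmts = cached_query('ras_demo', 'bel_stmts', lambda: ['stmt1', 'stmt2'])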
sorgerlab/indra | indra/tools/gene_network.py | GeneNetwork.get_biopax_stmts | def get_biopax_stmts(self, filter=False, query='pathsbetween',
database_filter=None):
"""Get relevant statements from Pathway Commons.
Performs a "paths between" query for the genes in :py:attr:`gene_list`
and uses the results to build statements. This function caches two
files: the list of statements built from the query, which is cached in
`<basename>_biopax_stmts.pkl`, and the OWL file returned by the Pathway
Commons Web API, which is cached in `<basename>_pc_pathsbetween.owl`.
If these cached files are found, then the results are returned based
on the cached file and Pathway Commons is not queried again.
Parameters
----------
filter : Optional[bool]
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
query : Optional[str]
Defines what type of query is executed. The two options are
'pathsbetween' which finds paths between the given list of genes
and only works if more than 1 gene is given, and 'neighborhood'
which searches the immediate neighborhood of each given gene.
Note that for pathsbetween queries with more than 60 genes, the
query will be executed in multiple blocks for scalability.
database_filter: Optional[list[str]]
A list of PathwayCommons databases to include in the query.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from Pathway Commons.
"""
# If we're using a cache, initialize the appropriate filenames
if self.basename is not None:
biopax_stmt_path = '%s_biopax_stmts.pkl' % self.basename
biopax_ras_owl_path = '%s_pc_pathsbetween.owl' % self.basename
# Check for cached Biopax stmt file at the given path
# if it's there, return the statements from the cache
if self.basename is not None and os.path.isfile(biopax_stmt_path):
logger.info("Loading Biopax statements from %s" % biopax_stmt_path)
with open(biopax_stmt_path, 'rb') as f:
bp_statements = pickle.load(f)
return bp_statements
# Check for cached file before querying Pathway Commons Web API
if self.basename is not None and os.path.isfile(biopax_ras_owl_path):
logger.info("Loading Biopax from OWL file %s" % biopax_ras_owl_path)
bp = biopax.process_owl(biopax_ras_owl_path)
# OWL file not found; do query and save to file
else:
if (len(self.gene_list) < 2) and (query == 'pathsbetween'):
logger.warning('Using neighborhood query for one gene.')
query = 'neighborhood'
if query == 'pathsbetween':
if len(self.gene_list) > 60:
block_size = 60
else:
block_size = None
bp = biopax.process_pc_pathsbetween(self.gene_list,
database_filter=database_filter,
block_size=block_size)
elif query == 'neighborhood':
bp = biopax.process_pc_neighborhood(self.gene_list,
database_filter=database_filter)
else:
logger.error('Invalid query type: %s' % query)
return []
# Save the file if we're caching
if self.basename is not None:
bp.save_model(biopax_ras_owl_path)
# Save statements to pickle file if we're caching
if self.basename is not None:
with open(biopax_stmt_path, 'wb') as f:
pickle.dump(bp.statements, f)
# Optionally filter out statements not involving only our gene set
if filter:
policy = 'one' if len(self.gene_list) > 1 else 'all'
stmts = ac.filter_gene_list(bp.statements, self.gene_list, policy)
else:
stmts = bp.statements
return stmts | python | def get_biopax_stmts(self, filter=False, query='pathsbetween',
database_filter=None):
"""Get relevant statements from Pathway Commons.
Performs a "paths between" query for the genes in :py:attr:`gene_list`
and uses the results to build statements. This function caches two
files: the list of statements built from the query, which is cached in
`<basename>_biopax_stmts.pkl`, and the OWL file returned by the Pathway
Commons Web API, which is cached in `<basename>_pc_pathsbetween.owl`.
If these cached files are found, then the results are returned based
on the cached file and Pathway Commons is not queried again.
Parameters
----------
filter : Optional[bool]
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
query : Optional[str]
Defines what type of query is executed. The two options are
'pathsbetween' which finds paths between the given list of genes
and only works if more than 1 gene is given, and 'neighborhood'
which searches the immediate neighborhood of each given gene.
Note that for pathsbetween queries with more than 60 genes, the
query will be executed in multiple blocks for scalability.
database_filter: Optional[list[str]]
A list of PathwayCommons databases to include in the query.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from Pathway Commons.
"""
# If we're using a cache, initialize the appropriate filenames
if self.basename is not None:
biopax_stmt_path = '%s_biopax_stmts.pkl' % self.basename
biopax_ras_owl_path = '%s_pc_pathsbetween.owl' % self.basename
# Check for cached Biopax stmt file at the given path
# if it's there, return the statements from the cache
if self.basename is not None and os.path.isfile(biopax_stmt_path):
logger.info("Loading Biopax statements from %s" % biopax_stmt_path)
with open(biopax_stmt_path, 'rb') as f:
bp_statements = pickle.load(f)
return bp_statements
# Check for cached file before querying Pathway Commons Web API
if self.basename is not None and os.path.isfile(biopax_ras_owl_path):
logger.info("Loading Biopax from OWL file %s" % biopax_ras_owl_path)
bp = biopax.process_owl(biopax_ras_owl_path)
# OWL file not found; do query and save to file
else:
if (len(self.gene_list) < 2) and (query == 'pathsbetween'):
logger.warning('Using neighborhood query for one gene.')
query = 'neighborhood'
if query == 'pathsbetween':
if len(self.gene_list) > 60:
block_size = 60
else:
block_size = None
bp = biopax.process_pc_pathsbetween(self.gene_list,
database_filter=database_filter,
block_size=block_size)
elif query == 'neighborhood':
bp = biopax.process_pc_neighborhood(self.gene_list,
database_filter=database_filter)
else:
logger.error('Invalid query type: %s' % query)
return []
# Save the file if we're caching
if self.basename is not None:
bp.save_model(biopax_ras_owl_path)
# Save statements to pickle file if we're caching
if self.basename is not None:
with open(biopax_stmt_path, 'wb') as f:
pickle.dump(bp.statements, f)
# Optionally filter out statements not involving only our gene set
if filter:
policy = 'one' if len(self.gene_list) > 1 else 'all'
stmts = ac.filter_gene_list(bp.statements, self.gene_list, policy)
else:
stmts = bp.statements
return stmts | [
"def",
"get_biopax_stmts",
"(",
"self",
",",
"filter",
"=",
"False",
",",
"query",
"=",
"'pathsbetween'",
",",
"database_filter",
"=",
"None",
")",
":",
"# If we're using a cache, initialize the appropriate filenames",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
":",
"biopax_stmt_path",
"=",
"'%s_biopax_stmts.pkl'",
"%",
"self",
".",
"basename",
"biopax_ras_owl_path",
"=",
"'%s_pc_pathsbetween.owl'",
"%",
"self",
".",
"basename",
"# Check for cached Biopax stmt file at the given path",
"# if it's there, return the statements from the cache",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"biopax_stmt_path",
")",
":",
"logger",
".",
"info",
"(",
"\"Loading Biopax statements from %s\"",
"%",
"biopax_stmt_path",
")",
"with",
"open",
"(",
"biopax_stmt_path",
",",
"'rb'",
")",
"as",
"f",
":",
"bp_statements",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"return",
"bp_statements",
"# Check for cached file before querying Pathway Commons Web API",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"biopax_ras_owl_path",
")",
":",
"logger",
".",
"info",
"(",
"\"Loading Biopax from OWL file %s\"",
"%",
"biopax_ras_owl_path",
")",
"bp",
"=",
"biopax",
".",
"process_owl",
"(",
"biopax_ras_owl_path",
")",
"# OWL file not found; do query and save to file",
"else",
":",
"if",
"(",
"len",
"(",
"self",
".",
"gene_list",
")",
"<",
"2",
")",
"and",
"(",
"query",
"==",
"'pathsbetween'",
")",
":",
"logger",
".",
"warning",
"(",
"'Using neighborhood query for one gene.'",
")",
"query",
"=",
"'neighborhood'",
"if",
"query",
"==",
"'pathsbetween'",
":",
"if",
"len",
"(",
"self",
".",
"gene_list",
")",
">",
"60",
":",
"block_size",
"=",
"60",
"else",
":",
"block_size",
"=",
"None",
"bp",
"=",
"biopax",
".",
"process_pc_pathsbetween",
"(",
"self",
".",
"gene_list",
",",
"database_filter",
"=",
"database_filter",
",",
"block_size",
"=",
"block_size",
")",
"elif",
"query",
"==",
"'neighborhood'",
":",
"bp",
"=",
"biopax",
".",
"process_pc_neighborhood",
"(",
"self",
".",
"gene_list",
",",
"database_filter",
"=",
"database_filter",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Invalid query type: %s'",
"%",
"query",
")",
"return",
"[",
"]",
"# Save the file if we're caching",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
":",
"bp",
".",
"save_model",
"(",
"biopax_ras_owl_path",
")",
"# Save statements to pickle file if we're caching",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"biopax_stmt_path",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"bp",
".",
"statements",
",",
"f",
")",
"# Optionally filter out statements not involving only our gene set",
"if",
"filter",
":",
"policy",
"=",
"'one'",
"if",
"len",
"(",
"self",
".",
"gene_list",
")",
">",
"1",
"else",
"'all'",
"stmts",
"=",
"ac",
".",
"filter_gene_list",
"(",
"bp",
".",
"statements",
",",
"self",
".",
"gene_list",
",",
"policy",
")",
"else",
":",
"stmts",
"=",
"bp",
".",
"statements",
"return",
"stmts"
]
| Get relevant statements from Pathway Commons.
Performs a "paths between" query for the genes in :py:attr:`gene_list`
and uses the results to build statements. This function caches two
files: the list of statements built from the query, which is cached in
`<basename>_biopax_stmts.pkl`, and the OWL file returned by the Pathway
Commons Web API, which is cached in `<basename>_pc_pathsbetween.owl`.
If these cached files are found, then the results are returned based
on the cached file and Pathway Commons is not queried again.
Parameters
----------
filter : Optional[bool]
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
query : Optional[str]
Defines what type of query is executed. The two options are
'pathsbetween' which finds paths between the given list of genes
and only works if more than 1 gene is given, and 'neighborhood'
which searches the immediate neighborhood of each given gene.
Note that for pathsbetween queries with more than 60 genes, the
query will be executed in multiple blocks for scalability.
database_filter: Optional[list[str]]
A list of PathwayCommons databases to include in the query.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from Pathway Commons. | [
"Get",
"relevant",
"statements",
"from",
"Pathway",
"Commons",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L96-L175 | train |
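A hedged usage sketch for the Pathway Commons query above. It assumes GeneNetwork is constructed from the gene list plus an optional cache basename (the pattern the cached filenames suggest); unless the cache files already exist, the call goes out to the Pathway Commons web service:

from indra.tools.gene_network import GeneNetwork

gn = GeneNetwork(['BRAF', 'MAP2K1', 'MAPK1'], basename='ras_demo')

# 'pathsbetween' is the default; 'neighborhood' is the fallback for a single gene.
biopax_stmts = gn.get_biopax_stmts(filter=True, query='pathsbetween')
print(len(biopax_stmts))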
sorgerlab/indra | indra/tools/gene_network.py | GeneNetwork.get_statements | def get_statements(self, filter=False):
"""Return the combined list of statements from BEL and Pathway Commons.
Internally calls :py:meth:`get_biopax_stmts` and
:py:meth:`get_bel_stmts`.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus and Pathway
Commons.
"""
bp_stmts = self.get_biopax_stmts(filter=filter)
bel_stmts = self.get_bel_stmts(filter=filter)
return bp_stmts + bel_stmts | python | def get_statements(self, filter=False):
"""Return the combined list of statements from BEL and Pathway Commons.
Internally calls :py:meth:`get_biopax_stmts` and
:py:meth:`get_bel_stmts`.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus and Pathway
Commons.
"""
bp_stmts = self.get_biopax_stmts(filter=filter)
bel_stmts = self.get_bel_stmts(filter=filter)
return bp_stmts + bel_stmts | [
"def",
"get_statements",
"(",
"self",
",",
"filter",
"=",
"False",
")",
":",
"bp_stmts",
"=",
"self",
".",
"get_biopax_stmts",
"(",
"filter",
"=",
"filter",
")",
"bel_stmts",
"=",
"self",
".",
"get_bel_stmts",
"(",
"filter",
"=",
"filter",
")",
"return",
"bp_stmts",
"+",
"bel_stmts"
]
| Return the combined list of statements from BEL and Pathway Commons.
Internally calls :py:meth:`get_biopax_stmts` and
:py:meth:`get_bel_stmts`.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus and Pathway
Commons. | [
"Return",
"the",
"combined",
"list",
"of",
"statements",
"from",
"BEL",
"and",
"Pathway",
"Commons",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L177-L198 | train |
sorgerlab/indra | indra/tools/gene_network.py | GeneNetwork.run_preassembly | def run_preassembly(self, stmts, print_summary=True):
"""Run complete preassembly procedure on the given statements.
Results are returned as a dict and stored in the attribute
:py:attr:`results`. They are also saved in the pickle file
`<basename>_results.pkl`.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Statements to preassemble.
print_summary : bool
If True (default), prints a summary of the preassembly process to
the console.
Returns
-------
dict
A dict containing the following entries:
- `raw`: the starting set of statements before preassembly.
- `duplicates1`: statements after initial de-duplication.
- `valid`: statements found to have valid modification sites.
- `mapped`: mapped statements (list of
:py:class:`indra.preassembler.sitemapper.MappedStatement`).
- `mapped_stmts`: combined list of valid statements and statements
after mapping.
- `duplicates2`: statements resulting from de-duplication of the
statements in `mapped_stmts`.
- `related2`: top-level statements after combining the statements
in `duplicates2`.
"""
# First round of preassembly: remove duplicates before sitemapping
pa1 = Preassembler(hierarchies, stmts)
logger.info("Combining duplicates")
pa1.combine_duplicates()
# Map sites
logger.info("Mapping sites")
(valid, mapped) = sm.map_sites(pa1.unique_stmts)
# Combine valid and successfully mapped statements into single list
correctly_mapped_stmts = []
for ms in mapped:
if all([True if mm[1] is not None else False
for mm in ms.mapped_mods]):
correctly_mapped_stmts.append(ms.mapped_stmt)
mapped_stmts = valid + correctly_mapped_stmts
# Second round of preassembly: de-duplicate and combine related
pa2 = Preassembler(hierarchies, mapped_stmts)
logger.info("Combining duplicates again")
pa2.combine_duplicates()
pa2.combine_related()
# Fill out the results dict
self.results = {}
self.results['raw'] = stmts
self.results['duplicates1'] = pa1.unique_stmts
self.results['valid'] = valid
self.results['mapped'] = mapped
self.results['mapped_stmts'] = mapped_stmts
self.results['duplicates2'] = pa2.unique_stmts
self.results['related2'] = pa2.related_stmts
# Print summary
if print_summary:
logger.info("\nStarting number of statements: %d" % len(stmts))
logger.info("After duplicate removal: %d" % len(pa1.unique_stmts))
logger.info("Unique statements with valid sites: %d" % len(valid))
logger.info("Unique statements with invalid sites: %d" %
len(mapped))
logger.info("After post-mapping duplicate removal: %d" %
len(pa2.unique_stmts))
logger.info("After combining related statements: %d" %
len(pa2.related_stmts))
# Save the results if we're caching
if self.basename is not None:
results_filename = '%s_results.pkl' % self.basename
with open(results_filename, 'wb') as f:
pickle.dump(self.results, f)
return self.results | python | def run_preassembly(self, stmts, print_summary=True):
"""Run complete preassembly procedure on the given statements.
Results are returned as a dict and stored in the attribute
:py:attr:`results`. They are also saved in the pickle file
`<basename>_results.pkl`.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Statements to preassemble.
print_summary : bool
If True (default), prints a summary of the preassembly process to
the console.
Returns
-------
dict
A dict containing the following entries:
- `raw`: the starting set of statements before preassembly.
- `duplicates1`: statements after initial de-duplication.
- `valid`: statements found to have valid modification sites.
- `mapped`: mapped statements (list of
:py:class:`indra.preassembler.sitemapper.MappedStatement`).
- `mapped_stmts`: combined list of valid statements and statements
after mapping.
- `duplicates2`: statements resulting from de-duplication of the
statements in `mapped_stmts`.
- `related2`: top-level statements after combining the statements
in `duplicates2`.
"""
# First round of preassembly: remove duplicates before sitemapping
pa1 = Preassembler(hierarchies, stmts)
logger.info("Combining duplicates")
pa1.combine_duplicates()
# Map sites
logger.info("Mapping sites")
(valid, mapped) = sm.map_sites(pa1.unique_stmts)
# Combine valid and successfully mapped statements into single list
correctly_mapped_stmts = []
for ms in mapped:
if all([True if mm[1] is not None else False
for mm in ms.mapped_mods]):
correctly_mapped_stmts.append(ms.mapped_stmt)
mapped_stmts = valid + correctly_mapped_stmts
# Second round of preassembly: de-duplicate and combine related
pa2 = Preassembler(hierarchies, mapped_stmts)
logger.info("Combining duplicates again")
pa2.combine_duplicates()
pa2.combine_related()
# Fill out the results dict
self.results = {}
self.results['raw'] = stmts
self.results['duplicates1'] = pa1.unique_stmts
self.results['valid'] = valid
self.results['mapped'] = mapped
self.results['mapped_stmts'] = mapped_stmts
self.results['duplicates2'] = pa2.unique_stmts
self.results['related2'] = pa2.related_stmts
# Print summary
if print_summary:
logger.info("\nStarting number of statements: %d" % len(stmts))
logger.info("After duplicate removal: %d" % len(pa1.unique_stmts))
logger.info("Unique statements with valid sites: %d" % len(valid))
logger.info("Unique statements with invalid sites: %d" %
len(mapped))
logger.info("After post-mapping duplicate removal: %d" %
len(pa2.unique_stmts))
logger.info("After combining related statements: %d" %
len(pa2.related_stmts))
# Save the results if we're caching
if self.basename is not None:
results_filename = '%s_results.pkl' % self.basename
with open(results_filename, 'wb') as f:
pickle.dump(self.results, f)
return self.results | [
"def",
"run_preassembly",
"(",
"self",
",",
"stmts",
",",
"print_summary",
"=",
"True",
")",
":",
"# First round of preassembly: remove duplicates before sitemapping",
"pa1",
"=",
"Preassembler",
"(",
"hierarchies",
",",
"stmts",
")",
"logger",
".",
"info",
"(",
"\"Combining duplicates\"",
")",
"pa1",
".",
"combine_duplicates",
"(",
")",
"# Map sites",
"logger",
".",
"info",
"(",
"\"Mapping sites\"",
")",
"(",
"valid",
",",
"mapped",
")",
"=",
"sm",
".",
"map_sites",
"(",
"pa1",
".",
"unique_stmts",
")",
"# Combine valid and successfully mapped statements into single list",
"correctly_mapped_stmts",
"=",
"[",
"]",
"for",
"ms",
"in",
"mapped",
":",
"if",
"all",
"(",
"[",
"True",
"if",
"mm",
"[",
"1",
"]",
"is",
"not",
"None",
"else",
"False",
"for",
"mm",
"in",
"ms",
".",
"mapped_mods",
"]",
")",
":",
"correctly_mapped_stmts",
".",
"append",
"(",
"ms",
".",
"mapped_stmt",
")",
"mapped_stmts",
"=",
"valid",
"+",
"correctly_mapped_stmts",
"# Second round of preassembly: de-duplicate and combine related",
"pa2",
"=",
"Preassembler",
"(",
"hierarchies",
",",
"mapped_stmts",
")",
"logger",
".",
"info",
"(",
"\"Combining duplicates again\"",
")",
"pa2",
".",
"combine_duplicates",
"(",
")",
"pa2",
".",
"combine_related",
"(",
")",
"# Fill out the results dict",
"self",
".",
"results",
"=",
"{",
"}",
"self",
".",
"results",
"[",
"'raw'",
"]",
"=",
"stmts",
"self",
".",
"results",
"[",
"'duplicates1'",
"]",
"=",
"pa1",
".",
"unique_stmts",
"self",
".",
"results",
"[",
"'valid'",
"]",
"=",
"valid",
"self",
".",
"results",
"[",
"'mapped'",
"]",
"=",
"mapped",
"self",
".",
"results",
"[",
"'mapped_stmts'",
"]",
"=",
"mapped_stmts",
"self",
".",
"results",
"[",
"'duplicates2'",
"]",
"=",
"pa2",
".",
"unique_stmts",
"self",
".",
"results",
"[",
"'related2'",
"]",
"=",
"pa2",
".",
"related_stmts",
"# Print summary",
"if",
"print_summary",
":",
"logger",
".",
"info",
"(",
"\"\\nStarting number of statements: %d\"",
"%",
"len",
"(",
"stmts",
")",
")",
"logger",
".",
"info",
"(",
"\"After duplicate removal: %d\"",
"%",
"len",
"(",
"pa1",
".",
"unique_stmts",
")",
")",
"logger",
".",
"info",
"(",
"\"Unique statements with valid sites: %d\"",
"%",
"len",
"(",
"valid",
")",
")",
"logger",
".",
"info",
"(",
"\"Unique statements with invalid sites: %d\"",
"%",
"len",
"(",
"mapped",
")",
")",
"logger",
".",
"info",
"(",
"\"After post-mapping duplicate removal: %d\"",
"%",
"len",
"(",
"pa2",
".",
"unique_stmts",
")",
")",
"logger",
".",
"info",
"(",
"\"After combining related statements: %d\"",
"%",
"len",
"(",
"pa2",
".",
"related_stmts",
")",
")",
"# Save the results if we're caching",
"if",
"self",
".",
"basename",
"is",
"not",
"None",
":",
"results_filename",
"=",
"'%s_results.pkl'",
"%",
"self",
".",
"basename",
"with",
"open",
"(",
"results_filename",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"self",
".",
"results",
",",
"f",
")",
"return",
"self",
".",
"results"
]
| Run complete preassembly procedure on the given statements.
Results are returned as a dict and stored in the attribute
:py:attr:`results`. They are also saved in the pickle file
`<basename>_results.pkl`.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Statements to preassemble.
print_summary : bool
If True (default), prints a summary of the preassembly process to
the console.
Returns
-------
dict
A dict containing the following entries:
- `raw`: the starting set of statements before preassembly.
- `duplicates1`: statements after initial de-duplication.
- `valid`: statements found to have valid modification sites.
- `mapped`: mapped statements (list of
:py:class:`indra.preassembler.sitemapper.MappedStatement`).
- `mapped_stmts`: combined list of valid statements and statements
after mapping.
- `duplicates2`: statements resulting from de-duplication of the
statements in `mapped_stmts`.
- `related2`: top-level statements after combining the statements
in `duplicates2`. | [
"Run",
"complete",
"preassembly",
"procedure",
"on",
"the",
"given",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L200-L276 | train |
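A hedged continuation of the sketch above, using the documented run_preassembly signature and the result keys listed in this entry.
# Continues the assumed GeneNetwork instance from the earlier sketch.
stmts = gn.get_statements(filter=True)
results = gn.run_preassembly(stmts, print_summary=True)
# 'related2' holds the top-level statements after combining related ones.
print('%d top-level statements' % len(results['related2']))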
sorgerlab/indra | indra/sources/hume/processor.py | _get_grounding | def _get_grounding(entity):
"""Return Hume grounding."""
db_refs = {'TEXT': entity['text']}
groundings = entity.get('grounding')
if not groundings:
return db_refs
def get_ont_concept(concept):
"""Strip slash, replace spaces and remove example leafs."""
# In the WM context, groundings have no URL prefix and start with /
# The following block does some special handling of these groundings.
if concept.startswith('/'):
concept = concept[1:]
concept = concept.replace(' ', '_')
# We eliminate any entries that aren't ontology categories
# these are typically "examples" corresponding to the category
while concept not in hume_onto_entries:
parts = concept.split('/')
if len(parts) == 1:
break
concept = '/'.join(parts[:-1])
# Otherwise we just return the concept as is
return concept
# Basic collection of grounding entries
raw_grounding_entries = [(get_ont_concept(g['ontologyConcept']),
g['value']) for g in groundings]
# Occasionally we get duplicate grounding entries, we want to
# eliminate those here
grounding_dict = {}
for cat, score in raw_grounding_entries:
if (cat not in grounding_dict) or (score > grounding_dict[cat]):
grounding_dict[cat] = score
# Then we sort the list in reverse order according to score
# Sometimes the exact same score appears multiple times, in this
# case we prioritize by the "depth" of the grounding which is
# obtained by looking at the number of /-s in the entry.
# However, there are still cases where the grounding depth and the score
# are the same. In these cases we just sort alphabetically.
grounding_entries = sorted(list(set(grounding_dict.items())),
key=lambda x: (x[1], x[0].count('/'), x[0]),
reverse=True)
# We could get an empty list here in which case we don't add the
# grounding
if grounding_entries:
db_refs['HUME'] = grounding_entries
return db_refs | python | def _get_grounding(entity):
"""Return Hume grounding."""
db_refs = {'TEXT': entity['text']}
groundings = entity.get('grounding')
if not groundings:
return db_refs
def get_ont_concept(concept):
"""Strip slash, replace spaces and remove example leafs."""
# In the WM context, groundings have no URL prefix and start with /
# The following block does some special handling of these groundings.
if concept.startswith('/'):
concept = concept[1:]
concept = concept.replace(' ', '_')
# We eliminate any entries that aren't ontology categories
# these are typically "examples" corresponding to the category
while concept not in hume_onto_entries:
parts = concept.split('/')
if len(parts) == 1:
break
concept = '/'.join(parts[:-1])
# Otherwise we just return the concept as is
return concept
# Basic collection of grounding entries
raw_grounding_entries = [(get_ont_concept(g['ontologyConcept']),
g['value']) for g in groundings]
# Occasionally we get duplicate grounding entries, we want to
# eliminate those here
grounding_dict = {}
for cat, score in raw_grounding_entries:
if (cat not in grounding_dict) or (score > grounding_dict[cat]):
grounding_dict[cat] = score
# Then we sort the list in reverse order according to score
# Sometimes the exact same score appears multiple times, in this
# case we prioritize by the "depth" of the grounding which is
# obtained by looking at the number of /-s in the entry.
# However, there are still cases where the grounding depth and the score
# are the same. In these cases we just sort alphabetically.
grounding_entries = sorted(list(set(grounding_dict.items())),
key=lambda x: (x[1], x[0].count('/'), x[0]),
reverse=True)
# We could get an empty list here in which case we don't add the
# grounding
if grounding_entries:
db_refs['HUME'] = grounding_entries
return db_refs | [
"def",
"_get_grounding",
"(",
"entity",
")",
":",
"db_refs",
"=",
"{",
"'TEXT'",
":",
"entity",
"[",
"'text'",
"]",
"}",
"groundings",
"=",
"entity",
".",
"get",
"(",
"'grounding'",
")",
"if",
"not",
"groundings",
":",
"return",
"db_refs",
"def",
"get_ont_concept",
"(",
"concept",
")",
":",
"\"\"\"Strip slash, replace spaces and remove example leafs.\"\"\"",
"# In the WM context, groundings have no URL prefix and start with /",
"# The following block does some special handling of these groundings.",
"if",
"concept",
".",
"startswith",
"(",
"'/'",
")",
":",
"concept",
"=",
"concept",
"[",
"1",
":",
"]",
"concept",
"=",
"concept",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
"# We eliminate any entries that aren't ontology categories",
"# these are typically \"examples\" corresponding to the category",
"while",
"concept",
"not",
"in",
"hume_onto_entries",
":",
"parts",
"=",
"concept",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"break",
"concept",
"=",
"'/'",
".",
"join",
"(",
"parts",
"[",
":",
"-",
"1",
"]",
")",
"# Otherwise we just return the concept as is",
"return",
"concept",
"# Basic collection of grounding entries",
"raw_grounding_entries",
"=",
"[",
"(",
"get_ont_concept",
"(",
"g",
"[",
"'ontologyConcept'",
"]",
")",
",",
"g",
"[",
"'value'",
"]",
")",
"for",
"g",
"in",
"groundings",
"]",
"# Occasionally we get duplicate grounding entries, we want to",
"# eliminate those here",
"grounding_dict",
"=",
"{",
"}",
"for",
"cat",
",",
"score",
"in",
"raw_grounding_entries",
":",
"if",
"(",
"cat",
"not",
"in",
"grounding_dict",
")",
"or",
"(",
"score",
">",
"grounding_dict",
"[",
"cat",
"]",
")",
":",
"grounding_dict",
"[",
"cat",
"]",
"=",
"score",
"# Then we sort the list in reverse order according to score",
"# Sometimes the exact same score appears multiple times, in this",
"# case we prioritize by the \"depth\" of the grounding which is",
"# obtained by looking at the number of /-s in the entry.",
"# However, there are still cases where the grounding depth and the score",
"# are the same. In these cases we just sort alphabetically.",
"grounding_entries",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"grounding_dict",
".",
"items",
"(",
")",
")",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
"[",
"1",
"]",
",",
"x",
"[",
"0",
"]",
".",
"count",
"(",
"'/'",
")",
",",
"x",
"[",
"0",
"]",
")",
",",
"reverse",
"=",
"True",
")",
"# We could get an empty list here in which case we don't add the",
"# grounding",
"if",
"grounding_entries",
":",
"db_refs",
"[",
"'HUME'",
"]",
"=",
"grounding_entries",
"return",
"db_refs"
]
| Return Hume grounding. | [
"Return",
"Hume",
"grounding",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L230-L277 | train |
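The de-duplication and ranking inside _get_grounding can be shown standalone; the ontology paths below are made up, but the dict-and-sort idiom mirrors the code above.
# Toy (category, score) grounding pairs; duplicates keep the higher score.
raw_entries = [('wm/causal_factor/food_insecurity', 0.7),
               ('wm/causal_factor', 0.7),
               ('wm/causal_factor/food_insecurity', 0.5)]
best = {}
for cat, score in raw_entries:
    if cat not in best or score > best[cat]:
        best[cat] = score
# Rank by score, then by depth (number of '/'), then name, all descending.
ranked = sorted(best.items(), key=lambda x: (x[1], x[0].count('/'), x[0]),
                reverse=True)
print(ranked)  # the deeper category wins the 0.7 tie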
sorgerlab/indra | indra/sources/hume/processor.py | HumeJsonLdProcessor._find_relations | def _find_relations(self):
"""Find all relevant relation elements and return them in a list."""
# Get all extractions
extractions = \
list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]"))
# Get relations from extractions
relations = []
for e in extractions:
label_set = set(e.get('labels', []))
# If this is a DirectedRelation
if 'DirectedRelation' in label_set:
self.relation_dict[e['@id']] = e
subtype = e.get('subtype')
if any(t in subtype for t in polarities.keys()):
relations.append((subtype, e))
# If this is an Event or an Entity
if {'Event', 'Entity'} & label_set:
self.concept_dict[e['@id']] = e
if not relations and not self.relation_dict:
logger.info("No relations found.")
else:
logger.info('%d relations of types %s found'
% (len(relations), ', '.join(polarities.keys())))
logger.info('%d relations in dict.' % len(self.relation_dict))
logger.info('%d concepts found.' % len(self.concept_dict))
return relations | python | def _find_relations(self):
"""Find all relevant relation elements and return them in a list."""
# Get all extractions
extractions = \
list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]"))
# Get relations from extractions
relations = []
for e in extractions:
label_set = set(e.get('labels', []))
# If this is a DirectedRelation
if 'DirectedRelation' in label_set:
self.relation_dict[e['@id']] = e
subtype = e.get('subtype')
if any(t in subtype for t in polarities.keys()):
relations.append((subtype, e))
# If this is an Event or an Entity
if {'Event', 'Entity'} & label_set:
self.concept_dict[e['@id']] = e
if not relations and not self.relation_dict:
logger.info("No relations found.")
else:
logger.info('%d relations of types %s found'
% (len(relations), ', '.join(polarities.keys())))
logger.info('%d relations in dict.' % len(self.relation_dict))
logger.info('%d concepts found.' % len(self.concept_dict))
return relations | [
"def",
"_find_relations",
"(",
"self",
")",
":",
"# Get all extractions",
"extractions",
"=",
"list",
"(",
"self",
".",
"tree",
".",
"execute",
"(",
"\"$.extractions[(@.@type is 'Extraction')]\"",
")",
")",
"# Get relations from extractions",
"relations",
"=",
"[",
"]",
"for",
"e",
"in",
"extractions",
":",
"label_set",
"=",
"set",
"(",
"e",
".",
"get",
"(",
"'labels'",
",",
"[",
"]",
")",
")",
"# If this is a DirectedRelation",
"if",
"'DirectedRelation'",
"in",
"label_set",
":",
"self",
".",
"relation_dict",
"[",
"e",
"[",
"'@id'",
"]",
"]",
"=",
"e",
"subtype",
"=",
"e",
".",
"get",
"(",
"'subtype'",
")",
"if",
"any",
"(",
"t",
"in",
"subtype",
"for",
"t",
"in",
"polarities",
".",
"keys",
"(",
")",
")",
":",
"relations",
".",
"append",
"(",
"(",
"subtype",
",",
"e",
")",
")",
"# If this is an Event or an Entity",
"if",
"{",
"'Event'",
",",
"'Entity'",
"}",
"&",
"label_set",
":",
"self",
".",
"concept_dict",
"[",
"e",
"[",
"'@id'",
"]",
"]",
"=",
"e",
"if",
"not",
"relations",
"and",
"not",
"self",
".",
"relation_dict",
":",
"logger",
".",
"info",
"(",
"\"No relations found.\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'%d relations of types %s found'",
"%",
"(",
"len",
"(",
"relations",
")",
",",
"', '",
".",
"join",
"(",
"polarities",
".",
"keys",
"(",
")",
")",
")",
")",
"logger",
".",
"info",
"(",
"'%d relations in dict.'",
"%",
"len",
"(",
"self",
".",
"relation_dict",
")",
")",
"logger",
".",
"info",
"(",
"'%d concepts found.'",
"%",
"len",
"(",
"self",
".",
"concept_dict",
")",
")",
"return",
"relations"
]
| Find all relevant relation elements and return them in a list. | [
"Find",
"all",
"relevant",
"relation",
"elements",
"and",
"return",
"them",
"in",
"a",
"list",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L68-L95 | train |
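The label filtering in _find_relations relies on set membership and on a non-empty set intersection being truthy; a self-contained illustration with a made-up extraction record:
# Made-up extraction record with the fields the processor inspects.
extraction = {'@id': 'e1', 'labels': ['Event', 'DirectedRelation']}
label_set = set(extraction.get('labels', []))
is_relation = 'DirectedRelation' in label_set
is_event_or_entity = bool({'Event', 'Entity'} & label_set)
print(is_relation, is_event_or_entity)  # True True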
sorgerlab/indra | indra/sources/hume/processor.py | HumeJsonLdProcessor._get_documents | def _get_documents(self):
"""Populate sentences attribute with a dict keyed by document id."""
documents = self.tree.execute("$.documents")
for doc in documents:
sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}
self.document_dict[doc['@id']] = {'sentences': sentences,
'location': doc['location']} | python | def _get_documents(self):
"""Populate sentences attribute with a dict keyed by document id."""
documents = self.tree.execute("$.documents")
for doc in documents:
sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}
self.document_dict[doc['@id']] = {'sentences': sentences,
'location': doc['location']} | [
"def",
"_get_documents",
"(",
"self",
")",
":",
"documents",
"=",
"self",
".",
"tree",
".",
"execute",
"(",
"\"$.documents\"",
")",
"for",
"doc",
"in",
"documents",
":",
"sentences",
"=",
"{",
"s",
"[",
"'@id'",
"]",
":",
"s",
"[",
"'text'",
"]",
"for",
"s",
"in",
"doc",
".",
"get",
"(",
"'sentences'",
",",
"[",
"]",
")",
"}",
"self",
".",
"document_dict",
"[",
"doc",
"[",
"'@id'",
"]",
"]",
"=",
"{",
"'sentences'",
":",
"sentences",
",",
"'location'",
":",
"doc",
"[",
"'location'",
"]",
"}"
]
| Populate sentences attribute with a dict keyed by document id. | [
"Populate",
"sentences",
"attribute",
"with",
"a",
"dict",
"keyed",
"by",
"document",
"id",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L97-L103 | train |
sorgerlab/indra | indra/sources/hume/processor.py | HumeJsonLdProcessor._make_context | def _make_context(self, entity):
"""Get place and time info from the json for this entity."""
loc_context = None
time_context = None
# Look for time and place contexts.
for argument in entity["arguments"]:
if argument["type"] == "place":
entity_id = argument["value"]["@id"]
loc_entity = self.concept_dict[entity_id]
place = loc_entity.get("canonicalName")
if not place:
place = loc_entity['text']
geo_id = loc_entity.get('geoname_id')
loc_context = RefContext(name=place, db_refs={"GEOID": geo_id})
if argument["type"] == "time":
entity_id = argument["value"]["@id"]
temporal_entity = self.concept_dict[entity_id]
text = temporal_entity['mentions'][0]['text']
if len(temporal_entity.get("timeInterval", [])) < 1:
time_context = TimeContext(text=text)
continue
time = temporal_entity["timeInterval"][0]
start = datetime.strptime(time['start'], '%Y-%m-%dT%H:%M')
end = datetime.strptime(time['end'], '%Y-%m-%dT%H:%M')
duration = int(time['duration'])
time_context = TimeContext(text=text, start=start, end=end,
duration=duration)
# Put context together
context = None
if loc_context or time_context:
context = WorldContext(time=time_context, geo_location=loc_context)
return context | python | def _make_context(self, entity):
"""Get place and time info from the json for this entity."""
loc_context = None
time_context = None
# Look for time and place contexts.
for argument in entity["arguments"]:
if argument["type"] == "place":
entity_id = argument["value"]["@id"]
loc_entity = self.concept_dict[entity_id]
place = loc_entity.get("canonicalName")
if not place:
place = loc_entity['text']
geo_id = loc_entity.get('geoname_id')
loc_context = RefContext(name=place, db_refs={"GEOID": geo_id})
if argument["type"] == "time":
entity_id = argument["value"]["@id"]
temporal_entity = self.concept_dict[entity_id]
text = temporal_entity['mentions'][0]['text']
if len(temporal_entity.get("timeInterval", [])) < 1:
time_context = TimeContext(text=text)
continue
time = temporal_entity["timeInterval"][0]
start = datetime.strptime(time['start'], '%Y-%m-%dT%H:%M')
end = datetime.strptime(time['end'], '%Y-%m-%dT%H:%M')
duration = int(time['duration'])
time_context = TimeContext(text=text, start=start, end=end,
duration=duration)
# Put context together
context = None
if loc_context or time_context:
context = WorldContext(time=time_context, geo_location=loc_context)
return context | [
"def",
"_make_context",
"(",
"self",
",",
"entity",
")",
":",
"loc_context",
"=",
"None",
"time_context",
"=",
"None",
"# Look for time and place contexts.",
"for",
"argument",
"in",
"entity",
"[",
"\"arguments\"",
"]",
":",
"if",
"argument",
"[",
"\"type\"",
"]",
"==",
"\"place\"",
":",
"entity_id",
"=",
"argument",
"[",
"\"value\"",
"]",
"[",
"\"@id\"",
"]",
"loc_entity",
"=",
"self",
".",
"concept_dict",
"[",
"entity_id",
"]",
"place",
"=",
"loc_entity",
".",
"get",
"(",
"\"canonicalName\"",
")",
"if",
"not",
"place",
":",
"place",
"=",
"loc_entity",
"[",
"'text'",
"]",
"geo_id",
"=",
"loc_entity",
".",
"get",
"(",
"'geoname_id'",
")",
"loc_context",
"=",
"RefContext",
"(",
"name",
"=",
"place",
",",
"db_refs",
"=",
"{",
"\"GEOID\"",
":",
"geo_id",
"}",
")",
"if",
"argument",
"[",
"\"type\"",
"]",
"==",
"\"time\"",
":",
"entity_id",
"=",
"argument",
"[",
"\"value\"",
"]",
"[",
"\"@id\"",
"]",
"temporal_entity",
"=",
"self",
".",
"concept_dict",
"[",
"entity_id",
"]",
"text",
"=",
"temporal_entity",
"[",
"'mentions'",
"]",
"[",
"0",
"]",
"[",
"'text'",
"]",
"if",
"len",
"(",
"temporal_entity",
".",
"get",
"(",
"\"timeInterval\"",
",",
"[",
"]",
")",
")",
"<",
"1",
":",
"time_context",
"=",
"TimeContext",
"(",
"text",
"=",
"text",
")",
"continue",
"time",
"=",
"temporal_entity",
"[",
"\"timeInterval\"",
"]",
"[",
"0",
"]",
"start",
"=",
"datetime",
".",
"strptime",
"(",
"time",
"[",
"'start'",
"]",
",",
"'%Y-%m-%dT%H:%M'",
")",
"end",
"=",
"datetime",
".",
"strptime",
"(",
"time",
"[",
"'end'",
"]",
",",
"'%Y-%m-%dT%H:%M'",
")",
"duration",
"=",
"int",
"(",
"time",
"[",
"'duration'",
"]",
")",
"time_context",
"=",
"TimeContext",
"(",
"text",
"=",
"text",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"duration",
"=",
"duration",
")",
"# Put context together",
"context",
"=",
"None",
"if",
"loc_context",
"or",
"time_context",
":",
"context",
"=",
"WorldContext",
"(",
"time",
"=",
"time_context",
",",
"geo_location",
"=",
"loc_context",
")",
"return",
"context"
]
| Get place and time info from the json for this entity. | [
"Get",
"place",
"and",
"time",
"info",
"from",
"the",
"json",
"for",
"this",
"entity",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L105-L139 | train |
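The time handling in _make_context parses fixed-format timestamps; below is a standalone check with a made-up interval (the unit of 'duration' is not stated in this excerpt, so hours are assumed for the toy values).
from datetime import datetime

interval = {'start': '2018-01-01T00:00', 'end': '2018-02-01T00:00',
            'duration': '744'}  # made-up values
start = datetime.strptime(interval['start'], '%Y-%m-%dT%H:%M')
end = datetime.strptime(interval['end'], '%Y-%m-%dT%H:%M')
duration = int(interval['duration'])
# 31 days = 744 hours, so the toy interval is internally consistent.
assert (end - start).total_seconds() / 3600 == duration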
sorgerlab/indra | indra/sources/hume/processor.py | HumeJsonLdProcessor._make_concept | def _make_concept(self, entity):
"""Return Concept from a Hume entity."""
# Use the canonical name as the name of the Concept by default
name = self._sanitize(entity['canonicalName'])
# But if there is a trigger head text, we prefer that since
# it almost always results in a cleaner name
# This is removed for now since the head word seems to be too
# minimal for some concepts, e.g. it gives us only "security"
# for "food security".
"""
trigger = entity.get('trigger')
if trigger is not None:
head_text = trigger.get('head text')
if head_text is not None:
name = head_text
"""
# Save raw text and Hume scored groundings as db_refs
db_refs = _get_grounding(entity)
concept = Concept(name, db_refs=db_refs)
metadata = {arg['type']: arg['value']['@id']
for arg in entity['arguments']}
return concept, metadata | python | def _make_concept(self, entity):
"""Return Concept from a Hume entity."""
# Use the canonical name as the name of the Concept by default
name = self._sanitize(entity['canonicalName'])
# But if there is a trigger head text, we prefer that since
# it almost always results in a cleaner name
# This is removed for now since the head word seems to be too
# minimal for some concepts, e.g. it gives us only "security"
# for "food security".
"""
trigger = entity.get('trigger')
if trigger is not None:
head_text = trigger.get('head text')
if head_text is not None:
name = head_text
"""
# Save raw text and Hume scored groundings as db_refs
db_refs = _get_grounding(entity)
concept = Concept(name, db_refs=db_refs)
metadata = {arg['type']: arg['value']['@id']
for arg in entity['arguments']}
return concept, metadata | [
"def",
"_make_concept",
"(",
"self",
",",
"entity",
")",
":",
"# Use the canonical name as the name of the Concept by default",
"name",
"=",
"self",
".",
"_sanitize",
"(",
"entity",
"[",
"'canonicalName'",
"]",
")",
"# But if there is a trigger head text, we prefer that since",
"# it almost always results in a cleaner name",
"# This is removed for now since the head word seems to be too",
"# minimal for some concepts, e.g. it gives us only \"security\"",
"# for \"food security\".",
"\"\"\"\n trigger = entity.get('trigger')\n if trigger is not None:\n head_text = trigger.get('head text')\n if head_text is not None:\n name = head_text\n \"\"\"",
"# Save raw text and Hume scored groundings as db_refs",
"db_refs",
"=",
"_get_grounding",
"(",
"entity",
")",
"concept",
"=",
"Concept",
"(",
"name",
",",
"db_refs",
"=",
"db_refs",
")",
"metadata",
"=",
"{",
"arg",
"[",
"'type'",
"]",
":",
"arg",
"[",
"'value'",
"]",
"[",
"'@id'",
"]",
"for",
"arg",
"in",
"entity",
"[",
"'arguments'",
"]",
"}",
"return",
"concept",
",",
"metadata"
]
| Return Concept from a Hume entity. | [
"Return",
"Concept",
"from",
"a",
"Hume",
"entity",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L141-L163 | train |
sorgerlab/indra | indra/sources/hume/processor.py | HumeJsonLdProcessor._get_event_and_context | def _get_event_and_context(self, event, arg_type):
"""Return an INDRA Event based on an event entry."""
eid = _choose_id(event, arg_type)
ev = self.concept_dict[eid]
concept, metadata = self._make_concept(ev)
ev_delta = {'adjectives': [],
'states': get_states(ev),
'polarity': get_polarity(ev)}
context = self._make_context(ev)
event_obj = Event(concept, delta=ev_delta, context=context)
return event_obj | python | def _get_event_and_context(self, event, arg_type):
"""Return an INDRA Event based on an event entry."""
eid = _choose_id(event, arg_type)
ev = self.concept_dict[eid]
concept, metadata = self._make_concept(ev)
ev_delta = {'adjectives': [],
'states': get_states(ev),
'polarity': get_polarity(ev)}
context = self._make_context(ev)
event_obj = Event(concept, delta=ev_delta, context=context)
return event_obj | [
"def",
"_get_event_and_context",
"(",
"self",
",",
"event",
",",
"arg_type",
")",
":",
"eid",
"=",
"_choose_id",
"(",
"event",
",",
"arg_type",
")",
"ev",
"=",
"self",
".",
"concept_dict",
"[",
"eid",
"]",
"concept",
",",
"metadata",
"=",
"self",
".",
"_make_concept",
"(",
"ev",
")",
"ev_delta",
"=",
"{",
"'adjectives'",
":",
"[",
"]",
",",
"'states'",
":",
"get_states",
"(",
"ev",
")",
",",
"'polarity'",
":",
"get_polarity",
"(",
"ev",
")",
"}",
"context",
"=",
"self",
".",
"_make_context",
"(",
"ev",
")",
"event_obj",
"=",
"Event",
"(",
"concept",
",",
"delta",
"=",
"ev_delta",
",",
"context",
"=",
"context",
")",
"return",
"event_obj"
]
| Return an INDRA Event based on an event entry. | [
"Return",
"an",
"INDRA",
"Event",
"based",
"on",
"an",
"event",
"entry",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L165-L175 | train |
sorgerlab/indra | indra/sources/hume/processor.py | HumeJsonLdProcessor._get_evidence | def _get_evidence(self, event, adjectives):
"""Return the Evidence object for the INDRA Statement."""
provenance = event.get('provenance')
# First try looking up the full sentence through provenance
doc_id = provenance[0]['document']['@id']
sent_id = provenance[0]['sentence']
text = self.document_dict[doc_id]['sentences'][sent_id]
text = self._sanitize(text)
bounds = [provenance[0]['documentCharPositions'][k]
for k in ['start', 'end']]
annotations = {
'found_by': event.get('rule'),
'provenance': provenance,
'event_type': os.path.basename(event.get('type')),
'adjectives': adjectives,
'bounds': bounds
}
location = self.document_dict[doc_id]['location']
ev = Evidence(source_api='hume', text=text, annotations=annotations,
pmid=location)
return [ev] | python | def _get_evidence(self, event, adjectives):
"""Return the Evidence object for the INDRA Statement."""
provenance = event.get('provenance')
# First try looking up the full sentence through provenance
doc_id = provenance[0]['document']['@id']
sent_id = provenance[0]['sentence']
text = self.document_dict[doc_id]['sentences'][sent_id]
text = self._sanitize(text)
bounds = [provenance[0]['documentCharPositions'][k]
for k in ['start', 'end']]
annotations = {
'found_by': event.get('rule'),
'provenance': provenance,
'event_type': os.path.basename(event.get('type')),
'adjectives': adjectives,
'bounds': bounds
}
location = self.document_dict[doc_id]['location']
ev = Evidence(source_api='hume', text=text, annotations=annotations,
pmid=location)
return [ev] | [
"def",
"_get_evidence",
"(",
"self",
",",
"event",
",",
"adjectives",
")",
":",
"provenance",
"=",
"event",
".",
"get",
"(",
"'provenance'",
")",
"# First try looking up the full sentence through provenance",
"doc_id",
"=",
"provenance",
"[",
"0",
"]",
"[",
"'document'",
"]",
"[",
"'@id'",
"]",
"sent_id",
"=",
"provenance",
"[",
"0",
"]",
"[",
"'sentence'",
"]",
"text",
"=",
"self",
".",
"document_dict",
"[",
"doc_id",
"]",
"[",
"'sentences'",
"]",
"[",
"sent_id",
"]",
"text",
"=",
"self",
".",
"_sanitize",
"(",
"text",
")",
"bounds",
"=",
"[",
"provenance",
"[",
"0",
"]",
"[",
"'documentCharPositions'",
"]",
"[",
"k",
"]",
"for",
"k",
"in",
"[",
"'start'",
",",
"'end'",
"]",
"]",
"annotations",
"=",
"{",
"'found_by'",
":",
"event",
".",
"get",
"(",
"'rule'",
")",
",",
"'provenance'",
":",
"provenance",
",",
"'event_type'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"event",
".",
"get",
"(",
"'type'",
")",
")",
",",
"'adjectives'",
":",
"adjectives",
",",
"'bounds'",
":",
"bounds",
"}",
"location",
"=",
"self",
".",
"document_dict",
"[",
"doc_id",
"]",
"[",
"'location'",
"]",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'hume'",
",",
"text",
"=",
"text",
",",
"annotations",
"=",
"annotations",
",",
"pmid",
"=",
"location",
")",
"return",
"[",
"ev",
"]"
]
| Return the Evidence object for the INDRA Statement. | [
"Return",
"the",
"Evidence",
"object",
"for",
"the",
"INDRA",
"Statement",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L177-L199 | train |
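A hedged example of the Evidence construction pattern used in _get_evidence, with made-up provenance values; only keyword arguments that appear above are used.
from indra.statements import Evidence

annotations = {'found_by': 'example_rule',   # made-up rule name
               'provenance': [],             # trimmed for brevity
               'event_type': 'Event',
               'adjectives': [],
               'bounds': [10, 42]}
ev = Evidence(source_api='hume', text='Drought increased food insecurity.',
              annotations=annotations, pmid='doc_001.json')
print(ev.source_api, ev.pmid)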
sorgerlab/indra | indra/sources/medscan/processor.py | _is_statement_in_list | def _is_statement_in_list(new_stmt, old_stmt_list):
"""Return True of given statement is equivalent to on in a list
Determines whether the statement is equivalent to any statement in the
given list of statements, with equivalency determined by Statement's
equals method.
Parameters
----------
new_stmt : indra.statements.Statement
The statement to compare with
old_stmt_list : list[indra.statements.Statement]
The statement list whose entries we compare with statement
Returns
-------
in_list : bool
True if statement is equivalent to any statements in the list
"""
for old_stmt in old_stmt_list:
if old_stmt.equals(new_stmt):
return True
elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt):
# If we're comparing a complex, make sure the agents are sorted.
if isinstance(new_stmt, Complex):
agent_pairs = zip(old_stmt.sorted_members(),
new_stmt.sorted_members())
else:
agent_pairs = zip(old_stmt.agent_list(), new_stmt.agent_list())
# Compare agent-by-agent.
for ag_old, ag_new in agent_pairs:
s_old = set(ag_old.db_refs.items())
s_new = set(ag_new.db_refs.items())
# If they're equal this isn't the one we're interested in.
if s_old == s_new:
continue
# If the new statement has nothing new to offer, just ignore it
if s_old > s_new:
return True
# If the new statement does have something new, add it to the
# existing statement. And then ignore it.
if s_new > s_old:
ag_old.db_refs.update(ag_new.db_refs)
return True
# If this is a case where different CHEBI ids were mapped to
# the same entity, set the agent name to the CHEBI id.
if _fix_different_refs(ag_old, ag_new, 'CHEBI'):
# Check to make sure the newly described statement does
# not match anything.
return _is_statement_in_list(new_stmt, old_stmt_list)
# If this is a case, like above, but with UMLS IDs, do the same
# thing as above. This will likely never be improved.
if _fix_different_refs(ag_old, ag_new, 'UMLS'):
# Check to make sure the newly described statement does
# not match anything.
return _is_statement_in_list(new_stmt, old_stmt_list)
logger.warning("Found an unexpected kind of duplicate. "
"Ignoring it.")
return True
# This means all the agents matched, which can happen if the
# original issue was the ordering of agents in a Complex.
return True
elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True):
# Check to see if we can improve the annotation of the existing
# statement.
e_old = old_stmt.evidence[0]
e_new = new_stmt.evidence[0]
if e_old.annotations['last_verb'] is None:
e_old.annotations['last_verb'] = e_new.annotations['last_verb']
# If the evidence is "the same", modulo annotations, just ignore it
if e_old.get_source_hash(True) == e_new.get_source_hash(True):
return True
return False | python | def _is_statement_in_list(new_stmt, old_stmt_list):
"""Return True of given statement is equivalent to on in a list
Determines whether the statement is equivalent to any statement in the
given list of statements, with equivalency determined by Statement's
equals method.
Parameters
----------
new_stmt : indra.statements.Statement
The statement to compare with
old_stmt_list : list[indra.statements.Statement]
The statement list whose entries we compare with statement
Returns
-------
in_list : bool
True if statement is equivalent to any statements in the list
"""
for old_stmt in old_stmt_list:
if old_stmt.equals(new_stmt):
return True
elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt):
# If we're comparing a complex, make sure the agents are sorted.
if isinstance(new_stmt, Complex):
agent_pairs = zip(old_stmt.sorted_members(),
new_stmt.sorted_members())
else:
agent_pairs = zip(old_stmt.agent_list(), new_stmt.agent_list())
# Compare agent-by-agent.
for ag_old, ag_new in agent_pairs:
s_old = set(ag_old.db_refs.items())
s_new = set(ag_new.db_refs.items())
# If they're equal this isn't the one we're interested in.
if s_old == s_new:
continue
# If the new statement has nothing new to offer, just ignore it
if s_old > s_new:
return True
# If the new statement does have something new, add it to the
# existing statement. And then ignore it.
if s_new > s_old:
ag_old.db_refs.update(ag_new.db_refs)
return True
# If this is a case where different CHEBI ids were mapped to
# the same entity, set the agent name to the CHEBI id.
if _fix_different_refs(ag_old, ag_new, 'CHEBI'):
# Check to make sure the newly described statement does
# not match anything.
return _is_statement_in_list(new_stmt, old_stmt_list)
# If this is a case, like above, but with UMLS IDs, do the same
# thing as above. This will likely never be improved.
if _fix_different_refs(ag_old, ag_new, 'UMLS'):
# Check to make sure the newly described statement does
# not match anything.
return _is_statement_in_list(new_stmt, old_stmt_list)
logger.warning("Found an unexpected kind of duplicate. "
"Ignoring it.")
return True
# This means all the agents matched, which can happen if the
# original issue was the ordering of agents in a Complex.
return True
elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True):
# Check to see if we can improve the annotation of the existing
# statement.
e_old = old_stmt.evidence[0]
e_new = new_stmt.evidence[0]
if e_old.annotations['last_verb'] is None:
e_old.annotations['last_verb'] = e_new.annotations['last_verb']
# If the evidence is "the same", modulo annotations, just ignore it
if e_old.get_source_hash(True) == e_new.get_source_hash(True):
return True
return False | [
"def",
"_is_statement_in_list",
"(",
"new_stmt",
",",
"old_stmt_list",
")",
":",
"for",
"old_stmt",
"in",
"old_stmt_list",
":",
"if",
"old_stmt",
".",
"equals",
"(",
"new_stmt",
")",
":",
"return",
"True",
"elif",
"old_stmt",
".",
"evidence_equals",
"(",
"new_stmt",
")",
"and",
"old_stmt",
".",
"matches",
"(",
"new_stmt",
")",
":",
"# If we're comparing a complex, make sure the agents are sorted.",
"if",
"isinstance",
"(",
"new_stmt",
",",
"Complex",
")",
":",
"agent_pairs",
"=",
"zip",
"(",
"old_stmt",
".",
"sorted_members",
"(",
")",
",",
"new_stmt",
".",
"sorted_members",
"(",
")",
")",
"else",
":",
"agent_pairs",
"=",
"zip",
"(",
"old_stmt",
".",
"agent_list",
"(",
")",
",",
"new_stmt",
".",
"agent_list",
"(",
")",
")",
"# Compare agent-by-agent.",
"for",
"ag_old",
",",
"ag_new",
"in",
"agent_pairs",
":",
"s_old",
"=",
"set",
"(",
"ag_old",
".",
"db_refs",
".",
"items",
"(",
")",
")",
"s_new",
"=",
"set",
"(",
"ag_new",
".",
"db_refs",
".",
"items",
"(",
")",
")",
"# If they're equal this isn't the one we're interested in.",
"if",
"s_old",
"==",
"s_new",
":",
"continue",
"# If the new statement has nothing new to offer, just ignore it",
"if",
"s_old",
">",
"s_new",
":",
"return",
"True",
"# If the new statement does have something new, add it to the",
"# existing statement. And then ignore it.",
"if",
"s_new",
">",
"s_old",
":",
"ag_old",
".",
"db_refs",
".",
"update",
"(",
"ag_new",
".",
"db_refs",
")",
"return",
"True",
"# If this is a case where different CHEBI ids were mapped to",
"# the same entity, set the agent name to the CHEBI id.",
"if",
"_fix_different_refs",
"(",
"ag_old",
",",
"ag_new",
",",
"'CHEBI'",
")",
":",
"# Check to make sure the newly described statement does",
"# not match anything.",
"return",
"_is_statement_in_list",
"(",
"new_stmt",
",",
"old_stmt_list",
")",
"# If this is a case, like above, but with UMLS IDs, do the same",
"# thing as above. This will likely never be improved.",
"if",
"_fix_different_refs",
"(",
"ag_old",
",",
"ag_new",
",",
"'UMLS'",
")",
":",
"# Check to make sure the newly described statement does",
"# not match anything.",
"return",
"_is_statement_in_list",
"(",
"new_stmt",
",",
"old_stmt_list",
")",
"logger",
".",
"warning",
"(",
"\"Found an unexpected kind of duplicate. \"",
"\"Ignoring it.\"",
")",
"return",
"True",
"# This means all the agents matched, which can happen if the",
"# original issue was the ordering of agents in a Complex.",
"return",
"True",
"elif",
"old_stmt",
".",
"get_hash",
"(",
"True",
",",
"True",
")",
"==",
"new_stmt",
".",
"get_hash",
"(",
"True",
",",
"True",
")",
":",
"# Check to see if we can improve the annotation of the existing",
"# statement.",
"e_old",
"=",
"old_stmt",
".",
"evidence",
"[",
"0",
"]",
"e_new",
"=",
"new_stmt",
".",
"evidence",
"[",
"0",
"]",
"if",
"e_old",
".",
"annotations",
"[",
"'last_verb'",
"]",
"is",
"None",
":",
"e_old",
".",
"annotations",
"[",
"'last_verb'",
"]",
"=",
"e_new",
".",
"annotations",
"[",
"'last_verb'",
"]",
"# If the evidence is \"the same\", modulo annotations, just ignore it",
"if",
"e_old",
".",
"get_source_hash",
"(",
"True",
")",
"==",
"e_new",
".",
"get_source_hash",
"(",
"True",
")",
":",
"return",
"True",
"return",
"False"
]
| Return True if given statement is equivalent to one in a list
Determines whether the statement is equivalent to any statement in the
given list of statements, with equivalency determined by Statement's
equals method.
Parameters
----------
new_stmt : indra.statements.Statement
The statement to compare with
old_stmt_list : list[indra.statements.Statement]
The statement list whose entries we compare with statement
Returns
-------
in_list : bool
True if statement is equivalent to any statements in the list | [
"Return",
"True",
"of",
"given",
"statement",
"is",
"equivalent",
"to",
"on",
"in",
"a",
"list"
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L62-L145 | train |
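The superset checks in _is_statement_in_list use Python set comparison on db_refs items; a small standalone example of that idiom:
# db_refs rendered as sets of (namespace, id) pairs.
s_old = {('TEXT', 'ERK'), ('FPLX', 'ERK')}
s_new = {('TEXT', 'ERK')}
print(s_old > s_new)  # True: old refs strictly contain the new ones, so ignore new
print(s_new > s_old)  # False
s_new.add(('HGNC', '1234'))
print(s_old > s_new, s_new > s_old)  # False False: neither contains the other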
sorgerlab/indra | indra/sources/medscan/processor.py | normalize_medscan_name | def normalize_medscan_name(name):
"""Removes the "complex" and "complex complex" suffixes from a medscan
agent name so that it better corresponds with the grounding map.
Parameters
----------
name: str
The Medscan agent name
Returns
-------
norm_name: str
The Medscan agent name with the "complex" and "complex complex"
suffixes removed.
"""
suffix = ' complex'
for i in range(2):
if name.endswith(suffix):
name = name[:-len(suffix)]
return name | python | def normalize_medscan_name(name):
"""Removes the "complex" and "complex complex" suffixes from a medscan
agent name so that it better corresponds with the grounding map.
Parameters
----------
name: str
The Medscan agent name
Returns
-------
norm_name: str
The Medscan agent name with the "complex" and "complex complex"
suffixes removed.
"""
suffix = ' complex'
for i in range(2):
if name.endswith(suffix):
name = name[:-len(suffix)]
return name | [
"def",
"normalize_medscan_name",
"(",
"name",
")",
":",
"suffix",
"=",
"' complex'",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"if",
"name",
".",
"endswith",
"(",
"suffix",
")",
":",
"name",
"=",
"name",
"[",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
"return",
"name"
]
| Removes the "complex" and "complex complex" suffixes from a medscan
agent name so that it better corresponds with the grounding map.
Parameters
----------
name: str
The Medscan agent name
Returns
-------
norm_name: str
The Medscan agent name with the "complex" and "complex complex"
suffixes removed. | [
"Removes",
"the",
"complex",
"and",
"complex",
"complex",
"suffixes",
"from",
"a",
"medscan",
"agent",
"name",
"so",
"that",
"it",
"better",
"corresponds",
"with",
"the",
"grounding",
"map",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L893-L913 | train |
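Expected behaviour of normalize_medscan_name on made-up names, following directly from the two-pass loop above:
from indra.sources.medscan.processor import normalize_medscan_name

assert normalize_medscan_name('ERK complex') == 'ERK'
assert normalize_medscan_name('AP-1 complex complex') == 'AP-1'
# Only two passes are made, so a third suffix would survive.
assert normalize_medscan_name('X complex complex complex') == 'X complex'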
sorgerlab/indra | indra/sources/medscan/processor.py | _urn_to_db_refs | def _urn_to_db_refs(urn):
"""Converts a Medscan URN to an INDRA db_refs dictionary with grounding
information.
Parameters
----------
urn : str
A Medscan URN
Returns
-------
db_refs : dict
A dictionary with grounding information, mapping databases to database
identifiers. If the Medscan URN is not recognized, returns an empty
dictionary.
db_name : str
The Famplex name, if available; otherwise the HGNC name if available;
otherwise None
"""
# Convert a urn to a db_refs dictionary
if urn is None:
return {}, None
m = URN_PATT.match(urn)
if m is None:
return None, None
urn_type, urn_id = m.groups()
db_refs = {}
db_name = None
# TODO: support more types of URNs
if urn_type == 'agi-cas':
# Identifier is CAS, convert to CHEBI
chebi_id = get_chebi_id_from_cas(urn_id)
if chebi_id:
db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id
db_name = get_chebi_name_from_id(chebi_id)
elif urn_type == 'agi-llid':
# This is an Entrez ID, convert to HGNC
hgnc_id = get_hgnc_from_entrez(urn_id)
if hgnc_id is not None:
db_refs['HGNC'] = hgnc_id
# Convert the HGNC ID to a Uniprot ID
uniprot_id = get_uniprot_id(hgnc_id)
if uniprot_id is not None:
db_refs['UP'] = uniprot_id
# Try to lookup HGNC name; if it's available, set it to the
# agent name
db_name = get_hgnc_name(hgnc_id)
elif urn_type in ['agi-meshdis', 'agi-ncimorgan', 'agi-ncimtissue',
'agi-ncimcelltype']:
if urn_id.startswith('C') and urn_id[1:].isdigit():
# Identifier is probably UMLS
db_refs['UMLS'] = urn_id
else:
# Identifier is MESH
urn_mesh_name = unquote(urn_id)
mesh_id, mesh_name = mesh_client.get_mesh_id_name(urn_mesh_name)
if mesh_id:
db_refs['MESH'] = mesh_id
db_name = mesh_name
else:
db_name = urn_mesh_name
elif urn_type == 'agi-gocomplex':
# Identifier is GO
db_refs['GO'] = 'GO:%s' % urn_id
elif urn_type == 'agi-go':
# Identifier is GO
db_refs['GO'] = 'GO:%s' % urn_id
# If we have a GO or MESH grounding, see if there is a corresponding
# Famplex grounding
db_sometimes_maps_to_famplex = ['GO', 'MESH']
for db in db_sometimes_maps_to_famplex:
if db in db_refs:
key = (db, db_refs[db])
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If the urn corresponds to an eccode, ground to Famplex if that eccode
# is in the Famplex equivalences table
if urn.startswith('urn:agi-enz'):
tokens = urn.split(':')
eccode = tokens[2]
key = ('ECCODE', eccode)
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If the Medscan URN itself maps to a Famplex id, add a Famplex grounding
key = ('MEDSCAN', urn)
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If there is a Famplex grounding, use Famplex for entity name
if 'FPLX' in db_refs:
db_name = db_refs['FPLX']
elif 'GO' in db_refs:
db_name = go_client.get_go_label(db_refs['GO'])
return db_refs, db_name | python | def _urn_to_db_refs(urn):
"""Converts a Medscan URN to an INDRA db_refs dictionary with grounding
information.
Parameters
----------
urn : str
A Medscan URN
Returns
-------
db_refs : dict
A dictionary with grounding information, mapping databases to database
identifiers. If the Medscan URN is not recognized, returns an empty
dictionary.
db_name : str
The Famplex name, if available; otherwise the HGNC name if available;
otherwise None
"""
# Convert a urn to a db_refs dictionary
if urn is None:
return {}, None
m = URN_PATT.match(urn)
if m is None:
return None, None
urn_type, urn_id = m.groups()
db_refs = {}
db_name = None
# TODO: support more types of URNs
if urn_type == 'agi-cas':
# Identifier is CAS, convert to CHEBI
chebi_id = get_chebi_id_from_cas(urn_id)
if chebi_id:
db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id
db_name = get_chebi_name_from_id(chebi_id)
elif urn_type == 'agi-llid':
# This is an Entrez ID, convert to HGNC
hgnc_id = get_hgnc_from_entrez(urn_id)
if hgnc_id is not None:
db_refs['HGNC'] = hgnc_id
# Convert the HGNC ID to a Uniprot ID
uniprot_id = get_uniprot_id(hgnc_id)
if uniprot_id is not None:
db_refs['UP'] = uniprot_id
# Try to lookup HGNC name; if it's available, set it to the
# agent name
db_name = get_hgnc_name(hgnc_id)
elif urn_type in ['agi-meshdis', 'agi-ncimorgan', 'agi-ncimtissue',
'agi-ncimcelltype']:
if urn_id.startswith('C') and urn_id[1:].isdigit():
# Identifier is probably UMLS
db_refs['UMLS'] = urn_id
else:
# Identifier is MESH
urn_mesh_name = unquote(urn_id)
mesh_id, mesh_name = mesh_client.get_mesh_id_name(urn_mesh_name)
if mesh_id:
db_refs['MESH'] = mesh_id
db_name = mesh_name
else:
db_name = urn_mesh_name
elif urn_type == 'agi-gocomplex':
# Identifier is GO
db_refs['GO'] = 'GO:%s' % urn_id
elif urn_type == 'agi-go':
# Identifier is GO
db_refs['GO'] = 'GO:%s' % urn_id
# If we have a GO or MESH grounding, see if there is a corresponding
# Famplex grounding
db_sometimes_maps_to_famplex = ['GO', 'MESH']
for db in db_sometimes_maps_to_famplex:
if db in db_refs:
key = (db, db_refs[db])
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If the urn corresponds to an eccode, ground to Famplex if that eccode
# is in the Famplex equivalences table
if urn.startswith('urn:agi-enz'):
tokens = urn.split(':')
eccode = tokens[2]
key = ('ECCODE', eccode)
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If the Medscan URN itself maps to a Famplex id, add a Famplex grounding
key = ('MEDSCAN', urn)
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If there is a Famplex grounding, use Famplex for entity name
if 'FPLX' in db_refs:
db_name = db_refs['FPLX']
elif 'GO' in db_refs:
db_name = go_client.get_go_label(db_refs['GO'])
return db_refs, db_name | [
"def",
"_urn_to_db_refs",
"(",
"urn",
")",
":",
"# Convert a urn to a db_refs dictionary",
"if",
"urn",
"is",
"None",
":",
"return",
"{",
"}",
",",
"None",
"m",
"=",
"URN_PATT",
".",
"match",
"(",
"urn",
")",
"if",
"m",
"is",
"None",
":",
"return",
"None",
",",
"None",
"urn_type",
",",
"urn_id",
"=",
"m",
".",
"groups",
"(",
")",
"db_refs",
"=",
"{",
"}",
"db_name",
"=",
"None",
"# TODO: support more types of URNs",
"if",
"urn_type",
"==",
"'agi-cas'",
":",
"# Identifier is CAS, convert to CHEBI",
"chebi_id",
"=",
"get_chebi_id_from_cas",
"(",
"urn_id",
")",
"if",
"chebi_id",
":",
"db_refs",
"[",
"'CHEBI'",
"]",
"=",
"'CHEBI:%s'",
"%",
"chebi_id",
"db_name",
"=",
"get_chebi_name_from_id",
"(",
"chebi_id",
")",
"elif",
"urn_type",
"==",
"'agi-llid'",
":",
"# This is an Entrez ID, convert to HGNC",
"hgnc_id",
"=",
"get_hgnc_from_entrez",
"(",
"urn_id",
")",
"if",
"hgnc_id",
"is",
"not",
"None",
":",
"db_refs",
"[",
"'HGNC'",
"]",
"=",
"hgnc_id",
"# Convert the HGNC ID to a Uniprot ID",
"uniprot_id",
"=",
"get_uniprot_id",
"(",
"hgnc_id",
")",
"if",
"uniprot_id",
"is",
"not",
"None",
":",
"db_refs",
"[",
"'UP'",
"]",
"=",
"uniprot_id",
"# Try to lookup HGNC name; if it's available, set it to the",
"# agent name",
"db_name",
"=",
"get_hgnc_name",
"(",
"hgnc_id",
")",
"elif",
"urn_type",
"in",
"[",
"'agi-meshdis'",
",",
"'agi-ncimorgan'",
",",
"'agi-ncimtissue'",
",",
"'agi-ncimcelltype'",
"]",
":",
"if",
"urn_id",
".",
"startswith",
"(",
"'C'",
")",
"and",
"urn_id",
"[",
"1",
":",
"]",
".",
"isdigit",
"(",
")",
":",
"# Identifier is probably UMLS",
"db_refs",
"[",
"'UMLS'",
"]",
"=",
"urn_id",
"else",
":",
"# Identifier is MESH",
"urn_mesh_name",
"=",
"unquote",
"(",
"urn_id",
")",
"mesh_id",
",",
"mesh_name",
"=",
"mesh_client",
".",
"get_mesh_id_name",
"(",
"urn_mesh_name",
")",
"if",
"mesh_id",
":",
"db_refs",
"[",
"'MESH'",
"]",
"=",
"mesh_id",
"db_name",
"=",
"mesh_name",
"else",
":",
"db_name",
"=",
"urn_mesh_name",
"elif",
"urn_type",
"==",
"'agi-gocomplex'",
":",
"# Identifier is GO",
"db_refs",
"[",
"'GO'",
"]",
"=",
"'GO:%s'",
"%",
"urn_id",
"elif",
"urn_type",
"==",
"'agi-go'",
":",
"# Identifier is GO",
"db_refs",
"[",
"'GO'",
"]",
"=",
"'GO:%s'",
"%",
"urn_id",
"# If we have a GO or MESH grounding, see if there is a corresponding",
"# Famplex grounding",
"db_sometimes_maps_to_famplex",
"=",
"[",
"'GO'",
",",
"'MESH'",
"]",
"for",
"db",
"in",
"db_sometimes_maps_to_famplex",
":",
"if",
"db",
"in",
"db_refs",
":",
"key",
"=",
"(",
"db",
",",
"db_refs",
"[",
"db",
"]",
")",
"if",
"key",
"in",
"famplex_map",
":",
"db_refs",
"[",
"'FPLX'",
"]",
"=",
"famplex_map",
"[",
"key",
"]",
"# If the urn corresponds to an eccode, groudn to famplex if that eccode",
"# is in the Famplex equivalences table",
"if",
"urn",
".",
"startswith",
"(",
"'urn:agi-enz'",
")",
":",
"tokens",
"=",
"urn",
".",
"split",
"(",
"':'",
")",
"eccode",
"=",
"tokens",
"[",
"2",
"]",
"key",
"=",
"(",
"'ECCODE'",
",",
"eccode",
")",
"if",
"key",
"in",
"famplex_map",
":",
"db_refs",
"[",
"'FPLX'",
"]",
"=",
"famplex_map",
"[",
"key",
"]",
"# If the Medscan URN itself maps to a Famplex id, add a Famplex grounding",
"key",
"=",
"(",
"'MEDSCAN'",
",",
"urn",
")",
"if",
"key",
"in",
"famplex_map",
":",
"db_refs",
"[",
"'FPLX'",
"]",
"=",
"famplex_map",
"[",
"key",
"]",
"# If there is a Famplex grounding, use Famplex for entity name",
"if",
"'FPLX'",
"in",
"db_refs",
":",
"db_name",
"=",
"db_refs",
"[",
"'FPLX'",
"]",
"elif",
"'GO'",
"in",
"db_refs",
":",
"db_name",
"=",
"go_client",
".",
"get_go_label",
"(",
"db_refs",
"[",
"'GO'",
"]",
")",
"return",
"db_refs",
",",
"db_name"
]
| Converts a Medscan URN to an INDRA db_refs dictionary with grounding
information.
Parameters
----------
urn : str
A Medscan URN
Returns
-------
db_refs : dict
A dictionary with grounding information, mapping databases to database
identifiers. If the Medscan URN is not recognized, returns an empty
dictionary.
db_name : str
The Famplex name, if available; otherwise the HGNC name if available;
otherwise None | [
"Converts",
"a",
"Medscan",
"URN",
"to",
"an",
"INDRA",
"db_refs",
"dictionary",
"with",
"grounding",
"information",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L976-L1079 | train |
sorgerlab/indra | indra/sources/medscan/processor.py | _untag_sentence | def _untag_sentence(tagged_sentence):
"""Removes all tags in the sentence, returning the original sentence
without Medscan annotations.
Parameters
----------
tagged_sentence : str
The tagged sentence
Returns
-------
untagged_sentence : str
Sentence with tags and annotations stripped out
"""
untagged_sentence = TAG_PATT.sub('\\2', tagged_sentence)
clean_sentence = JUNK_PATT.sub('', untagged_sentence)
return clean_sentence.strip() | python | def _untag_sentence(tagged_sentence):
"""Removes all tags in the sentence, returning the original sentence
without Medscan annotations.
Parameters
----------
tagged_sentence : str
The tagged sentence
Returns
-------
untagged_sentence : str
Sentence with tags and annotations stripped out
"""
untagged_sentence = TAG_PATT.sub('\\2', tagged_sentence)
clean_sentence = JUNK_PATT.sub('', untagged_sentence)
return clean_sentence.strip() | [
"def",
"_untag_sentence",
"(",
"tagged_sentence",
")",
":",
"untagged_sentence",
"=",
"TAG_PATT",
".",
"sub",
"(",
"'\\\\2'",
",",
"tagged_sentence",
")",
"clean_sentence",
"=",
"JUNK_PATT",
".",
"sub",
"(",
"''",
",",
"untagged_sentence",
")",
"return",
"clean_sentence",
".",
"strip",
"(",
")"
]
| Removes all tags in the sentence, returning the original sentence
without Medscan annotations.
Parameters
----------
tagged_sentence : str
The tagged sentence
Returns
-------
untagged_sentence : str
Sentence with tags and annotations stripped out | [
"Removes",
"all",
"tags",
"in",
"the",
"sentence",
"returning",
"the",
"original",
"sentence",
"without",
"Medscan",
"annotations",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L1109-L1125 | train |
sorgerlab/indra | indra/sources/medscan/processor.py | _extract_sentence_tags | def _extract_sentence_tags(tagged_sentence):
"""Given a tagged sentence, extracts a dictionary mapping tags to the words
or phrases that they tag.
Parameters
----------
tagged_sentence : str
The sentence with Medscan annotations and tags
Returns
-------
tags : dict
A dictionary mapping tags to the words or phrases that they tag.
"""
untagged_sentence = _untag_sentence(tagged_sentence)
decluttered_sentence = JUNK_PATT.sub('', tagged_sentence)
tags = {}
# Iteratively look for all matches of this pattern
endpos = 0
while True:
match = TAG_PATT.search(decluttered_sentence, pos=endpos)
if not match:
break
endpos = match.end()
text = match.group(2)
text = text.replace('CONTEXT', '')
text = text.replace('GLOSSARY', '')
text = text.strip()
start = untagged_sentence.index(text)
stop = start + len(text)
tag_key = match.group(1)
if ',' in tag_key:
for sub_key in tag_key.split(','):
if sub_key == '0':
continue
tags[sub_key] = {'text': text, 'bounds': (start, stop)}
else:
tags[tag_key] = {'text': text, 'bounds': (start, stop)}
return tags | python | def _extract_sentence_tags(tagged_sentence):
"""Given a tagged sentence, extracts a dictionary mapping tags to the words
or phrases that they tag.
Parameters
----------
tagged_sentence : str
The sentence with Medscan annotations and tags
Returns
-------
tags : dict
A dictionary mapping tags to the words or phrases that they tag.
"""
untagged_sentence = _untag_sentence(tagged_sentence)
decluttered_sentence = JUNK_PATT.sub('', tagged_sentence)
tags = {}
# Iteratively look for all matches of this pattern
endpos = 0
while True:
match = TAG_PATT.search(decluttered_sentence, pos=endpos)
if not match:
break
endpos = match.end()
text = match.group(2)
text = text.replace('CONTEXT', '')
text = text.replace('GLOSSARY', '')
text = text.strip()
start = untagged_sentence.index(text)
stop = start + len(text)
tag_key = match.group(1)
if ',' in tag_key:
for sub_key in tag_key.split(','):
if sub_key == '0':
continue
tags[sub_key] = {'text': text, 'bounds': (start, stop)}
else:
tags[tag_key] = {'text': text, 'bounds': (start, stop)}
return tags | [
"def",
"_extract_sentence_tags",
"(",
"tagged_sentence",
")",
":",
"untagged_sentence",
"=",
"_untag_sentence",
"(",
"tagged_sentence",
")",
"decluttered_sentence",
"=",
"JUNK_PATT",
".",
"sub",
"(",
"''",
",",
"tagged_sentence",
")",
"tags",
"=",
"{",
"}",
"# Iteratively look for all matches of this pattern",
"endpos",
"=",
"0",
"while",
"True",
":",
"match",
"=",
"TAG_PATT",
".",
"search",
"(",
"decluttered_sentence",
",",
"pos",
"=",
"endpos",
")",
"if",
"not",
"match",
":",
"break",
"endpos",
"=",
"match",
".",
"end",
"(",
")",
"text",
"=",
"match",
".",
"group",
"(",
"2",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'CONTEXT'",
",",
"''",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'GLOSSARY'",
",",
"''",
")",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"start",
"=",
"untagged_sentence",
".",
"index",
"(",
"text",
")",
"stop",
"=",
"start",
"+",
"len",
"(",
"text",
")",
"tag_key",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"','",
"in",
"tag_key",
":",
"for",
"sub_key",
"in",
"tag_key",
".",
"split",
"(",
"','",
")",
":",
"if",
"sub_key",
"==",
"'0'",
":",
"continue",
"tags",
"[",
"sub_key",
"]",
"=",
"{",
"'text'",
":",
"text",
",",
"'bounds'",
":",
"(",
"start",
",",
"stop",
")",
"}",
"else",
":",
"tags",
"[",
"tag_key",
"]",
"=",
"{",
"'text'",
":",
"text",
",",
"'bounds'",
":",
"(",
"start",
",",
"stop",
")",
"}",
"return",
"tags"
]
| Given a tagged sentence, extracts a dictionary mapping tags to the words
or phrases that they tag.
Parameters
----------
tagged_sentence : str
The sentence with Medscan annotations and tags
Returns
-------
tags : dict
A dictionary mapping tags to the words or phrases that they tag. | [
"Given",
"a",
"tagged",
"sentence",
"extracts",
"a",
"dictionary",
"mapping",
"tags",
"to",
"the",
"words",
"or",
"phrases",
"that",
"they",
"tag",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L1128-L1168 | train |
sorgerlab/indra | indra/sources/medscan/processor.py | ProteinSiteInfo.get_sites | def get_sites(self):
"""Parse the site-text string and return a list of sites.
Returns
-------
sites : list[Site]
A list of position-residue pairs corresponding to the site-text
"""
st = self.site_text
suffixes = [' residue', ' residues', ',', '/']
for suffix in suffixes:
if st.endswith(suffix):
st = st[:-len(suffix)]
assert(not st.endswith(','))
# Strip parentheses
st = st.replace('(', '')
st = st.replace(')', '')
st = st.replace(' or ', ' and ') # Treat 'and' and 'or' the same
sites = []
parts = st.split(' and ')
for part in parts:
if part.endswith(','):
part = part[:-1]
if len(part.strip()) > 0:
sites.extend(ReachProcessor._parse_site_text(part.strip()))
return sites | python | def get_sites(self):
"""Parse the site-text string and return a list of sites.
Returns
-------
sites : list[Site]
A list of position-residue pairs corresponding to the site-text
"""
st = self.site_text
suffixes = [' residue', ' residues', ',', '/']
for suffix in suffixes:
if st.endswith(suffix):
st = st[:-len(suffix)]
assert(not st.endswith(','))
# Strip parentheses
st = st.replace('(', '')
st = st.replace(')', '')
st = st.replace(' or ', ' and ') # Treat 'and' and 'or' the same
sites = []
parts = st.split(' and ')
for part in parts:
if part.endswith(','):
part = part[:-1]
if len(part.strip()) > 0:
sites.extend(ReachProcessor._parse_site_text(part.strip()))
return sites | [
"def",
"get_sites",
"(",
"self",
")",
":",
"st",
"=",
"self",
".",
"site_text",
"suffixes",
"=",
"[",
"' residue'",
",",
"' residues'",
",",
"','",
",",
"'/'",
"]",
"for",
"suffix",
"in",
"suffixes",
":",
"if",
"st",
".",
"endswith",
"(",
"suffix",
")",
":",
"st",
"=",
"st",
"[",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
"assert",
"(",
"not",
"st",
".",
"endswith",
"(",
"','",
")",
")",
"# Strip parentheses",
"st",
"=",
"st",
".",
"replace",
"(",
"'('",
",",
"''",
")",
"st",
"=",
"st",
".",
"replace",
"(",
"')'",
",",
"''",
")",
"st",
"=",
"st",
".",
"replace",
"(",
"' or '",
",",
"' and '",
")",
"# Treat end and or the same",
"sites",
"=",
"[",
"]",
"parts",
"=",
"st",
".",
"split",
"(",
"' and '",
")",
"for",
"part",
"in",
"parts",
":",
"if",
"part",
".",
"endswith",
"(",
"','",
")",
":",
"part",
"=",
"part",
"[",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"part",
".",
"strip",
"(",
")",
")",
">",
"0",
":",
"sites",
".",
"extend",
"(",
"ReachProcessor",
".",
"_parse_site_text",
"(",
"part",
".",
"strip",
"(",
")",
")",
")",
"return",
"sites"
]
| Parse the site-text string and return a list of sites.
Returns
-------
sites : list[Site]
A list of position-residue pairs corresponding to the site-text | [
"Parse",
"the",
"site",
"-",
"text",
"string",
"and",
"return",
"a",
"list",
"of",
"sites",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L163-L190 | train |
sorgerlab/indra | indra/sources/medscan/processor.py | MedscanProcessor.process_csxml_file | def process_csxml_file(self, filename, interval=None, lazy=False):
"""Processes a filehandle to MedScan csxml input into INDRA
statements.
The CSXML format consists of a top-level `<batch>` root element
containing a series of `<doc>` (document) elements, in turn containing
`<sec>` (section) elements, and in turn containing `<sent>` (sentence)
elements.
Within the `<sent>` element, a series of additional elements appear in
the following order:
* `<toks>`, which contains a tokenized form of the sentence in its text
attribute
* `<textmods>`, which describes any preprocessing/normalization done to
the underlying text
* `<match>` elements, each of which contains one or more `<entity>`
elements, describing entities in the text with their identifiers.
The local IDs of each entity are given in the `msid` attribute of
this element; these IDs are then referenced in any subsequent SVO
elements.
* `<svo>` elements, representing subject-verb-object triples. SVO
elements with a `type` attribute of `CONTROL` represent normalized
regulation relationships; they often represent the normalized
extraction of the immediately preceding (but unnormalized) SVO
element. However, in some cases there can be a "CONTROL" SVO
element without its parent immediately preceding it.
Parameters
----------
filename : string
The path to a Medscan csxml file.
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, only create a generator which can be used by the
`get_statements` method. If False, populate the statements list now.
"""
if interval is None:
interval = (None, None)
tmp_fname = tempfile.mktemp(os.path.basename(filename))
fix_character_encoding(filename, tmp_fname)
self.__f = open(tmp_fname, 'rb')
self._gen = self._iter_through_csxml_file_from_handle(*interval)
if not lazy:
for stmt in self._gen:
self.statements.append(stmt)
return | python | def process_csxml_file(self, filename, interval=None, lazy=False):
"""Processes a filehandle to MedScan csxml input into INDRA
statements.
The CSXML format consists of a top-level `<batch>` root element
containing a series of `<doc>` (document) elements, in turn containing
`<sec>` (section) elements, and in turn containing `<sent>` (sentence)
elements.
Within the `<sent>` element, a series of additional elements appear in
the following order:
* `<toks>`, which contains a tokenized form of the sentence in its text
attribute
* `<textmods>`, which describes any preprocessing/normalization done to
the underlying text
* `<match>` elements, each of which contains one or more `<entity>`
elements, describing entities in the text with their identifiers.
The local IDs of each entity are given in the `msid` attribute of
this element; these IDs are then referenced in any subsequent SVO
elements.
* `<svo>` elements, representing subject-verb-object triples. SVO
elements with a `type` attribute of `CONTROL` represent normalized
regulation relationships; they often represent the normalized
extraction of the immediately preceding (but unnormalized) SVO
element. However, in some cases there can be a "CONTROL" SVO
element without its parent immediately preceding it.
Parameters
----------
filename : string
The path to a Medscan csxml file.
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, only create a generator which can be used by the
`get_statements` method. If False, populate the statements list now.
"""
if interval is None:
interval = (None, None)
tmp_fname = tempfile.mktemp(os.path.basename(filename))
fix_character_encoding(filename, tmp_fname)
self.__f = open(tmp_fname, 'rb')
self._gen = self._iter_through_csxml_file_from_handle(*interval)
if not lazy:
for stmt in self._gen:
self.statements.append(stmt)
return | [
"def",
"process_csxml_file",
"(",
"self",
",",
"filename",
",",
"interval",
"=",
"None",
",",
"lazy",
"=",
"False",
")",
":",
"if",
"interval",
"is",
"None",
":",
"interval",
"=",
"(",
"None",
",",
"None",
")",
"tmp_fname",
"=",
"tempfile",
".",
"mktemp",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
")",
"fix_character_encoding",
"(",
"filename",
",",
"tmp_fname",
")",
"self",
".",
"__f",
"=",
"open",
"(",
"tmp_fname",
",",
"'rb'",
")",
"self",
".",
"_gen",
"=",
"self",
".",
"_iter_through_csxml_file_from_handle",
"(",
"*",
"interval",
")",
"if",
"not",
"lazy",
":",
"for",
"stmt",
"in",
"self",
".",
"_gen",
":",
"self",
".",
"statements",
".",
"append",
"(",
"stmt",
")",
"return"
]
| Processes a filehandle to MedScan csxml input into INDRA
statements.
The CSXML format consists of a top-level `<batch>` root element
containing a series of `<doc>` (document) elements, in turn containing
`<sec>` (section) elements, and in turn containing `<sent>` (sentence)
elements.
Within the `<sent>` element, a series of additional elements appear in
the following order:
* `<toks>`, which contains a tokenized form of the sentence in its text
attribute
* `<textmods>`, which describes any preprocessing/normalization done to
the underlying text
* `<match>` elements, each of which contains one or more `<entity>`
elements, describing entities in the text with their identifiers.
The local IDs of each entity are given in the `msid` attribute of
this element; these IDs are then referenced in any subsequent SVO
elements.
* `<svo>` elements, representing subject-verb-object triples. SVO
elements with a `type` attribute of `CONTROL` represent normalized
regulation relationships; they often represent the normalized
extraction of the immediately preceding (but unnormalized) SVO
element. However, in some cases there can be a "CONTROL" SVO
element without its parent immediately preceding it.
Parameters
----------
filename : string
The path to a Medscan csxml file.
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, only create a generator which can be used by the
`get_statements` method. If False, populate the statements list now. | [
"Processes",
"a",
"filehandle",
"to",
"MedScan",
"csxml",
"input",
"into",
"INDRA",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L328-L381 | train |
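A minimal usage sketch for the process_csxml_file record above. The csxml path, the interval, and the no-argument MedscanProcessor construction are illustrative assumptions, not details taken from the record itself.

from indra.sources.medscan.processor import MedscanProcessor

mp = MedscanProcessor()  # assumed no-argument construction
# Non-lazy call: extracted statements are appended to mp.statements right away
mp.process_csxml_file('articles.csxml', interval=(0, 100), lazy=False)
print(len(mp.statements))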
sorgerlab/indra | indra/tools/reading/util/script_tools.py | get_parser | def get_parser(description, input_desc):
"""Get a parser that is generic to reading scripts.
Parameters
----------
description : str
A description of the tool, usually about one line long.
input_desc: str
A string describing the nature of the input file used by the reading
tool.
Returns
-------
parser : argparse.ArgumentParser instance
An argument parser object, to which further arguments can be added.
"""
parser = ArgumentParser(description=description)
parser.add_argument(
dest='input_file',
help=input_desc
)
parser.add_argument(
'-r', '--readers',
choices=['reach', 'sparser', 'trips'],
help='List of readers to be used.',
nargs='+'
)
parser.add_argument(
'-n', '--num_procs',
dest='n_proc',
help='Select the number of processes to use.',
type=int,
default=1
)
parser.add_argument(
'-s', '--sample',
dest='n_samp',
help='Read a random sample of size N_SAMP of the inputs.',
type=int
)
parser.add_argument(
'-I', '--in_range',
dest='range_str',
help='Only read input lines in the range given as <start>:<end>.'
)
parser.add_argument(
'-v', '--verbose',
help='Include output from the readers.',
action='store_true'
)
parser.add_argument(
'-q', '--quiet',
help='Suppress most output. Overrides -v and -d options.',
action='store_true'
)
parser.add_argument(
'-d', '--debug',
help='Set the logging to debug level.',
action='store_true'
)
# parser.add_argument(
# '-m', '--messy',
# help='Do not clean up directories created while reading.',
# action='store_true'
# )
return parser | python | def get_parser(description, input_desc):
"""Get a parser that is generic to reading scripts.
Parameters
----------
description : str
A description of the tool, usually about one line long.
input_desc: str
A string describing the nature of the input file used by the reading
tool.
Returns
-------
parser : argparse.ArgumentParser instance
An argument parser object, to which further arguments can be added.
"""
parser = ArgumentParser(description=description)
parser.add_argument(
dest='input_file',
help=input_desc
)
parser.add_argument(
'-r', '--readers',
choices=['reach', 'sparser', 'trips'],
help='List of readers to be used.',
nargs='+'
)
parser.add_argument(
'-n', '--num_procs',
dest='n_proc',
help='Select the number of processes to use.',
type=int,
default=1
)
parser.add_argument(
'-s', '--sample',
dest='n_samp',
help='Read a random sample of size N_SAMP of the inputs.',
type=int
)
parser.add_argument(
'-I', '--in_range',
dest='range_str',
help='Only read input lines in the range given as <start>:<end>.'
)
parser.add_argument(
'-v', '--verbose',
help='Include output from the readers.',
action='store_true'
)
parser.add_argument(
'-q', '--quiet',
help='Suppress most output. Overrides -v and -d options.',
action='store_true'
)
parser.add_argument(
'-d', '--debug',
help='Set the logging to debug level.',
action='store_true'
)
# parser.add_argument(
# '-m', '--messy',
# help='Do not clean up directories created while reading.',
# action='store_true'
# )
return parser | [
"def",
"get_parser",
"(",
"description",
",",
"input_desc",
")",
":",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"parser",
".",
"add_argument",
"(",
"dest",
"=",
"'input_file'",
",",
"help",
"=",
"input_desc",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--readers'",
",",
"choices",
"=",
"[",
"'reach'",
",",
"'sparser'",
",",
"'trips'",
"]",
",",
"help",
"=",
"'List of readers to be used.'",
",",
"nargs",
"=",
"'+'",
")",
"parser",
".",
"add_argument",
"(",
"'-n'",
",",
"'--num_procs'",
",",
"dest",
"=",
"'n_proc'",
",",
"help",
"=",
"'Select the number of processes to use.'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--sample'",
",",
"dest",
"=",
"'n_samp'",
",",
"help",
"=",
"'Read a random sample of size N_SAMP of the inputs.'",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'-I'",
",",
"'--in_range'",
",",
"dest",
"=",
"'range_str'",
",",
"help",
"=",
"'Only read input lines in the range given as <start>:<end>.'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"help",
"=",
"'Include output from the readers.'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'-q'",
",",
"'--quiet'",
",",
"help",
"=",
"'Suppress most output. Overrides -v and -d options.'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"help",
"=",
"'Set the logging to debug level.'",
",",
"action",
"=",
"'store_true'",
")",
"# parser.add_argument(",
"# '-m', '--messy',",
"# help='Do not clean up directories created while reading.',",
"# action='store_true'",
"# )",
"return",
"parser"
]
| Get a parser that is generic to reading scripts.
Parameters
----------
description : str
A description of the tool, usually about one line long.
input_desc: str
A string describing the nature of the input file used by the reading
tool.
Returns
-------
parser : argparse.ArgumentParser instance
An argument parser object, to which further arguments can be added. | [
"Get",
"a",
"parser",
"that",
"is",
"generic",
"to",
"reading",
"scripts",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/script_tools.py#L11-L76 | train |
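A short sketch of how the parser returned by get_parser above might be exercised; the description strings, file name, and argument values are made up for illustration.

parser = get_parser('Read a list of PMIDs with INDRA readers.',
                    'A text file containing one PMID per line.')
# Positional input file plus the reader and process-count options defined above
args = parser.parse_args(['pmids.txt', '-r', 'reach', 'sparser', '-n', '4'])
print(args.input_file, args.readers, args.n_proc)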
sorgerlab/indra | indra/literature/newsapi_client.py | send_request | def send_request(endpoint, **kwargs):
"""Return the response to a query as JSON from the NewsAPI web service.
The basic API is limited to 100 results which is chosen unless explicitly
given as an argument. Beyond that, paging is supported through the "page"
argument, if needed.
Parameters
----------
endpoint : str
Endpoint to query, e.g. "everything" or "top-headlines"
kwargs : dict
A list of keyword arguments passed as parameters with the query.
The basic ones are "q" which is the search query, "from" is a start
date formatted as for instance 2018-06-10 and "to" is an end date
with the same format.
Returns
-------
res_json : dict
The response from the web service as a JSON dict.
"""
if api_key is None:
logger.error('NewsAPI cannot be used without an API key')
return None
url = '%s/%s' % (newsapi_url, endpoint)
if 'apiKey' not in kwargs:
kwargs['apiKey'] = api_key
if 'pageSize' not in kwargs:
kwargs['pageSize'] = 100
res = requests.get(url, params=kwargs)
res.raise_for_status()
res_json = res.json()
return res_json | python | def send_request(endpoint, **kwargs):
"""Return the response to a query as JSON from the NewsAPI web service.
The basic API is limited to 100 results which is chosen unless explicitly
given as an argument. Beyond that, paging is supported through the "page"
argument, if needed.
Parameters
----------
endpoint : str
Endpoint to query, e.g. "everything" or "top-headlines"
kwargs : dict
A list of keyword arguments passed as parameters with the query.
The basic ones are "q" which is the search query, "from" is a start
date formatted as for instance 2018-06-10 and "to" is an end date
with the same format.
Returns
-------
res_json : dict
The response from the web service as a JSON dict.
"""
if api_key is None:
logger.error('NewsAPI cannot be used without an API key')
return None
url = '%s/%s' % (newsapi_url, endpoint)
if 'apiKey' not in kwargs:
kwargs['apiKey'] = api_key
if 'pageSize' not in kwargs:
kwargs['pageSize'] = 100
res = requests.get(url, params=kwargs)
res.raise_for_status()
res_json = res.json()
return res_json | [
"def",
"send_request",
"(",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"api_key",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'NewsAPI cannot be used without an API key'",
")",
"return",
"None",
"url",
"=",
"'%s/%s'",
"%",
"(",
"newsapi_url",
",",
"endpoint",
")",
"if",
"'apiKey'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'apiKey'",
"]",
"=",
"api_key",
"if",
"'pageSize'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'pageSize'",
"]",
"=",
"100",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"kwargs",
")",
"res",
".",
"raise_for_status",
"(",
")",
"res_json",
"=",
"res",
".",
"json",
"(",
")",
"return",
"res_json"
]
| Return the response to a query as JSON from the NewsAPI web service.
The basic API is limited to 100 results which is chosen unless explicitly
given as an argument. Beyond that, paging is supported through the "page"
argument, if needed.
Parameters
----------
endpoint : str
Endpoint to query, e.g. "everything" or "top-headlines"
kwargs : dict
A list of keyword arguments passed as parameters with the query.
The basic ones are "q" which is the search query, "from" is a start
date formatted as for instance 2018-06-10 and "to" is an end date
with the same format.
Returns
-------
res_json : dict
The response from the web service as a JSON dict. | [
"Return",
"the",
"response",
"to",
"a",
"query",
"as",
"JSON",
"from",
"the",
"NewsAPI",
"web",
"service",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/newsapi_client.py#L29-L63 | train |
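An illustrative call pattern for send_request above. The query string and dates are placeholders; because 'from' is a Python keyword, the parameters are passed through a dict. The 'articles' key reflects the usual NewsAPI response layout and is an assumption here.

params = {'q': 'machine reading', 'from': '2018-06-10', 'to': '2018-06-15'}
res = send_request('everything', **params)
if res is not None:
    for article in res.get('articles', []):
        print(article.get('title'))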
sorgerlab/indra | indra/sources/ndex_cx/api.py | process_cx_file | def process_cx_file(file_name, require_grounding=True):
"""Process a CX JSON file into Statements.
Parameters
----------
file_name : str
Path to file containing CX JSON.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements.
"""
with open(file_name, 'rt') as fh:
json_list = json.load(fh)
return process_cx(json_list, require_grounding=require_grounding) | python | def process_cx_file(file_name, require_grounding=True):
"""Process a CX JSON file into Statements.
Parameters
----------
file_name : str
Path to file containing CX JSON.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements.
"""
with open(file_name, 'rt') as fh:
json_list = json.load(fh)
return process_cx(json_list, require_grounding=require_grounding) | [
"def",
"process_cx_file",
"(",
"file_name",
",",
"require_grounding",
"=",
"True",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'rt'",
")",
"as",
"fh",
":",
"json_list",
"=",
"json",
".",
"load",
"(",
"fh",
")",
"return",
"process_cx",
"(",
"json_list",
",",
"require_grounding",
"=",
"require_grounding",
")"
]
| Process a CX JSON file into Statements.
Parameters
----------
file_name : str
Path to file containing CX JSON.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. | [
"Process",
"a",
"CX",
"JSON",
"file",
"into",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/api.py#L12-L30 | train |
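A brief usage sketch for process_cx_file above; the file name is a placeholder, and reading the extracted Statements from the processor's statements attribute follows the usual INDRA processor convention (an assumption, not stated in the record).

ncp = process_cx_file('network.cx', require_grounding=False)
print(ncp.statements)  # populated by the get_statements() call inside process_cx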
sorgerlab/indra | indra/sources/ndex_cx/api.py | process_ndex_network | def process_ndex_network(network_id, username=None, password=None,
require_grounding=True):
"""Process an NDEx network into Statements.
Parameters
----------
network_id : str
NDEx network ID.
username : str
NDEx username.
password : str
NDEx password.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. Returns None if the HTTP
status code indicates an unsuccessful request.
"""
nd = ndex2.client.Ndex2(username=username, password=password)
res = nd.get_network_as_cx_stream(network_id)
if res.status_code != 200:
logger.error('Problem downloading network: status code %s' %
res.status_code)
logger.error('Response: %s' % res.text)
return None
json_list = res.json()
summary = nd.get_network_summary(network_id)
return process_cx(json_list, summary=summary,
require_grounding=require_grounding) | python | def process_ndex_network(network_id, username=None, password=None,
require_grounding=True):
"""Process an NDEx network into Statements.
Parameters
----------
network_id : str
NDEx network ID.
username : str
NDEx username.
password : str
NDEx password.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. Returns None if the HTTP
status code indicates an unsuccessful request.
"""
nd = ndex2.client.Ndex2(username=username, password=password)
res = nd.get_network_as_cx_stream(network_id)
if res.status_code != 200:
logger.error('Problem downloading network: status code %s' %
res.status_code)
logger.error('Response: %s' % res.text)
return None
json_list = res.json()
summary = nd.get_network_summary(network_id)
return process_cx(json_list, summary=summary,
require_grounding=require_grounding) | [
"def",
"process_ndex_network",
"(",
"network_id",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"require_grounding",
"=",
"True",
")",
":",
"nd",
"=",
"ndex2",
".",
"client",
".",
"Ndex2",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"res",
"=",
"nd",
".",
"get_network_as_cx_stream",
"(",
"network_id",
")",
"if",
"res",
".",
"status_code",
"!=",
"200",
":",
"logger",
".",
"error",
"(",
"'Problem downloading network: status code %s'",
"%",
"res",
".",
"status_code",
")",
"logger",
".",
"error",
"(",
"'Response: %s'",
"%",
"res",
".",
"text",
")",
"return",
"None",
"json_list",
"=",
"res",
".",
"json",
"(",
")",
"summary",
"=",
"nd",
".",
"get_network_summary",
"(",
"network_id",
")",
"return",
"process_cx",
"(",
"json_list",
",",
"summary",
"=",
"summary",
",",
"require_grounding",
"=",
"require_grounding",
")"
]
| Process an NDEx network into Statements.
Parameters
----------
network_id : str
NDEx network ID.
username : str
NDEx username.
password : str
NDEx password.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. Returns None if the HTTP
status code indicates an unsuccessful request. | [
"Process",
"an",
"NDEx",
"network",
"into",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/api.py#L33-L65 | train |
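A sketch of calling process_ndex_network above; the network ID and credentials are placeholders, not real values.

ncp = process_ndex_network('00000000-aaaa-bbbb-cccc-000000000000',
                           username='my_user', password='my_password')
# None is returned when the HTTP status code indicates a failed request
if ncp is not None:
    print(ncp.statements)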
sorgerlab/indra | indra/sources/ndex_cx/api.py | process_cx | def process_cx(cx_json, summary=None, require_grounding=True):
"""Process a CX JSON object into Statements.
Parameters
----------
cx_json : list
CX JSON object.
summary : Optional[dict]
The network summary object which can be obtained via
get_network_summary through the web service. THis contains metadata
such as the owner and the creation time of the network.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements.
"""
ncp = NdexCxProcessor(cx_json, summary=summary,
require_grounding=require_grounding)
ncp.get_statements()
return ncp | python | def process_cx(cx_json, summary=None, require_grounding=True):
"""Process a CX JSON object into Statements.
Parameters
----------
cx_json : list
CX JSON object.
summary : Optional[dict]
The network summary object which can be obtained via
get_network_summary through the web service. This contains metadata
such as the owner and the creation time of the network.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements.
"""
ncp = NdexCxProcessor(cx_json, summary=summary,
require_grounding=require_grounding)
ncp.get_statements()
return ncp | [
"def",
"process_cx",
"(",
"cx_json",
",",
"summary",
"=",
"None",
",",
"require_grounding",
"=",
"True",
")",
":",
"ncp",
"=",
"NdexCxProcessor",
"(",
"cx_json",
",",
"summary",
"=",
"summary",
",",
"require_grounding",
"=",
"require_grounding",
")",
"ncp",
".",
"get_statements",
"(",
")",
"return",
"ncp"
]
| Process a CX JSON object into Statements.
Parameters
----------
cx_json : list
CX JSON object.
summary : Optional[dict]
The network summary object which can be obtained via
get_network_summary through the web service. This contains metadata
such as the owner and the creation time of the network.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. | [
"Process",
"a",
"CX",
"JSON",
"object",
"into",
"Statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/api.py#L68-L91 | train |
sorgerlab/indra | indra/tools/reading/read_files.py | read_files | def read_files(files, readers, **kwargs):
"""Read the files in `files` with the reader objects in `readers`.
Parameters
----------
files : list [str]
A list of file paths to be read by the readers. Supported files are
limited to text and nxml files.
readers : list [Reader instances]
A list of Reader objects to be used reading the files.
**kwargs :
Other keyword arguments are passed to the `read` method of the readers.
Returns
-------
output_list : list [ReadingData]
A list of ReadingData objects with the contents of the readings.
"""
reading_content = [Content.from_file(filepath) for filepath in files]
output_list = []
for reader in readers:
res_list = reader.read(reading_content, **kwargs)
if res_list is None:
logger.info("Nothing read by %s." % reader.name)
else:
logger.info("Successfully read %d content entries with %s."
% (len(res_list), reader.name))
output_list += res_list
logger.info("Read %s text content entries in all." % len(output_list))
return output_list | python | def read_files(files, readers, **kwargs):
"""Read the files in `files` with the reader objects in `readers`.
Parameters
----------
files : list [str]
A list of file paths to be read by the readers. Supported files are
limited to text and nxml files.
readers : list [Reader instances]
A list of Reader objects to be used reading the files.
**kwargs :
Other keyword arguments are passed to the `read` method of the readers.
Returns
-------
output_list : list [ReadingData]
A list of ReadingData objects with the contents of the readings.
"""
reading_content = [Content.from_file(filepath) for filepath in files]
output_list = []
for reader in readers:
res_list = reader.read(reading_content, **kwargs)
if res_list is None:
logger.info("Nothing read by %s." % reader.name)
else:
logger.info("Successfully read %d content entries with %s."
% (len(res_list), reader.name))
output_list += res_list
logger.info("Read %s text content entries in all." % len(output_list))
return output_list | [
"def",
"read_files",
"(",
"files",
",",
"readers",
",",
"*",
"*",
"kwargs",
")",
":",
"reading_content",
"=",
"[",
"Content",
".",
"from_file",
"(",
"filepath",
")",
"for",
"filepath",
"in",
"files",
"]",
"output_list",
"=",
"[",
"]",
"for",
"reader",
"in",
"readers",
":",
"res_list",
"=",
"reader",
".",
"read",
"(",
"reading_content",
",",
"*",
"*",
"kwargs",
")",
"if",
"res_list",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"Nothing read by %s.\"",
"%",
"reader",
".",
"name",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Successfully read %d content entries with %s.\"",
"%",
"(",
"len",
"(",
"res_list",
")",
",",
"reader",
".",
"name",
")",
")",
"output_list",
"+=",
"res_list",
"logger",
".",
"info",
"(",
"\"Read %s text content entries in all.\"",
"%",
"len",
"(",
"output_list",
")",
")",
"return",
"output_list"
]
| Read the files in `files` with the reader objects in `readers`.
Parameters
----------
files : list [str]
A list of file paths to be read by the readers. Supported files are
limited to text and nxml files.
readers : list [Reader instances]
A list of Reader objects to be used reading the files.
**kwargs :
Other keyword arguments are passed to the `read` method of the readers.
Returns
-------
output_list : list [ReadingData]
A list of ReadingData objects with the contents of the readings. | [
"Read",
"the",
"files",
"in",
"files",
"with",
"the",
"reader",
"objects",
"in",
"readers",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/read_files.py#L30-L59 | train |
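A minimal sketch of driving read_files above. The file paths are placeholders and the readers list is left empty here; in practice it would hold instantiated Reader objects.

readers = []  # placeholder: fill with Reader instances before a real run
outputs = read_files(['paper1.nxml', 'notes.txt'], readers)
for reading_data in outputs:
    print(reading_data)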
sorgerlab/indra | indra/tools/expand_families.py | Expander.expand_families | def expand_families(self, stmts):
"""Generate statements by expanding members of families and complexes.
"""
new_stmts = []
for stmt in stmts:
# Put together the lists of families, with their members. E.g.,
# for a statement involving RAF and MEK, should return a list of
# tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)]
families_list = []
for ag in stmt.agent_list():
ag_children = self.get_children(ag)
# If the agent has no children, then we use the agent itself
if len(ag_children) == 0:
families_list.append([ag])
# Otherwise, we add the tuple of namespaces/IDs for the children
else:
families_list.append(ag_children)
# Now, put together new statements from the cross product of the
# expanded family members
for ag_combo in itertools.product(*families_list):
# Create new agents based on the namespaces/IDs, with
# appropriate name and db_refs entries
child_agents = []
for ag_entry in ag_combo:
# If we got an agent, or None, that means there were no
# children; so we use the original agent rather than
# construct a new agent
if ag_entry is None or isinstance(ag_entry, Agent):
new_agent = ag_entry
# Otherwise, create a new agent from the ns/ID
elif isinstance(ag_entry, tuple):
# FIXME FIXME FIXME
# This doesn't reproduce agent state from the original
# family-level statements!
ag_ns, ag_id = ag_entry
new_agent = _agent_from_ns_id(ag_ns, ag_id)
else:
raise Exception('Unrecognized agent entry type.')
# Add agent to our list of child agents
child_agents.append(new_agent)
# Create a copy of the statement
new_stmt = deepcopy(stmt)
# Replace the agents in the statement with the newly-created
# child agents
new_stmt.set_agent_list(child_agents)
# Add to list
new_stmts.append(new_stmt)
return new_stmts | python | def expand_families(self, stmts):
"""Generate statements by expanding members of families and complexes.
"""
new_stmts = []
for stmt in stmts:
# Put together the lists of families, with their members. E.g.,
# for a statement involving RAF and MEK, should return a list of
# tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)]
families_list = []
for ag in stmt.agent_list():
ag_children = self.get_children(ag)
# If the agent has no children, then we use the agent itself
if len(ag_children) == 0:
families_list.append([ag])
# Otherwise, we add the tuple of namespaces/IDs for the children
else:
families_list.append(ag_children)
# Now, put together new statements from the cross product of the
# expanded family members
for ag_combo in itertools.product(*families_list):
# Create new agents based on the namespaces/IDs, with
# appropriate name and db_refs entries
child_agents = []
for ag_entry in ag_combo:
# If we got an agent, or None, that means there were no
# children; so we use the original agent rather than
# construct a new agent
if ag_entry is None or isinstance(ag_entry, Agent):
new_agent = ag_entry
# Otherwise, create a new agent from the ns/ID
elif isinstance(ag_entry, tuple):
# FIXME FIXME FIXME
# This doesn't reproduce agent state from the original
# family-level statements!
ag_ns, ag_id = ag_entry
new_agent = _agent_from_ns_id(ag_ns, ag_id)
else:
raise Exception('Unrecognized agent entry type.')
# Add agent to our list of child agents
child_agents.append(new_agent)
# Create a copy of the statement
new_stmt = deepcopy(stmt)
# Replace the agents in the statement with the newly-created
# child agents
new_stmt.set_agent_list(child_agents)
# Add to list
new_stmts.append(new_stmt)
return new_stmts | [
"def",
"expand_families",
"(",
"self",
",",
"stmts",
")",
":",
"new_stmts",
"=",
"[",
"]",
"for",
"stmt",
"in",
"stmts",
":",
"# Put together the lists of families, with their members. E.g.,",
"# for a statement involving RAF and MEK, should return a list of",
"# tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)]",
"families_list",
"=",
"[",
"]",
"for",
"ag",
"in",
"stmt",
".",
"agent_list",
"(",
")",
":",
"ag_children",
"=",
"self",
".",
"get_children",
"(",
"ag",
")",
"# If the agent has no children, then we use the agent itself",
"if",
"len",
"(",
"ag_children",
")",
"==",
"0",
":",
"families_list",
".",
"append",
"(",
"[",
"ag",
"]",
")",
"# Otherwise, we add the tuple of namespaces/IDs for the children",
"else",
":",
"families_list",
".",
"append",
"(",
"ag_children",
")",
"# Now, put together new statements frmo the cross product of the",
"# expanded family members",
"for",
"ag_combo",
"in",
"itertools",
".",
"product",
"(",
"*",
"families_list",
")",
":",
"# Create new agents based on the namespaces/IDs, with",
"# appropriate name and db_refs entries",
"child_agents",
"=",
"[",
"]",
"for",
"ag_entry",
"in",
"ag_combo",
":",
"# If we got an agent, or None, that means there were no",
"# children; so we use the original agent rather than",
"# construct a new agent",
"if",
"ag_entry",
"is",
"None",
"or",
"isinstance",
"(",
"ag_entry",
",",
"Agent",
")",
":",
"new_agent",
"=",
"ag_entry",
"# Otherwise, create a new agent from the ns/ID",
"elif",
"isinstance",
"(",
"ag_entry",
",",
"tuple",
")",
":",
"# FIXME FIXME FIXME",
"# This doesn't reproduce agent state from the original",
"# family-level statements!",
"ag_ns",
",",
"ag_id",
"=",
"ag_entry",
"new_agent",
"=",
"_agent_from_ns_id",
"(",
"ag_ns",
",",
"ag_id",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unrecognized agent entry type.'",
")",
"# Add agent to our list of child agents",
"child_agents",
".",
"append",
"(",
"new_agent",
")",
"# Create a copy of the statement",
"new_stmt",
"=",
"deepcopy",
"(",
"stmt",
")",
"# Replace the agents in the statement with the newly-created",
"# child agents",
"new_stmt",
".",
"set_agent_list",
"(",
"child_agents",
")",
"# Add to list",
"new_stmts",
".",
"append",
"(",
"new_stmt",
")",
"return",
"new_stmts"
]
| Generate statements by expanding members of families and complexes. | [
"Generate",
"statements",
"by",
"expanding",
"members",
"of",
"families",
"and",
"complexes",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/expand_families.py#L22-L69 | train |
sorgerlab/indra | indra/preassembler/make_eidos_hume_ontologies.py | update_ontology | def update_ontology(ont_url, rdf_path):
"""Load an ontology formatted like Eidos' from github."""
yaml_root = load_yaml_from_url(ont_url)
G = rdf_graph_from_yaml(yaml_root)
save_hierarchy(G, rdf_path) | python | def update_ontology(ont_url, rdf_path):
"""Load an ontology formatted like Eidos' from github."""
yaml_root = load_yaml_from_url(ont_url)
G = rdf_graph_from_yaml(yaml_root)
save_hierarchy(G, rdf_path) | [
"def",
"update_ontology",
"(",
"ont_url",
",",
"rdf_path",
")",
":",
"yaml_root",
"=",
"load_yaml_from_url",
"(",
"ont_url",
")",
"G",
"=",
"rdf_graph_from_yaml",
"(",
"yaml_root",
")",
"save_hierarchy",
"(",
"G",
",",
"rdf_path",
")"
]
| Load an ontology formatted like Eidos' from github. | [
"Load",
"an",
"ontology",
"formatted",
"like",
"Eidos",
"from",
"github",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/make_eidos_hume_ontologies.py#L69-L73 | train |
sorgerlab/indra | indra/preassembler/make_eidos_hume_ontologies.py | rdf_graph_from_yaml | def rdf_graph_from_yaml(yaml_root):
"""Convert the YAML object into an RDF Graph object."""
G = Graph()
for top_entry in yaml_root:
assert len(top_entry) == 1
node = list(top_entry.keys())[0]
build_relations(G, node, top_entry[node], None)
return G | python | def rdf_graph_from_yaml(yaml_root):
"""Convert the YAML object into an RDF Graph object."""
G = Graph()
for top_entry in yaml_root:
assert len(top_entry) == 1
node = list(top_entry.keys())[0]
build_relations(G, node, top_entry[node], None)
return G | [
"def",
"rdf_graph_from_yaml",
"(",
"yaml_root",
")",
":",
"G",
"=",
"Graph",
"(",
")",
"for",
"top_entry",
"in",
"yaml_root",
":",
"assert",
"len",
"(",
"top_entry",
")",
"==",
"1",
"node",
"=",
"list",
"(",
"top_entry",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"build_relations",
"(",
"G",
",",
"node",
",",
"top_entry",
"[",
"node",
"]",
",",
"None",
")",
"return",
"G"
]
| Convert the YAML object into an RDF Graph object. | [
"Convert",
"the",
"YAML",
"object",
"into",
"an",
"RDF",
"Graph",
"object",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/make_eidos_hume_ontologies.py#L76-L83 | train |
sorgerlab/indra | indra/preassembler/make_eidos_hume_ontologies.py | load_yaml_from_url | def load_yaml_from_url(ont_url):
"""Return a YAML object loaded from a YAML file URL."""
res = requests.get(ont_url)
if res.status_code != 200:
raise Exception('Could not load ontology from %s' % ont_url)
root = yaml.load(res.content)
return root | python | def load_yaml_from_url(ont_url):
"""Return a YAML object loaded from a YAML file URL."""
res = requests.get(ont_url)
if res.status_code != 200:
raise Exception('Could not load ontology from %s' % ont_url)
root = yaml.load(res.content)
return root | [
"def",
"load_yaml_from_url",
"(",
"ont_url",
")",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"ont_url",
")",
"if",
"res",
".",
"status_code",
"!=",
"200",
":",
"raise",
"Exception",
"(",
"'Could not load ontology from %s'",
"%",
"ont_url",
")",
"root",
"=",
"yaml",
".",
"load",
"(",
"res",
".",
"content",
")",
"return",
"root"
]
| Return a YAML object loaded from a YAML file URL. | [
"Return",
"a",
"YAML",
"object",
"loaded",
"from",
"a",
"YAML",
"file",
"URL",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/make_eidos_hume_ontologies.py#L86-L92 | train |
sorgerlab/indra | indra/sources/isi/preprocessor.py | IsiPreprocessor.register_preprocessed_file | def register_preprocessed_file(self, infile, pmid, extra_annotations):
"""Set up already preprocessed text file for reading with ISI reader.
This is essentially a mock function to "register" already preprocessed
files and get an IsiPreprocessor object that can be passed to
the IsiProcessor.
Parameters
----------
infile : str
Path to an already preprocessed text file (i.e. one ready to
be sent for reading to ISI reader).
pmid : str
The PMID corresponding to the file
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
infile_base = os.path.basename(infile)
outfile = os.path.join(self.preprocessed_dir, infile_base)
shutil.copyfile(infile, outfile)
infile_key = os.path.splitext(infile_base)[0]
self.pmids[infile_key] = pmid
self.extra_annotations[infile_key] = extra_annotations | python | def register_preprocessed_file(self, infile, pmid, extra_annotations):
"""Set up already preprocessed text file for reading with ISI reader.
This is essentially a mock function to "register" already preprocessed
files and get an IsiPreprocessor object that can be passed to
the IsiProcessor.
Parameters
----------
infile : str
Path to an already preprocessed text file (i.e. one ready to
be sent for reading to ISI reader).
pmid : str
The PMID corresponding to the file
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
infile_base = os.path.basename(infile)
outfile = os.path.join(self.preprocessed_dir, infile_base)
shutil.copyfile(infile, outfile)
infile_key = os.path.splitext(infile_base)[0]
self.pmids[infile_key] = pmid
self.extra_annotations[infile_key] = extra_annotations | [
"def",
"register_preprocessed_file",
"(",
"self",
",",
"infile",
",",
"pmid",
",",
"extra_annotations",
")",
":",
"infile_base",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"infile",
")",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"preprocessed_dir",
",",
"infile_base",
")",
"shutil",
".",
"copyfile",
"(",
"infile",
",",
"outfile",
")",
"infile_key",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"infile_base",
")",
"[",
"0",
"]",
"self",
".",
"pmids",
"[",
"infile_key",
"]",
"=",
"pmid",
"self",
".",
"extra_annotations",
"[",
"infile_key",
"]",
"=",
"extra_annotations"
]
| Set up already preprocessed text file for reading with ISI reader.
This is essentially a mock function to "register" already preprocessed
files and get an IsiPreprocessor object that can be passed to
the IsiProcessor.
Parameters
----------
infile : str
Path to an already preprocessed text file (i.e. one ready to
be sent for reading to ISI reader).
pmid : str
The PMID corresponding to the file
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden) | [
"Set",
"up",
"already",
"preprocessed",
"text",
"file",
"for",
"reading",
"with",
"ISI",
"reader",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L54-L80 | train |
sorgerlab/indra | indra/sources/isi/preprocessor.py | IsiPreprocessor.preprocess_plain_text_string | def preprocess_plain_text_string(self, text, pmid, extra_annotations):
"""Preprocess plain text string for use by ISI reader.
Preprocessing is done by tokenizing into sentences and writing
each sentence on its own line in a plain text file. All other
preprocessing functions ultimately call this one.
Parameters
----------
text : str
The plain text of the article or abstract
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
output_file = '%s.txt' % self.next_file_id
output_file = os.path.join(self.preprocessed_dir, output_file)
# Tokenize sentence
sentences = nltk.sent_tokenize(text)
# Write sentences to text file
first_sentence = True
with codecs.open(output_file, 'w', encoding='utf-8') as f:
for sentence in sentences:
if not first_sentence:
f.write('\n')
f.write(sentence.rstrip())
first_sentence = False
# Store annotations
self.pmids[str(self.next_file_id)] = pmid
self.extra_annotations[str(self.next_file_id)] = extra_annotations
# Increment file id
self.next_file_id += 1 | python | def preprocess_plain_text_string(self, text, pmid, extra_annotations):
"""Preprocess plain text string for use by ISI reader.
Preprocessing is done by tokenizing into sentences and writing
each sentence on its own line in a plain text file. All other
preprocessing functions ultimately call this one.
Parameters
----------
text : str
The plain text of the article or abstract
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
output_file = '%s.txt' % self.next_file_id
output_file = os.path.join(self.preprocessed_dir, output_file)
# Tokenize sentence
sentences = nltk.sent_tokenize(text)
# Write sentences to text file
first_sentence = True
with codecs.open(output_file, 'w', encoding='utf-8') as f:
for sentence in sentences:
if not first_sentence:
f.write('\n')
f.write(sentence.rstrip())
first_sentence = False
# Store annotations
self.pmids[str(self.next_file_id)] = pmid
self.extra_annotations[str(self.next_file_id)] = extra_annotations
# Increment file id
self.next_file_id += 1 | [
"def",
"preprocess_plain_text_string",
"(",
"self",
",",
"text",
",",
"pmid",
",",
"extra_annotations",
")",
":",
"output_file",
"=",
"'%s.txt'",
"%",
"self",
".",
"next_file_id",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"preprocessed_dir",
",",
"output_file",
")",
"# Tokenize sentence",
"sentences",
"=",
"nltk",
".",
"sent_tokenize",
"(",
"text",
")",
"# Write sentences to text file",
"first_sentence",
"=",
"True",
"with",
"codecs",
".",
"open",
"(",
"output_file",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"for",
"sentence",
"in",
"sentences",
":",
"if",
"not",
"first_sentence",
":",
"f",
".",
"write",
"(",
"'\\n'",
")",
"f",
".",
"write",
"(",
"sentence",
".",
"rstrip",
"(",
")",
")",
"first_sentence",
"=",
"False",
"# Store annotations",
"self",
".",
"pmids",
"[",
"str",
"(",
"self",
".",
"next_file_id",
")",
"]",
"=",
"pmid",
"self",
".",
"extra_annotations",
"[",
"str",
"(",
"self",
".",
"next_file_id",
")",
"]",
"=",
"extra_annotations",
"# Increment file id",
"self",
".",
"next_file_id",
"+=",
"1"
]
| Preprocess plain text string for use by ISI reader.
Preprocessing is done by tokenizing into sentences and writing
each sentence on its own line in a plain text file. All other
preprocessing functions ultimately call this one.
Parameters
----------
text : str
The plain text of the article or abstract
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden) | [
"Preprocess",
"plain",
"text",
"string",
"for",
"use",
"by",
"ISI",
"reader",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L82-L120 | train |
sorgerlab/indra | indra/sources/isi/preprocessor.py | IsiPreprocessor.preprocess_plain_text_file | def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
"""Preprocess a plain text file for use with ISI reder.
Preprocessing results in a new text file with one sentence
per line.
Parameters
----------
filename : str
The name of the plain text file
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
with codecs.open(filename, 'r', encoding='utf-8') as f:
content = f.read()
self.preprocess_plain_text_string(content, pmid,
extra_annotations) | python | def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
"""Preprocess a plain text file for use with ISI reder.
Preprocessing results in a new text file with one sentence
per line.
Parameters
----------
filename : str
The name of the plain text file
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
with codecs.open(filename, 'r', encoding='utf-8') as f:
content = f.read()
self.preprocess_plain_text_string(content, pmid,
extra_annotations) | [
"def",
"preprocess_plain_text_file",
"(",
"self",
",",
"filename",
",",
"pmid",
",",
"extra_annotations",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"self",
".",
"preprocess_plain_text_string",
"(",
"content",
",",
"pmid",
",",
"extra_annotations",
")"
]
| Preprocess a plain text file for use with ISI reader.
Preprocessing results in a new text file with one sentence
per line.
Parameters
----------
filename : str
The name of the plain text file
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden) | [
"Preprocess",
"a",
"plain",
"text",
"file",
"for",
"use",
"with",
"ISI",
"reder",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L122-L142 | train |
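A hypothetical usage sketch of this wrapper; the IsiPreprocessor constructor argument (an output directory for the preprocessed files), the input filename, and the PMID are assumptions, not taken from this record.

from indra.sources.isi.preprocessor import IsiPreprocessor

pp = IsiPreprocessor('isi_preprocessed/')  # assumed output-directory argument
# Tokenizes the file into one sentence per line and stores the annotations
pp.preprocess_plain_text_file('article.txt', pmid='12345678',
                              extra_annotations={'source': 'local_file'})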
sorgerlab/indra | indra/sources/isi/preprocessor.py | IsiPreprocessor.preprocess_nxml_file | def preprocess_nxml_file(self, filename, pmid, extra_annotations):
"""Preprocess an NXML file for use with the ISI reader.
Preprocessing is done by extracting plain text from NXML and then
creating a text file with one sentence per line.
Parameters
----------
filename : str
Filename of an nxml file to process
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
# Create a temporary directory
tmp_dir = tempfile.mkdtemp('indra_isi_nxml2txt_output')
# Run nxml2txt
if nxml2txt_path is None:
logger.error('NXML2TXT_PATH not specified in config file or ' +
'environment variable')
return
if python2_path is None:
logger.error('PYTHON2_PATH not specified in config file or ' +
'environment variable')
return
else:
txt_out = os.path.join(tmp_dir, 'out.txt')
so_out = os.path.join(tmp_dir, 'out.so')
command = [python2_path,
os.path.join(nxml2txt_path, 'nxml2txt'),
filename,
txt_out,
so_out]
ret = subprocess.call(command)
if ret != 0:
logger.warning('nxml2txt returned non-zero error code')
with open(txt_out, 'r') as f:
txt_content = f.read()
# Remove temporary directory
shutil.rmtree(tmp_dir)
# We need to remove some common LaTeX commands from the converted text
# or the reader will get confused
cmd1 = '[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'
cmd2 = '[^ \{\}]+\{[^\{\}]+\}'
txt_content = re.sub(cmd1, '', txt_content)
txt_content = re.sub(cmd2, '', txt_content)
with open('tmp.txt', 'w') as f:
f.write(txt_content)
# Preprocess text extracted from nxml
self.preprocess_plain_text_string(txt_content, pmid, extra_annotations) | python | def preprocess_nxml_file(self, filename, pmid, extra_annotations):
"""Preprocess an NXML file for use with the ISI reader.
Preprocessing is done by extracting plain text from NXML and then
creating a text file with one sentence per line.
Parameters
----------
filename : str
Filename of an nxml file to process
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
# Create a temporary directory
tmp_dir = tempfile.mkdtemp('indra_isi_nxml2txt_output')
# Run nxml2txt
if nxml2txt_path is None:
logger.error('NXML2TXT_PATH not specified in config file or ' +
'environment variable')
return
if python2_path is None:
logger.error('PYTHON2_PATH not specified in config file or ' +
'environment variable')
return
else:
txt_out = os.path.join(tmp_dir, 'out.txt')
so_out = os.path.join(tmp_dir, 'out.so')
command = [python2_path,
os.path.join(nxml2txt_path, 'nxml2txt'),
filename,
txt_out,
so_out]
ret = subprocess.call(command)
if ret != 0:
logger.warning('nxml2txt returned non-zero error code')
with open(txt_out, 'r') as f:
txt_content = f.read()
# Remove temporary directory
shutil.rmtree(tmp_dir)
# We need to remove some common LaTeX commands from the converted text
# or the reader will get confused
cmd1 = '[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'
cmd2 = '[^ \{\}]+\{[^\{\}]+\}'
txt_content = re.sub(cmd1, '', txt_content)
txt_content = re.sub(cmd2, '', txt_content)
with open('tmp.txt', 'w') as f:
f.write(txt_content)
# Preprocess text extracted from nxml
self.preprocess_plain_text_string(txt_content, pmid, extra_annotations) | [
"def",
"preprocess_nxml_file",
"(",
"self",
",",
"filename",
",",
"pmid",
",",
"extra_annotations",
")",
":",
"# Create a temporary directory",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"'indra_isi_nxml2txt_output'",
")",
"# Run nxml2txt",
"if",
"nxml2txt_path",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'NXML2TXT_PATH not specified in config file or '",
"+",
"'environment variable'",
")",
"return",
"if",
"python2_path",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"'PYTHON2_PATH not specified in config file or '",
"+",
"'environment variable'",
")",
"return",
"else",
":",
"txt_out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'out.txt'",
")",
"so_out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'out.so'",
")",
"command",
"=",
"[",
"python2_path",
",",
"os",
".",
"path",
".",
"join",
"(",
"nxml2txt_path",
",",
"'nxml2txt'",
")",
",",
"filename",
",",
"txt_out",
",",
"so_out",
"]",
"ret",
"=",
"subprocess",
".",
"call",
"(",
"command",
")",
"if",
"ret",
"!=",
"0",
":",
"logger",
".",
"warning",
"(",
"'nxml2txt returned non-zero error code'",
")",
"with",
"open",
"(",
"txt_out",
",",
"'r'",
")",
"as",
"f",
":",
"txt_content",
"=",
"f",
".",
"read",
"(",
")",
"# Remote temporary directory",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")",
"# We need to remove some common LaTEX commands from the converted text",
"# or the reader will get confused",
"cmd1",
"=",
"'[^ \\{\\}]+\\{[^\\{\\}]+\\}\\{[^\\{\\}]+\\}'",
"cmd2",
"=",
"'[^ \\{\\}]+\\{[^\\{\\}]+\\}'",
"txt_content",
"=",
"re",
".",
"sub",
"(",
"cmd1",
",",
"''",
",",
"txt_content",
")",
"txt_content",
"=",
"re",
".",
"sub",
"(",
"cmd2",
",",
"''",
",",
"txt_content",
")",
"with",
"open",
"(",
"'tmp.txt'",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"txt_content",
")",
"# Prepocess text extracted from nxml",
"self",
".",
"preprocess_plain_text_string",
"(",
"txt_content",
",",
"pmid",
",",
"extra_annotations",
")"
]
| Preprocess an NXML file for use with the ISI reader.
Preprocessing is done by extracting plain text from NXML and then
creating a text file with one sentence per line.
Parameters
----------
filename : str
Filename of an nxml file to process
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden) | [
"Preprocess",
"an",
"NXML",
"file",
"for",
"use",
"with",
"the",
"ISI",
"reader",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L144-L202 | train |
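The two LaTeX-stripping regular expressions above are easiest to see on a small made-up string; this standalone sketch is not part of the reader pipeline.

import re

cmd1 = r'[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'   # two-argument commands, e.g. \frac{a}{b}
cmd2 = r'[^ \{\}]+\{[^\{\}]+\}'               # one-argument commands, e.g. \emph{text}
text = r'The rate \frac{dx}{dt} rises under \emph{mild} stimulation.'
text = re.sub(cmd2, '', re.sub(cmd1, '', text))
print(text)  # 'The rate  rises under  stimulation.'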
sorgerlab/indra | indra/sources/isi/preprocessor.py | IsiPreprocessor.preprocess_abstract_list | def preprocess_abstract_list(self, abstract_list):
"""Preprocess abstracts in database pickle dump format for ISI reader.
For each abstract, creates a plain text file with one sentence per
line, and stores metadata to be included with each statement from
that abstract.
Parameters
----------
abstract_list : list[dict]
Compressed abstracts with corresponding metadata in INDRA database
pickle dump format.
"""
for abstract_struct in abstract_list:
abs_format = abstract_struct['format']
content_type = abstract_struct['text_type']
content_zipped = abstract_struct['content']
tcid = abstract_struct['tcid']
trid = abstract_struct['trid']
assert(abs_format == 'text')
assert(content_type == 'abstract')
pmid = None # Don't worry about pmid for now
extra_annotations = {'tcid': tcid, 'trid': trid}
# Uncompress content
content = zlib.decompress(content_zipped,
zlib.MAX_WBITS+16).decode('utf-8')
self.preprocess_plain_text_string(content, pmid, extra_annotations) | python | def preprocess_abstract_list(self, abstract_list):
"""Preprocess abstracts in database pickle dump format for ISI reader.
For each abstract, creates a plain text file with one sentence per
line, and stores metadata to be included with each statement from
that abstract.
Parameters
----------
abstract_list : list[dict]
Compressed abstracts with corresponding metadata in INDRA database
pickle dump format.
"""
for abstract_struct in abstract_list:
abs_format = abstract_struct['format']
content_type = abstract_struct['text_type']
content_zipped = abstract_struct['content']
tcid = abstract_struct['tcid']
trid = abstract_struct['trid']
assert(abs_format == 'text')
assert(content_type == 'abstract')
pmid = None # Don't worry about pmid for now
extra_annotations = {'tcid': tcid, 'trid': trid}
# Uncompress content
content = zlib.decompress(content_zipped,
zlib.MAX_WBITS+16).decode('utf-8')
self.preprocess_plain_text_string(content, pmid, extra_annotations) | [
"def",
"preprocess_abstract_list",
"(",
"self",
",",
"abstract_list",
")",
":",
"for",
"abstract_struct",
"in",
"abstract_list",
":",
"abs_format",
"=",
"abstract_struct",
"[",
"'format'",
"]",
"content_type",
"=",
"abstract_struct",
"[",
"'text_type'",
"]",
"content_zipped",
"=",
"abstract_struct",
"[",
"'content'",
"]",
"tcid",
"=",
"abstract_struct",
"[",
"'tcid'",
"]",
"trid",
"=",
"abstract_struct",
"[",
"'trid'",
"]",
"assert",
"(",
"abs_format",
"==",
"'text'",
")",
"assert",
"(",
"content_type",
"==",
"'abstract'",
")",
"pmid",
"=",
"None",
"# Don't worry about pmid for now",
"extra_annotations",
"=",
"{",
"'tcid'",
":",
"tcid",
",",
"'trid'",
":",
"trid",
"}",
"# Uncompress content",
"content",
"=",
"zlib",
".",
"decompress",
"(",
"content_zipped",
",",
"zlib",
".",
"MAX_WBITS",
"+",
"16",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"preprocess_plain_text_string",
"(",
"content",
",",
"pmid",
",",
"extra_annotations",
")"
]
| Preprocess abstracts in database pickle dump format for ISI reader.
For each abstract, creates a plain text file with one sentence per
line, and stores metadata to be included with each statement from
that abstract.
Parameters
----------
abstract_list : list[dict]
Compressed abstracts with corresponding metadata in INDRA database
pickle dump format. | [
"Preprocess",
"abstracts",
"in",
"database",
"pickle",
"dump",
"format",
"for",
"ISI",
"reader",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/isi/preprocessor.py#L204-L234 | train |
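A minimal round-trip sketch of the decompression call used above; the wbits value of zlib.MAX_WBITS + 16 tells zlib to expect a gzip-wrapped stream, matching the compressed content format implied by that call. The example string is made up.

import gzip
import zlib

original = 'Example abstract text about MEK and ERK.'
compressed = gzip.compress(original.encode('utf-8'))
# Same call pattern as in preprocess_abstract_list above
restored = zlib.decompress(compressed, zlib.MAX_WBITS + 16).decode('utf-8')
assert restored == original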
sorgerlab/indra | indra/sources/geneways/api.py | process_geneways_files | def process_geneways_files(input_folder=data_folder, get_evidence=True):
"""Reads in Geneways data and returns a list of statements.
Parameters
----------
input_folder : Optional[str]
A folder in which to search for Geneways data. Looks for these
Geneways extraction data files: human_action.txt,
human_actionmention.txt, human_symbols.txt.
Omit this parameter to use the default input folder which is
indra/data.
get_evidence : Optional[bool]
Attempt to find the evidence text for an extraction by downloading
the corresponding text content and searching for the given offset
in the text to get the evidence sentence. Default: True
Returns
-------
gp : GenewaysProcessor
A GenewaysProcessor object which contains a list of INDRA statements
generated from the Geneways action mentions.
"""
gp = GenewaysProcessor(input_folder, get_evidence)
return gp | python | def process_geneways_files(input_folder=data_folder, get_evidence=True):
"""Reads in Geneways data and returns a list of statements.
Parameters
----------
input_folder : Optional[str]
A folder in which to search for Geneways data. Looks for these
Geneways extraction data files: human_action.txt,
human_actionmention.txt, human_symbols.txt.
Omit this parameter to use the default input folder which is
indra/data.
get_evidence : Optional[bool]
Attempt to find the evidence text for an extraction by downloading
the corresponding text content and searching for the given offset
in the text to get the evidence sentence. Default: True
Returns
-------
gp : GenewaysProcessor
A GenewaysProcessor object which contains a list of INDRA statements
generated from the Geneways action mentions.
"""
gp = GenewaysProcessor(input_folder, get_evidence)
return gp | [
"def",
"process_geneways_files",
"(",
"input_folder",
"=",
"data_folder",
",",
"get_evidence",
"=",
"True",
")",
":",
"gp",
"=",
"GenewaysProcessor",
"(",
"input_folder",
",",
"get_evidence",
")",
"return",
"gp"
]
| Reads in Geneways data and returns a list of statements.
Parameters
----------
input_folder : Optional[str]
A folder in which to search for Geneways data. Looks for these
Geneways extraction data files: human_action.txt,
human_actionmention.txt, human_symbols.txt.
Omit this parameter to use the default input folder which is
indra/data.
get_evidence : Optional[bool]
Attempt to find the evidence text for an extraction by downloading
the corresponding text content and searching for the given offset
in the text to get the evidence sentence. Default: True
Returns
-------
gp : GenewaysProcessor
A GenewaysProcessor object which contains a list of INDRA statements
generated from the Geneways action mentions. | [
"Reads",
"in",
"Geneways",
"data",
"and",
"returns",
"a",
"list",
"of",
"statements",
"."
]
| 79a70415832c5702d7a820c7c9ccc8e25010124b | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/api.py#L24-L47 | train |
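A hypothetical usage sketch; it assumes the three Geneways extraction files listed above are present in the given folder, and that the returned processor exposes the converted INDRA statements on a statements attribute, as the docstring implies.

from indra.sources.geneways.api import process_geneways_files

gp = process_geneways_files(input_folder='geneways_data/', get_evidence=False)
for stmt in gp.statements:  # assumed attribute holding the INDRA statements
    print(stmt)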
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.post_flag_create | def post_flag_create(self, post_id, reason):
"""Function to flag a post.
Parameters:
post_id (int): The id of the flagged post.
reason (str): The reason of the flagging.
"""
params = {'post_flag[post_id]': post_id, 'post_flag[reason]': reason}
return self._get('post_flags.json', params, 'POST', auth=True) | python | def post_flag_create(self, post_id, reason):
"""Function to flag a post.
Parameters:
post_id (int): The id of the flagged post.
reason (str): The reason of the flagging.
"""
params = {'post_flag[post_id]': post_id, 'post_flag[reason]': reason}
return self._get('post_flags.json', params, 'POST', auth=True) | [
"def",
"post_flag_create",
"(",
"self",
",",
"post_id",
",",
"reason",
")",
":",
"params",
"=",
"{",
"'post_flag[post_id]'",
":",
"post_id",
",",
"'post_flag[reason]'",
":",
"reason",
"}",
"return",
"self",
".",
"_get",
"(",
"'post_flags.json'",
",",
"params",
",",
"'POST'",
",",
"auth",
"=",
"True",
")"
]
| Function to flag a post.
Parameters:
post_id (int): The id of the flagged post.
reason (str): The reason of the flagging. | [
"Function",
"to",
"flag",
"a",
"post",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L165-L173 | train |
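A hypothetical usage sketch; the client constructor arguments (site name, username, API key) follow pybooru's usual pattern but are assumptions here, and the post id and reason are made up. Flagging requires an authenticated account.

from pybooru import Danbooru

client = Danbooru('danbooru', username='your_username', api_key='your_api_key')
client.post_flag_create(post_id=1234567, reason='Duplicate of post #1234560')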
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.post_versions_list | def post_versions_list(self, updater_name=None, updater_id=None,
post_id=None, start_id=None):
"""Get list of post versions.
Parameters:
updater_name (str):
updater_id (int):
post_id (int):
start_id (int):
"""
params = {
'search[updater_name]': updater_name,
'search[updater_id]': updater_id,
'search[post_id]': post_id,
'search[start_id]': start_id
}
return self._get('post_versions.json', params) | python | def post_versions_list(self, updater_name=None, updater_id=None,
post_id=None, start_id=None):
"""Get list of post versions.
Parameters:
updater_name (str):
updater_id (int):
post_id (int):
start_id (int):
"""
params = {
'search[updater_name]': updater_name,
'search[updater_id]': updater_id,
'search[post_id]': post_id,
'search[start_id]': start_id
}
return self._get('post_versions.json', params) | [
"def",
"post_versions_list",
"(",
"self",
",",
"updater_name",
"=",
"None",
",",
"updater_id",
"=",
"None",
",",
"post_id",
"=",
"None",
",",
"start_id",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'search[updater_name]'",
":",
"updater_name",
",",
"'search[updater_id]'",
":",
"updater_id",
",",
"'search[post_id]'",
":",
"post_id",
",",
"'search[start_id]'",
":",
"start_id",
"}",
"return",
"self",
".",
"_get",
"(",
"'post_versions.json'",
",",
"params",
")"
]
| Get list of post versions.
Parameters:
updater_name (str):
updater_id (int):
post_id (int):
start_id (int): | [
"Get",
"list",
"of",
"post",
"versions",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L210-L226 | train |
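A hypothetical read-only sketch of this wrapper; the keys read from each returned version dict are assumptions about the Danbooru response format.

from pybooru import Danbooru

client = Danbooru('danbooru')
for version in client.post_versions_list(post_id=1234567):
    print(version.get('updater_id'), version.get('updated_at'))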
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.artist_list | def artist_list(self, query=None, artist_id=None, creator_name=None,
creator_id=None, is_active=None, is_banned=None,
empty_only=None, order=None):
"""Get an artist of a list of artists.
Parameters:
query (str):
This field has multiple uses depending on what the query starts
with:
'http:desired_url':
Search for artist with this URL.
'name:desired_url':
Search for artists with the given name as their base name.
'other:other_name':
Search for artists with the given name in their other
names.
'group:group_name':
Search for artists belonging to the group with the given
name.
'status:banned':
Search for artists that are banned. else Search for the
given name in the base name and the other names.
artist_id (id): The artist id.
creator_name (str): Exact creator name.
creator_id (id): Artist creator id.
is_active (bool): Can be: true, false
is_banned (bool): Can be: true, false
empty_only (True): Search for artists that have 0 posts. Can be:
true
order (str): Can be: name, updated_at.
"""
params = {
'search[name]': query,
'search[id]': artist_id,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active,
'search[is_banned]': is_banned,
'search[empty_only]': empty_only,
'search[order]': order
}
return self._get('artists.json', params) | python | def artist_list(self, query=None, artist_id=None, creator_name=None,
creator_id=None, is_active=None, is_banned=None,
empty_only=None, order=None):
"""Get an artist of a list of artists.
Parameters:
query (str):
This field has multiple uses depending on what the query starts
with:
'http:desired_url':
Search for artist with this URL.
'name:desired_url':
Search for artists with the given name as their base name.
'other:other_name':
Search for artists with the given name in their other
names.
'group:group_name':
Search for artists belonging to the group with the given
name.
'status:banned':
Search for artists that are banned. else Search for the
given name in the base name and the other names.
artist_id (id): The artist id.
creator_name (str): Exact creator name.
creator_id (id): Artist creator id.
is_active (bool): Can be: true, false
is_banned (bool): Can be: true, false
empty_only (True): Search for artists that have 0 posts. Can be:
true
order (str): Can be: name, updated_at.
"""
params = {
'search[name]': query,
'search[id]': artist_id,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active,
'search[is_banned]': is_banned,
'search[empty_only]': empty_only,
'search[order]': order
}
return self._get('artists.json', params) | [
"def",
"artist_list",
"(",
"self",
",",
"query",
"=",
"None",
",",
"artist_id",
"=",
"None",
",",
"creator_name",
"=",
"None",
",",
"creator_id",
"=",
"None",
",",
"is_active",
"=",
"None",
",",
"is_banned",
"=",
"None",
",",
"empty_only",
"=",
"None",
",",
"order",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'search[name]'",
":",
"query",
",",
"'search[id]'",
":",
"artist_id",
",",
"'search[creator_name]'",
":",
"creator_name",
",",
"'search[creator_id]'",
":",
"creator_id",
",",
"'search[is_active]'",
":",
"is_active",
",",
"'search[is_banned]'",
":",
"is_banned",
",",
"'search[empty_only]'",
":",
"empty_only",
",",
"'search[order]'",
":",
"order",
"}",
"return",
"self",
".",
"_get",
"(",
"'artists.json'",
",",
"params",
")"
]
| Get an artist or a list of artists.
Parameters:
query (str):
This field has multiple uses depending on what the query starts
with:
'http:desired_url':
Search for artist with this URL.
'name:desired_url':
Search for artists with the given name as their base name.
'other:other_name':
Search for artists with the given name in their other
names.
'group:group_name':
Search for artists belonging to the group with the given
name.
'status:banned':
Search for artists that are banned. else Search for the
given name in the base name and the other names.
artist_id (id): The artist id.
creator_name (str): Exact creator name.
creator_id (id): Artist creator id.
is_active (bool): Can be: true, false
is_banned (bool): Can be: true, false
empty_only (True): Search for artists that have 0 posts. Can be:
true
order (str): Can be: name, updated_at. | [
"Get",
"an",
"artist",
"of",
"a",
"list",
"of",
"artists",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L496-L537 | train |
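A hypothetical sketch of the query syntax described above; the 'name:' prefix restricts the search to base names, the artist name is made up, and the response keys are assumed.

from pybooru import Danbooru

client = Danbooru('danbooru')
for artist in client.artist_list(query='name:yamada', order='updated_at'):
    print(artist.get('id'), artist.get('name'))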
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.artist_commentary_list | def artist_commentary_list(self, text_matches=None, post_id=None,
post_tags_match=None, original_present=None,
translated_present=None):
"""list artist commentary.
Parameters:
text_matches (str):
post_id (int):
post_tags_match (str): The commentary's post's tags match the
given terms. Meta-tags not supported.
original_present (str): Can be: yes, no.
translated_present (str): Can be: yes, no.
"""
params = {
'search[text_matches]': text_matches,
'search[post_id]': post_id,
'search[post_tags_match]': post_tags_match,
'search[original_present]': original_present,
'search[translated_present]': translated_present
}
return self._get('artist_commentaries.json', params) | python | def artist_commentary_list(self, text_matches=None, post_id=None,
post_tags_match=None, original_present=None,
translated_present=None):
"""list artist commentary.
Parameters:
text_matches (str):
post_id (int):
post_tags_match (str): The commentary's post's tags match the
given terms. Meta-tags not supported.
original_present (str): Can be: yes, no.
translated_present (str): Can be: yes, no.
"""
params = {
'search[text_matches]': text_matches,
'search[post_id]': post_id,
'search[post_tags_match]': post_tags_match,
'search[original_present]': original_present,
'search[translated_present]': translated_present
}
return self._get('artist_commentaries.json', params) | [
"def",
"artist_commentary_list",
"(",
"self",
",",
"text_matches",
"=",
"None",
",",
"post_id",
"=",
"None",
",",
"post_tags_match",
"=",
"None",
",",
"original_present",
"=",
"None",
",",
"translated_present",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'search[text_matches]'",
":",
"text_matches",
",",
"'search[post_id]'",
":",
"post_id",
",",
"'search[post_tags_match]'",
":",
"post_tags_match",
",",
"'search[original_present]'",
":",
"original_present",
",",
"'search[translated_present]'",
":",
"translated_present",
"}",
"return",
"self",
".",
"_get",
"(",
"'artist_commentaries.json'",
",",
"params",
")"
]
| list artist commentary.
Parameters:
text_matches (str):
post_id (int):
post_tags_match (str): The commentary's post's tags match the
given terms. Meta-tags not supported.
original_present (str): Can be: yes, no.
translated_present (str): Can be: yes, no. | [
"list",
"artist",
"commentary",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L655-L675 | train |
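A hypothetical sketch filtering commentaries to those with an original-language section; the post id and the response keys are assumptions.

from pybooru import Danbooru

client = Danbooru('danbooru')
commentaries = client.artist_commentary_list(post_id=1234567,
                                             original_present='yes')
for commentary in commentaries:
    print(commentary.get('original_title'))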
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.artist_commentary_versions | def artist_commentary_versions(self, post_id, updater_id):
"""Return list of artist commentary versions.
Parameters:
updater_id (int):
post_id (int):
"""
params = {'search[updater_id]': updater_id, 'search[post_id]': post_id}
return self._get('artist_commentary_versions.json', params) | python | def artist_commentary_versions(self, post_id, updater_id):
"""Return list of artist commentary versions.
Parameters:
updater_id (int):
post_id (int):
"""
params = {'search[updater_id]': updater_id, 'search[post_id]': post_id}
return self._get('artist_commentary_versions.json', params) | [
"def",
"artist_commentary_versions",
"(",
"self",
",",
"post_id",
",",
"updater_id",
")",
":",
"params",
"=",
"{",
"'search[updater_id]'",
":",
"updater_id",
",",
"'search[post_id]'",
":",
"post_id",
"}",
"return",
"self",
".",
"_get",
"(",
"'artist_commentary_versions.json'",
",",
"params",
")"
]
| Return list of artist commentary versions.
Parameters:
updater_id (int):
post_id (int): | [
"Return",
"list",
"of",
"artist",
"commentary",
"versions",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L711-L719 | train |
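A hypothetical sketch; unlike most of the other list wrappers here, this one takes both post_id and updater_id as required parameters rather than optional filters. The ids are made up.

from pybooru import Danbooru

client = Danbooru('danbooru')
versions = client.artist_commentary_versions(post_id=1234567, updater_id=98765)
print(len(versions))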
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.note_list | def note_list(self, body_matches=None, post_id=None, post_tags_match=None,
creator_name=None, creator_id=None, is_active=None):
"""Return list of notes.
Parameters:
body_matches (str): The note's body matches the given terms.
post_id (int): A specific post.
post_tags_match (str): The note's post's tags match the given terms.
creator_name (str): The creator's name. Exact match.
creator_id (int): The creator's user id.
is_active (bool): Can be: True, False.
"""
params = {
'search[body_matches]': body_matches,
'search[post_id]': post_id,
'search[post_tags_match]': post_tags_match,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active
}
return self._get('notes.json', params) | python | def note_list(self, body_matches=None, post_id=None, post_tags_match=None,
creator_name=None, creator_id=None, is_active=None):
"""Return list of notes.
Parameters:
body_matches (str): The note's body matches the given terms.
post_id (int): A specific post.
post_tags_match (str): The note's post's tags match the given terms.
creator_name (str): The creator's name. Exact match.
creator_id (int): The creator's user id.
is_active (bool): Can be: True, False.
"""
params = {
'search[body_matches]': body_matches,
'search[post_id]': post_id,
'search[post_tags_match]': post_tags_match,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active
}
return self._get('notes.json', params) | [
"def",
"note_list",
"(",
"self",
",",
"body_matches",
"=",
"None",
",",
"post_id",
"=",
"None",
",",
"post_tags_match",
"=",
"None",
",",
"creator_name",
"=",
"None",
",",
"creator_id",
"=",
"None",
",",
"is_active",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'search[body_matches]'",
":",
"body_matches",
",",
"'search[post_id]'",
":",
"post_id",
",",
"'search[post_tags_match]'",
":",
"post_tags_match",
",",
"'search[creator_name]'",
":",
"creator_name",
",",
"'search[creator_id]'",
":",
"creator_id",
",",
"'search[is_active]'",
":",
"is_active",
"}",
"return",
"self",
".",
"_get",
"(",
"'notes.json'",
",",
"params",
")"
]
| Return list of notes.
Parameters:
body_matches (str): The note's body matches the given terms.
post_id (int): A specific post.
post_tags_match (str): The note's post's tags match the given terms.
creator_name (str): The creator's name. Exact match.
creator_id (int): The creator's user id.
is_active (bool): Can be: True, False. | [
"Return",
"list",
"of",
"notes",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L721-L741 | train |
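A hypothetical sketch combining a body-text search with a tag filter on the parent post; the search terms and response keys are assumptions.

from pybooru import Danbooru

client = Danbooru('danbooru')
for note in client.note_list(body_matches='translation', post_tags_match='comic'):
    print(note.get('id'), note.get('body'))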
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.note_versions | def note_versions(self, updater_id=None, post_id=None, note_id=None):
"""Get list of note versions.
Parameters:
updater_id (int):
post_id (int):
note_id (int):
"""
params = {
'search[updater_id]': updater_id,
'search[post_id]': post_id,
'search[note_id]': note_id
}
return self._get('note_versions.json', params) | python | def note_versions(self, updater_id=None, post_id=None, note_id=None):
"""Get list of note versions.
Parameters:
updater_id (int):
post_id (int):
note_id (int):
"""
params = {
'search[updater_id]': updater_id,
'search[post_id]': post_id,
'search[note_id]': note_id
}
return self._get('note_versions.json', params) | [
"def",
"note_versions",
"(",
"self",
",",
"updater_id",
"=",
"None",
",",
"post_id",
"=",
"None",
",",
"note_id",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'search[updater_id]'",
":",
"updater_id",
",",
"'search[post_id]'",
":",
"post_id",
",",
"'search[note_id]'",
":",
"note_id",
"}",
"return",
"self",
".",
"_get",
"(",
"'note_versions.json'",
",",
"params",
")"
]
| Get list of note versions.
Parameters:
updater_id (int):
post_id (int):
note_id (int): | [
"Get",
"list",
"of",
"note",
"versions",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L817-L830 | train |
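A hypothetical sketch listing the edit history of the notes on a single (made-up) post; the response keys are assumed.

from pybooru import Danbooru

client = Danbooru('danbooru')
for version in client.note_versions(post_id=1234567):
    print(version.get('note_id'), version.get('updater_id'))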
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.user_list | def user_list(self, name=None, name_matches=None, min_level=None,
max_level=None, level=None, user_id=None, order=None):
"""Function to get a list of users or a specific user.
Levels:
Users have a number attribute called level representing their role.
The current levels are:
Member 20, Gold 30, Platinum 31, Builder 32, Contributor 33,
Janitor 35, Moderator 40 and Admin 50.
Parameters:
name (str): Supports patterns.
name_matches (str): Same functionality as name.
min_level (int): Minimum level (see section on levels).
max_level (int): Maximum level (see section on levels).
level (int): Current level (see section on levels).
user_id (int): The user id.
order (str): Can be: 'name', 'post_upload_count', 'note_count',
'post_update_count', 'date'.
"""
params = {
'search[name]': name,
'search[name_matches]': name_matches,
'search[min_level]': min_level,
'search[max_level]': max_level,
'search[level]': level,
'search[id]': user_id,
'search[order]': order
}
return self._get('users.json', params) | python | def user_list(self, name=None, name_matches=None, min_level=None,
max_level=None, level=None, user_id=None, order=None):
"""Function to get a list of users or a specific user.
Levels:
Users have a number attribute called level representing their role.
The current levels are:
Member 20, Gold 30, Platinum 31, Builder 32, Contributor 33,
Janitor 35, Moderator 40 and Admin 50.
Parameters:
name (str): Supports patterns.
name_matches (str): Same functionality as name.
min_level (int): Minimum level (see section on levels).
max_level (int): Maximum level (see section on levels).
level (int): Current level (see section on levels).
user_id (int): The user id.
order (str): Can be: 'name', 'post_upload_count', 'note_count',
'post_update_count', 'date'.
"""
params = {
'search[name]': name,
'search[name_matches]': name_matches,
'search[min_level]': min_level,
'search[max_level]': max_level,
'search[level]': level,
'search[id]': user_id,
'search[order]': order
}
return self._get('users.json', params) | [
"def",
"user_list",
"(",
"self",
",",
"name",
"=",
"None",
",",
"name_matches",
"=",
"None",
",",
"min_level",
"=",
"None",
",",
"max_level",
"=",
"None",
",",
"level",
"=",
"None",
",",
"user_id",
"=",
"None",
",",
"order",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'search[name]'",
":",
"name",
",",
"'search[name_matches]'",
":",
"name_matches",
",",
"'search[min_level]'",
":",
"min_level",
",",
"'search[max_level]'",
":",
"max_level",
",",
"'search[level]'",
":",
"level",
",",
"'search[id]'",
":",
"user_id",
",",
"'search[order]'",
":",
"order",
"}",
"return",
"self",
".",
"_get",
"(",
"'users.json'",
",",
"params",
")"
]
| Function to get a list of users or a specific user.
Levels:
Users have a number attribute called level representing their role.
The current levels are:
Member 20, Gold 30, Platinum 31, Builder 32, Contributor 33,
Janitor 35, Moderator 40 and Admin 50.
Parameters:
name (str): Supports patterns.
name_matches (str): Same functionality as name.
min_level (int): Minimum level (see section on levels).
max_level (int): Maximum level (see section on levels).
level (int): Current level (see section on levels).
user_id (int): The user id.
order (str): Can be: 'name', 'post_upload_count', 'note_count',
'post_update_count', 'date'. | [
"Function",
"to",
"get",
"a",
"list",
"of",
"users",
"or",
"a",
"specific",
"user",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L832-L862 | train |
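A hypothetical sketch using the level values listed in the docstring (Moderator is 40); the response keys are assumptions.

from pybooru import Danbooru

client = Danbooru('danbooru')
# Moderators and admins, ordered alphabetically
for user in client.user_list(min_level=40, order='name'):
    print(user.get('name'), user.get('level'))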
LuqueDaniel/pybooru | pybooru/api_danbooru.py | DanbooruApi_Mixin.pool_list | def pool_list(self, name_matches=None, pool_ids=None, category=None,
description_matches=None, creator_name=None, creator_id=None,
is_deleted=None, is_active=None, order=None):
"""Get a list of pools.
Parameters:
name_matches (str):
pool_ids (str): Can search for multiple ID's at once, separated by
commas.
description_matches (str):
creator_name (str):
creator_id (int):
is_active (bool): Can be: true, false.
is_deleted (bool): Can be: True, False.
order (str): Can be: name, created_at, post_count, date.
category (str): Can be: series, collection.
"""
params = {
'search[name_matches]': name_matches,
'search[id]': pool_ids,
'search[description_matches]': description_matches,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active,
'search[is_deleted]': is_deleted,
'search[order]': order,
'search[category]': category
}
return self._get('pools.json', params) | python | def pool_list(self, name_matches=None, pool_ids=None, category=None,
description_matches=None, creator_name=None, creator_id=None,
is_deleted=None, is_active=None, order=None):
"""Get a list of pools.
Parameters:
name_matches (str):
pool_ids (str): Can search for multiple ID's at once, separated by
commas.
description_matches (str):
creator_name (str):
creator_id (int):
is_active (bool): Can be: true, false.
is_deleted (bool): Can be: True, False.
order (str): Can be: name, created_at, post_count, date.
category (str): Can be: series, collection.
"""
params = {
'search[name_matches]': name_matches,
'search[id]': pool_ids,
'search[description_matches]': description_matches,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active,
'search[is_deleted]': is_deleted,
'search[order]': order,
'search[category]': category
}
return self._get('pools.json', params) | [
"def",
"pool_list",
"(",
"self",
",",
"name_matches",
"=",
"None",
",",
"pool_ids",
"=",
"None",
",",
"category",
"=",
"None",
",",
"description_matches",
"=",
"None",
",",
"creator_name",
"=",
"None",
",",
"creator_id",
"=",
"None",
",",
"is_deleted",
"=",
"None",
",",
"is_active",
"=",
"None",
",",
"order",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'search[name_matches]'",
":",
"name_matches",
",",
"'search[id]'",
":",
"pool_ids",
",",
"'search[description_matches]'",
":",
"description_matches",
",",
"'search[creator_name]'",
":",
"creator_name",
",",
"'search[creator_id]'",
":",
"creator_id",
",",
"'search[is_active]'",
":",
"is_active",
",",
"'search[is_deleted]'",
":",
"is_deleted",
",",
"'search[order]'",
":",
"order",
",",
"'search[category]'",
":",
"category",
"}",
"return",
"self",
".",
"_get",
"(",
"'pools.json'",
",",
"params",
")"
]
| Get a list of pools.
Parameters:
name_matches (str):
pool_ids (str): Can search for multiple ID's at once, separated by
commas.
description_matches (str):
creator_name (str):
creator_id (int):
is_active (bool): Can be: true, false.
is_deleted (bool): Can be: True, False.
order (str): Can be: name, created_at, post_count, date.
category (str): Can be: series, collection. | [
"Get",
"a",
"list",
"of",
"pools",
"."
]
| 60cd5254684d293b308f0b11b8f4ac2dce101479 | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L872-L900 | train |
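A hypothetical sketch; 'series' is one of the two documented categories, the pool name is made up, and the response keys are assumptions.

from pybooru import Danbooru

client = Danbooru('danbooru')
pools = client.pool_list(name_matches='short_stories', category='series',
                         is_deleted=False, order='post_count')
for pool in pools:
    print(pool.get('id'), pool.get('name'), pool.get('post_count'))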