Please provide a description of the function:def _any_bound_condition_fails_criterion(agent, criterion):
bc_agents = [bc.agent for bc in agent.bound_conditions]
for b in bc_agents:
if not criterion(b):
return True
return False | [
"Returns True if any bound condition fails to meet the specified\n criterion.\n\n Parameters\n ----------\n agent: Agent\n The agent whose bound conditions we evaluate\n criterion: function\n Evaluates criterion(a) for each a in a bound condition and returns True\n if any agents fail to meet the criterion.\n\n Returns\n -------\n any_meets: bool\n True if and only if any of the agents in a bound condition fail to match\n the specified criteria\n "
]
|
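The helper above is private, but its criterion pattern is easy to exercise by hand. A minimal sketch, assuming INDRA is installed and that Agent and BoundCondition can be imported from indra.statements (the HGNC grounding is illustrative):

from indra.statements import Agent, BoundCondition

partner = Agent('XYZ')  # no grounding in db_refs
braf = Agent('BRAF', db_refs={'HGNC': '1097'},
             bound_conditions=[BoundCondition(partner)])

# A simple criterion: the agent has at least one db_refs entry
criterion = lambda a: bool(a.db_refs)

# Mirrors _any_bound_condition_fails_criterion(braf, criterion)
any_fails = any(not criterion(bc.agent) for bc in braf.bound_conditions)
print(any_fails)  # True, because the bound partner is ungrounded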
Please provide a description of the function:def filter_grounded_only(stmts_in, **kwargs):
remove_bound = kwargs.get('remove_bound', False)
logger.info('Filtering %d statements for grounded agents...' %
len(stmts_in))
stmts_out = []
score_threshold = kwargs.get('score_threshold')
for st in stmts_in:
grounded = True
for agent in st.agent_list():
if agent is not None:
criterion = lambda x: _agent_is_grounded(x, score_threshold)
if not criterion(agent):
grounded = False
break
if not isinstance(agent, Agent):
continue
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
grounded = False
break
if grounded:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to statements that have grounded agents.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n score_threshold : Optional[float]\n If scored groundings are available in a list and the highest score\n if below this threshold, the Statement is filtered out.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n remove_bound: Optional[bool]\n If true, removes ungrounded bound conditions from a statement.\n If false (default), filters out statements with ungrounded bound\n conditions.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
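A short usage sketch for the grounding filter, assuming it is exposed as indra.tools.assemble_corpus.filter_grounded_only as in the INDRA codebase; an agent whose only db_refs entry is TEXT counts as ungrounded:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'TEXT': 'MEK1'})
mystery = Agent('XYZ', db_refs={'TEXT': 'XYZ'})  # TEXT only: ungrounded

stmts = [Phosphorylation(mek, mek), Phosphorylation(mek, mystery)]

# Keeps only the first statement; save='grounded.pkl' would also pickle it
stmts_out = ac.filter_grounded_only(stmts)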
Please provide a description of the function:def _agent_is_gene(agent, specific_only):
if not specific_only:
if not(agent.db_refs.get('HGNC') or \
agent.db_refs.get('UP') or \
agent.db_refs.get('FPLX')):
return False
else:
if not(agent.db_refs.get('HGNC') or \
agent.db_refs.get('UP')):
return False
return True | [
"Returns whether an agent is for a gene.\n\n Parameters\n ----------\n agent: Agent\n The agent to evaluate\n specific_only : Optional[bool]\n If True, only elementary genes/proteins evaluate as genes and families\n will be filtered out. If False, families are also included.\n\n Returns\n -------\n is_gene: bool\n Whether the agent is a gene\n "
]
|
Please provide a description of the function:def filter_genes_only(stmts_in, **kwargs):
remove_bound = 'remove_bound' in kwargs and kwargs['remove_bound']
specific_only = kwargs.get('specific_only')
logger.info('Filtering %d statements for ones containing genes only...' %
len(stmts_in))
stmts_out = []
for st in stmts_in:
genes_only = True
for agent in st.agent_list():
if agent is not None:
criterion = lambda a: _agent_is_gene(a, specific_only)
if not criterion(agent):
genes_only = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
else:
if _any_bound_condition_fails_criterion(agent, criterion):
genes_only = False
break
if genes_only:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to statements containing genes only.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n specific_only : Optional[bool]\n If True, only elementary genes/proteins will be kept and families\n will be filtered out. If False, families are also included in the\n output. Default: False\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n remove_bound: Optional[bool]\n If true, removes bound conditions that are not genes\n If false (default), filters out statements with non-gene bound\n conditions\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
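A sketch contrasting the default behavior with specific_only=True, under the same indra.tools.assemble_corpus assumption; the groundings are illustrative:

from indra.statements import Agent, Activation
from indra.tools import assemble_corpus as ac

erk = Agent('ERK', db_refs={'FPLX': 'ERK'})               # protein family
mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})          # specific gene
drug = Agent('vemurafenib', db_refs={'CHEBI': 'CHEBI:63637'})

stmts = [Activation(mapk1, erk), Activation(drug, mapk1)]

ac.filter_genes_only(stmts)                      # keeps the first statement
ac.filter_genes_only(stmts, specific_only=True)  # drops it too: ERK is a family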
Please provide a description of the function:def filter_belief(stmts_in, belief_cutoff, **kwargs):
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements to above %f belief' %
(len(stmts_in), belief_cutoff))
# The first round of filtering is in the top-level list
stmts_out = []
# Now we eliminate supports/supported-by
for stmt in stmts_in:
if stmt.belief < belief_cutoff:
continue
stmts_out.append(stmt)
supp_by = []
supp = []
for st in stmt.supports:
if st.belief >= belief_cutoff:
supp.append(st)
for st in stmt.supported_by:
if st.belief >= belief_cutoff:
supp_by.append(st)
stmt.supports = supp
stmt.supported_by = supp_by
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to statements with belief above a given cutoff.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n belief_cutoff : float\n Only statements with belief above the belief_cutoff will be returned.\n Here 0 < belief_cutoff < 1.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
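Belief scores are normally assigned by INDRA's belief engine; here they are set by hand just to show the cutoff behavior (same module assumption as above):

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

st1 = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))
st2 = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'), residue='S')
st1.belief = 0.95
st2.belief = 0.35

# Only st1 survives; supports/supported_by lists are pruned the same way
high_conf = ac.filter_belief([st1, st2], 0.8)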
Please provide a description of the function:def filter_gene_list(stmts_in, gene_list, policy, allow_families=False,
**kwargs):
invert = kwargs.get('invert', False)
remove_bound = kwargs.get('remove_bound', False)
if policy not in ('one', 'all'):
logger.error('Policy %s is invalid, not applying filter.' % policy)
else:
genes_str = ', '.join(gene_list)
inv_str = 'not ' if invert else ''
logger.info(('Filtering %d statements for ones %scontaining "%s" of: '
'%s...') % (len(stmts_in), inv_str, policy, genes_str))
# If we're allowing families, make a list of all FamPlex IDs that
# contain members of the gene list, and add them to the filter list
filter_list = copy(gene_list)
if allow_families:
for hgnc_name in gene_list:
gene_uri = hierarchies['entity'].get_uri('HGNC', hgnc_name)
parents = hierarchies['entity'].get_parents(gene_uri)
for par_uri in parents:
ns, id = hierarchies['entity'].ns_id_from_uri(par_uri)
filter_list.append(id)
stmts_out = []
if remove_bound:
# If requested, remove agents whose names are not in the list from
# all bound conditions
if not invert:
keep_criterion = lambda a: a.name in filter_list
else:
keep_criterion = lambda a: a.name not in filter_list
for st in stmts_in:
for agent in st.agent_list():
_remove_bound_conditions(agent, keep_criterion)
if policy == 'one':
for st in stmts_in:
found_gene = False
if not remove_bound:
agent_list = st.agent_list_with_bound_condition_agents()
else:
agent_list = st.agent_list()
for agent in agent_list:
if agent is not None:
if agent.name in filter_list:
found_gene = True
break
if (found_gene and not invert) or (not found_gene and invert):
stmts_out.append(st)
elif policy == 'all':
for st in stmts_in:
found_genes = True
if not remove_bound:
agent_list = st.agent_list_with_bound_condition_agents()
else:
agent_list = st.agent_list()
for agent in agent_list:
if agent is not None:
if agent.name not in filter_list:
found_genes = False
break
if (found_genes and not invert) or (not found_genes and invert):
stmts_out.append(st)
else:
stmts_out = stmts_in
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Return statements that contain genes given in a list.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n gene_list : list[str]\n A list of gene symbols to filter for.\n policy : str\n The policy to apply when filtering for the list of genes. \"one\": keep\n statements that contain at least one of the list of genes and\n possibly others not in the list \"all\": keep statements that only\n contain genes given in the list\n allow_families : Optional[bool]\n Will include statements involving FamPlex families containing one\n of the genes in the gene list. Default: False\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n remove_bound: Optional[str]\n If true, removes bound conditions that are not genes in the list\n If false (default), looks at agents in the bound conditions in addition\n to those participating in the statement directly when applying the\n specified policy.\n invert : Optional[bool]\n If True, the statements that do not match according to the policy\n are returned. Default: False\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
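A sketch of the two policies and the invert flag; allow_families=True would additionally accept FamPlex parents of the listed genes (for example RAF for BRAF), which needs the entity hierarchy resources to be available:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

stmts = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1')),
         Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))]

ac.filter_gene_list(stmts, ['BRAF'], 'one')               # first statement only
ac.filter_gene_list(stmts, ['BRAF', 'MAP2K1'], 'all')     # first statement only
ac.filter_gene_list(stmts, ['BRAF'], 'one', invert=True)  # second statement only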
Please provide a description of the function:def filter_concept_names(stmts_in, name_list, policy, **kwargs):
invert = kwargs.get('invert', False)
if policy not in ('one', 'all'):
logger.error('Policy %s is invalid, not applying filter.' % policy)
else:
name_str = ', '.join(name_list)
inv_str = 'not ' if invert else ''
logger.info(('Filtering %d statements for ones %scontaining "%s" of: '
'%s...') % (len(stmts_in), inv_str, policy, name_str))
stmts_out = []
if policy == 'one':
for st in stmts_in:
found = False
agent_list = st.agent_list()
for agent in agent_list:
if agent is not None:
if agent.name in name_list:
found = True
break
if (found and not invert) or (not found and invert):
stmts_out.append(st)
elif policy == 'all':
for st in stmts_in:
found = True
agent_list = st.agent_list()
for agent in agent_list:
if agent is not None:
if agent.name not in name_list:
found = False
break
if (found and not invert) or (not found and invert):
stmts_out.append(st)
else:
stmts_out = stmts_in
logger.info('%d Statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Return Statements that refer to concepts/agents given as a list of names.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of Statements to filter.\n name_list : list[str]\n A list of concept/agent names to filter for.\n policy : str\n The policy to apply when filtering for the list of names. \"one\": keep\n Statements that contain at least one of the list of names and\n possibly others not in the list \"all\": keep Statements that only\n contain names given in the list\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n invert : Optional[bool]\n If True, the Statements that do not match according to the policy\n are returned. Default: False\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered Statements.\n "
]
|
Please provide a description of the function:def filter_by_db_refs(stmts_in, namespace, values, policy, **kwargs):
invert = kwargs.get('invert', False)
match_suffix = kwargs.get('match_suffix', False)
if policy not in ('one', 'all'):
logger.error('Policy %s is invalid, not applying filter.' % policy)
return
else:
name_str = ', '.join(values)
rev_mod = 'not ' if invert else ''
logger.info(('Filtering %d statements for those with %s agents %s'
'grounded to: %s in the %s namespace...') %
(len(stmts_in), policy, rev_mod, name_str, namespace))
def meets_criterion(agent):
if namespace not in agent.db_refs:
return False
entry = agent.db_refs[namespace]
if isinstance(entry, list):
entry = entry[0][0]
ret = False
# Match suffix or entire entry
if match_suffix:
if any([entry.endswith(e) for e in values]):
ret = True
else:
if entry in values:
ret = True
# Invert if needed
if invert:
return not ret
else:
return ret
enough = all if policy == 'all' else any
stmts_out = [s for s in stmts_in
if enough([meets_criterion(ag) for ag in s.agent_list()
if ag is not None])]
logger.info('%d Statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to Statements whose agents are grounded to a matching entry.\n\n Statements are filtered so that the db_refs entry (of the given namespace)\n of their Agent/Concept arguments take a value in the given list of values.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of Statements to filter.\n namespace : str\n The namespace in db_refs to which the filter should apply.\n values : list[str]\n A list of values in the given namespace to which the filter should\n apply.\n policy : str\n The policy to apply when filtering for the db_refs. \"one\": keep\n Statements that contain at least one of the list of db_refs and\n possibly others not in the list \"all\": keep Statements that only\n contain db_refs given in the list\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n invert : Optional[bool]\n If True, the Statements that do not match according to the policy\n are returned. Default: False\n match_suffix : Optional[bool]\n If True, the suffix of the db_refs entry is matches agains the list\n of entries\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered Statements.\n "
]
|
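A usage sketch for filter_by_db_refs; the HGNC identifiers are illustrative:

from indra.statements import Agent, Activation
from indra.tools import assemble_corpus as ac

egfr = Agent('EGFR', db_refs={'HGNC': '3236'})
tp53 = Agent('TP53', db_refs={'HGNC': '11998'})

stmts = [Activation(egfr, tp53)]

# Keep statements where every agent's HGNC entry is in the value list
ac.filter_by_db_refs(stmts, 'HGNC', ['3236', '11998'], 'all')
# match_suffix=True is useful when db_refs values are URI-like strings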
Please provide a description of the function:def filter_human_only(stmts_in, **kwargs):
from indra.databases import uniprot_client
if 'remove_bound' in kwargs and kwargs['remove_bound']:
remove_bound = True
else:
remove_bound = False
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements for human genes only...' %
len(stmts_in))
stmts_out = []
def criterion(agent):
upid = agent.db_refs.get('UP')
if upid and not uniprot_client.is_human(upid):
return False
else:
return True
for st in stmts_in:
human_genes = True
for agent in st.agent_list():
if agent is not None:
if not criterion(agent):
human_genes = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
human_genes = False
break
if human_genes:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter out statements that are grounded, but not to a human gene.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n remove_bound: Optional[bool]\n If true, removes all bound conditions that are grounded but not to human\n genes. If false (default), filters out statements with boundary\n conditions that are grounded to non-human genes.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
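A sketch for filter_human_only, assuming indra.databases.uniprot_client can resolve the (illustrative) UniProt IDs from its local resources:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

human_mek = Agent('MAP2K1', db_refs={'UP': 'Q02750'})  # human UniProt entry
yeast_ste7 = Agent('STE7', db_refs={'UP': 'P06784'})   # S. cerevisiae entry

stmts = [Phosphorylation(human_mek, human_mek),
         Phosphorylation(yeast_ste7, human_mek)]

# The second statement is filtered out because STE7 is not a human protein
ac.filter_human_only(stmts)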
Please provide a description of the function:def filter_direct(stmts_in, **kwargs):
def get_is_direct(stmt):
any_indirect = False
for ev in stmt.evidence:
if ev.epistemics.get('direct') is True:
return True
elif ev.epistemics.get('direct') is False:
# This guarantees that we have seen at least
# some evidence that the statement is indirect
any_indirect = True
if any_indirect:
return False
return True
logger.info('Filtering %d statements to direct ones...' % len(stmts_in))
stmts_out = []
for st in stmts_in:
if get_is_direct(st):
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to statements that are direct interactions\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n ",
"Returns true if there is evidence that the statement is a direct\n interaction.\n\n If any of the evidences associated with the statement\n indicates a direct interatcion then we assume the interaction\n is direct. If there is no evidence for the interaction being indirect\n then we default to direct.\n "
]
|
Please provide a description of the function:def filter_no_hypothesis(stmts_in, **kwargs):
logger.info('Filtering %d statements to no hypothesis...' % len(stmts_in))
stmts_out = []
for st in stmts_in:
all_hypotheses = True
ev = None
for ev in st.evidence:
if not ev.epistemics.get('hypothesis', False):
all_hypotheses = False
break
if ev is None:
all_hypotheses = False
if not all_hypotheses:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to statements that are not marked as hypothesis in epistemics.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
Please provide a description of the function:def filter_evidence_source(stmts_in, source_apis, policy='one', **kwargs):
logger.info('Filtering %d statements to evidence source "%s" of: %s...' %
(len(stmts_in), policy, ', '.join(source_apis)))
stmts_out = []
for st in stmts_in:
sources = set([ev.source_api for ev in st.evidence])
if policy == 'one':
if sources.intersection(source_apis):
stmts_out.append(st)
if policy == 'all':
if sources.intersection(source_apis) == set(source_apis):
stmts_out.append(st)
if policy == 'none':
if not sources.intersection(source_apis):
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to statements that have evidence from a given set of sources.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n source_apis : list[str]\n A list of sources to filter for. Examples: biopax, bel, reach\n policy : Optional[str]\n If 'one', a statement that hase evidence from any of the sources is\n kept. If 'all', only those statements are kept which have evidence\n from all the input sources specified in source_apis.\n If 'none', only those statements are kept that don't have evidence\n from any of the sources specified in source_apis.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
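A sketch contrasting the 'one' and 'none' policies; the source_api strings follow INDRA's reader and database naming:

from indra.statements import Agent, Evidence, Phosphorylation
from indra.tools import assemble_corpus as ac

st1 = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'),
                      evidence=[Evidence(source_api='reach')])
st2 = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'),
                      evidence=[Evidence(source_api='biopax')])

ac.filter_evidence_source([st1, st2], ['reach'])                 # keeps st1
ac.filter_evidence_source([st1, st2], ['reach'], policy='none')  # keeps st2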
Please provide a description of the function:def filter_top_level(stmts_in, **kwargs):
logger.info('Filtering %d statements for top-level...' % len(stmts_in))
stmts_out = [st for st in stmts_in if not st.supports]
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to statements that are at the top-level of the hierarchy.\n\n Here top-level statements correspond to most specific ones.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
Please provide a description of the function:def filter_inconsequential_mods(stmts_in, whitelist=None, **kwargs):
if whitelist is None:
whitelist = {}
logger.info('Filtering %d statements to remove' % len(stmts_in) +
' inconsequential modifications...')
states_used = whitelist
for stmt in stmts_in:
for agent in stmt.agent_list():
if agent is not None:
if agent.mods:
for mc in agent.mods:
mod = (mc.mod_type, mc.residue, mc.position)
try:
states_used[agent.name].append(mod)
except KeyError:
states_used[agent.name] = [mod]
for k, v in states_used.items():
states_used[k] = list(set(v))
stmts_out = []
for stmt in stmts_in:
skip = False
if isinstance(stmt, Modification):
mod_type = modclass_to_modtype[stmt.__class__]
if isinstance(stmt, RemoveModification):
mod_type = modtype_to_inverse[mod_type]
mod = (mod_type, stmt.residue, stmt.position)
used = states_used.get(stmt.sub.name, [])
if mod not in used:
skip = True
if not skip:
stmts_out.append(stmt)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter out Modifications that modify inconsequential sites\n\n Inconsequential here means that the site is not mentioned / tested\n in any other statement. In some cases specific sites should be\n preserved, for instance, to be used as readouts in a model.\n In this case, the given sites can be passed in a whitelist.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n whitelist : Optional[dict]\n A whitelist containing agent modification sites whose\n modifications should be preserved even if no other statement\n refers to them. The whitelist parameter is a dictionary in which\n the key is a gene name and the value is a list of tuples of\n (modification_type, residue, position). Example:\n whitelist = {'MAP2K1': [('phosphorylation', 'S', '222')]}\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
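A sketch showing when a modification counts as consequential: the agent state on the Activation is what makes the S222 phosphorylation 'used', while S111 is dropped unless whitelisted:

from indra.statements import Agent, ModCondition, Phosphorylation, Activation
from indra.tools import assemble_corpus as ac

mek_p = Agent('MAP2K1', mods=[ModCondition('phosphorylation', 'S', '222')])
stmts = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1'), 'S', '222'),
         Phosphorylation(Agent('BRAF'), Agent('MAP2K1'), 'S', '111'),
         Activation(mek_p, Agent('MAPK1'))]

# S222 is referenced by the Activation's agent state, so it is kept
ac.filter_inconsequential_mods(stmts)
# Alternatively, keep S111 explicitly via the whitelist
ac.filter_inconsequential_mods(
    stmts, whitelist={'MAP2K1': [('phosphorylation', 'S', '111')]})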
Please provide a description of the function:def filter_inconsequential_acts(stmts_in, whitelist=None, **kwargs):
if whitelist is None:
whitelist = {}
logger.info('Filtering %d statements to remove' % len(stmts_in) +
' inconsequential activations...')
states_used = whitelist
for stmt in stmts_in:
for agent in stmt.agent_list():
if agent is not None:
if agent.activity:
act = agent.activity.activity_type
try:
states_used[agent.name].append(act)
except KeyError:
states_used[agent.name] = [act]
for k, v in states_used.items():
states_used[k] = list(set(v))
stmts_out = []
for stmt in stmts_in:
skip = False
if isinstance(stmt, RegulateActivity):
used = states_used.get(stmt.obj.name, [])
if stmt.obj_activity not in used:
skip = True
if not skip:
stmts_out.append(stmt)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter out Activations that modify inconsequential activities\n\n Inconsequential here means that the site is not mentioned / tested\n in any other statement. In some cases specific activity types should be\n preserved, for instance, to be used as readouts in a model.\n In this case, the given activities can be passed in a whitelist.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n whitelist : Optional[dict]\n A whitelist containing agent activity types which should be preserved\n even if no other statement refers to them.\n The whitelist parameter is a dictionary in which\n the key is a gene name and the value is a list of activity types.\n Example: whitelist = {'MAP2K1': ['kinase']}\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
Please provide a description of the function:def filter_mutation_status(stmts_in, mutations, deletions, **kwargs):
if 'remove_bound' in kwargs and kwargs['remove_bound']:
remove_bound = True
else:
remove_bound = False
def criterion(agent):
if agent is not None and agent.name in deletions:
return False
if agent is not None and agent.mutations:
muts = mutations.get(agent.name, [])
for mut in agent.mutations:
mut_tup = (mut.residue_from, mut.position, mut.residue_to)
if mut_tup not in muts:
return False
return True
logger.info('Filtering %d statements for mutation status...' %
len(stmts_in))
stmts_out = []
for stmt in stmts_in:
skip = False
for agent in stmt.agent_list():
if not criterion(agent):
skip = True
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
skip = True
break
if not skip:
stmts_out.append(stmt)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter statements based on existing mutations/deletions\n\n This filter helps to contextualize a set of statements to a given\n cell type. Given a list of deleted genes, it removes statements that refer\n to these genes. It also takes a list of mutations and removes statements\n that refer to mutations not relevant for the given context.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n mutations : dict\n A dictionary whose keys are gene names, and the values are lists of\n tuples of the form (residue_from, position, residue_to).\n Example: mutations = {'BRAF': [('V', '600', 'E')]}\n deletions : list\n A list of gene names that are deleted.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
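A sketch of contextualizing statements to a BRAF V600E cell line with PTEN deleted; the mutation tuples follow the (residue_from, position, residue_to) convention of the docstring:

from indra.statements import Agent, MutCondition, Phosphorylation
from indra.tools import assemble_corpus as ac

braf_v600e = Agent('BRAF', mutations=[MutCondition('600', 'V', 'E')])
braf_g466v = Agent('BRAF', mutations=[MutCondition('466', 'G', 'V')])

stmts = [Phosphorylation(braf_v600e, Agent('MAP2K1')),
         Phosphorylation(braf_g466v, Agent('MAP2K1')),
         Phosphorylation(Agent('CSNK2A1'), Agent('PTEN'))]

# Only the first statement survives: the G466V statement carries an
# irrelevant mutation and the last one mentions a deleted gene
ac.filter_mutation_status(stmts,
                          mutations={'BRAF': [('V', '600', 'E')]},
                          deletions=['PTEN'])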
Please provide a description of the function:def filter_enzyme_kinase(stmts_in, **kwargs):
logger.info('Filtering %d statements to remove ' % len(stmts_in) +
'phosphorylation by non-kinases...')
path = os.path.dirname(os.path.abspath(__file__))
kinase_table = read_unicode_csv(path + '/../resources/kinases.tsv',
delimiter='\t')
gene_names = [lin[1] for lin in list(kinase_table)[1:]]
stmts_out = []
for st in stmts_in:
if isinstance(st, Phosphorylation):
if st.enz is not None:
if st.enz.name in gene_names:
stmts_out.append(st)
else:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter Phosphorylations to ones where the enzyme is a known kinase.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
Please provide a description of the function:def filter_transcription_factor(stmts_in, **kwargs):
logger.info('Filtering %d statements to remove ' % len(stmts_in) +
'amount regulations by non-transcription-factors...')
path = os.path.dirname(os.path.abspath(__file__))
tf_table = \
read_unicode_csv(path + '/../resources/transcription_factors.csv')
gene_names = [lin[1] for lin in list(tf_table)[1:]]
stmts_out = []
for st in stmts_in:
if isinstance(st, RegulateAmount):
if st.subj is not None:
if st.subj.name in gene_names:
stmts_out.append(st)
else:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter out RegulateAmounts where subject is not a transcription factor.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
Please provide a description of the function:def filter_uuid_list(stmts_in, uuids, **kwargs):
invert = kwargs.get('invert', False)
logger.info('Filtering %d statements for %d UUID%s...' %
(len(stmts_in), len(uuids), 's' if len(uuids) > 1 else ''))
stmts_out = []
for st in stmts_in:
if not invert:
if st.uuid in uuids:
stmts_out.append(st)
else:
if st.uuid not in uuids:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Filter to Statements corresponding to given UUIDs\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n uuids : list[str]\n A list of UUIDs to filter for.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n invert : Optional[bool]\n Invert the filter to remove the Statements corresponding to the given\n UUIDs.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements.\n "
]
|
Please provide a description of the function:def expand_families(stmts_in, **kwargs):
from indra.tools.expand_families import Expander
logger.info('Expanding families on %d statements...' % len(stmts_in))
expander = Expander(hierarchies)
stmts_out = expander.expand_families(stmts_in)
logger.info('%d statements after expanding families...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Expand FamPlex Agents to individual genes.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to expand.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of expanded statements.\n "
]
|
Please provide a description of the function:def reduce_activities(stmts_in, **kwargs):
logger.info('Reducing activities on %d statements...' % len(stmts_in))
stmts_out = [deepcopy(st) for st in stmts_in]
ml = MechLinker(stmts_out)
ml.gather_explicit_activities()
ml.reduce_activities()
stmts_out = ml.statements
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Reduce the activity types in a list of statements\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to reduce activity types in.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of reduced activity statements.\n "
]
|
Please provide a description of the function:def strip_agent_context(stmts_in, **kwargs):
logger.info('Stripping agent context on %d statements...' % len(stmts_in))
stmts_out = []
for st in stmts_in:
new_st = deepcopy(st)
for agent in new_st.agent_list():
if agent is None:
continue
agent.mods = []
agent.mutations = []
agent.activity = None
agent.location = None
agent.bound_conditions = []
stmts_out.append(new_st)
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Strip any context on agents within each statement.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements whose agent context should be stripped.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of stripped statements.\n "
]
|
Please provide a description of the function:def standardize_names_groundings(stmts):
print('Standardize names to groundings')
for stmt in stmts:
for concept in stmt.agent_list():
db_ns, db_id = concept.get_grounding()
if db_id is not None:
if isinstance(db_id, list):
db_id = db_id[0][0].split('/')[-1]
else:
db_id = db_id.split('/')[-1]
db_id = db_id.replace('|', ' ')
db_id = db_id.replace('_', ' ')
db_id = db_id.replace('ONT::', '')
db_id = db_id.capitalize()
concept.name = db_id
return stmts | [
"Standardize the names of Concepts with respect to an ontology.\n\n NOTE: this function is currently optimized for Influence Statements\n obtained from Eidos, Hume, Sofia and CWMS. It will possibly yield\n unexpected results for biology-specific Statements.\n "
]
|
Please provide a description of the function:def dump_stmt_strings(stmts, fname):
with open(fname, 'wb') as fh:
for st in stmts:
fh.write(('%s\n' % st).encode('utf-8')) | [
"Save printed statements in a file.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to save in a text file.\n fname : Optional[str]\n The name of a text file to save the printed statements into.\n "
]
|
Please provide a description of the function:def rename_db_ref(stmts_in, ns_from, ns_to, **kwargs):
logger.info('Remapping "%s" to "%s" in db_refs on %d statements...' %
(ns_from, ns_to, len(stmts_in)))
stmts_out = [deepcopy(st) for st in stmts_in]
for stmt in stmts_out:
for agent in stmt.agent_list():
if agent is not None and ns_from in agent.db_refs:
agent.db_refs[ns_to] = agent.db_refs.pop(ns_from)
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | [
"Rename an entry in the db_refs of each Agent.\n\n This is particularly useful when old Statements in pickle files\n need to be updated after a namespace was changed such as\n 'BE' to 'FPLX'.\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements whose Agents' db_refs need to be changed\n ns_from : str\n The namespace identifier to replace\n ns_to : str\n The namespace identifier to replace to\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of Statements with Agents' db_refs changed.\n "
]
|
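A sketch of the namespace rename on a legacy statement; 'BE' to 'FPLX' is the migration mentioned in the docstring:

from indra.statements import Agent, Activation
from indra.tools import assemble_corpus as ac

old_stmt = Activation(Agent('RAF', db_refs={'BE': 'RAF'}),
                      Agent('MEK', db_refs={'BE': 'MEK'}))
new_stmts = ac.rename_db_ref([old_stmt], 'BE', 'FPLX')
print(new_stmts[0].subj.db_refs)  # {'FPLX': 'RAF'}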
Please provide a description of the function:def align_statements(stmts1, stmts2, keyfun=None):
def name_keyfun(stmt):
return tuple(a.name if a is not None else None for
a in stmt.agent_list())
if not keyfun:
keyfun = name_keyfun
matches = []
keys1 = [keyfun(s) for s in stmts1]
keys2 = [keyfun(s) for s in stmts2]
for stmt, key in zip(stmts1, keys1):
try:
match_idx = keys2.index(key)
match_stmt = stmts2[match_idx]
matches.append((stmt, match_stmt))
except ValueError:
matches.append((stmt, None))
for stmt, key in zip(stmts2, keys2):
try:
match_idx = keys1.index(key)
except ValueError:
matches.append((None, stmt))
return matches | [
"Return alignment of two lists of statements by key.\n\n Parameters\n ----------\n stmts1 : list[indra.statements.Statement]\n A list of INDRA Statements to align\n stmts2 : list[indra.statements.Statement]\n A list of INDRA Statements to align\n keyfun : Optional[function]\n A function that takes a Statement as an argument\n and returns a key to align by. If not given,\n the default key function is a tuble of the names\n of the Agents in the Statement.\n\n Return\n ------\n matches : list(tuple)\n A list of tuples where each tuple has two elements,\n the first corresponding to an element of the stmts1\n list and the second corresponding to an element\n of the stmts2 list. If a given element is not matched,\n its corresponding pair in the tuple is None.\n "
]
|
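A sketch of aligning two statement lists, first with the default name-based key and then with a custom keyfun that also matches on statement type (assuming align_statements lives in indra.tools.assemble_corpus like the filters above):

from indra.statements import Agent, Phosphorylation, Activation
from indra.tools import assemble_corpus as ac

stmts1 = [Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))]
stmts2 = [Activation(Agent('BRAF'), Agent('MAP2K1')),
          Activation(Agent('MAP2K1'), Agent('MAPK1'))]

# Default key is the tuple of agent names, so the BRAF->MAP2K1 pair aligns
matches = ac.align_statements(stmts1, stmts2)

keyfun = lambda s: (type(s).__name__,
                    tuple(a.name if a else None for a in s.agent_list()))
matches_by_type = ac.align_statements(stmts1, stmts2, keyfun=keyfun)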
Please provide a description of the function:def submit_query_request(end_point, *args, **kwargs):
ev_limit = kwargs.pop('ev_limit', 10)
best_first = kwargs.pop('best_first', True)
tries = kwargs.pop('tries', 2)
# This isn't handled by requests because of the multiple identical agent
# keys, e.g. {'agent': 'MEK', 'agent': 'ERK'} which is not supported in
# python, but is allowed and necessary in these query strings.
# TODO because we use the API Gateway, this feature is no longer needed.
# We should just use the requests parameters dict.
query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items()
if v is not None]
+ list(args))
return submit_statement_request('get', end_point, query_str,
ev_limit=ev_limit, best_first=best_first,
tries=tries) | [
"Low level function to format the query string."
]
|
Please provide a description of the function:def submit_statement_request(meth, end_point, query_str='', data=None,
tries=2, **params):
full_end_point = 'statements/' + end_point.lstrip('/')
return make_db_rest_request(meth, full_end_point, query_str, data, params, tries) | [
"Even lower level function to make the request."
]
|
Please provide a description of the function:def render_stmt_graph(statements, reduce=True, english=False, rankdir=None,
agent_style=None):
from indra.assemblers.english import EnglishAssembler
# Set the default agent formatting properties
if agent_style is None:
agent_style = {'color': 'lightgray', 'style': 'filled',
'fontname': 'arial'}
# Sets to store all of the nodes and edges as we recursively process all
# of the statements
nodes = set([])
edges = set([])
stmt_dict = {}
# Recursive function for processing all statements
def process_stmt(stmt):
nodes.add(str(stmt.matches_key()))
stmt_dict[str(stmt.matches_key())] = stmt
for sby_ix, sby_stmt in enumerate(stmt.supported_by):
edges.add((str(stmt.matches_key()), str(sby_stmt.matches_key())))
process_stmt(sby_stmt)
# Process all of the top-level statements, getting the supporting statements
# recursively
for stmt in statements:
process_stmt(stmt)
# Create a networkx graph from the nodes
nx_graph = nx.DiGraph()
nx_graph.add_edges_from(edges)
# Perform transitive reduction if desired
if reduce:
nx_graph = nx.algorithms.dag.transitive_reduction(nx_graph)
# Create a pygraphviz graph from the nx graph
try:
pgv_graph = pgv.AGraph(name='statements', directed=True,
rankdir=rankdir)
except NameError:
logger.error('Cannot generate graph because '
'pygraphviz could not be imported.')
return None
for node in nx_graph.nodes():
stmt = stmt_dict[node]
if english:
ea = EnglishAssembler([stmt])
stmt_str = ea.make_model()
else:
stmt_str = str(stmt)
pgv_graph.add_node(node,
label='%s (%d)' % (stmt_str, len(stmt.evidence)),
**agent_style)
pgv_graph.add_edges_from(nx_graph.edges())
return pgv_graph | [
"Render the statement hierarchy as a pygraphviz graph.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n A list of top-level statements with associated supporting statements\n resulting from building a statement hierarchy with\n :py:meth:`combine_related`.\n reduce : bool\n Whether to perform a transitive reduction of the edges in the graph.\n Default is True.\n english : bool\n If True, the statements in the graph are represented by their\n English-assembled equivalent; otherwise they are represented as\n text-formatted Statements.\n rank_dir : str or None\n Argument to pass through to the pygraphviz `AGraph` constructor\n specifying graph layout direction. In particular, a value of 'LR'\n specifies a left-to-right direction. If None, the pygraphviz default\n is used.\n agent_style : dict or None\n Dict of attributes specifying the visual properties of nodes. If None,\n the following default attributes are used::\n\n agent_style = {'color': 'lightgray', 'style': 'filled',\n 'fontname': 'arial'}\n\n Returns\n -------\n pygraphviz.AGraph\n Pygraphviz graph with nodes representing statements and edges pointing\n from supported statements to supported_by statements.\n\n Examples\n --------\n Pattern for getting statements and rendering as a Graphviz graph:\n\n >>> from indra.preassembler.hierarchy_manager import hierarchies\n >>> braf = Agent('BRAF')\n >>> map2k1 = Agent('MAP2K1')\n >>> st1 = Phosphorylation(braf, map2k1)\n >>> st2 = Phosphorylation(braf, map2k1, residue='S')\n >>> pa = Preassembler(hierarchies, [st1, st2])\n >>> pa.combine_related() # doctest:+ELLIPSIS\n [Phosphorylation(BRAF(), MAP2K1(), S)]\n >>> graph = render_stmt_graph(pa.related_stmts)\n >>> graph.write('example_graph.dot') # To make the DOT file\n >>> graph.draw('example_graph.png', prog='dot') # To make an image\n\n Resulting graph:\n\n .. image:: /images/example_graph.png\n :align: center\n :alt: Example statement graph rendered by Graphviz\n\n "
]
|
Please provide a description of the function:def flatten_stmts(stmts):
total_stmts = set(stmts)
for stmt in stmts:
if stmt.supported_by:
children = flatten_stmts(stmt.supported_by)
total_stmts = total_stmts.union(children)
return list(total_stmts) | [
"Return the full set of unique stms in a pre-assembled stmt graph.\n\n The flattened list of statements returned by this function can be\n compared to the original set of unique statements to make sure no\n statements have been lost during the preassembly process.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n A list of top-level statements with associated supporting statements\n resulting from building a statement hierarchy with\n :py:meth:`combine_related`.\n\n Returns\n -------\n stmts : list of :py:class:`indra.statements.Statement`\n List of all statements contained in the hierarchical statement graph.\n\n Examples\n --------\n Calling :py:meth:`combine_related` on two statements results in one\n top-level statement; calling :py:func:`flatten_stmts` recovers both:\n\n >>> from indra.preassembler.hierarchy_manager import hierarchies\n >>> braf = Agent('BRAF')\n >>> map2k1 = Agent('MAP2K1')\n >>> st1 = Phosphorylation(braf, map2k1)\n >>> st2 = Phosphorylation(braf, map2k1, residue='S')\n >>> pa = Preassembler(hierarchies, [st1, st2])\n >>> pa.combine_related() # doctest:+ELLIPSIS\n [Phosphorylation(BRAF(), MAP2K1(), S)]\n >>> flattened = flatten_stmts(pa.related_stmts)\n >>> flattened.sort(key=lambda x: x.matches_key())\n >>> flattened\n [Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)]\n "
]
|
Please provide a description of the function:def flatten_evidence(stmts, collect_from=None):
if collect_from is None:
collect_from = 'supported_by'
if collect_from not in ('supports', 'supported_by'):
raise ValueError('collect_from must be one of "supports", '
'"supported_by"')
logger.info('Flattening evidence based on %s' % collect_from)
# Copy all of the statements--these will be the ones where we update
# the evidence lists
stmts = fast_deepcopy(stmts)
for stmt in stmts:
# We get the original evidence keys here so we can differentiate them
# from ones added during flattening.
orig_ev_keys = [ev.matches_key() for ev in stmt.evidence]
# We now do the flattening
total_evidence = _flatten_evidence_for_stmt(stmt, collect_from)
# Here we add annotations for each evidence in the list,
# depending on whether it's an original direct evidence or one that
# was added during flattening
new_evidence = []
for ev in total_evidence:
ev_key = ev.matches_key()
if ev_key in orig_ev_keys:
ev.annotations['support_type'] = 'direct'
new_evidence.append(ev)
else:
ev_copy = fast_deepcopy(ev)
ev_copy.annotations['support_type'] = collect_from
new_evidence.append(ev_copy)
# Now set the new evidence list as the copied statement's evidence
stmt.evidence = new_evidence
return stmts | [
"Add evidence from *supporting* stmts to evidence for *supported* stmts.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n A list of top-level statements with associated supporting statements\n resulting from building a statement hierarchy with\n :py:meth:`combine_related`.\n collect_from : str in ('supports', 'supported_by')\n String indicating whether to collect and flatten evidence from the\n `supports` attribute of each statement or the `supported_by` attribute.\n If not set, defaults to 'supported_by'.\n\n Returns\n -------\n stmts : list of :py:class:`indra.statements.Statement`\n Statement hierarchy identical to the one passed, but with the\n evidence lists for each statement now containing all of the evidence\n associated with the statements they are supported by.\n\n Examples\n --------\n Flattening evidence adds the two pieces of evidence from the supporting\n statement to the evidence list of the top-level statement:\n\n >>> from indra.preassembler.hierarchy_manager import hierarchies\n >>> braf = Agent('BRAF')\n >>> map2k1 = Agent('MAP2K1')\n >>> st1 = Phosphorylation(braf, map2k1,\n ... evidence=[Evidence(text='foo'), Evidence(text='bar')])\n >>> st2 = Phosphorylation(braf, map2k1, residue='S',\n ... evidence=[Evidence(text='baz'), Evidence(text='bak')])\n >>> pa = Preassembler(hierarchies, [st1, st2])\n >>> pa.combine_related() # doctest:+ELLIPSIS\n [Phosphorylation(BRAF(), MAP2K1(), S)]\n >>> [e.text for e in pa.related_stmts[0].evidence] # doctest:+IGNORE_UNICODE\n ['baz', 'bak']\n >>> flattened = flatten_evidence(pa.related_stmts)\n >>> sorted([e.text for e in flattened[0].evidence]) # doctest:+IGNORE_UNICODE\n ['bak', 'bar', 'baz', 'foo']\n "
]
|
Please provide a description of the function:def combine_duplicates(self):
if self.unique_stmts is None:
self.unique_stmts = self.combine_duplicate_stmts(self.stmts)
return self.unique_stmts | [
"Combine duplicates among `stmts` and save result in `unique_stmts`.\n\n A wrapper around the static method :py:meth:`combine_duplicate_stmts`.\n "
]
|
Please provide a description of the function:def _get_stmt_matching_groups(stmts):
def match_func(x): return x.matches_key()
# Remove exact duplicates using a set() call, then make copies:
logger.debug('%d statements before removing object duplicates.' %
len(stmts))
st = list(set(stmts))
logger.debug('%d statements after removing object duplicates.' %
len(st))
# Group statements according to whether they are matches (differing
# only in their evidence).
# Sort the statements in place by matches_key()
st.sort(key=match_func)
return itertools.groupby(st, key=match_func) | [
"Use the matches_key method to get sets of matching statements."
]
|
Please provide a description of the function:def combine_duplicate_stmts(stmts):
# Helper function to get a list of evidence matches keys
def _ev_keys(sts):
ev_keys = []
for stmt in sts:
for ev in stmt.evidence:
ev_keys.append(ev.matches_key())
return ev_keys
# Iterate over groups of duplicate statements
unique_stmts = []
for _, duplicates in Preassembler._get_stmt_matching_groups(stmts):
ev_keys = set()
# Get the first statement and add the evidence of all subsequent
# Statements to it
duplicates = list(duplicates)
start_ev_keys = _ev_keys(duplicates)
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
new_stmt = stmt.make_generic_copy()
if len(duplicates) == 1:
new_stmt.uuid = stmt.uuid
raw_text = [None if ag is None else ag.db_refs.get('TEXT')
for ag in stmt.agent_list(deep_sorted=True)]
raw_grounding = [None if ag is None else ag.db_refs
for ag in stmt.agent_list(deep_sorted=True)]
for ev in stmt.evidence:
ev_key = ev.matches_key() + str(raw_text) + \
str(raw_grounding)
if ev_key not in ev_keys:
# In case there are already agents annotations, we
# just add a new key for raw_text, otherwise create
# a new key
if 'agents' in ev.annotations:
ev.annotations['agents']['raw_text'] = raw_text
ev.annotations['agents']['raw_grounding'] = \
raw_grounding
else:
ev.annotations['agents'] = \
{'raw_text': raw_text,
'raw_grounding': raw_grounding}
if 'prior_uuids' not in ev.annotations:
ev.annotations['prior_uuids'] = []
ev.annotations['prior_uuids'].append(stmt.uuid)
new_stmt.evidence.append(ev)
ev_keys.add(ev_key)
end_ev_keys = _ev_keys([new_stmt])
if len(end_ev_keys) != len(start_ev_keys):
logger.debug('%d redundant evidences eliminated.' %
(len(start_ev_keys) - len(end_ev_keys)))
# This should never be None or anything else
assert isinstance(new_stmt, Statement)
unique_stmts.append(new_stmt)
return unique_stmts | [
"Combine evidence from duplicate Statements.\n\n Statements are deemed to be duplicates if they have the same key\n returned by the `matches_key()` method of the Statement class. This\n generally means that statements must be identical in terms of their\n arguments and can differ only in their associated `Evidence` objects.\n\n This function keeps the first instance of each set of duplicate\n statements and merges the lists of Evidence from all of the other\n statements.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n Set of statements to de-duplicate.\n\n Returns\n -------\n list of :py:class:`indra.statements.Statement`\n Unique statements with accumulated evidence across duplicates.\n\n Examples\n --------\n De-duplicate and combine evidence for two statements differing only\n in their evidence lists:\n\n >>> map2k1 = Agent('MAP2K1')\n >>> mapk1 = Agent('MAPK1')\n >>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185',\n ... evidence=[Evidence(text='evidence 1')])\n >>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185',\n ... evidence=[Evidence(text='evidence 2')])\n >>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2])\n >>> uniq_stmts\n [Phosphorylation(MAP2K1(), MAPK1(), T, 185)]\n >>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE\n ['evidence 1', 'evidence 2']\n "
]
|
Please provide a description of the function:def _get_stmt_by_group(self, stmt_type, stmts_this_type, eh):
# Dict of stmt group key tuples, indexed by their first Agent
stmt_by_first = collections.defaultdict(lambda: [])
# Dict of stmt group key tuples, indexed by their second Agent
stmt_by_second = collections.defaultdict(lambda: [])
# Dict of statements with None first, with second Agent as keys
none_first = collections.defaultdict(lambda: [])
# Dict of statements with None second, with first Agent as keys
none_second = collections.defaultdict(lambda: [])
# The dict of all statement groups, with tuples of components
# or entity_matches_keys as keys
stmt_by_group = collections.defaultdict(lambda: [])
# Here we group Statements according to the hierarchy graph
# components that their agents are part of
for stmt_tuple in stmts_this_type:
_, stmt = stmt_tuple
entities = self._get_entities(stmt, stmt_type, eh)
# At this point we have an entity list
# If we're dealing with Complexes, sort the entities and use
# as dict key
if stmt_type == Complex:
# There shouldn't be any statements of the type
# e.g., Complex([Foo, None, Bar])
assert None not in entities
assert len(entities) > 0
entities.sort()
key = tuple(entities)
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
elif stmt_type == Conversion:
assert len(entities) > 0
key = (entities[0],
tuple(sorted(entities[1:len(stmt.obj_from)+1])),
tuple(sorted(entities[-len(stmt.obj_to):])))
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
# Now look at all other statement types
# All other statements will have one or two entities
elif len(entities) == 1:
# If only one entity, we only need the one key
# It should not be None!
assert None not in entities
key = tuple(entities)
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
else:
# Make sure we only have two entities, and they are not both
# None
key = tuple(entities)
assert len(key) == 2
assert key != (None, None)
# First agent is None; add in the statements, indexed by
# 2nd
if key[0] is None and stmt_tuple not in none_first[key[1]]:
none_first[key[1]].append(stmt_tuple)
# Second agent is None; add in the statements, indexed by
# 1st
elif key[1] is None and stmt_tuple not in none_second[key[0]]:
none_second[key[0]].append(stmt_tuple)
# Neither entity is None!
elif None not in key:
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
if key not in stmt_by_first[key[0]]:
stmt_by_first[key[0]].append(key)
if key not in stmt_by_second[key[1]]:
stmt_by_second[key[1]].append(key)
# When we've gotten here, we should have stmt_by_group entries, and
# we may or may not have stmt_by_first/second dicts filled out
# (depending on the statement type).
if none_first:
# Get the keys associated with stmts having a None first
# argument
for second_arg, stmts in none_first.items():
# Look for any statements with this second arg
second_arg_keys = stmt_by_second[second_arg]
# If there are no more specific statements matching this
# set of statements with a None first arg, then the
# statements with the None first arg deserve to be in
# their own group.
if not second_arg_keys:
stmt_by_group[(None, second_arg)] = stmts
# On the other hand, if there are statements with a matching
# second arg component, we need to add the None first
# statements to all groups with the matching second arg
for second_arg_key in second_arg_keys:
stmt_by_group[second_arg_key] += stmts
# Now do the corresponding steps for the statements with None as the
# second argument:
if none_second:
for first_arg, stmts in none_second.items():
# Look for any statements with this first arg
first_arg_keys = stmt_by_first[first_arg]
# If there are no more specific statements matching this
# set of statements with a None second arg, then the
# statements with the None second arg deserve to be in
# their own group.
if not first_arg_keys:
stmt_by_group[(first_arg, None)] = stmts
# On the other hand, if there are statements with a matching
# first arg component, we need to add the None second
# statements to all groups with the matching first arg
for first_arg_key in first_arg_keys:
stmt_by_group[first_arg_key] += stmts
return stmt_by_group | [
"Group Statements of `stmt_type` by their hierarchical relations."
]
|
Please provide a description of the function:def _generate_id_maps(self, unique_stmts, poolsize=None,
size_cutoff=100, split_idx=None):
# Check arguments relating to multiprocessing
if poolsize is None:
logger.debug('combine_related: poolsize not set, '
'not using multiprocessing.')
use_mp = False
elif sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
use_mp = True
logger.info('combine_related: Python >= 3.4 detected, '
'using multiprocessing with poolsize %d, '
'size_cutoff %d' % (poolsize, size_cutoff))
else:
use_mp = False
logger.info('combine_related: Python < 3.4 detected, '
'not using multiprocessing.')
eh = self.hierarchies['entity']
# Make a list of Statement types
stmts_by_type = collections.defaultdict(lambda: [])
for idx, stmt in enumerate(unique_stmts):
stmts_by_type[indra_stmt_type(stmt)].append((idx, stmt))
child_proc_groups = []
parent_proc_groups = []
skipped_groups = 0
# Each Statement type can be preassembled independently
for stmt_type, stmts_this_type in stmts_by_type.items():
logger.info('Grouping %s (%s)' %
(stmt_type.__name__, len(stmts_this_type)))
stmt_by_group = self._get_stmt_by_group(stmt_type, stmts_this_type,
eh)
# Divide statements by group size
# If we're not using multiprocessing, then all groups are local
for g_name, g in stmt_by_group.items():
if len(g) < 2:
skipped_groups += 1
continue
if use_mp and len(g) >= size_cutoff:
child_proc_groups.append(g)
else:
parent_proc_groups.append(g)
# Now run preassembly!
logger.debug("Groups: %d parent, %d worker, %d skipped." %
(len(parent_proc_groups), len(child_proc_groups),
skipped_groups))
supports_func = functools.partial(_set_supports_stmt_pairs,
hierarchies=self.hierarchies,
split_idx=split_idx,
check_entities_match=False)
# Check if we are running any groups in child processes; note that if
# use_mp is False, child_proc_groups will be empty
if child_proc_groups:
# Get a multiprocessing context
ctx = mp.get_context('spawn')
pool = ctx.Pool(poolsize)
# Run the large groups remotely
logger.debug("Running %d groups in child processes" %
len(child_proc_groups))
res = pool.map_async(supports_func, child_proc_groups)
workers_ready = False
else:
workers_ready = True
# Run the small groups locally
logger.debug("Running %d groups in parent process" %
len(parent_proc_groups))
stmt_ix_map = [supports_func(stmt_tuples)
for stmt_tuples in parent_proc_groups]
logger.debug("Done running parent process groups")
while not workers_ready:
logger.debug("Checking child processes")
if res.ready():
workers_ready = True
logger.debug('Child process group comparisons successful? %s' %
res.successful())
if not res.successful():
raise Exception("Sorry, there was a problem with "
"preassembly in the child processes.")
else:
stmt_ix_map += res.get()
logger.debug("Closing pool...")
pool.close()
logger.debug("Joining pool...")
pool.join()
logger.debug("Pool closed and joined.")
time.sleep(1)
logger.debug("Done.")
# Combine all redundant map edges
stmt_ix_map_set = set([])
for group_ix_map in stmt_ix_map:
for ix_pair in group_ix_map:
stmt_ix_map_set.add(ix_pair)
return stmt_ix_map_set | [
"Connect statements using their refinement relationships."
]
|
Please provide a description of the function:def combine_related(self, return_toplevel=True, poolsize=None,
size_cutoff=100):
if self.related_stmts is not None:
if return_toplevel:
return self.related_stmts
else:
assert self.unique_stmts is not None
return self.unique_stmts
# Call combine_duplicates, which lazily initializes self.unique_stmts
unique_stmts = self.combine_duplicates()
# Generate the index map, linking related statements.
idx_map = self._generate_id_maps(unique_stmts, poolsize, size_cutoff)
# Now iterate over all indices and set supports/supported by
for ix1, ix2 in idx_map:
unique_stmts[ix1].supported_by.append(unique_stmts[ix2])
unique_stmts[ix2].supports.append(unique_stmts[ix1])
# Get the top level statements
self.related_stmts = [st for st in unique_stmts if not st.supports]
logger.debug('%d top level' % len(self.related_stmts))
if return_toplevel:
return self.related_stmts
else:
return unique_stmts | [
"Connect related statements based on their refinement relationships.\n\n This function takes as a starting point the unique statements (with\n duplicates removed) and returns a modified flat list of statements\n containing only those statements which do not represent a refinement of\n other existing statements. In other words, the more general versions of\n a given statement do not appear at the top level, but instead are\n listed in the `supports` field of the top-level statements.\n\n If :py:attr:`unique_stmts` has not been initialized with the\n de-duplicated statements, :py:meth:`combine_duplicates` is called\n internally.\n\n After this function is called the attribute :py:attr:`related_stmts` is\n set as a side-effect.\n\n The procedure for combining statements in this way involves a series\n of steps:\n\n 1. The statements are grouped by type (e.g., Phosphorylation) and\n each type is iterated over independently.\n 2. Statements of the same type are then grouped according to their\n Agents' entity hierarchy component identifiers. For instance,\n ERK, MAPK1 and MAPK3 are all in the same connected component in the\n entity hierarchy and therefore all Statements of the same type\n referencing these entities will be grouped. This grouping assures\n that relations are only possible within Statement groups and\n not among groups. For two Statements to be in the same group at\n this step, the Statements must be the same type and the Agents at\n each position in the Agent lists must either be in the same\n hierarchy component, or if they are not in the hierarchy, must have\n identical entity_matches_keys. Statements with None in one of the\n Agent list positions are collected separately at this stage.\n 3. Statements with None at either the first or second position are\n iterated over. For a statement with a None as the first Agent,\n the second Agent is examined; then the Statement with None is\n added to all Statement groups with a corresponding component or\n entity_matches_key in the second position. The same procedure is\n performed for Statements with None at the second Agent position.\n 4. The statements within each group are then compared; if one\n statement represents a refinement of the other (as defined by the\n `refinement_of()` method implemented for the Statement), then the\n more refined statement is added to the `supports` field of the more\n general statement, and the more general statement is added to the\n `supported_by` field of the more refined statement.\n 5. A new flat list of statements is created that contains only those\n statements that have no `supports` entries (statements containing\n such entries are not eliminated, because they will be retrievable\n from the `supported_by` fields of other statements). This list\n is returned to the caller.\n\n On multi-core machines, the algorithm can be parallelized by setting\n the poolsize argument to the desired number of worker processes.\n This feature is only available in Python > 3.4.\n\n .. note:: Subfamily relationships must be consistent across arguments\n\n For now, we require that merges can only occur if the *isa*\n relationships are all in the *same direction for all the agents* in\n a Statement. For example, the two statement groups: `RAF_family ->\n MEK1` and `BRAF -> MEK_family` would not be merged, since BRAF\n *isa* RAF_family, but MEK_family is not a MEK1. 
In the future this\n restriction could be revisited.\n\n Parameters\n ----------\n return_toplevel : Optional[bool]\n If True only the top level statements are returned.\n If False, all statements are returned. Default: True\n poolsize : Optional[int]\n The number of worker processes to use to parallelize the\n comparisons performed by the function. If None (default), no\n parallelization is performed. NOTE: Parallelization is only\n available on Python 3.4 and above.\n size_cutoff : Optional[int]\n Groups with size_cutoff or more statements are sent to worker\n processes, while smaller groups are compared in the parent process.\n Default value is 100. Not relevant when parallelization is not\n used.\n\n Returns\n -------\n list of :py:class:`indra.statement.Statement`\n The returned list contains Statements representing the more\n concrete/refined versions of the Statements involving particular\n entities. The attribute :py:attr:`related_stmts` is also set to\n this list. However, if return_toplevel is False then all\n statements are returned, irrespective of level of specificity.\n In this case the relationships between statements can\n be accessed via the supports/supported_by attributes.\n\n Examples\n --------\n A more general statement with no information about a Phosphorylation\n site is identified as supporting a more specific statement:\n\n >>> from indra.preassembler.hierarchy_manager import hierarchies\n >>> braf = Agent('BRAF')\n >>> map2k1 = Agent('MAP2K1')\n >>> st1 = Phosphorylation(braf, map2k1)\n >>> st2 = Phosphorylation(braf, map2k1, residue='S')\n >>> pa = Preassembler(hierarchies, [st1, st2])\n >>> combined_stmts = pa.combine_related() # doctest:+ELLIPSIS\n >>> combined_stmts\n [Phosphorylation(BRAF(), MAP2K1(), S)]\n >>> combined_stmts[0].supported_by\n [Phosphorylation(BRAF(), MAP2K1())]\n >>> combined_stmts[0].supported_by[0].supports\n [Phosphorylation(BRAF(), MAP2K1(), S)]\n "
]
|
Please provide a description of the function:def find_contradicts(self):
eh = self.hierarchies['entity']
# Make a dict of Statement by type
stmts_by_type = collections.defaultdict(lambda: [])
for idx, stmt in enumerate(self.stmts):
stmts_by_type[indra_stmt_type(stmt)].append((idx, stmt))
# Handle Statements with polarity first
pos_stmts = AddModification.__subclasses__()
neg_stmts = [modclass_to_inverse[c] for c in pos_stmts]
pos_stmts += [Activation, IncreaseAmount]
neg_stmts += [Inhibition, DecreaseAmount]
contradicts = []
for pst, nst in zip(pos_stmts, neg_stmts):
poss = stmts_by_type.get(pst, [])
negs = stmts_by_type.get(nst, [])
pos_stmt_by_group = self._get_stmt_by_group(pst, poss, eh)
neg_stmt_by_group = self._get_stmt_by_group(nst, negs, eh)
for key, pg in pos_stmt_by_group.items():
ng = neg_stmt_by_group.get(key, [])
for (_, st1), (_, st2) in itertools.product(pg, ng):
if st1.contradicts(st2, self.hierarchies):
contradicts.append((st1, st2))
# Handle neutral Statements next
neu_stmts = [Influence, ActiveForm]
for stt in neu_stmts:
stmts = stmts_by_type.get(stt, [])
for (_, st1), (_, st2) in itertools.combinations(stmts, 2):
if st1.contradicts(st2, self.hierarchies):
contradicts.append((st1, st2))
return contradicts | [
"Return pairs of contradicting Statements.\n\n Returns\n -------\n contradicts : list(tuple(Statement, Statement))\n A list of Statement pairs that are contradicting.\n "
]
|
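A minimal usage sketch for find_contradicts (assuming the standard INDRA entity hierarchies; the agents and statements below are illustrative):
from indra.statements import Agent, Activation, Inhibition
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
# Two statements with the same subject and object but opposite polarity
braf = Agent('BRAF')
map2k1 = Agent('MAP2K1')
pa = Preassembler(hierarchies, [Activation(braf, map2k1),
                                Inhibition(braf, map2k1)])
# Returns a list of (Statement, Statement) pairs that contradict each other
contradictions = pa.find_contradicts()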
Please provide a description of the function:def get_text_content_for_pmids(pmids):
pmc_pmids = set(pmc_client.filter_pmids(pmids, source_type='fulltext'))
pmc_ids = []
for pmid in pmc_pmids:
pmc_id = pmc_client.id_lookup(pmid, idtype='pmid')['pmcid']
if pmc_id:
pmc_ids.append(pmc_id)
else:
pmc_pmids.discard(pmid)
pmc_xmls = []
failed = set()
for pmc_id in pmc_ids:
if pmc_id is not None:
pmc_xmls.append(pmc_client.get_xml(pmc_id))
else:
failed.add(pmid)
time.sleep(0.5)
remaining_pmids = (set(pmids) - pmc_pmids) | failed
abstracts = []
for pmid in remaining_pmids:
abstract = pubmed_client.get_abstract(pmid)
abstracts.append(abstract)
time.sleep(0.5)
return [text_content for source in (pmc_xmls, abstracts)
for text_content in source if text_content is not None] | [
"Get text content for articles given a list of their pmids\n\n Parameters\n ----------\n pmids : list of str\n\n Returns\n -------\n text_content : list of str\n "
]
|
Please provide a description of the function:def universal_extract_paragraphs(xml):
try:
paragraphs = elsevier_client.extract_paragraphs(xml)
except Exception:
paragraphs = None
if paragraphs is None:
try:
paragraphs = pmc_client.extract_paragraphs(xml)
except Exception:
paragraphs = [xml]
return paragraphs | [
"Extract paragraphs from xml that could be from different sources\n\n First try to parse the xml as if it came from elsevier. if we do not\n have valid elsevier xml this will throw an exception. the text extraction\n function in the pmc client may not throw an exception when parsing elsevier\n xml, silently processing the xml incorrectly\n\n Parameters\n ----------\n xml : str\n Either an NLM xml, Elsevier xml or plaintext\n\n Returns\n -------\n paragraphs : str\n Extracted plaintext paragraphs from NLM or Elsevier XML\n "
]
|
Please provide a description of the function:def filter_paragraphs(paragraphs, contains=None):
if contains is None:
pattern = ''
else:
if isinstance(contains, str):
contains = [contains]
pattern = '|'.join(r'[^\w]%s[^\w]' % shortform
for shortform in contains)
paragraphs = [p for p in paragraphs if re.search(pattern, p)]
return '\n'.join(paragraphs) + '\n' | [
"Filter paragraphs to only those containing one of a list of strings\n\n Parameters\n ----------\n paragraphs : list of str\n List of plaintext paragraphs from an article\n\n contains : str or list of str\n Exclude paragraphs not containing this string as a token, or\n at least one of the strings in contains if it is a list\n\n Returns\n -------\n str\n Plaintext consisting of all input paragraphs containing at least\n one of the supplied tokens.\n "
]
|
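A small illustration of filter_paragraphs (the paragraphs are made-up example data):
paragraphs = [
    'We studied the effect of ER stress on cell survival.',
    'Patients were recruited between 2010 and 2012.',
    'Activation of the ER pathway was observed in all samples.',
]
# Keep only paragraphs containing 'ER' as a standalone token
text = filter_paragraphs(paragraphs, contains='ER')
# -> the first and third paragraphs joined by newlines, with a trailing newline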
Please provide a description of the function:def get_valid_residue(residue):
if residue is not None and amino_acids.get(residue) is None:
res = amino_acids_reverse.get(residue.lower())
if res is None:
raise InvalidResidueError(residue)
else:
return res
return residue | [
"Check if the given string represents a valid amino acid residue."
]
|
Please provide a description of the function:def get_valid_location(location):
# If we're given None, return None
if location is not None and cellular_components.get(location) is None:
loc = cellular_components_reverse.get(location)
if loc is None:
raise InvalidLocationError(location)
else:
return loc
return location | [
"Check if the given location represents a valid cellular component."
]
|
Please provide a description of the function:def _read_activity_types():
this_dir = os.path.dirname(os.path.abspath(__file__))
ac_file = os.path.join(this_dir, os.pardir, 'resources',
'activity_hierarchy.rdf')
g = rdflib.Graph()
with open(ac_file, 'r'):
g.parse(ac_file, format='nt')
act_types = set()
for s, _, o in g:
subj = s.rpartition('/')[-1]
obj = o.rpartition('/')[-1]
act_types.add(subj)
act_types.add(obj)
return sorted(list(act_types)) | [
"Read types of valid activities from a resource file."
]
|
Please provide a description of the function:def _read_cellular_components():
# Here we load a patch file in addition to the current cellular components
# file to make sure we don't error with InvalidLocationError with some
# deprecated cellular location names
this_dir = os.path.dirname(os.path.abspath(__file__))
cc_file = os.path.join(this_dir, os.pardir, 'resources',
'cellular_components.tsv')
cc_patch_file = os.path.join(this_dir, os.pardir, 'resources',
'cellular_components_patch.tsv')
cellular_components = {}
cellular_components_reverse = {}
with open(cc_file, 'rt') as fh:
lines = list(fh.readlines())
# We add the patch to the end of the lines list
with open(cc_patch_file, 'rt') as fh:
lines += list(fh.readlines())
for lin in lines[1:]:
terms = lin.strip().split('\t')
cellular_components[terms[1]] = terms[0]
# If the GO -> name mapping doesn't exist yet, we add a mapping
# but if it already exists (i.e. the try doesn't error) then
# we don't add the GO -> name mapping. This ensures that names from
# the patch file aren't mapped to in the reverse list.
try:
cellular_components_reverse[terms[0]]
except KeyError:
cellular_components_reverse[terms[0]] = terms[1]
return cellular_components, cellular_components_reverse | [
"Read cellular components from a resource file."
]
|
Please provide a description of the function:def _read_amino_acids():
this_dir = os.path.dirname(os.path.abspath(__file__))
aa_file = os.path.join(this_dir, os.pardir, 'resources', 'amino_acids.tsv')
amino_acids = {}
amino_acids_reverse = {}
with open(aa_file, 'rt') as fh:
lines = fh.readlines()
for lin in lines[1:]:
terms = lin.strip().split('\t')
key = terms[2]
val = {'full_name': terms[0],
'short_name': terms[1],
'indra_name': terms[3]}
amino_acids[key] = val
for v in val.values():
amino_acids_reverse[v] = key
return amino_acids, amino_acids_reverse | [
"Read the amino acid information from a resource file."
]
|
Please provide a description of the function:def export_sbgn(model):
import lxml.etree
import lxml.builder
from pysb.bng import generate_equations
from indra.assemblers.sbgn import SBGNAssembler
logger.info('Generating reaction network with BNG for SBGN export. ' +
'This could take a long time.')
generate_equations(model)
sa = SBGNAssembler()
glyphs = {}
for idx, species in enumerate(model.species):
glyph = sa._glyph_for_complex_pattern(species)
if glyph is None:
continue
sa._map.append(glyph)
glyphs[idx] = glyph
for reaction in model.reactions:
# Get all the reactants / products / controllers of the reaction
reactants = set(reaction['reactants']) - set(reaction['products'])
products = set(reaction['products']) - set(reaction['reactants'])
controllers = set(reaction['reactants']) & set(reaction['products'])
# Add glyph for reaction
process_glyph = sa._process_glyph('process')
# Connect reactants with arcs
if not reactants:
glyph_id = sa._none_glyph()
sa._arc('consumption', glyph_id, process_glyph)
else:
for r in reactants:
glyph = glyphs.get(r)
if glyph is None:
glyph_id = sa._none_glyph()
else:
glyph_id = glyph.attrib['id']
sa._arc('consumption', glyph_id, process_glyph)
# Connect products with arcs
if not products:
glyph_id = sa._none_glyph()
sa._arc('production', process_glyph, glyph_id)
else:
for p in products:
glyph = glyphs.get(p)
if glyph is None:
glyph_id = sa._none_glyph()
else:
glyph_id = glyph.attrib['id']
sa._arc('production', process_glyph, glyph_id)
# Connect controllers with arcs
for c in controllers:
glyph = glyphs[c]
sa._arc('catalysis', glyph.attrib['id'], process_glyph)
sbgn_str = sa.print_model().decode('utf-8')
return sbgn_str | [
"Return an SBGN model string corresponding to the PySB model.\n\n This function first calls generate_equations on the PySB model to obtain\n a reaction network (i.e. individual species, reactions). It then iterates\n over each reaction and and instantiates its reactants, products, and the\n process itself as SBGN glyphs and arcs.\n\n Parameters\n ----------\n model : pysb.core.Model\n A PySB model to be exported into SBGN\n\n Returns\n -------\n sbgn_str : str\n An SBGN model as string\n "
]
|
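A sketch of driving export_sbgn from an INDRA-assembled PySB model (the import paths are assumptions and BioNetGen must be installed for network generation):
from indra.statements import Agent, Phosphorylation
from indra.assemblers.pysb import PysbAssembler
pa = PysbAssembler()
pa.add_statements([Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))])
model = pa.make_model()
# Generates the reaction network with BNG and returns SBGN-ML as a string
sbgn_xml = export_sbgn(model)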
Please provide a description of the function:def export_kappa_im(model, fname=None):
from .kappa_util import im_json_to_graph
kappa = _prepare_kappa(model)
imap = kappa.analyses_influence_map()
im = im_json_to_graph(imap)
for param in model.parameters:
try:
im.remove_node(param.name)
except Exception:
pass
if fname:
agraph = networkx.nx_agraph.to_agraph(im)
agraph.draw(fname, prog='dot')
return im | [
"Return a networkx graph representing the model's Kappa influence map.\n\n Parameters\n ----------\n model : pysb.core.Model\n A PySB model to be exported into a Kappa IM.\n fname : Optional[str]\n A file name, typically with .png or .pdf extension in which\n the IM is rendered using pygraphviz.\n\n Returns\n -------\n networkx.MultiDiGraph\n A graph object representing the influence map.\n "
]
|
Please provide a description of the function:def export_kappa_cm(model, fname=None):
from .kappa_util import cm_json_to_graph
kappa = _prepare_kappa(model)
cmap = kappa.analyses_contact_map()
cm = cm_json_to_graph(cmap)
if fname:
cm.draw(fname, prog='dot')
return cm | [
"Return a networkx graph representing the model's Kappa contact map.\n\n Parameters\n ----------\n model : pysb.core.Model\n A PySB model to be exported into a Kappa CM.\n fname : Optional[str]\n A file name, typically with .png or .pdf extension in which\n the CM is rendered using pygraphviz.\n\n Returns\n -------\n npygraphviz.Agraph\n A graph object representing the contact map.\n "
]
|
Please provide a description of the function:def _prepare_kappa(model):
import kappy
kappa = kappy.KappaStd()
model_str = export(model, 'kappa')
kappa.add_model_string(model_str)
kappa.project_parse()
return kappa | [
"Return a Kappa STD with the model loaded."
]
|
Please provide a description of the function:def send_request(**kwargs):
skiprows = kwargs.pop('skiprows', None)
res = requests.get(cbio_url, params=kwargs)
if res.status_code == 200:
# Adaptively skip rows based on number of comment lines
if skiprows == -1:
lines = res.text.split('\n')
skiprows = 0
for line in lines:
if line.startswith('#'):
skiprows += 1
else:
break
csv_StringIO = StringIO(res.text)
df = pandas.read_csv(csv_StringIO, sep='\t', skiprows=skiprows)
return df
else:
logger.error('Request returned with code %d' % res.status_code) | [
"Return a data frame from a web service request to cBio portal.\n\n Sends a web service requrest to the cBio portal with arguments given in\n the dictionary data and returns a Pandas data frame on success.\n\n More information about the service here:\n http://www.cbioportal.org/web_api.jsp\n\n Parameters\n ----------\n kwargs : dict\n A dict of parameters for the query. Entries map directly to web service\n calls with the exception of the optional 'skiprows' entry, whose value\n is used as the number of rows to skip when reading the result data\n frame.\n\n Returns\n -------\n df : pandas.DataFrame\n Response from cBioPortal as a Pandas DataFrame.\n "
]
|
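A minimal sketch of calling send_request directly (assuming it is imported from indra.databases.cbio_client; keyword arguments map to cBioPortal web-service parameters):
from indra.databases import cbio_client
# Ask the portal for all cancer studies; the result is a pandas DataFrame
df = cbio_client.send_request(cmd='getCancerStudies')
print(df['cancer_study_id'].head())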
Please provide a description of the function:def get_mutations(study_id, gene_list, mutation_type=None,
case_id=None):
genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]
gene_list_str = ','.join(gene_list)
data = {'cmd': 'getMutationData',
'case_set_id': study_id,
'genetic_profile_id': genetic_profile,
'gene_list': gene_list_str,
'skiprows': -1}
df = send_request(**data)
if case_id:
df = df[df['case_id'] == case_id]
res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],
'mutation_type', mutation_type)
mutations = {'gene_symbol': list(res['gene_symbol'].values()),
'amino_acid_change': list(res['amino_acid_change'].values())}
return mutations | [
"Return mutations as a list of genes and list of amino acid changes.\n\n Parameters\n ----------\n study_id : str\n The ID of the cBio study.\n Example: 'cellline_ccle_broad' or 'paad_icgc'\n gene_list : list[str]\n A list of genes with their HGNC symbols.\n Example: ['BRAF', 'KRAS']\n mutation_type : Optional[str]\n The type of mutation to filter to.\n mutation_type can be one of: missense, nonsense, frame_shift_ins,\n frame_shift_del, splice_site\n case_id : Optional[str]\n The case ID within the study to filter to.\n\n Returns\n -------\n mutations : tuple[list]\n A tuple of two lists, the first one containing a list of genes, and\n the second one a list of amino acid changes in those genes.\n "
]
|
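A usage sketch for get_mutations (study and gene identifiers follow the docstring examples above):
muts = get_mutations('cellline_ccle_broad', ['BRAF', 'KRAS'],
                     mutation_type='missense')
for gene, aa_change in zip(muts['gene_symbol'], muts['amino_acid_change']):
    print(gene, aa_change)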
Please provide a description of the function:def get_case_lists(study_id):
data = {'cmd': 'getCaseLists',
'cancer_study_id': study_id}
df = send_request(**data)
case_set_ids = df['case_list_id'].tolist()
return case_set_ids | [
"Return a list of the case set ids for a particular study.\n\n TAKE NOTE the \"case_list_id\" are the same thing as \"case_set_id\"\n Within the data, this string is referred to as a \"case_list_id\".\n Within API calls it is referred to as a 'case_set_id'.\n The documentation does not make this explicitly clear.\n\n Parameters\n ----------\n study_id : str\n The ID of the cBio study.\n Example: 'cellline_ccle_broad' or 'paad_icgc'\n\n Returns\n -------\n case_set_ids : dict[dict[int]]\n A dict keyed to cases containing a dict keyed to genes\n containing int\n "
]
|
Please provide a description of the function:def get_profile_data(study_id, gene_list,
profile_filter, case_set_filter=None):
genetic_profiles = get_genetic_profiles(study_id, profile_filter)
if genetic_profiles:
genetic_profile = genetic_profiles[0]
else:
return {}
gene_list_str = ','.join(gene_list)
case_set_ids = get_case_lists(study_id)
if case_set_filter:
case_set_id = [x for x in case_set_ids if case_set_filter in x][0]
else:
case_set_id = study_id + '_all'
# based on looking at the cBioPortal, this is a common case_set_id
data = {'cmd': 'getProfileData',
'case_set_id': case_set_id,
'genetic_profile_id': genetic_profile,
'gene_list': gene_list_str,
'skiprows': -1}
df = send_request(**data)
case_list_df = [x for x in df.columns.tolist()
if x not in ['GENE_ID', 'COMMON']]
profile_data = {case: {g: None for g in gene_list}
for case in case_list_df}
for case in case_list_df:
profile_values = df[case].tolist()
df_gene_list = df['COMMON'].tolist()
for g, cv in zip(df_gene_list, profile_values):
if not pandas.isnull(cv):
profile_data[case][g] = cv
return profile_data | [
"Return dict of cases and genes and their respective values.\n\n Parameters\n ----------\n study_id : str\n The ID of the cBio study.\n Example: 'cellline_ccle_broad' or 'paad_icgc'\n gene_list : list[str]\n A list of genes with their HGNC symbols.\n Example: ['BRAF', 'KRAS']\n profile_filter : str\n A string used to filter the profiles to return. Will be one of:\n - MUTATION\n - MUTATION_EXTENDED\n - COPY_NUMBER_ALTERATION\n - MRNA_EXPRESSION\n - METHYLATION\n case_set_filter : Optional[str]\n A string that specifices which case_set_id to use, based on a complete\n or partial match. If not provided, will look for study_id + '_all'\n\n Returns\n -------\n profile_data : dict[dict[int]]\n A dict keyed to cases containing a dict keyed to genes\n containing int\n "
]
|
Please provide a description of the function:def get_num_sequenced(study_id):
data = {'cmd': 'getCaseLists',
'cancer_study_id': study_id}
df = send_request(**data)
if df.empty:
return 0
row_filter = df['case_list_id'].str.contains('sequenced', case=False)
num_case = len(df[row_filter]['case_ids'].tolist()[0].split(' '))
return num_case | [
"Return number of sequenced tumors for given study.\n\n This is useful for calculating mutation statistics in terms of the\n prevalence of certain mutations within a type of cancer.\n\n Parameters\n ----------\n study_id : str\n The ID of the cBio study.\n Example: 'paad_icgc'\n\n Returns\n -------\n num_case : int\n The number of sequenced tumors in the given study\n "
]
|
Please provide a description of the function:def get_genetic_profiles(study_id, profile_filter=None):
data = {'cmd': 'getGeneticProfiles',
'cancer_study_id': study_id}
df = send_request(**data)
res = _filter_data_frame(df, ['genetic_profile_id'],
'genetic_alteration_type', profile_filter)
genetic_profiles = list(res['genetic_profile_id'].values())
return genetic_profiles | [
"Return all the genetic profiles (data sets) for a given study.\n\n Genetic profiles are different types of data for a given study. For\n instance the study 'cellline_ccle_broad' has profiles such as\n 'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA'\n for copy number alterations, etc.\n\n Parameters\n ----------\n study_id : str\n The ID of the cBio study.\n Example: 'paad_icgc'\n profile_filter : Optional[str]\n A string used to filter the profiles to return.\n Will be one of:\n - MUTATION\n - MUTATION_EXTENDED\n - COPY_NUMBER_ALTERATION\n - MRNA_EXPRESSION\n - METHYLATION\n The genetic profiles can include \"mutation\", \"CNA\", \"rppa\",\n \"methylation\", etc.\n\n Returns\n -------\n genetic_profiles : list[str]\n A list of genetic profiles available for the given study.\n "
]
|
Please provide a description of the function:def get_cancer_studies(study_filter=None):
data = {'cmd': 'getCancerStudies'}
df = send_request(**data)
res = _filter_data_frame(df, ['cancer_study_id'],
'cancer_study_id', study_filter)
study_ids = list(res['cancer_study_id'].values())
return study_ids | [
"Return a list of cancer study identifiers, optionally filtered.\n\n There are typically multiple studies for a given type of cancer and\n a filter can be used to constrain the returned list.\n\n Parameters\n ----------\n study_filter : Optional[str]\n A string used to filter the study IDs to return. Example: \"paad\"\n\n Returns\n -------\n study_ids : list[str]\n A list of study IDs.\n For instance \"paad\" as a filter would result in a list\n of study IDs with paad in their name like \"paad_icgc\", \"paad_tcga\",\n etc.\n "
]
|
Please provide a description of the function:def get_cancer_types(cancer_filter=None):
data = {'cmd': 'getTypesOfCancer'}
df = send_request(**data)
res = _filter_data_frame(df, ['type_of_cancer_id'], 'name', cancer_filter)
type_ids = list(res['type_of_cancer_id'].values())
return type_ids | [
"Return a list of cancer types, optionally filtered.\n\n Parameters\n ----------\n cancer_filter : Optional[str]\n A string used to filter cancer types. Its value is the name or\n part of the name of a type of cancer. Example: \"melanoma\",\n \"pancreatic\", \"non-small cell lung\"\n\n Returns\n -------\n type_ids : list[str]\n A list of cancer types matching the filter.\n Example: for cancer_filter=\"pancreatic\", the result includes\n \"panet\" (neuro-endocrine) and \"paad\" (adenocarcinoma)\n "
]
|
Please provide a description of the function:def get_ccle_mutations(gene_list, cell_lines, mutation_type=None):
mutations = {cl: {g: [] for g in gene_list} for cl in cell_lines}
for cell_line in cell_lines:
mutations_cl = get_mutations(ccle_study, gene_list,
mutation_type=mutation_type,
case_id=cell_line)
for gene, aa_change in zip(mutations_cl['gene_symbol'],
mutations_cl['amino_acid_change']):
aa_change = str(aa_change)
mutations[cell_line][gene].append(aa_change)
return mutations | [
"Return a dict of mutations in given genes and cell lines from CCLE.\n\n This is a specialized call to get_mutations tailored to CCLE cell lines.\n\n Parameters\n ----------\n gene_list : list[str]\n A list of HGNC gene symbols to get mutations in\n cell_lines : list[str]\n A list of CCLE cell line names to get mutations for.\n mutation_type : Optional[str]\n The type of mutation to filter to.\n mutation_type can be one of: missense, nonsense, frame_shift_ins,\n frame_shift_del, splice_site\n\n Returns\n -------\n mutations : dict\n The result from cBioPortal as a dict in the format\n {cell_line : {gene : [mutation1, mutation2, ...] }}\n\n Example:\n {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']},\n 'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}\n "
]
|
Please provide a description of the function:def get_ccle_lines_for_mutation(gene, amino_acid_change):
data = {'cmd': 'getMutationData',
'case_set_id': ccle_study,
'genetic_profile_id': ccle_study + '_mutations',
'gene_list': gene,
'skiprows': 1}
df = send_request(**data)
df = df[df['amino_acid_change'] == amino_acid_change]
cell_lines = df['case_id'].unique().tolist()
return cell_lines | [
"Return cell lines with a given point mutation in a given gene.\n\n Checks which cell lines in CCLE have a particular point mutation\n in a given gene and return their names in a list.\n\n Parameters\n ----------\n gene : str\n The HGNC symbol of the mutated gene in whose product the amino\n acid change occurs. Example: \"BRAF\"\n amino_acid_change : str\n The amino acid change of interest. Example: \"V600E\"\n\n Returns\n -------\n cell_lines : list\n A list of CCLE cell lines in which the given mutation occurs.\n "
]
|
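For instance, to list CCLE cell lines carrying BRAF V600E (identifiers follow the docstring example):
cell_lines = get_ccle_lines_for_mutation('BRAF', 'V600E')
# e.g. ['LOXIMVI_SKIN', ...] depending on the current CCLE data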
Please provide a description of the function:def get_ccle_cna(gene_list, cell_lines):
profile_data = get_profile_data(ccle_study, gene_list,
'COPY_NUMBER_ALTERATION', 'all')
profile_data = dict((key, value) for key, value in profile_data.items()
if key in cell_lines)
return profile_data | [
"Return a dict of CNAs in given genes and cell lines from CCLE.\n\n CNA values correspond to the following alterations\n\n -2 = homozygous deletion\n\n -1 = hemizygous deletion\n\n 0 = neutral / no change\n\n 1 = gain\n\n 2 = high level amplification\n\n Parameters\n ----------\n gene_list : list[str]\n A list of HGNC gene symbols to get mutations in\n cell_lines : list[str]\n A list of CCLE cell line names to get mutations for.\n\n Returns\n -------\n profile_data : dict[dict[int]]\n A dict keyed to cases containing a dict keyed to genes\n containing int\n "
]
|
Please provide a description of the function:def get_ccle_mrna(gene_list, cell_lines):
gene_list_str = ','.join(gene_list)
data = {'cmd': 'getProfileData',
'case_set_id': ccle_study + '_mrna',
'genetic_profile_id': ccle_study + '_mrna',
'gene_list': gene_list_str,
'skiprows': -1}
df = send_request(**data)
mrna_amounts = {cl: {g: [] for g in gene_list} for cl in cell_lines}
for cell_line in cell_lines:
if cell_line in df.columns:
for gene in gene_list:
value_cell = df[cell_line][df['COMMON'] == gene]
if value_cell.empty:
mrna_amounts[cell_line][gene] = None
elif pandas.isnull(value_cell.values[0]):
mrna_amounts[cell_line][gene] = None
else:
value = value_cell.values[0]
mrna_amounts[cell_line][gene] = value
else:
mrna_amounts[cell_line] = None
return mrna_amounts | [
"Return a dict of mRNA amounts in given genes and cell lines from CCLE.\n\n Parameters\n ----------\n gene_list : list[str]\n A list of HGNC gene symbols to get mRNA amounts for.\n cell_lines : list[str]\n A list of CCLE cell line names to get mRNA amounts for.\n\n Returns\n -------\n mrna_amounts : dict[dict[float]]\n A dict keyed to cell lines containing a dict keyed to genes\n containing float\n "
]
|
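A small usage sketch (gene and cell-line names follow the examples used elsewhere in this module):
mrna = get_ccle_mrna(['BRAF', 'PTEN'], ['LOXIMVI_SKIN', 'SKMEL30_SKIN'])
# mrna['LOXIMVI_SKIN']['BRAF'] -> a float expression value, or None if missing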
Please provide a description of the function:def _filter_data_frame(df, data_col, filter_col, filter_str=None):
if filter_str is not None:
relevant_cols = data_col + [filter_col]
df.dropna(inplace=True, subset=relevant_cols)
row_filter = df[filter_col].str.contains(filter_str, case=False)
data_list = df[row_filter][data_col].to_dict()
else:
data_list = df[data_col].to_dict()
return data_list | [
"Return a filtered data frame as a dictionary."
]
|
Please provide a description of the function:def allow_cors(func):
def wrapper(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = \
'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = \
'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
return func(*args, **kwargs)
return wrapper | [
"This is a decorator which enable CORS for the specified endpoint."
]
|
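The decorator is meant to wrap individual route handlers; a hedged sketch with a hypothetical endpoint (the route path is illustrative only):
from bottle import route
@route('/health', method=['GET', 'OPTIONS'])
@allow_cors
def health():
    # CORS headers are added to the bottle response by the decorator
    return {'status': 'ok'}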
Please provide a description of the function:def trips_process_text():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
tp = trips.process_text(text)
return _stmts_from_proc(tp) | [
"Process text with TRIPS and return INDRA Statements."
]
|
Please provide a description of the function:def trips_process_xml():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
xml_str = body.get('xml_str')
tp = trips.process_xml(xml_str)
return _stmts_from_proc(tp) | [
"Process TRIPS EKB XML and return INDRA Statements."
]
|
Please provide a description of the function:def reach_process_text():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
offline = True if body.get('offline') else False
rp = reach.process_text(text, offline=offline)
return _stmts_from_proc(rp) | [
"Process text with REACH and return INDRA Statements."
]
|
Please provide a description of the function:def reach_process_json():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
json_str = body.get('json')
rp = reach.process_json_str(json_str)
return _stmts_from_proc(rp) | [
"Process REACH json and return INDRA Statements."
]
|
Please provide a description of the function:def reach_process_pmc():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
pmcid = body.get('pmcid')
rp = reach.process_pmc(pmcid)
return _stmts_from_proc(rp) | [
"Process PubMedCentral article and return INDRA Statements."
]
|
Please provide a description of the function:def bel_process_pybel_neighborhood():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
genes = body.get('genes')
bp = bel.process_pybel_neighborhood(genes)
return _stmts_from_proc(bp) | [
"Process BEL Large Corpus neighborhood and return INDRA Statements."
]
|
Please provide a description of the function:def bel_process_belrdf():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
belrdf = body.get('belrdf')
bp = bel.process_belrdf(belrdf)
return _stmts_from_proc(bp) | [
"Process BEL RDF and return INDRA Statements."
]
|
Please provide a description of the function:def biopax_process_pc_pathsbetween():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
genes = body.get('genes')
bp = biopax.process_pc_pathsbetween(genes)
return _stmts_from_proc(bp) | [
"Process PathwayCommons paths between genes, return INDRA Statements."
]
|
Please provide a description of the function:def biopax_process_pc_pathsfromto():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
source = body.get('source')
target = body.get('target')
bp = biopax.process_pc_pathsfromto(source, target)
return _stmts_from_proc(bp) | [
"Process PathwayCommons paths from-to genes, return INDRA Statements."
]
|
Please provide a description of the function:def biopax_process_pc_neighborhood():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
genes = body.get('genes')
bp = biopax.process_pc_neighborhood(genes)
return _stmts_from_proc(bp) | [
"Process PathwayCommons neighborhood, return INDRA Statements."
]
|
Please provide a description of the function:def eidos_process_text():
if request.method == 'OPTIONS':
return {}
req = request.body.read().decode('utf-8')
body = json.loads(req)
text = body.get('text')
webservice = body.get('webservice')
if not webservice:
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': 'No web service address provided.'})
ep = eidos.process_text(text, webservice=webservice)
return _stmts_from_proc(ep) | [
"Process text with EIDOS and return INDRA Statements."
]
|
Please provide a description of the function:def eidos_process_jsonld():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
eidos_json = body.get('jsonld')
ep = eidos.process_json_str(eidos_json)
return _stmts_from_proc(ep) | [
"Process an EIDOS JSON-LD and return INDRA Statements."
]
|
Please provide a description of the function:def cwms_process_text():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
cp = cwms.process_text(text)
return _stmts_from_proc(cp) | [
"Process text with CWMS and return INDRA Statements."
]
|
Please provide a description of the function:def hume_process_jsonld():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
jsonld_str = body.get('jsonld')
jsonld = json.loads(jsonld_str)
hp = hume.process_jsonld(jsonld)
return _stmts_from_proc(hp) | [
"Process Hume JSON-LD and return INDRA Statements."
]
|
Please provide a description of the function:def sofia_process_text():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
auth = body.get('auth')
sp = sofia.process_text(text, auth=auth)
return _stmts_from_proc(sp) | [
"Process text with Sofia and return INDRA Statements."
]
|
Please provide a description of the function:def assemble_pysb():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
export_format = body.get('export_format')
stmts = stmts_from_json(stmts_json)
pa = PysbAssembler()
pa.add_statements(stmts)
pa.make_model()
try:
for m in pa.model.monomers:
pysb_assembler.set_extended_initial_condition(pa.model, m, 0)
except Exception as e:
logger.exception(e)
if not export_format:
model_str = pa.print_model()
elif export_format in ('kappa_im', 'kappa_cm'):
fname = 'model_%s.png' % export_format
root = os.path.dirname(os.path.abspath(fname))
graph = pa.export_model(format=export_format, file_name=fname)
with open(fname, 'rb') as fh:
data = 'data:image/png;base64,%s' % \
base64.b64encode(fh.read()).decode()
return {'image': data}
else:
try:
model_str = pa.export_model(format=export_format)
except Exception as e:
logger.exception(e)
model_str = ''
res = {'model': model_str}
return res | [
"Assemble INDRA Statements and return PySB model string."
]
|
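A client-side sketch of calling such an endpoint over HTTP (host, port and route path are assumptions for illustration; statements are serialized with indra.statements.stmts_to_json):
import requests
from indra.statements import Agent, Phosphorylation, stmts_to_json
stmts_json = stmts_to_json([Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))])
payload = {'statements': stmts_json, 'export_format': 'sbml'}
# Hypothetical local deployment of this REST API
resp = requests.post('http://localhost:8080/assemblers/pysb', json=payload)
print(resp.json()['model'])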
Please provide a description of the function:def assemble_cx():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
ca = CxAssembler(stmts)
model_str = ca.make_model()
res = {'model': model_str}
return res | [
"Assemble INDRA Statements and return CX network json."
]
|
Please provide a description of the function:def share_model_ndex():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_str = body.get('stmts')
stmts_json = json.loads(stmts_str)
stmts = stmts_from_json(stmts_json["statements"])
ca = CxAssembler(stmts)
for n, v in body.items():
ca.cx['networkAttributes'].append({'n': n, 'v': v, 'd': 'string'})
ca.make_model()
network_id = ca.upload_model(private=False)
return {'network_id': network_id} | [
"Upload the model to NDEX"
]
|
Please provide a description of the function:def fetch_model_ndex():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
network_id = body.get('network_id')
cx = process_ndex_network(network_id)
network_attr = [x for x in cx.cx if x.get('networkAttributes')]
network_attr = network_attr[0]['networkAttributes']
keep_keys = ['txt_input', 'parser',
'model_elements', 'preset_pos', 'stmts',
'sentences', 'evidence', 'cell_line', 'mrna', 'mutations']
stored_data = {}
for d in network_attr:
if d['n'] in keep_keys:
stored_data[d['n']] = d['v']
return stored_data | [
"Download model and associated pieces from NDEX"
]
|
Please provide a description of the function:def assemble_graph():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
ga = GraphAssembler(stmts)
model_str = ga.make_model()
res = {'model': model_str}
return res | [
"Assemble INDRA Statements and return Graphviz graph dot string."
]
|
Please provide a description of the function:def assemble_cyjs():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
cja = CyJSAssembler()
cja.add_statements(stmts)
cja.make_model(grouping=True)
model_str = cja.print_cyjs_graph()
return model_str | [
"Assemble INDRA Statements and return Cytoscape JS network."
]
|
Please provide a description of the function:def assemble_english():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
sentences = {}
for st in stmts:
enga = EnglishAssembler()
enga.add_statements([st])
model_str = enga.make_model()
sentences[st.uuid] = model_str
res = {'sentences': sentences}
return res | [
"Assemble each statement into "
]
|
Please provide a description of the function:def assemble_loopy():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
sa = SifAssembler(stmts)
sa.make_model(use_name_as_key=True)
model_str = sa.print_loopy(as_url=True)
res = {'loopy_url': model_str}
return res | [
"Assemble INDRA Statements into a Loopy model using SIF Assembler."
]
|
Please provide a description of the function:def get_ccle_mrna_levels():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
gene_list = body.get('gene_list')
cell_lines = body.get('cell_lines')
mrna_amounts = cbio_client.get_ccle_mrna(gene_list, cell_lines)
res = {'mrna_amounts': mrna_amounts}
return res | [
"Get CCLE mRNA amounts using cBioClient"
]
|
Please provide a description of the function:def get_ccle_cna():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
gene_list = body.get('gene_list')
cell_lines = body.get('cell_lines')
cna = cbio_client.get_ccle_cna(gene_list, cell_lines)
res = {'cna': cna}
return res | [
"Get CCLE CNA\n -2 = homozygous deletion\n -1 = hemizygous deletion\n 0 = neutral / no change\n 1 = gain\n 2 = high level amplification\n "
]
|
Please provide a description of the function:def get_ccle_mutations():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
gene_list = body.get('gene_list')
cell_lines = body.get('cell_lines')
mutations = cbio_client.get_ccle_mutations(gene_list, cell_lines)
res = {'mutations': mutations}
return res | [
"Get CCLE mutations\n returns the amino acid changes for a given list of genes and cell lines\n "
]
|
Please provide a description of the function:def map_grounding():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
stmts_out = ac.map_grounding(stmts)
return _return_stmts(stmts_out) | [
"Map grounding on a list of INDRA Statements."
]
|
Please provide a description of the function:def run_preassembly():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
scorer = body.get('scorer')
return_toplevel = body.get('return_toplevel')
if scorer == 'wm':
belief_scorer = get_eidos_scorer()
else:
belief_scorer = None
stmts_out = ac.run_preassembly(stmts, belief_scorer=belief_scorer,
return_toplevel=return_toplevel)
return _return_stmts(stmts_out) | [
"Run preassembly on a list of INDRA Statements."
]
|
Please provide a description of the function:def map_ontologies():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
om = OntologyMapper(stmts, wm_ontomap, scored=True, symmetric=False)
om.map_statements()
return _return_stmts(stmts) | [
"Run ontology mapping on a list of INDRA Statements."
]
|
Please provide a description of the function:def filter_by_type():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmt_type_str = body.get('type')
stmt_type_str = stmt_type_str.capitalize()
stmt_type = getattr(sys.modules[__name__], stmt_type_str)
stmts = stmts_from_json(stmts_json)
stmts_out = ac.filter_by_type(stmts, stmt_type)
return _return_stmts(stmts_out) | [
"Filter to a given INDRA Statement type."
]
|
Please provide a description of the function:def filter_grounded_only():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
score_threshold = body.get('score_threshold')
if score_threshold is not None:
score_threshold = float(score_threshold)
stmts = stmts_from_json(stmts_json)
stmts_out = ac.filter_grounded_only(stmts, score_threshold=score_threshold)
return _return_stmts(stmts_out) | [
"Filter to grounded Statements only."
]
|
Please provide a description of the function:def filter_belief():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
belief_cutoff = body.get('belief_cutoff')
if belief_cutoff is not None:
belief_cutoff = float(belief_cutoff)
stmts = stmts_from_json(stmts_json)
stmts_out = ac.filter_belief(stmts, belief_cutoff)
return _return_stmts(stmts_out) | [
"Filter to beliefs above a given threshold."
]
|
Please provide a description of the function:def get_git_info():
start_dir = abspath(curdir)
try:
chdir(dirname(abspath(__file__)))
re_patt_str = (r'commit\s+(?P<commit_hash>\w+).*?Author:\s+'
r'(?P<author_name>.*?)\s+<(?P<author_email>.*?)>\s+Date:\s+'
r'(?P<date>.*?)\n\s+(?P<commit_msg>.*?)(?:\ndiff.*?)?$')
show_out = check_output(['git', 'show']).decode('ascii')
revp_out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
revp_out = revp_out.decode('ascii').strip()
m = re.search(re_patt_str, show_out, re.DOTALL)
assert m is not None, \
"Regex pattern:\n\n\"%s\"\n\n failed to match string:\n\n\"%s\"" \
% (re_patt_str, show_out)
ret_dict = m.groupdict()
ret_dict['branch_name'] = revp_out
finally:
chdir(start_dir)
return ret_dict | [
"Get a dict with useful git info."
]
|
Please provide a description of the function:def get_version(with_git_hash=True, refresh_hash=False):
version = __version__
if with_git_hash:
global INDRA_GITHASH
if INDRA_GITHASH is None or refresh_hash:
with open(devnull, 'w') as nul:
try:
ret = check_output(['git', 'rev-parse', 'HEAD'],
cwd=dirname(__file__), stderr=nul)
except CalledProcessError:
ret = 'UNHASHED'
INDRA_GITHASH = ret.strip().decode('utf-8')
version = '%s-%s' % (version, INDRA_GITHASH)
return version | [
"Get an indra version string, including a git hash."
]
|
Please provide a description of the function:def get_upload_content(pmid, force_fulltext_lookup=False):
# Make sure that the PMID doesn't start with PMID so that it doesn't
# screw up the literature clients
if pmid.startswith('PMID'):
pmid = pmid[4:]
# First, check S3:
(ft_content_s3, ft_content_type_s3) = get_full_text(pmid)
# The abstract is on S3 but there is no full text; if we're not forcing
# fulltext lookup, then we're done
if ft_content_type_s3 == 'abstract' and not force_fulltext_lookup:
return (ft_content_s3, ft_content_type_s3)
# If there's nothing (even an abstract on S3), or if there's an abstract
# and we're forcing fulltext lookup, do the lookup
elif ft_content_type_s3 is None or \
(ft_content_type_s3 == 'abstract' and force_fulltext_lookup) or \
(ft_content_type_s3 == 'elsevier_xml' and
not elsevier_client.extract_text(ft_content_s3)):
if ft_content_type_s3 == 'elsevier_xml':
logger.info('PMID%s: elsevier_xml cached on S3 is missing full '
'text element, getting again.' % pmid)
# Try to retrieve from literature client
logger.info("PMID%s: getting content using literature client" % pmid)
(ft_content, ft_content_type) = lit.get_full_text(pmid, 'pmid')
assert ft_content_type in ('pmc_oa_xml', 'elsevier_xml',
'abstract', None)
# If we tried to get the full text and didn't even get the abstract,
# then there was probably a problem with the web service. Try to
# get the abstract instead:
if ft_content_type is None:
return (None, None)
# If we got the abstract, and we already had the abstract on S3, then
# do nothing
elif ft_content_type == 'abstract' and ft_content_type_s3 == 'abstract':
logger.info("PMID%s: found abstract but already had it on " \
"S3; skipping" % pmid)
return (ft_content, ft_content_type)
# If we got the abstract, and we had nothing on S3, then upload
elif ft_content_type == 'abstract' and ft_content_type_s3 is None:
logger.info("PMID%s: found abstract, uploading to S3" % pmid)
put_abstract(pmid, ft_content)
return (ft_content, ft_content_type)
# If we got elsevier_xml, but cannot get a full text element, then
# get and put the abstract
elif ft_content_type == 'elsevier_xml' and \
not elsevier_client.extract_text(ft_content):
logger.info("PMID%s: Couldn't get a full text element for "
"the elsevier_xml content; getting abstract "
% pmid)
abstract = pubmed_client.get_abstract(pmid)
# Abstract is None, so return None
if abstract is None:
logger.info("PMID%s: Unable to get abstract, returning None"
% pmid)
return (None, None)
# Otherwise, upload and return the abstract
else:
logger.info("PMID%s: Uploading and returning abstract "
% pmid)
put_abstract(pmid, abstract)
return (abstract, 'abstract')
# We got a viable full text
# (or something other than None or abstract...)
else:
logger.info("PMID%s: uploading and returning %s"
% (pmid, ft_content_type))
put_full_text(pmid, ft_content, full_text_type=ft_content_type)
return (ft_content, ft_content_type)
# Some form of full text is already on S3
else:
# TODO
# In future, could check for abstract even if full text is found, and
# upload it just to have it
return (ft_content_s3, ft_content_type_s3)
# We should always return before we get here
assert False | [
"Get full text and/or abstract for paper and upload to S3."
]
|
Please provide a description of the function:def _fix_evidence_text(txt):
txt = re.sub('[ ]?\( xref \)', '', txt)
# This is to make [ xref ] become [] to match the two readers
txt = re.sub('\[ xref \]', '[]', txt)
txt = re.sub('[\(]?XREF_BIBR[\)]?[,]?', '', txt)
txt = re.sub('[\(]?XREF_FIG[\)]?[,]?', '', txt)
txt = re.sub('[\(]?XREF_SUPPLEMENT[\)]?[,]?', '', txt)
txt = txt.strip()
return txt | [
"Eliminate some symbols to have cleaner supporting text."
]
|
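A worked example of what this cleanup does (the function is private, so where it is imported from is an assumption):
txt = 'MEK phosphorylates ERK ( xref ) [ xref ] XREF_BIBR,'
_fix_evidence_text(txt)
# -> 'MEK phosphorylates ERK []'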