repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
sorgerlab/indra
indra/assemblers/pysb/export.py
_prepare_kappa
def _prepare_kappa(model): """Return a Kappa STD with the model loaded.""" import kappy kappa = kappy.KappaStd() model_str = export(model, 'kappa') kappa.add_model_string(model_str) kappa.project_parse() return kappa
python
def _prepare_kappa(model):
    """Return a Kappa STD interface with the given model loaded into it."""
    import kappy
    kappa_instance = kappy.KappaStd()
    # Export the model to the Kappa format and hand it to the interface
    kappa_instance.add_model_string(export(model, 'kappa'))
    kappa_instance.project_parse()
    return kappa_instance
[ "def", "_prepare_kappa", "(", "model", ")", ":", "import", "kappy", "kappa", "=", "kappy", ".", "KappaStd", "(", ")", "model_str", "=", "export", "(", "model", ",", "'kappa'", ")", "kappa", ".", "add_model_string", "(", "model_str", ")", "kappa", ".", "project_parse", "(", ")", "return", "kappa" ]
Return a Kappa STD with the model loaded.
[ "Return", "a", "Kappa", "STD", "with", "the", "model", "loaded", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/export.py#L141-L148
train
sorgerlab/indra
indra/databases/cbio_client.py
send_request
def send_request(**kwargs): """Return a data frame from a web service request to cBio portal. Sends a web service requrest to the cBio portal with arguments given in the dictionary data and returns a Pandas data frame on success. More information about the service here: http://www.cbioportal.org/web_api.jsp Parameters ---------- kwargs : dict A dict of parameters for the query. Entries map directly to web service calls with the exception of the optional 'skiprows' entry, whose value is used as the number of rows to skip when reading the result data frame. Returns ------- df : pandas.DataFrame Response from cBioPortal as a Pandas DataFrame. """ skiprows = kwargs.pop('skiprows', None) res = requests.get(cbio_url, params=kwargs) if res.status_code == 200: # Adaptively skip rows based on number of comment lines if skiprows == -1: lines = res.text.split('\n') skiprows = 0 for line in lines: if line.startswith('#'): skiprows += 1 else: break csv_StringIO = StringIO(res.text) df = pandas.read_csv(csv_StringIO, sep='\t', skiprows=skiprows) return df else: logger.error('Request returned with code %d' % res.status_code)
python
def send_request(**kwargs):
    """Return a data frame from a web service request to cBio portal.

    Sends a web service request to the cBio portal with arguments given
    in the dictionary data and returns a Pandas data frame on success.

    More information about the service here:
    http://www.cbioportal.org/web_api.jsp

    Parameters
    ----------
    kwargs : dict
        A dict of parameters for the query. Entries map directly to
        web service calls with the exception of the optional 'skiprows'
        entry, whose value is used as the number of rows to skip when
        reading the result data frame. The special value -1 makes the
        number of skipped rows adapt to the number of leading comment
        lines ('#'-prefixed) in the response.

    Returns
    -------
    df : pandas.DataFrame
        Response from cBioPortal as a Pandas DataFrame, or None if the
        request failed (an error is logged in that case).
    """
    # 'skiprows' is a local control parameter, not a web service argument,
    # so it is removed before the request is sent
    skiprows = kwargs.pop('skiprows', None)
    res = requests.get(cbio_url, params=kwargs)
    if res.status_code == 200:
        # Adaptively skip rows based on number of leading comment lines
        if skiprows == -1:
            skiprows = 0
            for line in res.text.split('\n'):
                if line.startswith('#'):
                    skiprows += 1
                else:
                    break
        csv_StringIO = StringIO(res.text)
        df = pandas.read_csv(csv_StringIO, sep='\t',
                             skiprows=skiprows)
        return df
    else:
        # Lazy %-args: the message is only formatted if the record is emitted
        logger.error('Request returned with code %d', res.status_code)
[ "def", "send_request", "(", "*", "*", "kwargs", ")", ":", "skiprows", "=", "kwargs", ".", "pop", "(", "'skiprows'", ",", "None", ")", "res", "=", "requests", ".", "get", "(", "cbio_url", ",", "params", "=", "kwargs", ")", "if", "res", ".", "status_code", "==", "200", ":", "# Adaptively skip rows based on number of comment lines", "if", "skiprows", "==", "-", "1", ":", "lines", "=", "res", ".", "text", ".", "split", "(", "'\\n'", ")", "skiprows", "=", "0", "for", "line", "in", "lines", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "skiprows", "+=", "1", "else", ":", "break", "csv_StringIO", "=", "StringIO", "(", "res", ".", "text", ")", "df", "=", "pandas", ".", "read_csv", "(", "csv_StringIO", ",", "sep", "=", "'\\t'", ",", "skiprows", "=", "skiprows", ")", "return", "df", "else", ":", "logger", ".", "error", "(", "'Request returned with code %d'", "%", "res", ".", "status_code", ")" ]
Return a data frame from a web service request to cBio portal. Sends a web service request to the cBio portal with arguments given in the dictionary data and returns a Pandas data frame on success. More information about the service here: http://www.cbioportal.org/web_api.jsp Parameters ---------- kwargs : dict A dict of parameters for the query. Entries map directly to web service calls with the exception of the optional 'skiprows' entry, whose value is used as the number of rows to skip when reading the result data frame. Returns ------- df : pandas.DataFrame Response from cBioPortal as a Pandas DataFrame.
[ "Return", "a", "data", "frame", "from", "a", "web", "service", "request", "to", "cBio", "portal", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L25-L63
train
sorgerlab/indra
indra/databases/cbio_client.py
get_mutations
def get_mutations(study_id, gene_list, mutation_type=None, case_id=None): """Return mutations as a list of genes and list of amino acid changes. Parameters ---------- study_id : str The ID of the cBio study. Example: 'cellline_ccle_broad' or 'paad_icgc' gene_list : list[str] A list of genes with their HGNC symbols. Example: ['BRAF', 'KRAS'] mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site case_id : Optional[str] The case ID within the study to filter to. Returns ------- mutations : tuple[list] A tuple of two lists, the first one containing a list of genes, and the second one a list of amino acid changes in those genes. """ genetic_profile = get_genetic_profiles(study_id, 'mutation')[0] gene_list_str = ','.join(gene_list) data = {'cmd': 'getMutationData', 'case_set_id': study_id, 'genetic_profile_id': genetic_profile, 'gene_list': gene_list_str, 'skiprows': -1} df = send_request(**data) if case_id: df = df[df['case_id'] == case_id] res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'], 'mutation_type', mutation_type) mutations = {'gene_symbol': list(res['gene_symbol'].values()), 'amino_acid_change': list(res['amino_acid_change'].values())} return mutations
python
def get_mutations(study_id, gene_list, mutation_type=None,
                  case_id=None):
    """Return mutations as lists of genes and amino acid changes.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study.
        Example: 'cellline_ccle_broad' or 'paad_icgc'
    gene_list : list[str]
        A list of genes with their HGNC symbols.
        Example: ['BRAF', 'KRAS']
    mutation_type : Optional[str]
        The type of mutation to filter to.
        mutation_type can be one of: missense, nonsense, frame_shift_ins,
        frame_shift_del, splice_site
    case_id : Optional[str]
        The case ID within the study to filter to.

    Returns
    -------
    mutations : dict[str, list]
        A dict with two keys: 'gene_symbol', mapping to a list of genes,
        and 'amino_acid_change', mapping to a list of amino acid changes
        in those genes; entries at the same index belong together.
    """
    # Use the first mutation-type genetic profile available for the study
    genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]
    gene_list_str = ','.join(gene_list)

    data = {'cmd': 'getMutationData',
            'case_set_id': study_id,
            'genetic_profile_id': genetic_profile,
            'gene_list': gene_list_str,
            'skiprows': -1}
    df = send_request(**data)
    if case_id:
        df = df[df['case_id'] == case_id]
    res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],
                             'mutation_type', mutation_type)
    mutations = {'gene_symbol': list(res['gene_symbol'].values()),
                 'amino_acid_change': list(res['amino_acid_change'].values())}
    return mutations
[ "def", "get_mutations", "(", "study_id", ",", "gene_list", ",", "mutation_type", "=", "None", ",", "case_id", "=", "None", ")", ":", "genetic_profile", "=", "get_genetic_profiles", "(", "study_id", ",", "'mutation'", ")", "[", "0", "]", "gene_list_str", "=", "','", ".", "join", "(", "gene_list", ")", "data", "=", "{", "'cmd'", ":", "'getMutationData'", ",", "'case_set_id'", ":", "study_id", ",", "'genetic_profile_id'", ":", "genetic_profile", ",", "'gene_list'", ":", "gene_list_str", ",", "'skiprows'", ":", "-", "1", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "if", "case_id", ":", "df", "=", "df", "[", "df", "[", "'case_id'", "]", "==", "case_id", "]", "res", "=", "_filter_data_frame", "(", "df", ",", "[", "'gene_symbol'", ",", "'amino_acid_change'", "]", ",", "'mutation_type'", ",", "mutation_type", ")", "mutations", "=", "{", "'gene_symbol'", ":", "list", "(", "res", "[", "'gene_symbol'", "]", ".", "values", "(", ")", ")", ",", "'amino_acid_change'", ":", "list", "(", "res", "[", "'amino_acid_change'", "]", ".", "values", "(", ")", ")", "}", "return", "mutations" ]
Return mutations as lists of genes and amino acid changes. Parameters ---------- study_id : str The ID of the cBio study. Example: 'cellline_ccle_broad' or 'paad_icgc' gene_list : list[str] A list of genes with their HGNC symbols. Example: ['BRAF', 'KRAS'] mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site case_id : Optional[str] The case ID within the study to filter to. Returns ------- mutations : dict[str, list] A dict with two keys: 'gene_symbol', mapping to a list of genes, and 'amino_acid_change', mapping to a list of amino acid changes in those genes; entries at the same index belong together.
[ "Return", "mutations", "as", "a", "list", "of", "genes", "and", "list", "of", "amino", "acid", "changes", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L66-L106
train
sorgerlab/indra
indra/databases/cbio_client.py
get_case_lists
def get_case_lists(study_id): """Return a list of the case set ids for a particular study. TAKE NOTE the "case_list_id" are the same thing as "case_set_id" Within the data, this string is referred to as a "case_list_id". Within API calls it is referred to as a 'case_set_id'. The documentation does not make this explicitly clear. Parameters ---------- study_id : str The ID of the cBio study. Example: 'cellline_ccle_broad' or 'paad_icgc' Returns ------- case_set_ids : dict[dict[int]] A dict keyed to cases containing a dict keyed to genes containing int """ data = {'cmd': 'getCaseLists', 'cancer_study_id': study_id} df = send_request(**data) case_set_ids = df['case_list_id'].tolist() return case_set_ids
python
def get_case_lists(study_id):
    """Return a list of the case set ids for a particular study.

    TAKE NOTE the "case_list_id" are the same thing as "case_set_id"
    Within the data, this string is referred to as a "case_list_id".
    Within API calls it is referred to as a 'case_set_id'.
    The documentation does not make this explicitly clear.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study.
        Example: 'cellline_ccle_broad' or 'paad_icgc'

    Returns
    -------
    case_set_ids : list[str]
        A list of case set IDs for the given study.
    """
    data = {'cmd': 'getCaseLists',
            'cancer_study_id': study_id}
    df = send_request(**data)
    case_set_ids = df['case_list_id'].tolist()
    return case_set_ids
[ "def", "get_case_lists", "(", "study_id", ")", ":", "data", "=", "{", "'cmd'", ":", "'getCaseLists'", ",", "'cancer_study_id'", ":", "study_id", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "case_set_ids", "=", "df", "[", "'case_list_id'", "]", ".", "tolist", "(", ")", "return", "case_set_ids" ]
Return a list of the case set ids for a particular study. TAKE NOTE the "case_list_id" are the same thing as "case_set_id" Within the data, this string is referred to as a "case_list_id". Within API calls it is referred to as a 'case_set_id'. The documentation does not make this explicitly clear. Parameters ---------- study_id : str The ID of the cBio study. Example: 'cellline_ccle_broad' or 'paad_icgc' Returns ------- case_set_ids : list[str] A list of case set IDs for the given study.
[ "Return", "a", "list", "of", "the", "case", "set", "ids", "for", "a", "particular", "study", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L109-L133
train
sorgerlab/indra
indra/databases/cbio_client.py
get_profile_data
def get_profile_data(study_id, gene_list, profile_filter, case_set_filter=None): """Return dict of cases and genes and their respective values. Parameters ---------- study_id : str The ID of the cBio study. Example: 'cellline_ccle_broad' or 'paad_icgc' gene_list : list[str] A list of genes with their HGNC symbols. Example: ['BRAF', 'KRAS'] profile_filter : str A string used to filter the profiles to return. Will be one of: - MUTATION - MUTATION_EXTENDED - COPY_NUMBER_ALTERATION - MRNA_EXPRESSION - METHYLATION case_set_filter : Optional[str] A string that specifices which case_set_id to use, based on a complete or partial match. If not provided, will look for study_id + '_all' Returns ------- profile_data : dict[dict[int]] A dict keyed to cases containing a dict keyed to genes containing int """ genetic_profiles = get_genetic_profiles(study_id, profile_filter) if genetic_profiles: genetic_profile = genetic_profiles[0] else: return {} gene_list_str = ','.join(gene_list) case_set_ids = get_case_lists(study_id) if case_set_filter: case_set_id = [x for x in case_set_ids if case_set_filter in x][0] else: case_set_id = study_id + '_all' # based on looking at the cBioPortal, this is a common case_set_id data = {'cmd': 'getProfileData', 'case_set_id': case_set_id, 'genetic_profile_id': genetic_profile, 'gene_list': gene_list_str, 'skiprows': -1} df = send_request(**data) case_list_df = [x for x in df.columns.tolist() if x not in ['GENE_ID', 'COMMON']] profile_data = {case: {g: None for g in gene_list} for case in case_list_df} for case in case_list_df: profile_values = df[case].tolist() df_gene_list = df['COMMON'].tolist() for g, cv in zip(df_gene_list, profile_values): if not pandas.isnull(cv): profile_data[case][g] = cv return profile_data
python
def get_profile_data(study_id, gene_list, profile_filter,
                     case_set_filter=None):
    """Return dict of cases and genes and their respective values.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study.
        Example: 'cellline_ccle_broad' or 'paad_icgc'
    gene_list : list[str]
        A list of genes with their HGNC symbols.
        Example: ['BRAF', 'KRAS']
    profile_filter : str
        A string used to filter the profiles to return.
        Will be one of:
        - MUTATION
        - MUTATION_EXTENDED
        - COPY_NUMBER_ALTERATION
        - MRNA_EXPRESSION
        - METHYLATION
    case_set_filter : Optional[str]
        A string that specifies which case_set_id to use, based on a
        complete or partial match. If not provided, will look for
        study_id + '_all'

    Returns
    -------
    profile_data : dict[dict[int]]
        A dict keyed to cases containing a dict keyed to genes
        containing int
    """
    genetic_profiles = get_genetic_profiles(study_id, profile_filter)
    if genetic_profiles:
        genetic_profile = genetic_profiles[0]
    else:
        # No matching profile for this study: nothing to return
        return {}
    gene_list_str = ','.join(gene_list)
    case_set_ids = get_case_lists(study_id)
    if case_set_filter:
        case_set_id = [x for x in case_set_ids if case_set_filter in x][0]
    else:
        case_set_id = study_id + '_all'
        # based on looking at the cBioPortal, this is a common case_set_id
    data = {'cmd': 'getProfileData',
            'case_set_id': case_set_id,
            'genetic_profile_id': genetic_profile,
            'gene_list': gene_list_str,
            'skiprows': -1}
    df = send_request(**data)
    case_list_df = [x for x in df.columns.tolist()
                    if x not in ['GENE_ID', 'COMMON']]
    # The gene column is loop-invariant: convert it once, outside the loop
    df_gene_list = df['COMMON'].tolist()
    profile_data = {case: {g: None for g in gene_list}
                    for case in case_list_df}
    for case in case_list_df:
        profile_values = df[case].tolist()
        for g, cv in zip(df_gene_list, profile_values):
            # Leave the entry as None when the portal reports no value
            if not pandas.isnull(cv):
                profile_data[case][g] = cv
    return profile_data
[ "def", "get_profile_data", "(", "study_id", ",", "gene_list", ",", "profile_filter", ",", "case_set_filter", "=", "None", ")", ":", "genetic_profiles", "=", "get_genetic_profiles", "(", "study_id", ",", "profile_filter", ")", "if", "genetic_profiles", ":", "genetic_profile", "=", "genetic_profiles", "[", "0", "]", "else", ":", "return", "{", "}", "gene_list_str", "=", "','", ".", "join", "(", "gene_list", ")", "case_set_ids", "=", "get_case_lists", "(", "study_id", ")", "if", "case_set_filter", ":", "case_set_id", "=", "[", "x", "for", "x", "in", "case_set_ids", "if", "case_set_filter", "in", "x", "]", "[", "0", "]", "else", ":", "case_set_id", "=", "study_id", "+", "'_all'", "# based on looking at the cBioPortal, this is a common case_set_id", "data", "=", "{", "'cmd'", ":", "'getProfileData'", ",", "'case_set_id'", ":", "case_set_id", ",", "'genetic_profile_id'", ":", "genetic_profile", ",", "'gene_list'", ":", "gene_list_str", ",", "'skiprows'", ":", "-", "1", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "case_list_df", "=", "[", "x", "for", "x", "in", "df", ".", "columns", ".", "tolist", "(", ")", "if", "x", "not", "in", "[", "'GENE_ID'", ",", "'COMMON'", "]", "]", "profile_data", "=", "{", "case", ":", "{", "g", ":", "None", "for", "g", "in", "gene_list", "}", "for", "case", "in", "case_list_df", "}", "for", "case", "in", "case_list_df", ":", "profile_values", "=", "df", "[", "case", "]", ".", "tolist", "(", ")", "df_gene_list", "=", "df", "[", "'COMMON'", "]", ".", "tolist", "(", ")", "for", "g", ",", "cv", "in", "zip", "(", "df_gene_list", ",", "profile_values", ")", ":", "if", "not", "pandas", ".", "isnull", "(", "cv", ")", ":", "profile_data", "[", "case", "]", "[", "g", "]", "=", "cv", "return", "profile_data" ]
Return dict of cases and genes and their respective values. Parameters ---------- study_id : str The ID of the cBio study. Example: 'cellline_ccle_broad' or 'paad_icgc' gene_list : list[str] A list of genes with their HGNC symbols. Example: ['BRAF', 'KRAS'] profile_filter : str A string used to filter the profiles to return. Will be one of: - MUTATION - MUTATION_EXTENDED - COPY_NUMBER_ALTERATION - MRNA_EXPRESSION - METHYLATION case_set_filter : Optional[str] A string that specifices which case_set_id to use, based on a complete or partial match. If not provided, will look for study_id + '_all' Returns ------- profile_data : dict[dict[int]] A dict keyed to cases containing a dict keyed to genes containing int
[ "Return", "dict", "of", "cases", "and", "genes", "and", "their", "respective", "values", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L136-L193
train
sorgerlab/indra
indra/databases/cbio_client.py
get_num_sequenced
def get_num_sequenced(study_id): """Return number of sequenced tumors for given study. This is useful for calculating mutation statistics in terms of the prevalence of certain mutations within a type of cancer. Parameters ---------- study_id : str The ID of the cBio study. Example: 'paad_icgc' Returns ------- num_case : int The number of sequenced tumors in the given study """ data = {'cmd': 'getCaseLists', 'cancer_study_id': study_id} df = send_request(**data) if df.empty: return 0 row_filter = df['case_list_id'].str.contains('sequenced', case=False) num_case = len(df[row_filter]['case_ids'].tolist()[0].split(' ')) return num_case
python
def get_num_sequenced(study_id):
    """Return number of sequenced tumors for given study.

    This is useful for calculating mutation statistics in terms of
    the prevalence of certain mutations within a type of cancer.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study.
        Example: 'paad_icgc'

    Returns
    -------
    num_case : int
        The number of sequenced tumors in the given study, or 0 if the
        study has no case lists or no 'sequenced' case list.
    """
    data = {'cmd': 'getCaseLists',
            'cancer_study_id': study_id}
    df = send_request(**data)
    if df.empty:
        return 0
    row_filter = df['case_list_id'].str.contains('sequenced', case=False)
    case_ids = df[row_filter]['case_ids'].tolist()
    # Guard against studies without a 'sequenced' case list; the previous
    # unconditional [0] indexing raised IndexError in that case
    if not case_ids:
        return 0
    # The case_ids cell is a space-separated list of tumor sample IDs
    num_case = len(case_ids[0].split(' '))
    return num_case
[ "def", "get_num_sequenced", "(", "study_id", ")", ":", "data", "=", "{", "'cmd'", ":", "'getCaseLists'", ",", "'cancer_study_id'", ":", "study_id", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "if", "df", ".", "empty", ":", "return", "0", "row_filter", "=", "df", "[", "'case_list_id'", "]", ".", "str", ".", "contains", "(", "'sequenced'", ",", "case", "=", "False", ")", "num_case", "=", "len", "(", "df", "[", "row_filter", "]", "[", "'case_ids'", "]", ".", "tolist", "(", ")", "[", "0", "]", ".", "split", "(", "' '", ")", ")", "return", "num_case" ]
Return number of sequenced tumors for given study. This is useful for calculating mutation statistics in terms of the prevalence of certain mutations within a type of cancer. Parameters ---------- study_id : str The ID of the cBio study. Example: 'paad_icgc' Returns ------- num_case : int The number of sequenced tumors in the given study
[ "Return", "number", "of", "sequenced", "tumors", "for", "given", "study", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L196-L220
train
sorgerlab/indra
indra/databases/cbio_client.py
get_cancer_studies
def get_cancer_studies(study_filter=None): """Return a list of cancer study identifiers, optionally filtered. There are typically multiple studies for a given type of cancer and a filter can be used to constrain the returned list. Parameters ---------- study_filter : Optional[str] A string used to filter the study IDs to return. Example: "paad" Returns ------- study_ids : list[str] A list of study IDs. For instance "paad" as a filter would result in a list of study IDs with paad in their name like "paad_icgc", "paad_tcga", etc. """ data = {'cmd': 'getCancerStudies'} df = send_request(**data) res = _filter_data_frame(df, ['cancer_study_id'], 'cancer_study_id', study_filter) study_ids = list(res['cancer_study_id'].values()) return study_ids
python
def get_cancer_studies(study_filter=None):
    """Return a list of cancer study identifiers, optionally filtered.

    There are typically multiple studies for a given type of cancer;
    a filter string can be used to constrain the returned list.

    Parameters
    ----------
    study_filter : Optional[str]
        A string used to filter the study IDs to return.
        Example: "paad"

    Returns
    -------
    study_ids : list[str]
        A list of study IDs. For instance "paad" as a filter would
        result in a list of study IDs with paad in their name like
        "paad_icgc", "paad_tcga", etc.
    """
    df = send_request(cmd='getCancerStudies')
    filtered = _filter_data_frame(df, ['cancer_study_id'],
                                  'cancer_study_id', study_filter)
    return list(filtered['cancer_study_id'].values())
[ "def", "get_cancer_studies", "(", "study_filter", "=", "None", ")", ":", "data", "=", "{", "'cmd'", ":", "'getCancerStudies'", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "res", "=", "_filter_data_frame", "(", "df", ",", "[", "'cancer_study_id'", "]", ",", "'cancer_study_id'", ",", "study_filter", ")", "study_ids", "=", "list", "(", "res", "[", "'cancer_study_id'", "]", ".", "values", "(", ")", ")", "return", "study_ids" ]
Return a list of cancer study identifiers, optionally filtered. There are typically multiple studies for a given type of cancer and a filter can be used to constrain the returned list. Parameters ---------- study_filter : Optional[str] A string used to filter the study IDs to return. Example: "paad" Returns ------- study_ids : list[str] A list of study IDs. For instance "paad" as a filter would result in a list of study IDs with paad in their name like "paad_icgc", "paad_tcga", etc.
[ "Return", "a", "list", "of", "cancer", "study", "identifiers", "optionally", "filtered", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L261-L285
train
sorgerlab/indra
indra/databases/cbio_client.py
get_cancer_types
def get_cancer_types(cancer_filter=None): """Return a list of cancer types, optionally filtered. Parameters ---------- cancer_filter : Optional[str] A string used to filter cancer types. Its value is the name or part of the name of a type of cancer. Example: "melanoma", "pancreatic", "non-small cell lung" Returns ------- type_ids : list[str] A list of cancer types matching the filter. Example: for cancer_filter="pancreatic", the result includes "panet" (neuro-endocrine) and "paad" (adenocarcinoma) """ data = {'cmd': 'getTypesOfCancer'} df = send_request(**data) res = _filter_data_frame(df, ['type_of_cancer_id'], 'name', cancer_filter) type_ids = list(res['type_of_cancer_id'].values()) return type_ids
python
def get_cancer_types(cancer_filter=None):
    """Return a list of cancer types, optionally filtered.

    Parameters
    ----------
    cancer_filter : Optional[str]
        A string used to filter cancer types. Its value is the name or
        part of the name of a type of cancer.
        Example: "melanoma", "pancreatic", "non-small cell lung"

    Returns
    -------
    type_ids : list[str]
        A list of cancer types matching the filter.
        Example: for cancer_filter="pancreatic", the result includes
        "panet" (neuro-endocrine) and "paad" (adenocarcinoma)
    """
    df = send_request(cmd='getTypesOfCancer')
    # Filter on the human-readable 'name' column, return the type IDs
    filtered = _filter_data_frame(df, ['type_of_cancer_id'], 'name',
                                  cancer_filter)
    return list(filtered['type_of_cancer_id'].values())
[ "def", "get_cancer_types", "(", "cancer_filter", "=", "None", ")", ":", "data", "=", "{", "'cmd'", ":", "'getTypesOfCancer'", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "res", "=", "_filter_data_frame", "(", "df", ",", "[", "'type_of_cancer_id'", "]", ",", "'name'", ",", "cancer_filter", ")", "type_ids", "=", "list", "(", "res", "[", "'type_of_cancer_id'", "]", ".", "values", "(", ")", ")", "return", "type_ids" ]
Return a list of cancer types, optionally filtered. Parameters ---------- cancer_filter : Optional[str] A string used to filter cancer types. Its value is the name or part of the name of a type of cancer. Example: "melanoma", "pancreatic", "non-small cell lung" Returns ------- type_ids : list[str] A list of cancer types matching the filter. Example: for cancer_filter="pancreatic", the result includes "panet" (neuro-endocrine) and "paad" (adenocarcinoma)
[ "Return", "a", "list", "of", "cancer", "types", "optionally", "filtered", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L288-L309
train
sorgerlab/indra
indra/databases/cbio_client.py
get_ccle_mutations
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None): """Return a dict of mutations in given genes and cell lines from CCLE. This is a specialized call to get_mutations tailored to CCLE cell lines. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site Returns ------- mutations : dict The result from cBioPortal as a dict in the format {cell_line : {gene : [mutation1, mutation2, ...] }} Example: {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']}, 'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}} """ mutations = {cl: {g: [] for g in gene_list} for cl in cell_lines} for cell_line in cell_lines: mutations_cl = get_mutations(ccle_study, gene_list, mutation_type=mutation_type, case_id=cell_line) for gene, aa_change in zip(mutations_cl['gene_symbol'], mutations_cl['amino_acid_change']): aa_change = str(aa_change) mutations[cell_line][gene].append(aa_change) return mutations
python
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None):
    """Return a dict of mutations in given genes and cell lines from CCLE.

    This is a specialized call to get_mutations tailored to CCLE
    cell lines.

    Parameters
    ----------
    gene_list : list[str]
        A list of HGNC gene symbols to get mutations in
    cell_lines : list[str]
        A list of CCLE cell line names to get mutations for.
    mutation_type : Optional[str]
        The type of mutation to filter to.
        mutation_type can be one of: missense, nonsense, frame_shift_ins,
        frame_shift_del, splice_site

    Returns
    -------
    mutations : dict
        The result from cBioPortal as a dict in the format
        {cell_line : {gene : [mutation1, mutation2, ...] }}

        Example:
        {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']},
        'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}
    """
    # Pre-populate every cell line / gene combination with an empty list
    mutations = {}
    for line in cell_lines:
        mutations[line] = {gene: [] for gene in gene_list}
    # One query per cell line, filtered server-side by case_id
    for line in cell_lines:
        result = get_mutations(ccle_study, gene_list,
                               mutation_type=mutation_type,
                               case_id=line)
        pairs = zip(result['gene_symbol'], result['amino_acid_change'])
        for gene, change in pairs:
            # Normalize the change to a string before collecting it
            mutations[line][gene].append(str(change))
    return mutations
[ "def", "get_ccle_mutations", "(", "gene_list", ",", "cell_lines", ",", "mutation_type", "=", "None", ")", ":", "mutations", "=", "{", "cl", ":", "{", "g", ":", "[", "]", "for", "g", "in", "gene_list", "}", "for", "cl", "in", "cell_lines", "}", "for", "cell_line", "in", "cell_lines", ":", "mutations_cl", "=", "get_mutations", "(", "ccle_study", ",", "gene_list", ",", "mutation_type", "=", "mutation_type", ",", "case_id", "=", "cell_line", ")", "for", "gene", ",", "aa_change", "in", "zip", "(", "mutations_cl", "[", "'gene_symbol'", "]", ",", "mutations_cl", "[", "'amino_acid_change'", "]", ")", ":", "aa_change", "=", "str", "(", "aa_change", ")", "mutations", "[", "cell_line", "]", "[", "gene", "]", ".", "append", "(", "aa_change", ")", "return", "mutations" ]
Return a dict of mutations in given genes and cell lines from CCLE. This is a specialized call to get_mutations tailored to CCLE cell lines. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site Returns ------- mutations : dict The result from cBioPortal as a dict in the format {cell_line : {gene : [mutation1, mutation2, ...] }} Example: {'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']}, 'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}
[ "Return", "a", "dict", "of", "mutations", "in", "given", "genes", "and", "cell", "lines", "from", "CCLE", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L312-L347
train
sorgerlab/indra
indra/databases/cbio_client.py
get_ccle_lines_for_mutation
def get_ccle_lines_for_mutation(gene, amino_acid_change): """Return cell lines with a given point mutation in a given gene. Checks which cell lines in CCLE have a particular point mutation in a given gene and return their names in a list. Parameters ---------- gene : str The HGNC symbol of the mutated gene in whose product the amino acid change occurs. Example: "BRAF" amino_acid_change : str The amino acid change of interest. Example: "V600E" Returns ------- cell_lines : list A list of CCLE cell lines in which the given mutation occurs. """ data = {'cmd': 'getMutationData', 'case_set_id': ccle_study, 'genetic_profile_id': ccle_study + '_mutations', 'gene_list': gene, 'skiprows': 1} df = send_request(**data) df = df[df['amino_acid_change'] == amino_acid_change] cell_lines = df['case_id'].unique().tolist() return cell_lines
python
def get_ccle_lines_for_mutation(gene, amino_acid_change): """Return cell lines with a given point mutation in a given gene. Checks which cell lines in CCLE have a particular point mutation in a given gene and return their names in a list. Parameters ---------- gene : str The HGNC symbol of the mutated gene in whose product the amino acid change occurs. Example: "BRAF" amino_acid_change : str The amino acid change of interest. Example: "V600E" Returns ------- cell_lines : list A list of CCLE cell lines in which the given mutation occurs. """ data = {'cmd': 'getMutationData', 'case_set_id': ccle_study, 'genetic_profile_id': ccle_study + '_mutations', 'gene_list': gene, 'skiprows': 1} df = send_request(**data) df = df[df['amino_acid_change'] == amino_acid_change] cell_lines = df['case_id'].unique().tolist() return cell_lines
[ "def", "get_ccle_lines_for_mutation", "(", "gene", ",", "amino_acid_change", ")", ":", "data", "=", "{", "'cmd'", ":", "'getMutationData'", ",", "'case_set_id'", ":", "ccle_study", ",", "'genetic_profile_id'", ":", "ccle_study", "+", "'_mutations'", ",", "'gene_list'", ":", "gene", ",", "'skiprows'", ":", "1", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "df", "=", "df", "[", "df", "[", "'amino_acid_change'", "]", "==", "amino_acid_change", "]", "cell_lines", "=", "df", "[", "'case_id'", "]", ".", "unique", "(", ")", ".", "tolist", "(", ")", "return", "cell_lines" ]
Return cell lines with a given point mutation in a given gene. Checks which cell lines in CCLE have a particular point mutation in a given gene and return their names in a list. Parameters ---------- gene : str The HGNC symbol of the mutated gene in whose product the amino acid change occurs. Example: "BRAF" amino_acid_change : str The amino acid change of interest. Example: "V600E" Returns ------- cell_lines : list A list of CCLE cell lines in which the given mutation occurs.
[ "Return", "cell", "lines", "with", "a", "given", "point", "mutation", "in", "a", "given", "gene", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L350-L377
train
sorgerlab/indra
indra/databases/cbio_client.py
get_ccle_cna
def get_ccle_cna(gene_list, cell_lines): """Return a dict of CNAs in given genes and cell lines from CCLE. CNA values correspond to the following alterations -2 = homozygous deletion -1 = hemizygous deletion 0 = neutral / no change 1 = gain 2 = high level amplification Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. Returns ------- profile_data : dict[dict[int]] A dict keyed to cases containing a dict keyed to genes containing int """ profile_data = get_profile_data(ccle_study, gene_list, 'COPY_NUMBER_ALTERATION', 'all') profile_data = dict((key, value) for key, value in profile_data.items() if key in cell_lines) return profile_data
python
def get_ccle_cna(gene_list, cell_lines): """Return a dict of CNAs in given genes and cell lines from CCLE. CNA values correspond to the following alterations -2 = homozygous deletion -1 = hemizygous deletion 0 = neutral / no change 1 = gain 2 = high level amplification Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. Returns ------- profile_data : dict[dict[int]] A dict keyed to cases containing a dict keyed to genes containing int """ profile_data = get_profile_data(ccle_study, gene_list, 'COPY_NUMBER_ALTERATION', 'all') profile_data = dict((key, value) for key, value in profile_data.items() if key in cell_lines) return profile_data
[ "def", "get_ccle_cna", "(", "gene_list", ",", "cell_lines", ")", ":", "profile_data", "=", "get_profile_data", "(", "ccle_study", ",", "gene_list", ",", "'COPY_NUMBER_ALTERATION'", ",", "'all'", ")", "profile_data", "=", "dict", "(", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "profile_data", ".", "items", "(", ")", "if", "key", "in", "cell_lines", ")", "return", "profile_data" ]
Return a dict of CNAs in given genes and cell lines from CCLE. CNA values correspond to the following alterations -2 = homozygous deletion -1 = hemizygous deletion 0 = neutral / no change 1 = gain 2 = high level amplification Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mutations in cell_lines : list[str] A list of CCLE cell line names to get mutations for. Returns ------- profile_data : dict[dict[int]] A dict keyed to cases containing a dict keyed to genes containing int
[ "Return", "a", "dict", "of", "CNAs", "in", "given", "genes", "and", "cell", "lines", "from", "CCLE", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L380-L412
train
sorgerlab/indra
indra/databases/cbio_client.py
get_ccle_mrna
def get_ccle_mrna(gene_list, cell_lines): """Return a dict of mRNA amounts in given genes and cell lines from CCLE. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mRNA amounts for. cell_lines : list[str] A list of CCLE cell line names to get mRNA amounts for. Returns ------- mrna_amounts : dict[dict[float]] A dict keyed to cell lines containing a dict keyed to genes containing float """ gene_list_str = ','.join(gene_list) data = {'cmd': 'getProfileData', 'case_set_id': ccle_study + '_mrna', 'genetic_profile_id': ccle_study + '_mrna', 'gene_list': gene_list_str, 'skiprows': -1} df = send_request(**data) mrna_amounts = {cl: {g: [] for g in gene_list} for cl in cell_lines} for cell_line in cell_lines: if cell_line in df.columns: for gene in gene_list: value_cell = df[cell_line][df['COMMON'] == gene] if value_cell.empty: mrna_amounts[cell_line][gene] = None elif pandas.isnull(value_cell.values[0]): mrna_amounts[cell_line][gene] = None else: value = value_cell.values[0] mrna_amounts[cell_line][gene] = value else: mrna_amounts[cell_line] = None return mrna_amounts
python
def get_ccle_mrna(gene_list, cell_lines): """Return a dict of mRNA amounts in given genes and cell lines from CCLE. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mRNA amounts for. cell_lines : list[str] A list of CCLE cell line names to get mRNA amounts for. Returns ------- mrna_amounts : dict[dict[float]] A dict keyed to cell lines containing a dict keyed to genes containing float """ gene_list_str = ','.join(gene_list) data = {'cmd': 'getProfileData', 'case_set_id': ccle_study + '_mrna', 'genetic_profile_id': ccle_study + '_mrna', 'gene_list': gene_list_str, 'skiprows': -1} df = send_request(**data) mrna_amounts = {cl: {g: [] for g in gene_list} for cl in cell_lines} for cell_line in cell_lines: if cell_line in df.columns: for gene in gene_list: value_cell = df[cell_line][df['COMMON'] == gene] if value_cell.empty: mrna_amounts[cell_line][gene] = None elif pandas.isnull(value_cell.values[0]): mrna_amounts[cell_line][gene] = None else: value = value_cell.values[0] mrna_amounts[cell_line][gene] = value else: mrna_amounts[cell_line] = None return mrna_amounts
[ "def", "get_ccle_mrna", "(", "gene_list", ",", "cell_lines", ")", ":", "gene_list_str", "=", "','", ".", "join", "(", "gene_list", ")", "data", "=", "{", "'cmd'", ":", "'getProfileData'", ",", "'case_set_id'", ":", "ccle_study", "+", "'_mrna'", ",", "'genetic_profile_id'", ":", "ccle_study", "+", "'_mrna'", ",", "'gene_list'", ":", "gene_list_str", ",", "'skiprows'", ":", "-", "1", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "mrna_amounts", "=", "{", "cl", ":", "{", "g", ":", "[", "]", "for", "g", "in", "gene_list", "}", "for", "cl", "in", "cell_lines", "}", "for", "cell_line", "in", "cell_lines", ":", "if", "cell_line", "in", "df", ".", "columns", ":", "for", "gene", "in", "gene_list", ":", "value_cell", "=", "df", "[", "cell_line", "]", "[", "df", "[", "'COMMON'", "]", "==", "gene", "]", "if", "value_cell", ".", "empty", ":", "mrna_amounts", "[", "cell_line", "]", "[", "gene", "]", "=", "None", "elif", "pandas", ".", "isnull", "(", "value_cell", ".", "values", "[", "0", "]", ")", ":", "mrna_amounts", "[", "cell_line", "]", "[", "gene", "]", "=", "None", "else", ":", "value", "=", "value_cell", ".", "values", "[", "0", "]", "mrna_amounts", "[", "cell_line", "]", "[", "gene", "]", "=", "value", "else", ":", "mrna_amounts", "[", "cell_line", "]", "=", "None", "return", "mrna_amounts" ]
Return a dict of mRNA amounts in given genes and cell lines from CCLE. Parameters ---------- gene_list : list[str] A list of HGNC gene symbols to get mRNA amounts for. cell_lines : list[str] A list of CCLE cell line names to get mRNA amounts for. Returns ------- mrna_amounts : dict[dict[float]] A dict keyed to cell lines containing a dict keyed to genes containing float
[ "Return", "a", "dict", "of", "mRNA", "amounts", "in", "given", "genes", "and", "cell", "lines", "from", "CCLE", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L415-L452
train
sorgerlab/indra
indra/databases/cbio_client.py
_filter_data_frame
def _filter_data_frame(df, data_col, filter_col, filter_str=None): """Return a filtered data frame as a dictionary.""" if filter_str is not None: relevant_cols = data_col + [filter_col] df.dropna(inplace=True, subset=relevant_cols) row_filter = df[filter_col].str.contains(filter_str, case=False) data_list = df[row_filter][data_col].to_dict() else: data_list = df[data_col].to_dict() return data_list
python
def _filter_data_frame(df, data_col, filter_col, filter_str=None): """Return a filtered data frame as a dictionary.""" if filter_str is not None: relevant_cols = data_col + [filter_col] df.dropna(inplace=True, subset=relevant_cols) row_filter = df[filter_col].str.contains(filter_str, case=False) data_list = df[row_filter][data_col].to_dict() else: data_list = df[data_col].to_dict() return data_list
[ "def", "_filter_data_frame", "(", "df", ",", "data_col", ",", "filter_col", ",", "filter_str", "=", "None", ")", ":", "if", "filter_str", "is", "not", "None", ":", "relevant_cols", "=", "data_col", "+", "[", "filter_col", "]", "df", ".", "dropna", "(", "inplace", "=", "True", ",", "subset", "=", "relevant_cols", ")", "row_filter", "=", "df", "[", "filter_col", "]", ".", "str", ".", "contains", "(", "filter_str", ",", "case", "=", "False", ")", "data_list", "=", "df", "[", "row_filter", "]", "[", "data_col", "]", ".", "to_dict", "(", ")", "else", ":", "data_list", "=", "df", "[", "data_col", "]", ".", "to_dict", "(", ")", "return", "data_list" ]
Return a filtered data frame as a dictionary.
[ "Return", "a", "filtered", "data", "frame", "as", "a", "dictionary", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/cbio_client.py#L455-L464
train
sorgerlab/indra
rest_api/api.py
allow_cors
def allow_cors(func): """This is a decorator which enable CORS for the specified endpoint.""" def wrapper(*args, **kwargs): response.headers['Access-Control-Allow-Origin'] = '*' response.headers['Access-Control-Allow-Methods'] = \ 'PUT, GET, POST, DELETE, OPTIONS' response.headers['Access-Control-Allow-Headers'] = \ 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token' return func(*args, **kwargs) return wrapper
python
def allow_cors(func): """This is a decorator which enable CORS for the specified endpoint.""" def wrapper(*args, **kwargs): response.headers['Access-Control-Allow-Origin'] = '*' response.headers['Access-Control-Allow-Methods'] = \ 'PUT, GET, POST, DELETE, OPTIONS' response.headers['Access-Control-Allow-Headers'] = \ 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token' return func(*args, **kwargs) return wrapper
[ "def", "allow_cors", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "response", ".", "headers", "[", "'Access-Control-Allow-Methods'", "]", "=", "'PUT, GET, POST, DELETE, OPTIONS'", "response", ".", "headers", "[", "'Access-Control-Allow-Headers'", "]", "=", "'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
This is a decorator which enable CORS for the specified endpoint.
[ "This", "is", "a", "decorator", "which", "enable", "CORS", "for", "the", "specified", "endpoint", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L50-L59
train
sorgerlab/indra
rest_api/api.py
trips_process_text
def trips_process_text(): """Process text with TRIPS and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') tp = trips.process_text(text) return _stmts_from_proc(tp)
python
def trips_process_text(): """Process text with TRIPS and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') tp = trips.process_text(text) return _stmts_from_proc(tp)
[ "def", "trips_process_text", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "text", "=", "body", ".", "get", "(", "'text'", ")", "tp", "=", "trips", ".", "process_text", "(", "text", ")", "return", "_stmts_from_proc", "(", "tp", ")" ]
Process text with TRIPS and return INDRA Statements.
[ "Process", "text", "with", "TRIPS", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L76-L84
train
sorgerlab/indra
rest_api/api.py
trips_process_xml
def trips_process_xml(): """Process TRIPS EKB XML and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) xml_str = body.get('xml_str') tp = trips.process_xml(xml_str) return _stmts_from_proc(tp)
python
def trips_process_xml(): """Process TRIPS EKB XML and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) xml_str = body.get('xml_str') tp = trips.process_xml(xml_str) return _stmts_from_proc(tp)
[ "def", "trips_process_xml", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "xml_str", "=", "body", ".", "get", "(", "'xml_str'", ")", "tp", "=", "trips", ".", "process_xml", "(", "xml_str", ")", "return", "_stmts_from_proc", "(", "tp", ")" ]
Process TRIPS EKB XML and return INDRA Statements.
[ "Process", "TRIPS", "EKB", "XML", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L89-L97
train
sorgerlab/indra
rest_api/api.py
reach_process_text
def reach_process_text(): """Process text with REACH and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') offline = True if body.get('offline') else False rp = reach.process_text(text, offline=offline) return _stmts_from_proc(rp)
python
def reach_process_text(): """Process text with REACH and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') offline = True if body.get('offline') else False rp = reach.process_text(text, offline=offline) return _stmts_from_proc(rp)
[ "def", "reach_process_text", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "text", "=", "body", ".", "get", "(", "'text'", ")", "offline", "=", "True", "if", "body", ".", "get", "(", "'offline'", ")", "else", "False", "rp", "=", "reach", ".", "process_text", "(", "text", ",", "offline", "=", "offline", ")", "return", "_stmts_from_proc", "(", "rp", ")" ]
Process text with REACH and return INDRA Statements.
[ "Process", "text", "with", "REACH", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L104-L113
train
sorgerlab/indra
rest_api/api.py
reach_process_json
def reach_process_json(): """Process REACH json and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) json_str = body.get('json') rp = reach.process_json_str(json_str) return _stmts_from_proc(rp)
python
def reach_process_json(): """Process REACH json and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) json_str = body.get('json') rp = reach.process_json_str(json_str) return _stmts_from_proc(rp)
[ "def", "reach_process_json", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "json_str", "=", "body", ".", "get", "(", "'json'", ")", "rp", "=", "reach", ".", "process_json_str", "(", "json_str", ")", "return", "_stmts_from_proc", "(", "rp", ")" ]
Process REACH json and return INDRA Statements.
[ "Process", "REACH", "json", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L118-L126
train
sorgerlab/indra
rest_api/api.py
reach_process_pmc
def reach_process_pmc(): """Process PubMedCentral article and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) pmcid = body.get('pmcid') rp = reach.process_pmc(pmcid) return _stmts_from_proc(rp)
python
def reach_process_pmc(): """Process PubMedCentral article and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) pmcid = body.get('pmcid') rp = reach.process_pmc(pmcid) return _stmts_from_proc(rp)
[ "def", "reach_process_pmc", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "pmcid", "=", "body", ".", "get", "(", "'pmcid'", ")", "rp", "=", "reach", ".", "process_pmc", "(", "pmcid", ")", "return", "_stmts_from_proc", "(", "rp", ")" ]
Process PubMedCentral article and return INDRA Statements.
[ "Process", "PubMedCentral", "article", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L131-L139
train
sorgerlab/indra
rest_api/api.py
bel_process_pybel_neighborhood
def bel_process_pybel_neighborhood(): """Process BEL Large Corpus neighborhood and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) genes = body.get('genes') bp = bel.process_pybel_neighborhood(genes) return _stmts_from_proc(bp)
python
def bel_process_pybel_neighborhood(): """Process BEL Large Corpus neighborhood and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) genes = body.get('genes') bp = bel.process_pybel_neighborhood(genes) return _stmts_from_proc(bp)
[ "def", "bel_process_pybel_neighborhood", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "genes", "=", "body", ".", "get", "(", "'genes'", ")", "bp", "=", "bel", ".", "process_pybel_neighborhood", "(", "genes", ")", "return", "_stmts_from_proc", "(", "bp", ")" ]
Process BEL Large Corpus neighborhood and return INDRA Statements.
[ "Process", "BEL", "Large", "Corpus", "neighborhood", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L147-L155
train
sorgerlab/indra
rest_api/api.py
bel_process_belrdf
def bel_process_belrdf(): """Process BEL RDF and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) belrdf = body.get('belrdf') bp = bel.process_belrdf(belrdf) return _stmts_from_proc(bp)
python
def bel_process_belrdf(): """Process BEL RDF and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) belrdf = body.get('belrdf') bp = bel.process_belrdf(belrdf) return _stmts_from_proc(bp)
[ "def", "bel_process_belrdf", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "belrdf", "=", "body", ".", "get", "(", "'belrdf'", ")", "bp", "=", "bel", ".", "process_belrdf", "(", "belrdf", ")", "return", "_stmts_from_proc", "(", "bp", ")" ]
Process BEL RDF and return INDRA Statements.
[ "Process", "BEL", "RDF", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L160-L168
train
sorgerlab/indra
rest_api/api.py
biopax_process_pc_pathsbetween
def biopax_process_pc_pathsbetween(): """Process PathwayCommons paths between genes, return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) genes = body.get('genes') bp = biopax.process_pc_pathsbetween(genes) return _stmts_from_proc(bp)
python
def biopax_process_pc_pathsbetween(): """Process PathwayCommons paths between genes, return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) genes = body.get('genes') bp = biopax.process_pc_pathsbetween(genes) return _stmts_from_proc(bp)
[ "def", "biopax_process_pc_pathsbetween", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "genes", "=", "body", ".", "get", "(", "'genes'", ")", "bp", "=", "biopax", ".", "process_pc_pathsbetween", "(", "genes", ")", "return", "_stmts_from_proc", "(", "bp", ")" ]
Process PathwayCommons paths between genes, return INDRA Statements.
[ "Process", "PathwayCommons", "paths", "between", "genes", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L174-L182
train
sorgerlab/indra
rest_api/api.py
biopax_process_pc_pathsfromto
def biopax_process_pc_pathsfromto(): """Process PathwayCommons paths from-to genes, return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) source = body.get('source') target = body.get('target') bp = biopax.process_pc_pathsfromto(source, target) return _stmts_from_proc(bp)
python
def biopax_process_pc_pathsfromto(): """Process PathwayCommons paths from-to genes, return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) source = body.get('source') target = body.get('target') bp = biopax.process_pc_pathsfromto(source, target) return _stmts_from_proc(bp)
[ "def", "biopax_process_pc_pathsfromto", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "source", "=", "body", ".", "get", "(", "'source'", ")", "target", "=", "body", ".", "get", "(", "'target'", ")", "bp", "=", "biopax", ".", "process_pc_pathsfromto", "(", "source", ",", "target", ")", "return", "_stmts_from_proc", "(", "bp", ")" ]
Process PathwayCommons paths from-to genes, return INDRA Statements.
[ "Process", "PathwayCommons", "paths", "from", "-", "to", "genes", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L187-L196
train
sorgerlab/indra
rest_api/api.py
biopax_process_pc_neighborhood
def biopax_process_pc_neighborhood(): """Process PathwayCommons neighborhood, return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) genes = body.get('genes') bp = biopax.process_pc_neighborhood(genes) return _stmts_from_proc(bp)
python
def biopax_process_pc_neighborhood(): """Process PathwayCommons neighborhood, return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) genes = body.get('genes') bp = biopax.process_pc_neighborhood(genes) return _stmts_from_proc(bp)
[ "def", "biopax_process_pc_neighborhood", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "genes", "=", "body", ".", "get", "(", "'genes'", ")", "bp", "=", "biopax", ".", "process_pc_neighborhood", "(", "genes", ")", "return", "_stmts_from_proc", "(", "bp", ")" ]
Process PathwayCommons neighborhood, return INDRA Statements.
[ "Process", "PathwayCommons", "neighborhood", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L201-L209
train
sorgerlab/indra
rest_api/api.py
eidos_process_text
def eidos_process_text(): """Process text with EIDOS and return INDRA Statements.""" if request.method == 'OPTIONS': return {} req = request.body.read().decode('utf-8') body = json.loads(req) text = body.get('text') webservice = body.get('webservice') if not webservice: response.status = 400 response.content_type = 'application/json' return json.dumps({'error': 'No web service address provided.'}) ep = eidos.process_text(text, webservice=webservice) return _stmts_from_proc(ep)
python
def eidos_process_text(): """Process text with EIDOS and return INDRA Statements.""" if request.method == 'OPTIONS': return {} req = request.body.read().decode('utf-8') body = json.loads(req) text = body.get('text') webservice = body.get('webservice') if not webservice: response.status = 400 response.content_type = 'application/json' return json.dumps({'error': 'No web service address provided.'}) ep = eidos.process_text(text, webservice=webservice) return _stmts_from_proc(ep)
[ "def", "eidos_process_text", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "req", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "req", ")", "text", "=", "body", ".", "get", "(", "'text'", ")", "webservice", "=", "body", ".", "get", "(", "'webservice'", ")", "if", "not", "webservice", ":", "response", ".", "status", "=", "400", "response", ".", "content_type", "=", "'application/json'", "return", "json", ".", "dumps", "(", "{", "'error'", ":", "'No web service address provided.'", "}", ")", "ep", "=", "eidos", ".", "process_text", "(", "text", ",", "webservice", "=", "webservice", ")", "return", "_stmts_from_proc", "(", "ep", ")" ]
Process text with EIDOS and return INDRA Statements.
[ "Process", "text", "with", "EIDOS", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L215-L228
train
sorgerlab/indra
rest_api/api.py
eidos_process_jsonld
def eidos_process_jsonld(): """Process an EIDOS JSON-LD and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) eidos_json = body.get('jsonld') ep = eidos.process_json_str(eidos_json) return _stmts_from_proc(ep)
python
def eidos_process_jsonld(): """Process an EIDOS JSON-LD and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) eidos_json = body.get('jsonld') ep = eidos.process_json_str(eidos_json) return _stmts_from_proc(ep)
[ "def", "eidos_process_jsonld", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "eidos_json", "=", "body", ".", "get", "(", "'jsonld'", ")", "ep", "=", "eidos", ".", "process_json_str", "(", "eidos_json", ")", "return", "_stmts_from_proc", "(", "ep", ")" ]
Process an EIDOS JSON-LD and return INDRA Statements.
[ "Process", "an", "EIDOS", "JSON", "-", "LD", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L233-L241
train
sorgerlab/indra
rest_api/api.py
cwms_process_text
def cwms_process_text(): """Process text with CWMS and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') cp = cwms.process_text(text) return _stmts_from_proc(cp)
python
def cwms_process_text(): """Process text with CWMS and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') cp = cwms.process_text(text) return _stmts_from_proc(cp)
[ "def", "cwms_process_text", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "text", "=", "body", ".", "get", "(", "'text'", ")", "cp", "=", "cwms", ".", "process_text", "(", "text", ")", "return", "_stmts_from_proc", "(", "cp", ")" ]
Process text with CWMS and return INDRA Statements.
[ "Process", "text", "with", "CWMS", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L246-L254
train
sorgerlab/indra
rest_api/api.py
hume_process_jsonld
def hume_process_jsonld(): """Process Hume JSON-LD and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) jsonld_str = body.get('jsonld') jsonld = json.loads(jsonld_str) hp = hume.process_jsonld(jsonld) return _stmts_from_proc(hp)
python
def hume_process_jsonld(): """Process Hume JSON-LD and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) jsonld_str = body.get('jsonld') jsonld = json.loads(jsonld_str) hp = hume.process_jsonld(jsonld) return _stmts_from_proc(hp)
[ "def", "hume_process_jsonld", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "jsonld_str", "=", "body", ".", "get", "(", "'jsonld'", ")", "jsonld", "=", "json", ".", "loads", "(", "jsonld_str", ")", "hp", "=", "hume", ".", "process_jsonld", "(", "jsonld", ")", "return", "_stmts_from_proc", "(", "hp", ")" ]
Process Hume JSON-LD and return INDRA Statements.
[ "Process", "Hume", "JSON", "-", "LD", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L259-L268
train
sorgerlab/indra
rest_api/api.py
sofia_process_text
def sofia_process_text(): """Process text with Sofia and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') auth = body.get('auth') sp = sofia.process_text(text, auth=auth) return _stmts_from_proc(sp)
python
def sofia_process_text(): """Process text with Sofia and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) text = body.get('text') auth = body.get('auth') sp = sofia.process_text(text, auth=auth) return _stmts_from_proc(sp)
[ "def", "sofia_process_text", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "text", "=", "body", ".", "get", "(", "'text'", ")", "auth", "=", "body", ".", "get", "(", "'auth'", ")", "sp", "=", "sofia", ".", "process_text", "(", "text", ",", "auth", "=", "auth", ")", "return", "_stmts_from_proc", "(", "sp", ")" ]
Process text with Sofia and return INDRA Statements.
[ "Process", "text", "with", "Sofia", "and", "return", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L288-L297
train
sorgerlab/indra
rest_api/api.py
assemble_pysb
def assemble_pysb(): """Assemble INDRA Statements and return PySB model string.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') export_format = body.get('export_format') stmts = stmts_from_json(stmts_json) pa = PysbAssembler() pa.add_statements(stmts) pa.make_model() try: for m in pa.model.monomers: pysb_assembler.set_extended_initial_condition(pa.model, m, 0) except Exception as e: logger.exception(e) if not export_format: model_str = pa.print_model() elif export_format in ('kappa_im', 'kappa_cm'): fname = 'model_%s.png' % export_format root = os.path.dirname(os.path.abspath(fname)) graph = pa.export_model(format=export_format, file_name=fname) with open(fname, 'rb') as fh: data = 'data:image/png;base64,%s' % \ base64.b64encode(fh.read()).decode() return {'image': data} else: try: model_str = pa.export_model(format=export_format) except Exception as e: logger.exception(e) model_str = '' res = {'model': model_str} return res
python
def assemble_pysb(): """Assemble INDRA Statements and return PySB model string.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') export_format = body.get('export_format') stmts = stmts_from_json(stmts_json) pa = PysbAssembler() pa.add_statements(stmts) pa.make_model() try: for m in pa.model.monomers: pysb_assembler.set_extended_initial_condition(pa.model, m, 0) except Exception as e: logger.exception(e) if not export_format: model_str = pa.print_model() elif export_format in ('kappa_im', 'kappa_cm'): fname = 'model_%s.png' % export_format root = os.path.dirname(os.path.abspath(fname)) graph = pa.export_model(format=export_format, file_name=fname) with open(fname, 'rb') as fh: data = 'data:image/png;base64,%s' % \ base64.b64encode(fh.read()).decode() return {'image': data} else: try: model_str = pa.export_model(format=export_format) except Exception as e: logger.exception(e) model_str = '' res = {'model': model_str} return res
[ "def", "assemble_pysb", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "export_format", "=", "body", ".", "get", "(", "'export_format'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "pa", "=", "PysbAssembler", "(", ")", "pa", ".", "add_statements", "(", "stmts", ")", "pa", ".", "make_model", "(", ")", "try", ":", "for", "m", "in", "pa", ".", "model", ".", "monomers", ":", "pysb_assembler", ".", "set_extended_initial_condition", "(", "pa", ".", "model", ",", "m", ",", "0", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "if", "not", "export_format", ":", "model_str", "=", "pa", ".", "print_model", "(", ")", "elif", "export_format", "in", "(", "'kappa_im'", ",", "'kappa_cm'", ")", ":", "fname", "=", "'model_%s.png'", "%", "export_format", "root", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "fname", ")", ")", "graph", "=", "pa", ".", "export_model", "(", "format", "=", "export_format", ",", "file_name", "=", "fname", ")", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "fh", ":", "data", "=", "'data:image/png;base64,%s'", "%", "base64", ".", "b64encode", "(", "fh", ".", "read", "(", ")", ")", ".", "decode", "(", ")", "return", "{", "'image'", ":", "data", "}", "else", ":", "try", ":", "model_str", "=", "pa", ".", "export_model", "(", "format", "=", "export_format", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "model_str", "=", "''", "res", "=", "{", "'model'", ":", "model_str", "}", "return", "res" ]
Assemble INDRA Statements and return PySB model string.
[ "Assemble", "INDRA", "Statements", "and", "return", "PySB", "model", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L307-L342
train
sorgerlab/indra
rest_api/api.py
assemble_cx
def assemble_cx(): """Assemble INDRA Statements and return CX network json.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) ca = CxAssembler(stmts) model_str = ca.make_model() res = {'model': model_str} return res
python
def assemble_cx(): """Assemble INDRA Statements and return CX network json.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) ca = CxAssembler(stmts) model_str = ca.make_model() res = {'model': model_str} return res
[ "def", "assemble_cx", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "ca", "=", "CxAssembler", "(", "stmts", ")", "model_str", "=", "ca", ".", "make_model", "(", ")", "res", "=", "{", "'model'", ":", "model_str", "}", "return", "res" ]
Assemble INDRA Statements and return CX network json.
[ "Assemble", "INDRA", "Statements", "and", "return", "CX", "network", "json", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L348-L359
train
sorgerlab/indra
rest_api/api.py
share_model_ndex
def share_model_ndex(): """Upload the model to NDEX""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_str = body.get('stmts') stmts_json = json.loads(stmts_str) stmts = stmts_from_json(stmts_json["statements"]) ca = CxAssembler(stmts) for n, v in body.items(): ca.cx['networkAttributes'].append({'n': n, 'v': v, 'd': 'string'}) ca.make_model() network_id = ca.upload_model(private=False) return {'network_id': network_id}
python
def share_model_ndex(): """Upload the model to NDEX""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_str = body.get('stmts') stmts_json = json.loads(stmts_str) stmts = stmts_from_json(stmts_json["statements"]) ca = CxAssembler(stmts) for n, v in body.items(): ca.cx['networkAttributes'].append({'n': n, 'v': v, 'd': 'string'}) ca.make_model() network_id = ca.upload_model(private=False) return {'network_id': network_id}
[ "def", "share_model_ndex", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_str", "=", "body", ".", "get", "(", "'stmts'", ")", "stmts_json", "=", "json", ".", "loads", "(", "stmts_str", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", "[", "\"statements\"", "]", ")", "ca", "=", "CxAssembler", "(", "stmts", ")", "for", "n", ",", "v", "in", "body", ".", "items", "(", ")", ":", "ca", ".", "cx", "[", "'networkAttributes'", "]", ".", "append", "(", "{", "'n'", ":", "n", ",", "'v'", ":", "v", ",", "'d'", ":", "'string'", "}", ")", "ca", ".", "make_model", "(", ")", "network_id", "=", "ca", ".", "upload_model", "(", "private", "=", "False", ")", "return", "{", "'network_id'", ":", "network_id", "}" ]
Upload the model to NDEX
[ "Upload", "the", "model", "to", "NDEX" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L365-L379
train
sorgerlab/indra
rest_api/api.py
fetch_model_ndex
def fetch_model_ndex(): """Download model and associated pieces from NDEX""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) network_id = body.get('network_id') cx = process_ndex_network(network_id) network_attr = [x for x in cx.cx if x.get('networkAttributes')] network_attr = network_attr[0]['networkAttributes'] keep_keys = ['txt_input', 'parser', 'model_elements', 'preset_pos', 'stmts', 'sentences', 'evidence', 'cell_line', 'mrna', 'mutations'] stored_data = {} for d in network_attr: if d['n'] in keep_keys: stored_data[d['n']] = d['v'] return stored_data
python
def fetch_model_ndex(): """Download model and associated pieces from NDEX""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) network_id = body.get('network_id') cx = process_ndex_network(network_id) network_attr = [x for x in cx.cx if x.get('networkAttributes')] network_attr = network_attr[0]['networkAttributes'] keep_keys = ['txt_input', 'parser', 'model_elements', 'preset_pos', 'stmts', 'sentences', 'evidence', 'cell_line', 'mrna', 'mutations'] stored_data = {} for d in network_attr: if d['n'] in keep_keys: stored_data[d['n']] = d['v'] return stored_data
[ "def", "fetch_model_ndex", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "network_id", "=", "body", ".", "get", "(", "'network_id'", ")", "cx", "=", "process_ndex_network", "(", "network_id", ")", "network_attr", "=", "[", "x", "for", "x", "in", "cx", ".", "cx", "if", "x", ".", "get", "(", "'networkAttributes'", ")", "]", "network_attr", "=", "network_attr", "[", "0", "]", "[", "'networkAttributes'", "]", "keep_keys", "=", "[", "'txt_input'", ",", "'parser'", ",", "'model_elements'", ",", "'preset_pos'", ",", "'stmts'", ",", "'sentences'", ",", "'evidence'", ",", "'cell_line'", ",", "'mrna'", ",", "'mutations'", "]", "stored_data", "=", "{", "}", "for", "d", "in", "network_attr", ":", "if", "d", "[", "'n'", "]", "in", "keep_keys", ":", "stored_data", "[", "d", "[", "'n'", "]", "]", "=", "d", "[", "'v'", "]", "return", "stored_data" ]
Download model and associated pieces from NDEX
[ "Download", "model", "and", "associated", "pieces", "from", "NDEX" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L384-L401
train
sorgerlab/indra
rest_api/api.py
assemble_graph
def assemble_graph(): """Assemble INDRA Statements and return Graphviz graph dot string.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) ga = GraphAssembler(stmts) model_str = ga.make_model() res = {'model': model_str} return res
python
def assemble_graph(): """Assemble INDRA Statements and return Graphviz graph dot string.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) ga = GraphAssembler(stmts) model_str = ga.make_model() res = {'model': model_str} return res
[ "def", "assemble_graph", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "ga", "=", "GraphAssembler", "(", "stmts", ")", "model_str", "=", "ga", ".", "make_model", "(", ")", "res", "=", "{", "'model'", ":", "model_str", "}", "return", "res" ]
Assemble INDRA Statements and return Graphviz graph dot string.
[ "Assemble", "INDRA", "Statements", "and", "return", "Graphviz", "graph", "dot", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L407-L418
train
sorgerlab/indra
rest_api/api.py
assemble_cyjs
def assemble_cyjs(): """Assemble INDRA Statements and return Cytoscape JS network.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) cja = CyJSAssembler() cja.add_statements(stmts) cja.make_model(grouping=True) model_str = cja.print_cyjs_graph() return model_str
python
def assemble_cyjs(): """Assemble INDRA Statements and return Cytoscape JS network.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) cja = CyJSAssembler() cja.add_statements(stmts) cja.make_model(grouping=True) model_str = cja.print_cyjs_graph() return model_str
[ "def", "assemble_cyjs", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "cja", "=", "CyJSAssembler", "(", ")", "cja", ".", "add_statements", "(", "stmts", ")", "cja", ".", "make_model", "(", "grouping", "=", "True", ")", "model_str", "=", "cja", ".", "print_cyjs_graph", "(", ")", "return", "model_str" ]
Assemble INDRA Statements and return Cytoscape JS network.
[ "Assemble", "INDRA", "Statements", "and", "return", "Cytoscape", "JS", "network", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L424-L436
train
sorgerlab/indra
rest_api/api.py
assemble_english
def assemble_english(): """Assemble each statement into """ if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) sentences = {} for st in stmts: enga = EnglishAssembler() enga.add_statements([st]) model_str = enga.make_model() sentences[st.uuid] = model_str res = {'sentences': sentences} return res
python
def assemble_english(): """Assemble each statement into """ if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) sentences = {} for st in stmts: enga = EnglishAssembler() enga.add_statements([st]) model_str = enga.make_model() sentences[st.uuid] = model_str res = {'sentences': sentences} return res
[ "def", "assemble_english", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "sentences", "=", "{", "}", "for", "st", "in", "stmts", ":", "enga", "=", "EnglishAssembler", "(", ")", "enga", ".", "add_statements", "(", "[", "st", "]", ")", "model_str", "=", "enga", ".", "make_model", "(", ")", "sentences", "[", "st", ".", "uuid", "]", "=", "model_str", "res", "=", "{", "'sentences'", ":", "sentences", "}", "return", "res" ]
Assemble each statement into
[ "Assemble", "each", "statement", "into" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L442-L457
train
sorgerlab/indra
rest_api/api.py
assemble_loopy
def assemble_loopy(): """Assemble INDRA Statements into a Loopy model using SIF Assembler.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) sa = SifAssembler(stmts) sa.make_model(use_name_as_key=True) model_str = sa.print_loopy(as_url=True) res = {'loopy_url': model_str} return res
python
def assemble_loopy(): """Assemble INDRA Statements into a Loopy model using SIF Assembler.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) sa = SifAssembler(stmts) sa.make_model(use_name_as_key=True) model_str = sa.print_loopy(as_url=True) res = {'loopy_url': model_str} return res
[ "def", "assemble_loopy", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "sa", "=", "SifAssembler", "(", "stmts", ")", "sa", ".", "make_model", "(", "use_name_as_key", "=", "True", ")", "model_str", "=", "sa", ".", "print_loopy", "(", "as_url", "=", "True", ")", "res", "=", "{", "'loopy_url'", ":", "model_str", "}", "return", "res" ]
Assemble INDRA Statements into a Loopy model using SIF Assembler.
[ "Assemble", "INDRA", "Statements", "into", "a", "Loopy", "model", "using", "SIF", "Assembler", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L462-L474
train
sorgerlab/indra
rest_api/api.py
get_ccle_mrna_levels
def get_ccle_mrna_levels(): """Get CCLE mRNA amounts using cBioClient""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) gene_list = body.get('gene_list') cell_lines = body.get('cell_lines') mrna_amounts = cbio_client.get_ccle_mrna(gene_list, cell_lines) res = {'mrna_amounts': mrna_amounts} return res
python
def get_ccle_mrna_levels(): """Get CCLE mRNA amounts using cBioClient""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) gene_list = body.get('gene_list') cell_lines = body.get('cell_lines') mrna_amounts = cbio_client.get_ccle_mrna(gene_list, cell_lines) res = {'mrna_amounts': mrna_amounts} return res
[ "def", "get_ccle_mrna_levels", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "gene_list", "=", "body", ".", "get", "(", "'gene_list'", ")", "cell_lines", "=", "body", ".", "get", "(", "'cell_lines'", ")", "mrna_amounts", "=", "cbio_client", ".", "get_ccle_mrna", "(", "gene_list", ",", "cell_lines", ")", "res", "=", "{", "'mrna_amounts'", ":", "mrna_amounts", "}", "return", "res" ]
Get CCLE mRNA amounts using cBioClient
[ "Get", "CCLE", "mRNA", "amounts", "using", "cBioClient" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L480-L490
train
sorgerlab/indra
rest_api/api.py
get_ccle_mutations
def get_ccle_mutations(): """Get CCLE mutations returns the amino acid changes for a given list of genes and cell lines """ if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) gene_list = body.get('gene_list') cell_lines = body.get('cell_lines') mutations = cbio_client.get_ccle_mutations(gene_list, cell_lines) res = {'mutations': mutations} return res
python
def get_ccle_mutations(): """Get CCLE mutations returns the amino acid changes for a given list of genes and cell lines """ if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) gene_list = body.get('gene_list') cell_lines = body.get('cell_lines') mutations = cbio_client.get_ccle_mutations(gene_list, cell_lines) res = {'mutations': mutations} return res
[ "def", "get_ccle_mutations", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "gene_list", "=", "body", ".", "get", "(", "'gene_list'", ")", "cell_lines", "=", "body", ".", "get", "(", "'cell_lines'", ")", "mutations", "=", "cbio_client", ".", "get_ccle_mutations", "(", "gene_list", ",", "cell_lines", ")", "res", "=", "{", "'mutations'", ":", "mutations", "}", "return", "res" ]
Get CCLE mutations returns the amino acid changes for a given list of genes and cell lines
[ "Get", "CCLE", "mutations", "returns", "the", "amino", "acid", "changes", "for", "a", "given", "list", "of", "genes", "and", "cell", "lines" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L517-L529
train
sorgerlab/indra
rest_api/api.py
map_grounding
def map_grounding(): """Map grounding on a list of INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) stmts_out = ac.map_grounding(stmts) return _return_stmts(stmts_out)
python
def map_grounding(): """Map grounding on a list of INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) stmts_out = ac.map_grounding(stmts) return _return_stmts(stmts_out)
[ "def", "map_grounding", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "stmts_out", "=", "ac", ".", "map_grounding", "(", "stmts", ")", "return", "_return_stmts", "(", "stmts_out", ")" ]
Map grounding on a list of INDRA Statements.
[ "Map", "grounding", "on", "a", "list", "of", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L534-L543
train
sorgerlab/indra
rest_api/api.py
run_preassembly
def run_preassembly(): """Run preassembly on a list of INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) scorer = body.get('scorer') return_toplevel = body.get('return_toplevel') if scorer == 'wm': belief_scorer = get_eidos_scorer() else: belief_scorer = None stmts_out = ac.run_preassembly(stmts, belief_scorer=belief_scorer, return_toplevel=return_toplevel) return _return_stmts(stmts_out)
python
def run_preassembly(): """Run preassembly on a list of INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) scorer = body.get('scorer') return_toplevel = body.get('return_toplevel') if scorer == 'wm': belief_scorer = get_eidos_scorer() else: belief_scorer = None stmts_out = ac.run_preassembly(stmts, belief_scorer=belief_scorer, return_toplevel=return_toplevel) return _return_stmts(stmts_out)
[ "def", "run_preassembly", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "scorer", "=", "body", ".", "get", "(", "'scorer'", ")", "return_toplevel", "=", "body", ".", "get", "(", "'return_toplevel'", ")", "if", "scorer", "==", "'wm'", ":", "belief_scorer", "=", "get_eidos_scorer", "(", ")", "else", ":", "belief_scorer", "=", "None", "stmts_out", "=", "ac", ".", "run_preassembly", "(", "stmts", ",", "belief_scorer", "=", "belief_scorer", ",", "return_toplevel", "=", "return_toplevel", ")", "return", "_return_stmts", "(", "stmts_out", ")" ]
Run preassembly on a list of INDRA Statements.
[ "Run", "preassembly", "on", "a", "list", "of", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L562-L578
train
sorgerlab/indra
rest_api/api.py
map_ontologies
def map_ontologies(): """Run ontology mapping on a list of INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) om = OntologyMapper(stmts, wm_ontomap, scored=True, symmetric=False) om.map_statements() return _return_stmts(stmts)
python
def map_ontologies(): """Run ontology mapping on a list of INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) om = OntologyMapper(stmts, wm_ontomap, scored=True, symmetric=False) om.map_statements() return _return_stmts(stmts)
[ "def", "map_ontologies", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "om", "=", "OntologyMapper", "(", "stmts", ",", "wm_ontomap", ",", "scored", "=", "True", ",", "symmetric", "=", "False", ")", "om", ".", "map_statements", "(", ")", "return", "_return_stmts", "(", "stmts", ")" ]
Run ontology mapping on a list of INDRA Statements.
[ "Run", "ontology", "mapping", "on", "a", "list", "of", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L583-L593
train
sorgerlab/indra
rest_api/api.py
filter_by_type
def filter_by_type(): """Filter to a given INDRA Statement type.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmt_type_str = body.get('type') stmt_type_str = stmt_type_str.capitalize() stmt_type = getattr(sys.modules[__name__], stmt_type_str) stmts = stmts_from_json(stmts_json) stmts_out = ac.filter_by_type(stmts, stmt_type) return _return_stmts(stmts_out)
python
def filter_by_type(): """Filter to a given INDRA Statement type.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmt_type_str = body.get('type') stmt_type_str = stmt_type_str.capitalize() stmt_type = getattr(sys.modules[__name__], stmt_type_str) stmts = stmts_from_json(stmts_json) stmts_out = ac.filter_by_type(stmts, stmt_type) return _return_stmts(stmts_out)
[ "def", "filter_by_type", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmt_type_str", "=", "body", ".", "get", "(", "'type'", ")", "stmt_type_str", "=", "stmt_type_str", ".", "capitalize", "(", ")", "stmt_type", "=", "getattr", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "stmt_type_str", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "stmts_out", "=", "ac", ".", "filter_by_type", "(", "stmts", ",", "stmt_type", ")", "return", "_return_stmts", "(", "stmts_out", ")" ]
Filter to a given INDRA Statement type.
[ "Filter", "to", "a", "given", "INDRA", "Statement", "type", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L598-L610
train
sorgerlab/indra
rest_api/api.py
filter_grounded_only
def filter_grounded_only(): """Filter to grounded Statements only.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') score_threshold = body.get('score_threshold') if score_threshold is not None: score_threshold = float(score_threshold) stmts = stmts_from_json(stmts_json) stmts_out = ac.filter_grounded_only(stmts, score_threshold=score_threshold) return _return_stmts(stmts_out)
python
def filter_grounded_only(): """Filter to grounded Statements only.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') score_threshold = body.get('score_threshold') if score_threshold is not None: score_threshold = float(score_threshold) stmts = stmts_from_json(stmts_json) stmts_out = ac.filter_grounded_only(stmts, score_threshold=score_threshold) return _return_stmts(stmts_out)
[ "def", "filter_grounded_only", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "score_threshold", "=", "body", ".", "get", "(", "'score_threshold'", ")", "if", "score_threshold", "is", "not", "None", ":", "score_threshold", "=", "float", "(", "score_threshold", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "stmts_out", "=", "ac", ".", "filter_grounded_only", "(", "stmts", ",", "score_threshold", "=", "score_threshold", ")", "return", "_return_stmts", "(", "stmts_out", ")" ]
Filter to grounded Statements only.
[ "Filter", "to", "grounded", "Statements", "only", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L615-L627
train
sorgerlab/indra
rest_api/api.py
filter_belief
def filter_belief(): """Filter to beliefs above a given threshold.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') belief_cutoff = body.get('belief_cutoff') if belief_cutoff is not None: belief_cutoff = float(belief_cutoff) stmts = stmts_from_json(stmts_json) stmts_out = ac.filter_belief(stmts, belief_cutoff) return _return_stmts(stmts_out)
python
def filter_belief(): """Filter to beliefs above a given threshold.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') belief_cutoff = body.get('belief_cutoff') if belief_cutoff is not None: belief_cutoff = float(belief_cutoff) stmts = stmts_from_json(stmts_json) stmts_out = ac.filter_belief(stmts, belief_cutoff) return _return_stmts(stmts_out)
[ "def", "filter_belief", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "belief_cutoff", "=", "body", ".", "get", "(", "'belief_cutoff'", ")", "if", "belief_cutoff", "is", "not", "None", ":", "belief_cutoff", "=", "float", "(", "belief_cutoff", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "stmts_out", "=", "ac", ".", "filter_belief", "(", "stmts", ",", "belief_cutoff", ")", "return", "_return_stmts", "(", "stmts_out", ")" ]
Filter to beliefs above a given threshold.
[ "Filter", "to", "beliefs", "above", "a", "given", "threshold", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L632-L644
train
sorgerlab/indra
indra/util/get_version.py
get_git_info
def get_git_info(): """Get a dict with useful git info.""" start_dir = abspath(curdir) try: chdir(dirname(abspath(__file__))) re_patt_str = (r'commit\s+(?P<commit_hash>\w+).*?Author:\s+' r'(?P<author_name>.*?)\s+<(?P<author_email>.*?)>\s+Date:\s+' r'(?P<date>.*?)\n\s+(?P<commit_msg>.*?)(?:\ndiff.*?)?$') show_out = check_output(['git', 'show']).decode('ascii') revp_out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) revp_out = revp_out.decode('ascii').strip() m = re.search(re_patt_str, show_out, re.DOTALL) assert m is not None, \ "Regex pattern:\n\n\"%s\"\n\n failed to match string:\n\n\"%s\"" \ % (re_patt_str, show_out) ret_dict = m.groupdict() ret_dict['branch_name'] = revp_out finally: chdir(start_dir) return ret_dict
python
def get_git_info(): """Get a dict with useful git info.""" start_dir = abspath(curdir) try: chdir(dirname(abspath(__file__))) re_patt_str = (r'commit\s+(?P<commit_hash>\w+).*?Author:\s+' r'(?P<author_name>.*?)\s+<(?P<author_email>.*?)>\s+Date:\s+' r'(?P<date>.*?)\n\s+(?P<commit_msg>.*?)(?:\ndiff.*?)?$') show_out = check_output(['git', 'show']).decode('ascii') revp_out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) revp_out = revp_out.decode('ascii').strip() m = re.search(re_patt_str, show_out, re.DOTALL) assert m is not None, \ "Regex pattern:\n\n\"%s\"\n\n failed to match string:\n\n\"%s\"" \ % (re_patt_str, show_out) ret_dict = m.groupdict() ret_dict['branch_name'] = revp_out finally: chdir(start_dir) return ret_dict
[ "def", "get_git_info", "(", ")", ":", "start_dir", "=", "abspath", "(", "curdir", ")", "try", ":", "chdir", "(", "dirname", "(", "abspath", "(", "__file__", ")", ")", ")", "re_patt_str", "=", "(", "r'commit\\s+(?P<commit_hash>\\w+).*?Author:\\s+'", "r'(?P<author_name>.*?)\\s+<(?P<author_email>.*?)>\\s+Date:\\s+'", "r'(?P<date>.*?)\\n\\s+(?P<commit_msg>.*?)(?:\\ndiff.*?)?$'", ")", "show_out", "=", "check_output", "(", "[", "'git'", ",", "'show'", "]", ")", ".", "decode", "(", "'ascii'", ")", "revp_out", "=", "check_output", "(", "[", "'git'", ",", "'rev-parse'", ",", "'--abbrev-ref'", ",", "'HEAD'", "]", ")", "revp_out", "=", "revp_out", ".", "decode", "(", "'ascii'", ")", ".", "strip", "(", ")", "m", "=", "re", ".", "search", "(", "re_patt_str", ",", "show_out", ",", "re", ".", "DOTALL", ")", "assert", "m", "is", "not", "None", ",", "\"Regex pattern:\\n\\n\\\"%s\\\"\\n\\n failed to match string:\\n\\n\\\"%s\\\"\"", "%", "(", "re_patt_str", ",", "show_out", ")", "ret_dict", "=", "m", ".", "groupdict", "(", ")", "ret_dict", "[", "'branch_name'", "]", "=", "revp_out", "finally", ":", "chdir", "(", "start_dir", ")", "return", "ret_dict" ]
Get a dict with useful git info.
[ "Get", "a", "dict", "with", "useful", "git", "info", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/get_version.py#L18-L37
train
sorgerlab/indra
indra/util/get_version.py
get_version
def get_version(with_git_hash=True, refresh_hash=False): """Get an indra version string, including a git hash.""" version = __version__ if with_git_hash: global INDRA_GITHASH if INDRA_GITHASH is None or refresh_hash: with open(devnull, 'w') as nul: try: ret = check_output(['git', 'rev-parse', 'HEAD'], cwd=dirname(__file__), stderr=nul) except CalledProcessError: ret = 'UNHASHED' INDRA_GITHASH = ret.strip().decode('utf-8') version = '%s-%s' % (version, INDRA_GITHASH) return version
python
def get_version(with_git_hash=True, refresh_hash=False): """Get an indra version string, including a git hash.""" version = __version__ if with_git_hash: global INDRA_GITHASH if INDRA_GITHASH is None or refresh_hash: with open(devnull, 'w') as nul: try: ret = check_output(['git', 'rev-parse', 'HEAD'], cwd=dirname(__file__), stderr=nul) except CalledProcessError: ret = 'UNHASHED' INDRA_GITHASH = ret.strip().decode('utf-8') version = '%s-%s' % (version, INDRA_GITHASH) return version
[ "def", "get_version", "(", "with_git_hash", "=", "True", ",", "refresh_hash", "=", "False", ")", ":", "version", "=", "__version__", "if", "with_git_hash", ":", "global", "INDRA_GITHASH", "if", "INDRA_GITHASH", "is", "None", "or", "refresh_hash", ":", "with", "open", "(", "devnull", ",", "'w'", ")", "as", "nul", ":", "try", ":", "ret", "=", "check_output", "(", "[", "'git'", ",", "'rev-parse'", ",", "'HEAD'", "]", ",", "cwd", "=", "dirname", "(", "__file__", ")", ",", "stderr", "=", "nul", ")", "except", "CalledProcessError", ":", "ret", "=", "'UNHASHED'", "INDRA_GITHASH", "=", "ret", ".", "strip", "(", ")", ".", "decode", "(", "'utf-8'", ")", "version", "=", "'%s-%s'", "%", "(", "version", ",", "INDRA_GITHASH", ")", "return", "version" ]
Get an indra version string, including a git hash.
[ "Get", "an", "indra", "version", "string", "including", "a", "git", "hash", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/get_version.py#L40-L54
train
sorgerlab/indra
indra/assemblers/cx/assembler.py
_fix_evidence_text
def _fix_evidence_text(txt): """Eliminate some symbols to have cleaner supporting text.""" txt = re.sub('[ ]?\( xref \)', '', txt) # This is to make [ xref ] become [] to match the two readers txt = re.sub('\[ xref \]', '[]', txt) txt = re.sub('[\(]?XREF_BIBR[\)]?[,]?', '', txt) txt = re.sub('[\(]?XREF_FIG[\)]?[,]?', '', txt) txt = re.sub('[\(]?XREF_SUPPLEMENT[\)]?[,]?', '', txt) txt = txt.strip() return txt
python
def _fix_evidence_text(txt): """Eliminate some symbols to have cleaner supporting text.""" txt = re.sub('[ ]?\( xref \)', '', txt) # This is to make [ xref ] become [] to match the two readers txt = re.sub('\[ xref \]', '[]', txt) txt = re.sub('[\(]?XREF_BIBR[\)]?[,]?', '', txt) txt = re.sub('[\(]?XREF_FIG[\)]?[,]?', '', txt) txt = re.sub('[\(]?XREF_SUPPLEMENT[\)]?[,]?', '', txt) txt = txt.strip() return txt
[ "def", "_fix_evidence_text", "(", "txt", ")", ":", "txt", "=", "re", ".", "sub", "(", "'[ ]?\\( xref \\)'", ",", "''", ",", "txt", ")", "# This is to make [ xref ] become [] to match the two readers", "txt", "=", "re", ".", "sub", "(", "'\\[ xref \\]'", ",", "'[]'", ",", "txt", ")", "txt", "=", "re", ".", "sub", "(", "'[\\(]?XREF_BIBR[\\)]?[,]?'", ",", "''", ",", "txt", ")", "txt", "=", "re", ".", "sub", "(", "'[\\(]?XREF_FIG[\\)]?[,]?'", ",", "''", ",", "txt", ")", "txt", "=", "re", ".", "sub", "(", "'[\\(]?XREF_SUPPLEMENT[\\)]?[,]?'", ",", "''", ",", "txt", ")", "txt", "=", "txt", ".", "strip", "(", ")", "return", "txt" ]
Eliminate some symbols to have cleaner supporting text.
[ "Eliminate", "some", "symbols", "to", "have", "cleaner", "supporting", "text", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/assembler.py#L562-L571
train
sorgerlab/indra
indra/assemblers/cx/assembler.py
CxAssembler.make_model
def make_model(self, add_indra_json=True): """Assemble the CX network from the collected INDRA Statements. This method assembles a CX network from the set of INDRA Statements. The assembled network is set as the assembler's cx argument. Parameters ---------- add_indra_json : Optional[bool] If True, the INDRA Statement JSON annotation is added to each edge in the network. Default: True Returns ------- cx_str : str The json serialized CX model. """ self.add_indra_json = add_indra_json for stmt in self.statements: if isinstance(stmt, Modification): self._add_modification(stmt) if isinstance(stmt, SelfModification): self._add_self_modification(stmt) elif isinstance(stmt, RegulateActivity) or \ isinstance(stmt, RegulateAmount): self._add_regulation(stmt) elif isinstance(stmt, Complex): self._add_complex(stmt) elif isinstance(stmt, Gef): self._add_gef(stmt) elif isinstance(stmt, Gap): self._add_gap(stmt) elif isinstance(stmt, Influence): self._add_influence(stmt) network_description = '' self.cx['networkAttributes'].append({'n': 'name', 'v': self.network_name}) self.cx['networkAttributes'].append({'n': 'description', 'v': network_description}) cx_str = self.print_cx() return cx_str
python
def make_model(self, add_indra_json=True): """Assemble the CX network from the collected INDRA Statements. This method assembles a CX network from the set of INDRA Statements. The assembled network is set as the assembler's cx argument. Parameters ---------- add_indra_json : Optional[bool] If True, the INDRA Statement JSON annotation is added to each edge in the network. Default: True Returns ------- cx_str : str The json serialized CX model. """ self.add_indra_json = add_indra_json for stmt in self.statements: if isinstance(stmt, Modification): self._add_modification(stmt) if isinstance(stmt, SelfModification): self._add_self_modification(stmt) elif isinstance(stmt, RegulateActivity) or \ isinstance(stmt, RegulateAmount): self._add_regulation(stmt) elif isinstance(stmt, Complex): self._add_complex(stmt) elif isinstance(stmt, Gef): self._add_gef(stmt) elif isinstance(stmt, Gap): self._add_gap(stmt) elif isinstance(stmt, Influence): self._add_influence(stmt) network_description = '' self.cx['networkAttributes'].append({'n': 'name', 'v': self.network_name}) self.cx['networkAttributes'].append({'n': 'description', 'v': network_description}) cx_str = self.print_cx() return cx_str
[ "def", "make_model", "(", "self", ",", "add_indra_json", "=", "True", ")", ":", "self", ".", "add_indra_json", "=", "add_indra_json", "for", "stmt", "in", "self", ".", "statements", ":", "if", "isinstance", "(", "stmt", ",", "Modification", ")", ":", "self", ".", "_add_modification", "(", "stmt", ")", "if", "isinstance", "(", "stmt", ",", "SelfModification", ")", ":", "self", ".", "_add_self_modification", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "RegulateActivity", ")", "or", "isinstance", "(", "stmt", ",", "RegulateAmount", ")", ":", "self", ".", "_add_regulation", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "Complex", ")", ":", "self", ".", "_add_complex", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "Gef", ")", ":", "self", ".", "_add_gef", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "Gap", ")", ":", "self", ".", "_add_gap", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "Influence", ")", ":", "self", ".", "_add_influence", "(", "stmt", ")", "network_description", "=", "''", "self", ".", "cx", "[", "'networkAttributes'", "]", ".", "append", "(", "{", "'n'", ":", "'name'", ",", "'v'", ":", "self", ".", "network_name", "}", ")", "self", ".", "cx", "[", "'networkAttributes'", "]", ".", "append", "(", "{", "'n'", ":", "'description'", ",", "'v'", ":", "network_description", "}", ")", "cx_str", "=", "self", ".", "print_cx", "(", ")", "return", "cx_str" ]
Assemble the CX network from the collected INDRA Statements. This method assembles a CX network from the set of INDRA Statements. The assembled network is set as the assembler's cx argument. Parameters ---------- add_indra_json : Optional[bool] If True, the INDRA Statement JSON annotation is added to each edge in the network. Default: True Returns ------- cx_str : str The json serialized CX model.
[ "Assemble", "the", "CX", "network", "from", "the", "collected", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/assembler.py#L75-L115
train
sorgerlab/indra
indra/assemblers/cx/assembler.py
CxAssembler.print_cx
def print_cx(self, pretty=True): """Return the assembled CX network as a json string. Parameters ---------- pretty : bool If True, the CX string is formatted with indentation (for human viewing) otherwise no indentation is used. Returns ------- json_str : str A json formatted string representation of the CX network. """ def _get_aspect_metadata(aspect): count = len(self.cx.get(aspect)) if self.cx.get(aspect) else 0 if not count: return None data = {'name': aspect, 'idCounter': self._id_counter, 'consistencyGroup': 1, 'elementCount': count} return data full_cx = OrderedDict() full_cx['numberVerification'] = [{'longNumber': 281474976710655}] aspects = ['nodes', 'edges', 'supports', 'citations', 'edgeAttributes', 'edgeCitations', 'edgeSupports', 'networkAttributes', 'nodeAttributes', 'cartesianLayout'] full_cx['metaData'] = [] for aspect in aspects: metadata = _get_aspect_metadata(aspect) if metadata: full_cx['metaData'].append(metadata) for k, v in self.cx.items(): full_cx[k] = v full_cx['status'] = [{'error': '', 'success': True}] full_cx = [{k: v} for k, v in full_cx.items()] if pretty: json_str = json.dumps(full_cx, indent=2) else: json_str = json.dumps(full_cx) return json_str
python
def print_cx(self, pretty=True): """Return the assembled CX network as a json string. Parameters ---------- pretty : bool If True, the CX string is formatted with indentation (for human viewing) otherwise no indentation is used. Returns ------- json_str : str A json formatted string representation of the CX network. """ def _get_aspect_metadata(aspect): count = len(self.cx.get(aspect)) if self.cx.get(aspect) else 0 if not count: return None data = {'name': aspect, 'idCounter': self._id_counter, 'consistencyGroup': 1, 'elementCount': count} return data full_cx = OrderedDict() full_cx['numberVerification'] = [{'longNumber': 281474976710655}] aspects = ['nodes', 'edges', 'supports', 'citations', 'edgeAttributes', 'edgeCitations', 'edgeSupports', 'networkAttributes', 'nodeAttributes', 'cartesianLayout'] full_cx['metaData'] = [] for aspect in aspects: metadata = _get_aspect_metadata(aspect) if metadata: full_cx['metaData'].append(metadata) for k, v in self.cx.items(): full_cx[k] = v full_cx['status'] = [{'error': '', 'success': True}] full_cx = [{k: v} for k, v in full_cx.items()] if pretty: json_str = json.dumps(full_cx, indent=2) else: json_str = json.dumps(full_cx) return json_str
[ "def", "print_cx", "(", "self", ",", "pretty", "=", "True", ")", ":", "def", "_get_aspect_metadata", "(", "aspect", ")", ":", "count", "=", "len", "(", "self", ".", "cx", ".", "get", "(", "aspect", ")", ")", "if", "self", ".", "cx", ".", "get", "(", "aspect", ")", "else", "0", "if", "not", "count", ":", "return", "None", "data", "=", "{", "'name'", ":", "aspect", ",", "'idCounter'", ":", "self", ".", "_id_counter", ",", "'consistencyGroup'", ":", "1", ",", "'elementCount'", ":", "count", "}", "return", "data", "full_cx", "=", "OrderedDict", "(", ")", "full_cx", "[", "'numberVerification'", "]", "=", "[", "{", "'longNumber'", ":", "281474976710655", "}", "]", "aspects", "=", "[", "'nodes'", ",", "'edges'", ",", "'supports'", ",", "'citations'", ",", "'edgeAttributes'", ",", "'edgeCitations'", ",", "'edgeSupports'", ",", "'networkAttributes'", ",", "'nodeAttributes'", ",", "'cartesianLayout'", "]", "full_cx", "[", "'metaData'", "]", "=", "[", "]", "for", "aspect", "in", "aspects", ":", "metadata", "=", "_get_aspect_metadata", "(", "aspect", ")", "if", "metadata", ":", "full_cx", "[", "'metaData'", "]", ".", "append", "(", "metadata", ")", "for", "k", ",", "v", "in", "self", ".", "cx", ".", "items", "(", ")", ":", "full_cx", "[", "k", "]", "=", "v", "full_cx", "[", "'status'", "]", "=", "[", "{", "'error'", ":", "''", ",", "'success'", ":", "True", "}", "]", "full_cx", "=", "[", "{", "k", ":", "v", "}", "for", "k", ",", "v", "in", "full_cx", ".", "items", "(", ")", "]", "if", "pretty", ":", "json_str", "=", "json", ".", "dumps", "(", "full_cx", ",", "indent", "=", "2", ")", "else", ":", "json_str", "=", "json", ".", "dumps", "(", "full_cx", ")", "return", "json_str" ]
Return the assembled CX network as a json string. Parameters ---------- pretty : bool If True, the CX string is formatted with indentation (for human viewing) otherwise no indentation is used. Returns ------- json_str : str A json formatted string representation of the CX network.
[ "Return", "the", "assembled", "CX", "network", "as", "a", "json", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/assembler.py#L117-L158
train
sorgerlab/indra
indra/assemblers/cx/assembler.py
CxAssembler.save_model
def save_model(self, file_name='model.cx'): """Save the assembled CX network in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the CX network to. Default: model.cx """ with open(file_name, 'wt') as fh: cx_str = self.print_cx() fh.write(cx_str)
python
def save_model(self, file_name='model.cx'): """Save the assembled CX network in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the CX network to. Default: model.cx """ with open(file_name, 'wt') as fh: cx_str = self.print_cx() fh.write(cx_str)
[ "def", "save_model", "(", "self", ",", "file_name", "=", "'model.cx'", ")", ":", "with", "open", "(", "file_name", ",", "'wt'", ")", "as", "fh", ":", "cx_str", "=", "self", ".", "print_cx", "(", ")", "fh", ".", "write", "(", "cx_str", ")" ]
Save the assembled CX network in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the CX network to. Default: model.cx
[ "Save", "the", "assembled", "CX", "network", "in", "a", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/assembler.py#L160-L170
train
sorgerlab/indra
indra/assemblers/cx/assembler.py
CxAssembler.set_context
def set_context(self, cell_type): """Set protein expression data and mutational status as node attribute This method uses :py:mod:`indra.databases.context_client` to get protein expression levels and mutational status for a given cell type and set a node attribute for proteins accordingly. Parameters ---------- cell_type : str Cell type name for which expression levels are queried. The cell type name follows the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST """ node_names = [node['n'] for node in self.cx['nodes']] res_expr = context_client.get_protein_expression(node_names, [cell_type]) res_mut = context_client.get_mutations(node_names, [cell_type]) res_expr = res_expr.get(cell_type) res_mut = res_mut.get(cell_type) if not res_expr: msg = 'Could not get protein expression for %s cell type.' % \ cell_type logger.warning(msg) if not res_mut: msg = 'Could not get mutational status for %s cell type.' % \ cell_type logger.warning(msg) if not res_expr and not res_mut: return self.cx['networkAttributes'].append({'n': 'cellular_context', 'v': cell_type}) counter = 0 for node in self.cx['nodes']: amount = res_expr.get(node['n']) mut = res_mut.get(node['n']) if amount is not None: node_attribute = {'po': node['@id'], 'n': 'expression_amount', 'v': int(amount)} self.cx['nodeAttributes'].append(node_attribute) if mut is not None: is_mutated = 1 if mut else 0 node_attribute = {'po': node['@id'], 'n': 'is_mutated', 'v': is_mutated} self.cx['nodeAttributes'].append(node_attribute) if mut is not None or amount is not None: counter += 1 logger.info('Set context for %d nodes.' % counter)
python
def set_context(self, cell_type): """Set protein expression data and mutational status as node attribute This method uses :py:mod:`indra.databases.context_client` to get protein expression levels and mutational status for a given cell type and set a node attribute for proteins accordingly. Parameters ---------- cell_type : str Cell type name for which expression levels are queried. The cell type name follows the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST """ node_names = [node['n'] for node in self.cx['nodes']] res_expr = context_client.get_protein_expression(node_names, [cell_type]) res_mut = context_client.get_mutations(node_names, [cell_type]) res_expr = res_expr.get(cell_type) res_mut = res_mut.get(cell_type) if not res_expr: msg = 'Could not get protein expression for %s cell type.' % \ cell_type logger.warning(msg) if not res_mut: msg = 'Could not get mutational status for %s cell type.' % \ cell_type logger.warning(msg) if not res_expr and not res_mut: return self.cx['networkAttributes'].append({'n': 'cellular_context', 'v': cell_type}) counter = 0 for node in self.cx['nodes']: amount = res_expr.get(node['n']) mut = res_mut.get(node['n']) if amount is not None: node_attribute = {'po': node['@id'], 'n': 'expression_amount', 'v': int(amount)} self.cx['nodeAttributes'].append(node_attribute) if mut is not None: is_mutated = 1 if mut else 0 node_attribute = {'po': node['@id'], 'n': 'is_mutated', 'v': is_mutated} self.cx['nodeAttributes'].append(node_attribute) if mut is not None or amount is not None: counter += 1 logger.info('Set context for %d nodes.' % counter)
[ "def", "set_context", "(", "self", ",", "cell_type", ")", ":", "node_names", "=", "[", "node", "[", "'n'", "]", "for", "node", "in", "self", ".", "cx", "[", "'nodes'", "]", "]", "res_expr", "=", "context_client", ".", "get_protein_expression", "(", "node_names", ",", "[", "cell_type", "]", ")", "res_mut", "=", "context_client", ".", "get_mutations", "(", "node_names", ",", "[", "cell_type", "]", ")", "res_expr", "=", "res_expr", ".", "get", "(", "cell_type", ")", "res_mut", "=", "res_mut", ".", "get", "(", "cell_type", ")", "if", "not", "res_expr", ":", "msg", "=", "'Could not get protein expression for %s cell type.'", "%", "cell_type", "logger", ".", "warning", "(", "msg", ")", "if", "not", "res_mut", ":", "msg", "=", "'Could not get mutational status for %s cell type.'", "%", "cell_type", "logger", ".", "warning", "(", "msg", ")", "if", "not", "res_expr", "and", "not", "res_mut", ":", "return", "self", ".", "cx", "[", "'networkAttributes'", "]", ".", "append", "(", "{", "'n'", ":", "'cellular_context'", ",", "'v'", ":", "cell_type", "}", ")", "counter", "=", "0", "for", "node", "in", "self", ".", "cx", "[", "'nodes'", "]", ":", "amount", "=", "res_expr", ".", "get", "(", "node", "[", "'n'", "]", ")", "mut", "=", "res_mut", ".", "get", "(", "node", "[", "'n'", "]", ")", "if", "amount", "is", "not", "None", ":", "node_attribute", "=", "{", "'po'", ":", "node", "[", "'@id'", "]", ",", "'n'", ":", "'expression_amount'", ",", "'v'", ":", "int", "(", "amount", ")", "}", "self", ".", "cx", "[", "'nodeAttributes'", "]", ".", "append", "(", "node_attribute", ")", "if", "mut", "is", "not", "None", ":", "is_mutated", "=", "1", "if", "mut", "else", "0", "node_attribute", "=", "{", "'po'", ":", "node", "[", "'@id'", "]", ",", "'n'", ":", "'is_mutated'", ",", "'v'", ":", "is_mutated", "}", "self", ".", "cx", "[", "'nodeAttributes'", "]", ".", "append", "(", "node_attribute", ")", "if", "mut", "is", "not", "None", "or", "amount", "is", "not", "None", ":", "counter", "+=", 
"1", "logger", ".", "info", "(", "'Set context for %d nodes.'", "%", "counter", ")" ]
Set protein expression data and mutational status as node attribute This method uses :py:mod:`indra.databases.context_client` to get protein expression levels and mutational status for a given cell type and set a node attribute for proteins accordingly. Parameters ---------- cell_type : str Cell type name for which expression levels are queried. The cell type name follows the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST
[ "Set", "protein", "expression", "data", "and", "mutational", "status", "as", "node", "attribute" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/assembler.py#L212-L265
train
sorgerlab/indra
indra/databases/biogrid_client.py
get_publications
def get_publications(gene_names, save_json_name=None): """Return evidence publications for interaction between the given genes. Parameters ---------- gene_names : list[str] A list of gene names (HGNC symbols) to query interactions between. Currently supports exactly two genes only. save_json_name : Optional[str] A file name to save the raw BioGRID web service output in. By default, the raw output is not saved. Return ------ publications : list[Publication] A list of Publication objects that provide evidence for interactions between the given list of genes. """ if len(gene_names) != 2: logger.warning('Other than 2 gene names given.') return [] res_dict = _send_request(gene_names) if not res_dict: return [] if save_json_name is not None: # The json module produces strings, not bytes, so the file should be # opened in text mode with open(save_json_name, 'wt') as fh: json.dump(res_dict, fh, indent=1) publications = _extract_publications(res_dict, gene_names) return publications
python
def get_publications(gene_names, save_json_name=None): """Return evidence publications for interaction between the given genes. Parameters ---------- gene_names : list[str] A list of gene names (HGNC symbols) to query interactions between. Currently supports exactly two genes only. save_json_name : Optional[str] A file name to save the raw BioGRID web service output in. By default, the raw output is not saved. Return ------ publications : list[Publication] A list of Publication objects that provide evidence for interactions between the given list of genes. """ if len(gene_names) != 2: logger.warning('Other than 2 gene names given.') return [] res_dict = _send_request(gene_names) if not res_dict: return [] if save_json_name is not None: # The json module produces strings, not bytes, so the file should be # opened in text mode with open(save_json_name, 'wt') as fh: json.dump(res_dict, fh, indent=1) publications = _extract_publications(res_dict, gene_names) return publications
[ "def", "get_publications", "(", "gene_names", ",", "save_json_name", "=", "None", ")", ":", "if", "len", "(", "gene_names", ")", "!=", "2", ":", "logger", ".", "warning", "(", "'Other than 2 gene names given.'", ")", "return", "[", "]", "res_dict", "=", "_send_request", "(", "gene_names", ")", "if", "not", "res_dict", ":", "return", "[", "]", "if", "save_json_name", "is", "not", "None", ":", "# The json module produces strings, not bytes, so the file should be", "# opened in text mode", "with", "open", "(", "save_json_name", ",", "'wt'", ")", "as", "fh", ":", "json", ".", "dump", "(", "res_dict", ",", "fh", ",", "indent", "=", "1", ")", "publications", "=", "_extract_publications", "(", "res_dict", ",", "gene_names", ")", "return", "publications" ]
Return evidence publications for interaction between the given genes. Parameters ---------- gene_names : list[str] A list of gene names (HGNC symbols) to query interactions between. Currently supports exactly two genes only. save_json_name : Optional[str] A file name to save the raw BioGRID web service output in. By default, the raw output is not saved. Return ------ publications : list[Publication] A list of Publication objects that provide evidence for interactions between the given list of genes.
[ "Return", "evidence", "publications", "for", "interaction", "between", "the", "given", "genes", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/biogrid_client.py#L90-L120
train
sorgerlab/indra
indra/assemblers/pysb/common.py
_n
def _n(name): """Return valid PySB name.""" n = name.encode('ascii', errors='ignore').decode('ascii') n = re.sub('[^A-Za-z0-9_]', '_', n) n = re.sub(r'(^[0-9].*)', r'p\1', n) return n
python
def _n(name): """Return valid PySB name.""" n = name.encode('ascii', errors='ignore').decode('ascii') n = re.sub('[^A-Za-z0-9_]', '_', n) n = re.sub(r'(^[0-9].*)', r'p\1', n) return n
[ "def", "_n", "(", "name", ")", ":", "n", "=", "name", ".", "encode", "(", "'ascii'", ",", "errors", "=", "'ignore'", ")", ".", "decode", "(", "'ascii'", ")", "n", "=", "re", ".", "sub", "(", "'[^A-Za-z0-9_]'", ",", "'_'", ",", "n", ")", "n", "=", "re", ".", "sub", "(", "r'(^[0-9].*)'", ",", "r'p\\1'", ",", "n", ")", "return", "n" ]
Return valid PySB name.
[ "Return", "valid", "PySB", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/common.py#L5-L10
train
sorgerlab/indra
indra/sources/indra_db_rest/processor.py
IndraDBRestProcessor.get_hash_statements_dict
def get_hash_statements_dict(self): """Return a dict of Statements keyed by hashes.""" res = {stmt_hash: stmts_from_json([stmt])[0] for stmt_hash, stmt in self.__statement_jsons.items()} return res
python
def get_hash_statements_dict(self): """Return a dict of Statements keyed by hashes.""" res = {stmt_hash: stmts_from_json([stmt])[0] for stmt_hash, stmt in self.__statement_jsons.items()} return res
[ "def", "get_hash_statements_dict", "(", "self", ")", ":", "res", "=", "{", "stmt_hash", ":", "stmts_from_json", "(", "[", "stmt", "]", ")", "[", "0", "]", "for", "stmt_hash", ",", "stmt", "in", "self", ".", "__statement_jsons", ".", "items", "(", ")", "}", "return", "res" ]
Return a dict of Statements keyed by hashes.
[ "Return", "a", "dict", "of", "Statements", "keyed", "by", "hashes", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/processor.py#L159-L163
train
sorgerlab/indra
indra/sources/indra_db_rest/processor.py
IndraDBRestProcessor.merge_results
def merge_results(self, other_processor): """Merge the results of this processor with those of another.""" if not isinstance(other_processor, self.__class__): raise ValueError("Can only extend with another %s instance." % self.__class__.__name__) self.statements.extend(other_processor.statements) if other_processor.statements_sample is not None: if self.statements_sample is None: self.statements_sample = other_processor.statements_sample else: self.statements_sample.extend(other_processor.statements_sample) self._merge_json(other_processor.__statement_jsons, other_processor.__evidence_counts) return
python
def merge_results(self, other_processor): """Merge the results of this processor with those of another.""" if not isinstance(other_processor, self.__class__): raise ValueError("Can only extend with another %s instance." % self.__class__.__name__) self.statements.extend(other_processor.statements) if other_processor.statements_sample is not None: if self.statements_sample is None: self.statements_sample = other_processor.statements_sample else: self.statements_sample.extend(other_processor.statements_sample) self._merge_json(other_processor.__statement_jsons, other_processor.__evidence_counts) return
[ "def", "merge_results", "(", "self", ",", "other_processor", ")", ":", "if", "not", "isinstance", "(", "other_processor", ",", "self", ".", "__class__", ")", ":", "raise", "ValueError", "(", "\"Can only extend with another %s instance.\"", "%", "self", ".", "__class__", ".", "__name__", ")", "self", ".", "statements", ".", "extend", "(", "other_processor", ".", "statements", ")", "if", "other_processor", ".", "statements_sample", "is", "not", "None", ":", "if", "self", ".", "statements_sample", "is", "None", ":", "self", ".", "statements_sample", "=", "other_processor", ".", "statements_sample", "else", ":", "self", ".", "statements_sample", ".", "extend", "(", "other_processor", ".", "statements_sample", ")", "self", ".", "_merge_json", "(", "other_processor", ".", "__statement_jsons", ",", "other_processor", ".", "__evidence_counts", ")", "return" ]
Merge the results of this processor with those of another.
[ "Merge", "the", "results", "of", "this", "processor", "with", "those", "of", "another", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/processor.py#L165-L179
train
sorgerlab/indra
indra/sources/indra_db_rest/processor.py
IndraDBRestProcessor.wait_until_done
def wait_until_done(self, timeout=None): """Wait for the background load to complete.""" start = datetime.now() if not self.__th: raise IndraDBRestResponseError("There is no thread waiting to " "complete.") self.__th.join(timeout) now = datetime.now() dt = now - start if self.__th.is_alive(): logger.warning("Timed out after %0.3f seconds waiting for " "statement load to complete." % dt.total_seconds()) ret = False else: logger.info("Waited %0.3f seconds for statements to finish loading." % dt.total_seconds()) ret = True return ret
python
def wait_until_done(self, timeout=None): """Wait for the background load to complete.""" start = datetime.now() if not self.__th: raise IndraDBRestResponseError("There is no thread waiting to " "complete.") self.__th.join(timeout) now = datetime.now() dt = now - start if self.__th.is_alive(): logger.warning("Timed out after %0.3f seconds waiting for " "statement load to complete." % dt.total_seconds()) ret = False else: logger.info("Waited %0.3f seconds for statements to finish loading." % dt.total_seconds()) ret = True return ret
[ "def", "wait_until_done", "(", "self", ",", "timeout", "=", "None", ")", ":", "start", "=", "datetime", ".", "now", "(", ")", "if", "not", "self", ".", "__th", ":", "raise", "IndraDBRestResponseError", "(", "\"There is no thread waiting to \"", "\"complete.\"", ")", "self", ".", "__th", ".", "join", "(", "timeout", ")", "now", "=", "datetime", ".", "now", "(", ")", "dt", "=", "now", "-", "start", "if", "self", ".", "__th", ".", "is_alive", "(", ")", ":", "logger", ".", "warning", "(", "\"Timed out after %0.3f seconds waiting for \"", "\"statement load to complete.\"", "%", "dt", ".", "total_seconds", "(", ")", ")", "ret", "=", "False", "else", ":", "logger", ".", "info", "(", "\"Waited %0.3f seconds for statements to finish loading.\"", "%", "dt", ".", "total_seconds", "(", ")", ")", "ret", "=", "True", "return", "ret" ]
Wait for the background load to complete.
[ "Wait", "for", "the", "background", "load", "to", "complete", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/processor.py#L181-L198
train
sorgerlab/indra
indra/sources/indra_db_rest/processor.py
IndraDBRestProcessor._merge_json
def _merge_json(self, stmt_json, ev_counts): """Merge these statement jsons with new jsons.""" # Where there is overlap, there _should_ be agreement. self.__evidence_counts.update(ev_counts) for k, sj in stmt_json.items(): if k not in self.__statement_jsons: self.__statement_jsons[k] = sj # This should be most of them else: # This should only happen rarely. for evj in sj['evidence']: self.__statement_jsons[k]['evidence'].append(evj) if not self.__started: self.statements_sample = stmts_from_json( self.__statement_jsons.values()) self.__started = True return
python
def _merge_json(self, stmt_json, ev_counts): """Merge these statement jsons with new jsons.""" # Where there is overlap, there _should_ be agreement. self.__evidence_counts.update(ev_counts) for k, sj in stmt_json.items(): if k not in self.__statement_jsons: self.__statement_jsons[k] = sj # This should be most of them else: # This should only happen rarely. for evj in sj['evidence']: self.__statement_jsons[k]['evidence'].append(evj) if not self.__started: self.statements_sample = stmts_from_json( self.__statement_jsons.values()) self.__started = True return
[ "def", "_merge_json", "(", "self", ",", "stmt_json", ",", "ev_counts", ")", ":", "# Where there is overlap, there _should_ be agreement.", "self", ".", "__evidence_counts", ".", "update", "(", "ev_counts", ")", "for", "k", ",", "sj", "in", "stmt_json", ".", "items", "(", ")", ":", "if", "k", "not", "in", "self", ".", "__statement_jsons", ":", "self", ".", "__statement_jsons", "[", "k", "]", "=", "sj", "# This should be most of them", "else", ":", "# This should only happen rarely.", "for", "evj", "in", "sj", "[", "'evidence'", "]", ":", "self", ".", "__statement_jsons", "[", "k", "]", "[", "'evidence'", "]", ".", "append", "(", "evj", ")", "if", "not", "self", ".", "__started", ":", "self", ".", "statements_sample", "=", "stmts_from_json", "(", "self", ".", "__statement_jsons", ".", "values", "(", ")", ")", "self", ".", "__started", "=", "True", "return" ]
Merge these statement jsons with new jsons.
[ "Merge", "these", "statement", "jsons", "with", "new", "jsons", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/processor.py#L200-L217
train
sorgerlab/indra
indra/sources/indra_db_rest/processor.py
IndraDBRestProcessor._run_queries
def _run_queries(self, agent_strs, stmt_types, params, persist): """Use paging to get all statements requested.""" self._query_over_statement_types(agent_strs, stmt_types, params) assert len(self.__done_dict) == len(stmt_types) \ or None in self.__done_dict.keys(), \ "Done dict was not initiated for all stmt_type's." # Check if we want to keep going. if not persist: self._compile_statements() return # Get the rest of the content. while not self._all_done(): self._query_over_statement_types(agent_strs, stmt_types, params) # Create the actual statements. self._compile_statements() return
python
def _run_queries(self, agent_strs, stmt_types, params, persist): """Use paging to get all statements requested.""" self._query_over_statement_types(agent_strs, stmt_types, params) assert len(self.__done_dict) == len(stmt_types) \ or None in self.__done_dict.keys(), \ "Done dict was not initiated for all stmt_type's." # Check if we want to keep going. if not persist: self._compile_statements() return # Get the rest of the content. while not self._all_done(): self._query_over_statement_types(agent_strs, stmt_types, params) # Create the actual statements. self._compile_statements() return
[ "def", "_run_queries", "(", "self", ",", "agent_strs", ",", "stmt_types", ",", "params", ",", "persist", ")", ":", "self", ".", "_query_over_statement_types", "(", "agent_strs", ",", "stmt_types", ",", "params", ")", "assert", "len", "(", "self", ".", "__done_dict", ")", "==", "len", "(", "stmt_types", ")", "or", "None", "in", "self", ".", "__done_dict", ".", "keys", "(", ")", ",", "\"Done dict was not initiated for all stmt_type's.\"", "# Check if we want to keep going.", "if", "not", "persist", ":", "self", ".", "_compile_statements", "(", ")", "return", "# Get the rest of the content.", "while", "not", "self", ".", "_all_done", "(", ")", ":", "self", ".", "_query_over_statement_types", "(", "agent_strs", ",", "stmt_types", ",", "params", ")", "# Create the actual statements.", "self", ".", "_compile_statements", "(", ")", "return" ]
Use paging to get all statements requested.
[ "Use", "paging", "to", "get", "all", "statements", "requested", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/indra_db_rest/processor.py#L274-L293
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_ids
def get_ids(search_term, **kwargs): """Search Pubmed for paper IDs given a search term. Search options can be passed as keyword arguments, some of which are custom keywords identified by this function, while others are passed on as parameters for the request to the PubMed web service For details on parameters that can be used in PubMed searches, see https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch Some useful parameters to pass are db='pmc' to search PMC instead of pubmed reldate=2 to search for papers within the last 2 days mindate='2016/03/01', maxdate='2016/03/31' to search for papers in March 2016. PubMed, by default, limits returned PMIDs to a small number, and this number can be controlled by the "retmax" parameter. This function uses a retmax value of 100,000 by default that can be changed via the corresponding keyword argument. Parameters ---------- search_term : str A term for which the PubMed search should be performed. use_text_word : Optional[bool] If True, the "[tw]" string is appended to the search term to constrain the search to "text words", that is words that appear as whole in relevant parts of the PubMed entry (excl. for instance the journal name or publication date) like the title and abstract. Using this option can eliminate spurious search results such as all articles published in June for a search for the "JUN" gene, or journal names that contain Acad for a search for the "ACAD" gene. See also: https://www.nlm.nih.gov/bsd/disted/pubmedtutorial/020_760.html Default : True kwargs : kwargs Additional keyword arguments to pass to the PubMed search as parameters. 
""" use_text_word = kwargs.pop('use_text_word', True) if use_text_word: search_term += '[tw]' params = {'term': search_term, 'retmax': 100000, 'retstart': 0, 'db': 'pubmed', 'sort': 'pub+date'} params.update(kwargs) tree = send_request(pubmed_search, params) if tree is None: return [] if tree.find('ERROR') is not None: logger.error(tree.find('ERROR').text) return [] if tree.find('ErrorList') is not None: for err in tree.find('ErrorList').getchildren(): logger.error('Error - %s: %s' % (err.tag, err.text)) return [] count = int(tree.find('Count').text) id_terms = tree.findall('IdList/Id') if id_terms is None: return [] ids = [idt.text for idt in id_terms] if count != len(ids): logger.warning('Not all ids were retrieved for search %s;\n' 'limited at %d.' % (search_term, params['retmax'])) return ids
python
def get_ids(search_term, **kwargs): """Search Pubmed for paper IDs given a search term. Search options can be passed as keyword arguments, some of which are custom keywords identified by this function, while others are passed on as parameters for the request to the PubMed web service For details on parameters that can be used in PubMed searches, see https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch Some useful parameters to pass are db='pmc' to search PMC instead of pubmed reldate=2 to search for papers within the last 2 days mindate='2016/03/01', maxdate='2016/03/31' to search for papers in March 2016. PubMed, by default, limits returned PMIDs to a small number, and this number can be controlled by the "retmax" parameter. This function uses a retmax value of 100,000 by default that can be changed via the corresponding keyword argument. Parameters ---------- search_term : str A term for which the PubMed search should be performed. use_text_word : Optional[bool] If True, the "[tw]" string is appended to the search term to constrain the search to "text words", that is words that appear as whole in relevant parts of the PubMed entry (excl. for instance the journal name or publication date) like the title and abstract. Using this option can eliminate spurious search results such as all articles published in June for a search for the "JUN" gene, or journal names that contain Acad for a search for the "ACAD" gene. See also: https://www.nlm.nih.gov/bsd/disted/pubmedtutorial/020_760.html Default : True kwargs : kwargs Additional keyword arguments to pass to the PubMed search as parameters. 
""" use_text_word = kwargs.pop('use_text_word', True) if use_text_word: search_term += '[tw]' params = {'term': search_term, 'retmax': 100000, 'retstart': 0, 'db': 'pubmed', 'sort': 'pub+date'} params.update(kwargs) tree = send_request(pubmed_search, params) if tree is None: return [] if tree.find('ERROR') is not None: logger.error(tree.find('ERROR').text) return [] if tree.find('ErrorList') is not None: for err in tree.find('ErrorList').getchildren(): logger.error('Error - %s: %s' % (err.tag, err.text)) return [] count = int(tree.find('Count').text) id_terms = tree.findall('IdList/Id') if id_terms is None: return [] ids = [idt.text for idt in id_terms] if count != len(ids): logger.warning('Not all ids were retrieved for search %s;\n' 'limited at %d.' % (search_term, params['retmax'])) return ids
[ "def", "get_ids", "(", "search_term", ",", "*", "*", "kwargs", ")", ":", "use_text_word", "=", "kwargs", ".", "pop", "(", "'use_text_word'", ",", "True", ")", "if", "use_text_word", ":", "search_term", "+=", "'[tw]'", "params", "=", "{", "'term'", ":", "search_term", ",", "'retmax'", ":", "100000", ",", "'retstart'", ":", "0", ",", "'db'", ":", "'pubmed'", ",", "'sort'", ":", "'pub+date'", "}", "params", ".", "update", "(", "kwargs", ")", "tree", "=", "send_request", "(", "pubmed_search", ",", "params", ")", "if", "tree", "is", "None", ":", "return", "[", "]", "if", "tree", ".", "find", "(", "'ERROR'", ")", "is", "not", "None", ":", "logger", ".", "error", "(", "tree", ".", "find", "(", "'ERROR'", ")", ".", "text", ")", "return", "[", "]", "if", "tree", ".", "find", "(", "'ErrorList'", ")", "is", "not", "None", ":", "for", "err", "in", "tree", ".", "find", "(", "'ErrorList'", ")", ".", "getchildren", "(", ")", ":", "logger", ".", "error", "(", "'Error - %s: %s'", "%", "(", "err", ".", "tag", ",", "err", ".", "text", ")", ")", "return", "[", "]", "count", "=", "int", "(", "tree", ".", "find", "(", "'Count'", ")", ".", "text", ")", "id_terms", "=", "tree", ".", "findall", "(", "'IdList/Id'", ")", "if", "id_terms", "is", "None", ":", "return", "[", "]", "ids", "=", "[", "idt", ".", "text", "for", "idt", "in", "id_terms", "]", "if", "count", "!=", "len", "(", "ids", ")", ":", "logger", ".", "warning", "(", "'Not all ids were retrieved for search %s;\\n'", "'limited at %d.'", "%", "(", "search_term", ",", "params", "[", "'retmax'", "]", ")", ")", "return", "ids" ]
Search Pubmed for paper IDs given a search term. Search options can be passed as keyword arguments, some of which are custom keywords identified by this function, while others are passed on as parameters for the request to the PubMed web service For details on parameters that can be used in PubMed searches, see https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch Some useful parameters to pass are db='pmc' to search PMC instead of pubmed reldate=2 to search for papers within the last 2 days mindate='2016/03/01', maxdate='2016/03/31' to search for papers in March 2016. PubMed, by default, limits returned PMIDs to a small number, and this number can be controlled by the "retmax" parameter. This function uses a retmax value of 100,000 by default that can be changed via the corresponding keyword argument. Parameters ---------- search_term : str A term for which the PubMed search should be performed. use_text_word : Optional[bool] If True, the "[tw]" string is appended to the search term to constrain the search to "text words", that is words that appear as whole in relevant parts of the PubMed entry (excl. for instance the journal name or publication date) like the title and abstract. Using this option can eliminate spurious search results such as all articles published in June for a search for the "JUN" gene, or journal names that contain Acad for a search for the "ACAD" gene. See also: https://www.nlm.nih.gov/bsd/disted/pubmedtutorial/020_760.html Default : True kwargs : kwargs Additional keyword arguments to pass to the PubMed search as parameters.
[ "Search", "Pubmed", "for", "paper", "IDs", "given", "a", "search", "term", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L41-L103
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_id_count
def get_id_count(search_term): """Get the number of citations in Pubmed for a search query. Parameters ---------- search_term : str A term for which the PubMed search should be performed. Returns ------- int or None The number of citations for the query, or None if the query fails. """ params = {'term': search_term, 'rettype': 'count', 'db': 'pubmed'} tree = send_request(pubmed_search, params) if tree is None: return None else: count = tree.getchildren()[0].text return int(count)
python
def get_id_count(search_term): """Get the number of citations in Pubmed for a search query. Parameters ---------- search_term : str A term for which the PubMed search should be performed. Returns ------- int or None The number of citations for the query, or None if the query fails. """ params = {'term': search_term, 'rettype': 'count', 'db': 'pubmed'} tree = send_request(pubmed_search, params) if tree is None: return None else: count = tree.getchildren()[0].text return int(count)
[ "def", "get_id_count", "(", "search_term", ")", ":", "params", "=", "{", "'term'", ":", "search_term", ",", "'rettype'", ":", "'count'", ",", "'db'", ":", "'pubmed'", "}", "tree", "=", "send_request", "(", "pubmed_search", ",", "params", ")", "if", "tree", "is", "None", ":", "return", "None", "else", ":", "count", "=", "tree", ".", "getchildren", "(", ")", "[", "0", "]", ".", "text", "return", "int", "(", "count", ")" ]
Get the number of citations in Pubmed for a search query. Parameters ---------- search_term : str A term for which the PubMed search should be performed. Returns ------- int or None The number of citations for the query, or None if the query fails.
[ "Get", "the", "number", "of", "citations", "in", "Pubmed", "for", "a", "search", "query", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L106-L127
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_ids_for_gene
def get_ids_for_gene(hgnc_name, **kwargs): """Get the curated set of articles for a gene in the Entrez database. Search parameters for the Gene database query can be passed in as keyword arguments. Parameters ---------- hgnc_name : string The HGNC name of the gene. This is used to obtain the HGNC ID (using the hgnc_client module) and in turn used to obtain the Entrez ID associated with the gene. Entrez is then queried for that ID. """ # Get the HGNC ID for the HGNC name hgnc_id = hgnc_client.get_hgnc_id(hgnc_name) if hgnc_id is None: raise ValueError('Invalid HGNC name.') # Get the Entrez ID entrez_id = hgnc_client.get_entrez_id(hgnc_id) if entrez_id is None: raise ValueError('Entrez ID not found in HGNC table.') # Query the Entrez Gene database params = {'db': 'gene', 'retmode': 'xml', 'id': entrez_id} params.update(kwargs) tree = send_request(pubmed_fetch, params) if tree is None: return [] if tree.find('ERROR') is not None: logger.error(tree.find('ERROR').text) return [] # Get all PMIDs from the XML tree id_terms = tree.findall('.//PubMedId') if id_terms is None: return [] # Use a set to remove duplicate IDs ids = list(set([idt.text for idt in id_terms])) return ids
python
def get_ids_for_gene(hgnc_name, **kwargs): """Get the curated set of articles for a gene in the Entrez database. Search parameters for the Gene database query can be passed in as keyword arguments. Parameters ---------- hgnc_name : string The HGNC name of the gene. This is used to obtain the HGNC ID (using the hgnc_client module) and in turn used to obtain the Entrez ID associated with the gene. Entrez is then queried for that ID. """ # Get the HGNC ID for the HGNC name hgnc_id = hgnc_client.get_hgnc_id(hgnc_name) if hgnc_id is None: raise ValueError('Invalid HGNC name.') # Get the Entrez ID entrez_id = hgnc_client.get_entrez_id(hgnc_id) if entrez_id is None: raise ValueError('Entrez ID not found in HGNC table.') # Query the Entrez Gene database params = {'db': 'gene', 'retmode': 'xml', 'id': entrez_id} params.update(kwargs) tree = send_request(pubmed_fetch, params) if tree is None: return [] if tree.find('ERROR') is not None: logger.error(tree.find('ERROR').text) return [] # Get all PMIDs from the XML tree id_terms = tree.findall('.//PubMedId') if id_terms is None: return [] # Use a set to remove duplicate IDs ids = list(set([idt.text for idt in id_terms])) return ids
[ "def", "get_ids_for_gene", "(", "hgnc_name", ",", "*", "*", "kwargs", ")", ":", "# Get the HGNC ID for the HGNC name", "hgnc_id", "=", "hgnc_client", ".", "get_hgnc_id", "(", "hgnc_name", ")", "if", "hgnc_id", "is", "None", ":", "raise", "ValueError", "(", "'Invalid HGNC name.'", ")", "# Get the Entrez ID", "entrez_id", "=", "hgnc_client", ".", "get_entrez_id", "(", "hgnc_id", ")", "if", "entrez_id", "is", "None", ":", "raise", "ValueError", "(", "'Entrez ID not found in HGNC table.'", ")", "# Query the Entrez Gene database", "params", "=", "{", "'db'", ":", "'gene'", ",", "'retmode'", ":", "'xml'", ",", "'id'", ":", "entrez_id", "}", "params", ".", "update", "(", "kwargs", ")", "tree", "=", "send_request", "(", "pubmed_fetch", ",", "params", ")", "if", "tree", "is", "None", ":", "return", "[", "]", "if", "tree", ".", "find", "(", "'ERROR'", ")", "is", "not", "None", ":", "logger", ".", "error", "(", "tree", ".", "find", "(", "'ERROR'", ")", ".", "text", ")", "return", "[", "]", "# Get all PMIDs from the XML tree", "id_terms", "=", "tree", ".", "findall", "(", "'.//PubMedId'", ")", "if", "id_terms", "is", "None", ":", "return", "[", "]", "# Use a set to remove duplicate IDs", "ids", "=", "list", "(", "set", "(", "[", "idt", ".", "text", "for", "idt", "in", "id_terms", "]", ")", ")", "return", "ids" ]
Get the curated set of articles for a gene in the Entrez database. Search parameters for the Gene database query can be passed in as keyword arguments. Parameters ---------- hgnc_name : string The HGNC name of the gene. This is used to obtain the HGNC ID (using the hgnc_client module) and in turn used to obtain the Entrez ID associated with the gene. Entrez is then queried for that ID.
[ "Get", "the", "curated", "set", "of", "articles", "for", "a", "gene", "in", "the", "Entrez", "database", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L131-L170
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_article_xml
def get_article_xml(pubmed_id): """Get the XML metadata for a single article from the Pubmed database. """ if pubmed_id.upper().startswith('PMID'): pubmed_id = pubmed_id[4:] params = {'db': 'pubmed', 'retmode': 'xml', 'id': pubmed_id} tree = send_request(pubmed_fetch, params) if tree is None: return None article = tree.find('PubmedArticle/MedlineCitation/Article') return article
python
def get_article_xml(pubmed_id): """Get the XML metadata for a single article from the Pubmed database. """ if pubmed_id.upper().startswith('PMID'): pubmed_id = pubmed_id[4:] params = {'db': 'pubmed', 'retmode': 'xml', 'id': pubmed_id} tree = send_request(pubmed_fetch, params) if tree is None: return None article = tree.find('PubmedArticle/MedlineCitation/Article') return article
[ "def", "get_article_xml", "(", "pubmed_id", ")", ":", "if", "pubmed_id", ".", "upper", "(", ")", ".", "startswith", "(", "'PMID'", ")", ":", "pubmed_id", "=", "pubmed_id", "[", "4", ":", "]", "params", "=", "{", "'db'", ":", "'pubmed'", ",", "'retmode'", ":", "'xml'", ",", "'id'", ":", "pubmed_id", "}", "tree", "=", "send_request", "(", "pubmed_fetch", ",", "params", ")", "if", "tree", "is", "None", ":", "return", "None", "article", "=", "tree", ".", "find", "(", "'PubmedArticle/MedlineCitation/Article'", ")", "return", "article" ]
Get the XML metadata for a single article from the Pubmed database.
[ "Get", "the", "XML", "metadata", "for", "a", "single", "article", "from", "the", "Pubmed", "database", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L174-L186
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_abstract
def get_abstract(pubmed_id, prepend_title=True): """Get the abstract of an article in the Pubmed database.""" article = get_article_xml(pubmed_id) if article is None: return None return _abstract_from_article_element(article, prepend_title)
python
def get_abstract(pubmed_id, prepend_title=True): """Get the abstract of an article in the Pubmed database.""" article = get_article_xml(pubmed_id) if article is None: return None return _abstract_from_article_element(article, prepend_title)
[ "def", "get_abstract", "(", "pubmed_id", ",", "prepend_title", "=", "True", ")", ":", "article", "=", "get_article_xml", "(", "pubmed_id", ")", "if", "article", "is", "None", ":", "return", "None", "return", "_abstract_from_article_element", "(", "article", ",", "prepend_title", ")" ]
Get the abstract of an article in the Pubmed database.
[ "Get", "the", "abstract", "of", "an", "article", "in", "the", "Pubmed", "database", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L224-L229
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_metadata_from_xml_tree
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False, mesh_annotations=False): """Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. 
""" # Iterate over the articles and build the results dict results = {} pm_articles = tree.findall('./PubmedArticle') for art_ix, pm_article in enumerate(pm_articles): medline_citation = pm_article.find('./MedlineCitation') article_info = _get_article_info(medline_citation, pm_article.find('PubmedData')) journal_info = _get_journal_info(medline_citation, get_issns_from_nlm) context_info = _get_annotations(medline_citation) # Build the result result = {} result.update(article_info) result.update(journal_info) result.update(context_info) # Get the abstracts if requested if get_abstracts: abstract = _abstract_from_article_element( medline_citation.find('Article'), prepend_title=prepend_title ) result['abstract'] = abstract # Add to dict results[article_info['pmid']] = result return results
python
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False, mesh_annotations=False): """Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. 
""" # Iterate over the articles and build the results dict results = {} pm_articles = tree.findall('./PubmedArticle') for art_ix, pm_article in enumerate(pm_articles): medline_citation = pm_article.find('./MedlineCitation') article_info = _get_article_info(medline_citation, pm_article.find('PubmedData')) journal_info = _get_journal_info(medline_citation, get_issns_from_nlm) context_info = _get_annotations(medline_citation) # Build the result result = {} result.update(article_info) result.update(journal_info) result.update(context_info) # Get the abstracts if requested if get_abstracts: abstract = _abstract_from_article_element( medline_citation.find('Article'), prepend_title=prepend_title ) result['abstract'] = abstract # Add to dict results[article_info['pmid']] = result return results
[ "def", "get_metadata_from_xml_tree", "(", "tree", ",", "get_issns_from_nlm", "=", "False", ",", "get_abstracts", "=", "False", ",", "prepend_title", "=", "False", ",", "mesh_annotations", "=", "False", ")", ":", "# Iterate over the articles and build the results dict", "results", "=", "{", "}", "pm_articles", "=", "tree", ".", "findall", "(", "'./PubmedArticle'", ")", "for", "art_ix", ",", "pm_article", "in", "enumerate", "(", "pm_articles", ")", ":", "medline_citation", "=", "pm_article", ".", "find", "(", "'./MedlineCitation'", ")", "article_info", "=", "_get_article_info", "(", "medline_citation", ",", "pm_article", ".", "find", "(", "'PubmedData'", ")", ")", "journal_info", "=", "_get_journal_info", "(", "medline_citation", ",", "get_issns_from_nlm", ")", "context_info", "=", "_get_annotations", "(", "medline_citation", ")", "# Build the result", "result", "=", "{", "}", "result", ".", "update", "(", "article_info", ")", "result", ".", "update", "(", "journal_info", ")", "result", ".", "update", "(", "context_info", ")", "# Get the abstracts if requested", "if", "get_abstracts", ":", "abstract", "=", "_abstract_from_article_element", "(", "medline_citation", ".", "find", "(", "'Article'", ")", ",", "prepend_title", "=", "prepend_title", ")", "result", "[", "'abstract'", "]", "=", "abstract", "# Add to dict", "results", "[", "article_info", "[", "'pmid'", "]", "]", "=", "result", "return", "results" ]
Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
[ "Get", "metadata", "for", "an", "XML", "tree", "containing", "PubmedArticle", "elements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L303-L364
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_metadata_for_ids
def get_metadata_for_ids(pmid_list, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False): """Get article metadata for up to 200 PMIDs from the Pubmed database. Parameters ---------- pmid_list : list of PMIDs as strings Can contain 1-200 PMIDs. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. """ if len(pmid_list) > 200: raise ValueError("Metadata query is limited to 200 PMIDs at a time.") params = {'db': 'pubmed', 'retmode': 'xml', 'id': pmid_list} tree = send_request(pubmed_fetch, params) if tree is None: return None return get_metadata_from_xml_tree(tree, get_issns_from_nlm, get_abstracts, prepend_title)
python
def get_metadata_for_ids(pmid_list, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False): """Get article metadata for up to 200 PMIDs from the Pubmed database. Parameters ---------- pmid_list : list of PMIDs as strings Can contain 1-200 PMIDs. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. """ if len(pmid_list) > 200: raise ValueError("Metadata query is limited to 200 PMIDs at a time.") params = {'db': 'pubmed', 'retmode': 'xml', 'id': pmid_list} tree = send_request(pubmed_fetch, params) if tree is None: return None return get_metadata_from_xml_tree(tree, get_issns_from_nlm, get_abstracts, prepend_title)
[ "def", "get_metadata_for_ids", "(", "pmid_list", ",", "get_issns_from_nlm", "=", "False", ",", "get_abstracts", "=", "False", ",", "prepend_title", "=", "False", ")", ":", "if", "len", "(", "pmid_list", ")", ">", "200", ":", "raise", "ValueError", "(", "\"Metadata query is limited to 200 PMIDs at a time.\"", ")", "params", "=", "{", "'db'", ":", "'pubmed'", ",", "'retmode'", ":", "'xml'", ",", "'id'", ":", "pmid_list", "}", "tree", "=", "send_request", "(", "pubmed_fetch", ",", "params", ")", "if", "tree", "is", "None", ":", "return", "None", "return", "get_metadata_from_xml_tree", "(", "tree", ",", "get_issns_from_nlm", ",", "get_abstracts", ",", "prepend_title", ")" ]
Get article metadata for up to 200 PMIDs from the Pubmed database. Parameters ---------- pmid_list : list of PMIDs as strings Can contain 1-200 PMIDs. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
[ "Get", "article", "metadata", "for", "up", "to", "200", "PMIDs", "from", "the", "Pubmed", "database", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L391-L425
train
sorgerlab/indra
indra/literature/pubmed_client.py
get_issns_for_journal
def get_issns_for_journal(nlm_id): """Get a list of the ISSN numbers for a journal given its NLM ID. Information on NLM XML DTDs is available at https://www.nlm.nih.gov/databases/dtd/ """ params = {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id} tree = send_request(pubmed_fetch, params) if tree is None: return None issn_list = tree.findall('.//ISSN') issn_linking = tree.findall('.//ISSNLinking') issns = issn_list + issn_linking # No ISSNs found! if not issns: return None else: return [issn.text for issn in issns]
python
def get_issns_for_journal(nlm_id): """Get a list of the ISSN numbers for a journal given its NLM ID. Information on NLM XML DTDs is available at https://www.nlm.nih.gov/databases/dtd/ """ params = {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id} tree = send_request(pubmed_fetch, params) if tree is None: return None issn_list = tree.findall('.//ISSN') issn_linking = tree.findall('.//ISSNLinking') issns = issn_list + issn_linking # No ISSNs found! if not issns: return None else: return [issn.text for issn in issns]
[ "def", "get_issns_for_journal", "(", "nlm_id", ")", ":", "params", "=", "{", "'db'", ":", "'nlmcatalog'", ",", "'retmode'", ":", "'xml'", ",", "'id'", ":", "nlm_id", "}", "tree", "=", "send_request", "(", "pubmed_fetch", ",", "params", ")", "if", "tree", "is", "None", ":", "return", "None", "issn_list", "=", "tree", ".", "findall", "(", "'.//ISSN'", ")", "issn_linking", "=", "tree", ".", "findall", "(", "'.//ISSNLinking'", ")", "issns", "=", "issn_list", "+", "issn_linking", "# No ISSNs found!", "if", "not", "issns", ":", "return", "None", "else", ":", "return", "[", "issn", ".", "text", "for", "issn", "in", "issns", "]" ]
Get a list of the ISSN numbers for a journal given its NLM ID. Information on NLM XML DTDs is available at https://www.nlm.nih.gov/databases/dtd/
[ "Get", "a", "list", "of", "the", "ISSN", "numbers", "for", "a", "journal", "given", "its", "NLM", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L429-L448
train
sorgerlab/indra
indra/explanation/model_checker.py
remove_im_params
def remove_im_params(model, im): """Remove parameter nodes from the influence map. Parameters ---------- model : pysb.core.Model PySB model. im : networkx.MultiDiGraph Influence map. Returns ------- networkx.MultiDiGraph Influence map with the parameter nodes removed. """ for param in model.parameters: # If the node doesn't exist e.g., it may have already been removed), # skip over the parameter without error try: im.remove_node(param.name) except: pass
python
def remove_im_params(model, im): """Remove parameter nodes from the influence map. Parameters ---------- model : pysb.core.Model PySB model. im : networkx.MultiDiGraph Influence map. Returns ------- networkx.MultiDiGraph Influence map with the parameter nodes removed. """ for param in model.parameters: # If the node doesn't exist e.g., it may have already been removed), # skip over the parameter without error try: im.remove_node(param.name) except: pass
[ "def", "remove_im_params", "(", "model", ",", "im", ")", ":", "for", "param", "in", "model", ".", "parameters", ":", "# If the node doesn't exist e.g., it may have already been removed),", "# skip over the parameter without error", "try", ":", "im", ".", "remove_node", "(", "param", ".", "name", ")", "except", ":", "pass" ]
Remove parameter nodes from the influence map. Parameters ---------- model : pysb.core.Model PySB model. im : networkx.MultiDiGraph Influence map. Returns ------- networkx.MultiDiGraph Influence map with the parameter nodes removed.
[ "Remove", "parameter", "nodes", "from", "the", "influence", "map", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L919-L940
train
sorgerlab/indra
indra/explanation/model_checker.py
_get_signed_predecessors
def _get_signed_predecessors(im, node, polarity): """Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by account for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node. """ signed_pred_list = [] for pred in im.predecessors(node): pred_edge = (pred, node) yield (pred, _get_edge_sign(im, pred_edge) * polarity)
python
def _get_signed_predecessors(im, node, polarity): """Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by account for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node. """ signed_pred_list = [] for pred in im.predecessors(node): pred_edge = (pred, node) yield (pred, _get_edge_sign(im, pred_edge) * polarity)
[ "def", "_get_signed_predecessors", "(", "im", ",", "node", ",", "polarity", ")", ":", "signed_pred_list", "=", "[", "]", "for", "pred", "in", "im", ".", "predecessors", "(", "node", ")", ":", "pred_edge", "=", "(", "pred", ",", "node", ")", "yield", "(", "pred", ",", "_get_edge_sign", "(", "im", ",", "pred_edge", ")", "*", "polarity", ")" ]
Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by account for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node.
[ "Get", "upstream", "nodes", "in", "the", "influence", "map", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1009-L1037
train
sorgerlab/indra
indra/explanation/model_checker.py
_get_edge_sign
def _get_edge_sign(im, edge): """Get the polarity of the influence by examining the edge sign.""" edge_data = im[edge[0]][edge[1]] # Handle possible multiple edges between nodes signs = list(set([v['sign'] for v in edge_data.values() if v.get('sign')])) if len(signs) > 1: logger.warning("Edge %s has conflicting polarities; choosing " "positive polarity by default" % str(edge)) sign = 1 else: sign = signs[0] if sign is None: raise Exception('No sign attribute for edge.') elif abs(sign) == 1: return sign else: raise Exception('Unexpected edge sign: %s' % edge.attr['sign'])
python
def _get_edge_sign(im, edge): """Get the polarity of the influence by examining the edge sign.""" edge_data = im[edge[0]][edge[1]] # Handle possible multiple edges between nodes signs = list(set([v['sign'] for v in edge_data.values() if v.get('sign')])) if len(signs) > 1: logger.warning("Edge %s has conflicting polarities; choosing " "positive polarity by default" % str(edge)) sign = 1 else: sign = signs[0] if sign is None: raise Exception('No sign attribute for edge.') elif abs(sign) == 1: return sign else: raise Exception('Unexpected edge sign: %s' % edge.attr['sign'])
[ "def", "_get_edge_sign", "(", "im", ",", "edge", ")", ":", "edge_data", "=", "im", "[", "edge", "[", "0", "]", "]", "[", "edge", "[", "1", "]", "]", "# Handle possible multiple edges between nodes", "signs", "=", "list", "(", "set", "(", "[", "v", "[", "'sign'", "]", "for", "v", "in", "edge_data", ".", "values", "(", ")", "if", "v", ".", "get", "(", "'sign'", ")", "]", ")", ")", "if", "len", "(", "signs", ")", ">", "1", ":", "logger", ".", "warning", "(", "\"Edge %s has conflicting polarities; choosing \"", "\"positive polarity by default\"", "%", "str", "(", "edge", ")", ")", "sign", "=", "1", "else", ":", "sign", "=", "signs", "[", "0", "]", "if", "sign", "is", "None", ":", "raise", "Exception", "(", "'No sign attribute for edge.'", ")", "elif", "abs", "(", "sign", ")", "==", "1", ":", "return", "sign", "else", ":", "raise", "Exception", "(", "'Unexpected edge sign: %s'", "%", "edge", ".", "attr", "[", "'sign'", "]", ")" ]
Get the polarity of the influence by examining the edge sign.
[ "Get", "the", "polarity", "of", "the", "influence", "by", "examining", "the", "edge", "sign", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1040-L1057
train
sorgerlab/indra
indra/explanation/model_checker.py
_add_modification_to_agent
def _add_modification_to_agent(agent, mod_type, residue, position): """Add a modification condition to an Agent.""" new_mod = ModCondition(mod_type, residue, position) # Check if this modification already exists for old_mod in agent.mods: if old_mod.equals(new_mod): return agent new_agent = deepcopy(agent) new_agent.mods.append(new_mod) return new_agent
python
def _add_modification_to_agent(agent, mod_type, residue, position): """Add a modification condition to an Agent.""" new_mod = ModCondition(mod_type, residue, position) # Check if this modification already exists for old_mod in agent.mods: if old_mod.equals(new_mod): return agent new_agent = deepcopy(agent) new_agent.mods.append(new_mod) return new_agent
[ "def", "_add_modification_to_agent", "(", "agent", ",", "mod_type", ",", "residue", ",", "position", ")", ":", "new_mod", "=", "ModCondition", "(", "mod_type", ",", "residue", ",", "position", ")", "# Check if this modification already exists", "for", "old_mod", "in", "agent", ".", "mods", ":", "if", "old_mod", ".", "equals", "(", "new_mod", ")", ":", "return", "agent", "new_agent", "=", "deepcopy", "(", "agent", ")", "new_agent", ".", "mods", ".", "append", "(", "new_mod", ")", "return", "new_agent" ]
Add a modification condition to an Agent.
[ "Add", "a", "modification", "condition", "to", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1060-L1069
train
sorgerlab/indra
indra/explanation/model_checker.py
_match_lhs
def _match_lhs(cp, rules): """Get rules with a left-hand side matching the given ComplexPattern.""" rule_matches = [] for rule in rules: reactant_pattern = rule.rule_expression.reactant_pattern for rule_cp in reactant_pattern.complex_patterns: if _cp_embeds_into(rule_cp, cp): rule_matches.append(rule) break return rule_matches
python
def _match_lhs(cp, rules): """Get rules with a left-hand side matching the given ComplexPattern.""" rule_matches = [] for rule in rules: reactant_pattern = rule.rule_expression.reactant_pattern for rule_cp in reactant_pattern.complex_patterns: if _cp_embeds_into(rule_cp, cp): rule_matches.append(rule) break return rule_matches
[ "def", "_match_lhs", "(", "cp", ",", "rules", ")", ":", "rule_matches", "=", "[", "]", "for", "rule", "in", "rules", ":", "reactant_pattern", "=", "rule", ".", "rule_expression", ".", "reactant_pattern", "for", "rule_cp", "in", "reactant_pattern", ".", "complex_patterns", ":", "if", "_cp_embeds_into", "(", "rule_cp", ",", "cp", ")", ":", "rule_matches", ".", "append", "(", "rule", ")", "break", "return", "rule_matches" ]
Get rules with a left-hand side matching the given ComplexPattern.
[ "Get", "rules", "with", "a", "left", "-", "hand", "side", "matching", "the", "given", "ComplexPattern", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1084-L1093
train
sorgerlab/indra
indra/explanation/model_checker.py
_cp_embeds_into
def _cp_embeds_into(cp1, cp2): """Check that any state in ComplexPattern2 is matched in ComplexPattern1. """ # Check that any state in cp2 is matched in cp1 # If the thing we're matching to is just a monomer pattern, that makes # things easier--we just need to find the corresponding monomer pattern # in cp1 if cp1 is None or cp2 is None: return False cp1 = as_complex_pattern(cp1) cp2 = as_complex_pattern(cp2) if len(cp2.monomer_patterns) == 1: mp2 = cp2.monomer_patterns[0] # Iterate over the monomer patterns in cp1 and see if there is one # that has the same name for mp1 in cp1.monomer_patterns: if _mp_embeds_into(mp1, mp2): return True return False
python
def _cp_embeds_into(cp1, cp2): """Check that any state in ComplexPattern2 is matched in ComplexPattern1. """ # Check that any state in cp2 is matched in cp1 # If the thing we're matching to is just a monomer pattern, that makes # things easier--we just need to find the corresponding monomer pattern # in cp1 if cp1 is None or cp2 is None: return False cp1 = as_complex_pattern(cp1) cp2 = as_complex_pattern(cp2) if len(cp2.monomer_patterns) == 1: mp2 = cp2.monomer_patterns[0] # Iterate over the monomer patterns in cp1 and see if there is one # that has the same name for mp1 in cp1.monomer_patterns: if _mp_embeds_into(mp1, mp2): return True return False
[ "def", "_cp_embeds_into", "(", "cp1", ",", "cp2", ")", ":", "# Check that any state in cp2 is matched in cp1", "# If the thing we're matching to is just a monomer pattern, that makes", "# things easier--we just need to find the corresponding monomer pattern", "# in cp1", "if", "cp1", "is", "None", "or", "cp2", "is", "None", ":", "return", "False", "cp1", "=", "as_complex_pattern", "(", "cp1", ")", "cp2", "=", "as_complex_pattern", "(", "cp2", ")", "if", "len", "(", "cp2", ".", "monomer_patterns", ")", "==", "1", ":", "mp2", "=", "cp2", ".", "monomer_patterns", "[", "0", "]", "# Iterate over the monomer patterns in cp1 and see if there is one", "# that has the same name", "for", "mp1", "in", "cp1", ".", "monomer_patterns", ":", "if", "_mp_embeds_into", "(", "mp1", ",", "mp2", ")", ":", "return", "True", "return", "False" ]
Check that any state in ComplexPattern2 is matched in ComplexPattern1.
[ "Check", "that", "any", "state", "in", "ComplexPattern2", "is", "matched", "in", "ComplexPattern1", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1096-L1114
train
sorgerlab/indra
indra/explanation/model_checker.py
_mp_embeds_into
def _mp_embeds_into(mp1, mp2): """Check that conditions in MonomerPattern2 are met in MonomerPattern1.""" sc_matches = [] if mp1.monomer.name != mp2.monomer.name: return False # Check that all conditions in mp2 are met in mp1 for site_name, site_state in mp2.site_conditions.items(): if site_name not in mp1.site_conditions or \ site_state != mp1.site_conditions[site_name]: return False return True
python
def _mp_embeds_into(mp1, mp2): """Check that conditions in MonomerPattern2 are met in MonomerPattern1.""" sc_matches = [] if mp1.monomer.name != mp2.monomer.name: return False # Check that all conditions in mp2 are met in mp1 for site_name, site_state in mp2.site_conditions.items(): if site_name not in mp1.site_conditions or \ site_state != mp1.site_conditions[site_name]: return False return True
[ "def", "_mp_embeds_into", "(", "mp1", ",", "mp2", ")", ":", "sc_matches", "=", "[", "]", "if", "mp1", ".", "monomer", ".", "name", "!=", "mp2", ".", "monomer", ".", "name", ":", "return", "False", "# Check that all conditions in mp2 are met in mp1", "for", "site_name", ",", "site_state", "in", "mp2", ".", "site_conditions", ".", "items", "(", ")", ":", "if", "site_name", "not", "in", "mp1", ".", "site_conditions", "or", "site_state", "!=", "mp1", ".", "site_conditions", "[", "site_name", "]", ":", "return", "False", "return", "True" ]
Check that conditions in MonomerPattern2 are met in MonomerPattern1.
[ "Check", "that", "conditions", "in", "MonomerPattern2", "are", "met", "in", "MonomerPattern1", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1117-L1127
train
sorgerlab/indra
indra/explanation/model_checker.py
_monomer_pattern_label
def _monomer_pattern_label(mp): """Return a string label for a MonomerPattern.""" site_strs = [] for site, cond in mp.site_conditions.items(): if isinstance(cond, tuple) or isinstance(cond, list): assert len(cond) == 2 if cond[1] == WILD: site_str = '%s_%s' % (site, cond[0]) else: site_str = '%s_%s%s' % (site, cond[0], cond[1]) elif isinstance(cond, numbers.Real): continue else: site_str = '%s_%s' % (site, cond) site_strs.append(site_str) return '%s_%s' % (mp.monomer.name, '_'.join(site_strs))
python
def _monomer_pattern_label(mp): """Return a string label for a MonomerPattern.""" site_strs = [] for site, cond in mp.site_conditions.items(): if isinstance(cond, tuple) or isinstance(cond, list): assert len(cond) == 2 if cond[1] == WILD: site_str = '%s_%s' % (site, cond[0]) else: site_str = '%s_%s%s' % (site, cond[0], cond[1]) elif isinstance(cond, numbers.Real): continue else: site_str = '%s_%s' % (site, cond) site_strs.append(site_str) return '%s_%s' % (mp.monomer.name, '_'.join(site_strs))
[ "def", "_monomer_pattern_label", "(", "mp", ")", ":", "site_strs", "=", "[", "]", "for", "site", ",", "cond", "in", "mp", ".", "site_conditions", ".", "items", "(", ")", ":", "if", "isinstance", "(", "cond", ",", "tuple", ")", "or", "isinstance", "(", "cond", ",", "list", ")", ":", "assert", "len", "(", "cond", ")", "==", "2", "if", "cond", "[", "1", "]", "==", "WILD", ":", "site_str", "=", "'%s_%s'", "%", "(", "site", ",", "cond", "[", "0", "]", ")", "else", ":", "site_str", "=", "'%s_%s%s'", "%", "(", "site", ",", "cond", "[", "0", "]", ",", "cond", "[", "1", "]", ")", "elif", "isinstance", "(", "cond", ",", "numbers", ".", "Real", ")", ":", "continue", "else", ":", "site_str", "=", "'%s_%s'", "%", "(", "site", ",", "cond", ")", "site_strs", ".", "append", "(", "site_str", ")", "return", "'%s_%s'", "%", "(", "mp", ".", "monomer", ".", "name", ",", "'_'", ".", "join", "(", "site_strs", ")", ")" ]
Return a string label for a MonomerPattern.
[ "Return", "a", "string", "label", "for", "a", "MonomerPattern", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1224-L1239
train
sorgerlab/indra
indra/explanation/model_checker.py
_stmt_from_rule
def _stmt_from_rule(model, rule_name, stmts): """Return the INDRA Statement corresponding to a given rule by name.""" stmt_uuid = None for ann in model.annotations: if ann.predicate == 'from_indra_statement': if ann.subject == rule_name: stmt_uuid = ann.object break if stmt_uuid: for stmt in stmts: if stmt.uuid == stmt_uuid: return stmt
python
def _stmt_from_rule(model, rule_name, stmts): """Return the INDRA Statement corresponding to a given rule by name.""" stmt_uuid = None for ann in model.annotations: if ann.predicate == 'from_indra_statement': if ann.subject == rule_name: stmt_uuid = ann.object break if stmt_uuid: for stmt in stmts: if stmt.uuid == stmt_uuid: return stmt
[ "def", "_stmt_from_rule", "(", "model", ",", "rule_name", ",", "stmts", ")", ":", "stmt_uuid", "=", "None", "for", "ann", "in", "model", ".", "annotations", ":", "if", "ann", ".", "predicate", "==", "'from_indra_statement'", ":", "if", "ann", ".", "subject", "==", "rule_name", ":", "stmt_uuid", "=", "ann", ".", "object", "break", "if", "stmt_uuid", ":", "for", "stmt", "in", "stmts", ":", "if", "stmt", ".", "uuid", "==", "stmt_uuid", ":", "return", "stmt" ]
Return the INDRA Statement corresponding to a given rule by name.
[ "Return", "the", "INDRA", "Statement", "corresponding", "to", "a", "given", "rule", "by", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L1263-L1274
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.generate_im
def generate_im(self, model): """Return a graph representing the influence map generated by Kappa Parameters ---------- model : pysb.Model The PySB model whose influence map is to be generated Returns ------- graph : networkx.MultiDiGraph A MultiDiGraph representing the influence map """ kappa = kappy.KappaStd() model_str = export.export(model, 'kappa') kappa.add_model_string(model_str) kappa.project_parse() imap = kappa.analyses_influence_map(accuracy='medium') graph = im_json_to_graph(imap) return graph
python
def generate_im(self, model): """Return a graph representing the influence map generated by Kappa Parameters ---------- model : pysb.Model The PySB model whose influence map is to be generated Returns ------- graph : networkx.MultiDiGraph A MultiDiGraph representing the influence map """ kappa = kappy.KappaStd() model_str = export.export(model, 'kappa') kappa.add_model_string(model_str) kappa.project_parse() imap = kappa.analyses_influence_map(accuracy='medium') graph = im_json_to_graph(imap) return graph
[ "def", "generate_im", "(", "self", ",", "model", ")", ":", "kappa", "=", "kappy", ".", "KappaStd", "(", ")", "model_str", "=", "export", ".", "export", "(", "model", ",", "'kappa'", ")", "kappa", ".", "add_model_string", "(", "model_str", ")", "kappa", ".", "project_parse", "(", ")", "imap", "=", "kappa", ".", "analyses_influence_map", "(", "accuracy", "=", "'medium'", ")", "graph", "=", "im_json_to_graph", "(", "imap", ")", "return", "graph" ]
Return a graph representing the influence map generated by Kappa Parameters ---------- model : pysb.Model The PySB model whose influence map is to be generated Returns ------- graph : networkx.MultiDiGraph A MultiDiGraph representing the influence map
[ "Return", "a", "graph", "representing", "the", "influence", "map", "generated", "by", "Kappa" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L196-L215
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.draw_im
def draw_im(self, fname): """Draw and save the influence map in a file. Parameters ---------- fname : str The name of the file to save the influence map in. The extension of the file will determine the file format, typically png or pdf. """ im = self.get_im() im_agraph = nx.nx_agraph.to_agraph(im) im_agraph.draw(fname, prog='dot')
python
def draw_im(self, fname): """Draw and save the influence map in a file. Parameters ---------- fname : str The name of the file to save the influence map in. The extension of the file will determine the file format, typically png or pdf. """ im = self.get_im() im_agraph = nx.nx_agraph.to_agraph(im) im_agraph.draw(fname, prog='dot')
[ "def", "draw_im", "(", "self", ",", "fname", ")", ":", "im", "=", "self", ".", "get_im", "(", ")", "im_agraph", "=", "nx", ".", "nx_agraph", ".", "to_agraph", "(", "im", ")", "im_agraph", ".", "draw", "(", "fname", ",", "prog", "=", "'dot'", ")" ]
Draw and save the influence map in a file. Parameters ---------- fname : str The name of the file to save the influence map in. The extension of the file will determine the file format, typically png or pdf.
[ "Draw", "and", "save", "the", "influence", "map", "in", "a", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L217-L229
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.get_im
def get_im(self, force_update=False): """Get the influence map for the model, generating it if necessary. Parameters ---------- force_update : bool Whether to generate the influence map when the function is called. If False, returns the previously generated influence map if available. Defaults to True. Returns ------- networkx MultiDiGraph object containing the influence map. The influence map can be rendered as a pdf using the dot layout program as follows:: im_agraph = nx.nx_agraph.to_agraph(influence_map) im_agraph.draw('influence_map.pdf', prog='dot') """ if self._im and not force_update: return self._im if not self.model: raise Exception("Cannot get influence map if there is no model.") def add_obs_for_agent(agent): obj_mps = list(pa.grounded_monomer_patterns(self.model, agent)) if not obj_mps: logger.debug('No monomer patterns found in model for agent %s, ' 'skipping' % agent) return obs_list = [] for obj_mp in obj_mps: obs_name = _monomer_pattern_label(obj_mp) + '_obs' # Add the observable obj_obs = Observable(obs_name, obj_mp, _export=False) obs_list.append(obs_name) try: self.model.add_component(obj_obs) except ComponentDuplicateNameError as e: pass return obs_list # Create observables for all statements to check, and add to model # Remove any existing observables in the model self.model.observables = ComponentSet([]) for stmt in self.statements: # Generate observables for Modification statements if isinstance(stmt, Modification): mod_condition_name = modclass_to_modtype[stmt.__class__] if isinstance(stmt, RemoveModification): mod_condition_name = modtype_to_inverse[mod_condition_name] # Add modification to substrate agent modified_sub = _add_modification_to_agent(stmt.sub, mod_condition_name, stmt.residue, stmt.position) obs_list = add_obs_for_agent(modified_sub) # Associate this statement with this observable self.stmt_to_obs[stmt] = obs_list # Generate observables for Activation/Inhibition statements elif isinstance(stmt, RegulateActivity): 
regulated_obj, polarity = \ _add_activity_to_agent(stmt.obj, stmt.obj_activity, stmt.is_activation) obs_list = add_obs_for_agent(regulated_obj) # Associate this statement with this observable self.stmt_to_obs[stmt] = obs_list elif isinstance(stmt, RegulateAmount): obs_list = add_obs_for_agent(stmt.obj) self.stmt_to_obs[stmt] = obs_list elif isinstance(stmt, Influence): obs_list = add_obs_for_agent(stmt.obj.concept) self.stmt_to_obs[stmt] = obs_list # Add observables for each agent for ag in self.agent_obs: obs_list = add_obs_for_agent(ag) self.agent_to_obs[ag] = obs_list logger.info("Generating influence map") self._im = self.generate_im(self.model) #self._im.is_multigraph = lambda: False # Now, for every rule in the model, check if there are any observables # downstream; alternatively, for every observable in the model, get a # list of rules. # We'll need the dictionary to check if nodes are observables node_attributes = nx.get_node_attributes(self._im, 'node_type') for rule in self.model.rules: obs_list = [] # Get successors of the rule node for neighb in self._im.neighbors(rule.name): # Check if the node is an observable if node_attributes[neighb] != 'variable': continue # Get the edge and check the polarity edge_sign = _get_edge_sign(self._im, (rule.name, neighb)) obs_list.append((neighb, edge_sign)) self.rule_obs_dict[rule.name] = obs_list return self._im
python
def get_im(self, force_update=False): """Get the influence map for the model, generating it if necessary. Parameters ---------- force_update : bool Whether to generate the influence map when the function is called. If False, returns the previously generated influence map if available. Defaults to True. Returns ------- networkx MultiDiGraph object containing the influence map. The influence map can be rendered as a pdf using the dot layout program as follows:: im_agraph = nx.nx_agraph.to_agraph(influence_map) im_agraph.draw('influence_map.pdf', prog='dot') """ if self._im and not force_update: return self._im if not self.model: raise Exception("Cannot get influence map if there is no model.") def add_obs_for_agent(agent): obj_mps = list(pa.grounded_monomer_patterns(self.model, agent)) if not obj_mps: logger.debug('No monomer patterns found in model for agent %s, ' 'skipping' % agent) return obs_list = [] for obj_mp in obj_mps: obs_name = _monomer_pattern_label(obj_mp) + '_obs' # Add the observable obj_obs = Observable(obs_name, obj_mp, _export=False) obs_list.append(obs_name) try: self.model.add_component(obj_obs) except ComponentDuplicateNameError as e: pass return obs_list # Create observables for all statements to check, and add to model # Remove any existing observables in the model self.model.observables = ComponentSet([]) for stmt in self.statements: # Generate observables for Modification statements if isinstance(stmt, Modification): mod_condition_name = modclass_to_modtype[stmt.__class__] if isinstance(stmt, RemoveModification): mod_condition_name = modtype_to_inverse[mod_condition_name] # Add modification to substrate agent modified_sub = _add_modification_to_agent(stmt.sub, mod_condition_name, stmt.residue, stmt.position) obs_list = add_obs_for_agent(modified_sub) # Associate this statement with this observable self.stmt_to_obs[stmt] = obs_list # Generate observables for Activation/Inhibition statements elif isinstance(stmt, RegulateActivity): 
regulated_obj, polarity = \ _add_activity_to_agent(stmt.obj, stmt.obj_activity, stmt.is_activation) obs_list = add_obs_for_agent(regulated_obj) # Associate this statement with this observable self.stmt_to_obs[stmt] = obs_list elif isinstance(stmt, RegulateAmount): obs_list = add_obs_for_agent(stmt.obj) self.stmt_to_obs[stmt] = obs_list elif isinstance(stmt, Influence): obs_list = add_obs_for_agent(stmt.obj.concept) self.stmt_to_obs[stmt] = obs_list # Add observables for each agent for ag in self.agent_obs: obs_list = add_obs_for_agent(ag) self.agent_to_obs[ag] = obs_list logger.info("Generating influence map") self._im = self.generate_im(self.model) #self._im.is_multigraph = lambda: False # Now, for every rule in the model, check if there are any observables # downstream; alternatively, for every observable in the model, get a # list of rules. # We'll need the dictionary to check if nodes are observables node_attributes = nx.get_node_attributes(self._im, 'node_type') for rule in self.model.rules: obs_list = [] # Get successors of the rule node for neighb in self._im.neighbors(rule.name): # Check if the node is an observable if node_attributes[neighb] != 'variable': continue # Get the edge and check the polarity edge_sign = _get_edge_sign(self._im, (rule.name, neighb)) obs_list.append((neighb, edge_sign)) self.rule_obs_dict[rule.name] = obs_list return self._im
[ "def", "get_im", "(", "self", ",", "force_update", "=", "False", ")", ":", "if", "self", ".", "_im", "and", "not", "force_update", ":", "return", "self", ".", "_im", "if", "not", "self", ".", "model", ":", "raise", "Exception", "(", "\"Cannot get influence map if there is no model.\"", ")", "def", "add_obs_for_agent", "(", "agent", ")", ":", "obj_mps", "=", "list", "(", "pa", ".", "grounded_monomer_patterns", "(", "self", ".", "model", ",", "agent", ")", ")", "if", "not", "obj_mps", ":", "logger", ".", "debug", "(", "'No monomer patterns found in model for agent %s, '", "'skipping'", "%", "agent", ")", "return", "obs_list", "=", "[", "]", "for", "obj_mp", "in", "obj_mps", ":", "obs_name", "=", "_monomer_pattern_label", "(", "obj_mp", ")", "+", "'_obs'", "# Add the observable", "obj_obs", "=", "Observable", "(", "obs_name", ",", "obj_mp", ",", "_export", "=", "False", ")", "obs_list", ".", "append", "(", "obs_name", ")", "try", ":", "self", ".", "model", ".", "add_component", "(", "obj_obs", ")", "except", "ComponentDuplicateNameError", "as", "e", ":", "pass", "return", "obs_list", "# Create observables for all statements to check, and add to model", "# Remove any existing observables in the model", "self", ".", "model", ".", "observables", "=", "ComponentSet", "(", "[", "]", ")", "for", "stmt", "in", "self", ".", "statements", ":", "# Generate observables for Modification statements", "if", "isinstance", "(", "stmt", ",", "Modification", ")", ":", "mod_condition_name", "=", "modclass_to_modtype", "[", "stmt", ".", "__class__", "]", "if", "isinstance", "(", "stmt", ",", "RemoveModification", ")", ":", "mod_condition_name", "=", "modtype_to_inverse", "[", "mod_condition_name", "]", "# Add modification to substrate agent", "modified_sub", "=", "_add_modification_to_agent", "(", "stmt", ".", "sub", ",", "mod_condition_name", ",", "stmt", ".", "residue", ",", "stmt", ".", "position", ")", "obs_list", "=", "add_obs_for_agent", "(", "modified_sub", ")", "# Associate 
this statement with this observable", "self", ".", "stmt_to_obs", "[", "stmt", "]", "=", "obs_list", "# Generate observables for Activation/Inhibition statements", "elif", "isinstance", "(", "stmt", ",", "RegulateActivity", ")", ":", "regulated_obj", ",", "polarity", "=", "_add_activity_to_agent", "(", "stmt", ".", "obj", ",", "stmt", ".", "obj_activity", ",", "stmt", ".", "is_activation", ")", "obs_list", "=", "add_obs_for_agent", "(", "regulated_obj", ")", "# Associate this statement with this observable", "self", ".", "stmt_to_obs", "[", "stmt", "]", "=", "obs_list", "elif", "isinstance", "(", "stmt", ",", "RegulateAmount", ")", ":", "obs_list", "=", "add_obs_for_agent", "(", "stmt", ".", "obj", ")", "self", ".", "stmt_to_obs", "[", "stmt", "]", "=", "obs_list", "elif", "isinstance", "(", "stmt", ",", "Influence", ")", ":", "obs_list", "=", "add_obs_for_agent", "(", "stmt", ".", "obj", ".", "concept", ")", "self", ".", "stmt_to_obs", "[", "stmt", "]", "=", "obs_list", "# Add observables for each agent", "for", "ag", "in", "self", ".", "agent_obs", ":", "obs_list", "=", "add_obs_for_agent", "(", "ag", ")", "self", ".", "agent_to_obs", "[", "ag", "]", "=", "obs_list", "logger", ".", "info", "(", "\"Generating influence map\"", ")", "self", ".", "_im", "=", "self", ".", "generate_im", "(", "self", ".", "model", ")", "#self._im.is_multigraph = lambda: False", "# Now, for every rule in the model, check if there are any observables", "# downstream; alternatively, for every observable in the model, get a", "# list of rules.", "# We'll need the dictionary to check if nodes are observables", "node_attributes", "=", "nx", ".", "get_node_attributes", "(", "self", ".", "_im", ",", "'node_type'", ")", "for", "rule", "in", "self", ".", "model", ".", "rules", ":", "obs_list", "=", "[", "]", "# Get successors of the rule node", "for", "neighb", "in", "self", ".", "_im", ".", "neighbors", "(", "rule", ".", "name", ")", ":", "# Check if the node is an observable", "if", 
"node_attributes", "[", "neighb", "]", "!=", "'variable'", ":", "continue", "# Get the edge and check the polarity", "edge_sign", "=", "_get_edge_sign", "(", "self", ".", "_im", ",", "(", "rule", ".", "name", ",", "neighb", ")", ")", "obs_list", ".", "append", "(", "(", "neighb", ",", "edge_sign", ")", ")", "self", ".", "rule_obs_dict", "[", "rule", ".", "name", "]", "=", "obs_list", "return", "self", ".", "_im" ]
Get the influence map for the model, generating it if necessary. Parameters ---------- force_update : bool Whether to generate the influence map when the function is called. If False, returns the previously generated influence map if available. Defaults to True. Returns ------- networkx MultiDiGraph object containing the influence map. The influence map can be rendered as a pdf using the dot layout program as follows:: im_agraph = nx.nx_agraph.to_agraph(influence_map) im_agraph.draw('influence_map.pdf', prog='dot')
[ "Get", "the", "influence", "map", "for", "the", "model", "generating", "it", "if", "necessary", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L231-L327
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.check_model
def check_model(self, max_paths=1, max_path_length=5): """Check all the statements added to the ModelChecker. Parameters ---------- max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- list of (Statement, PathResult) Each tuple contains the Statement checked against the model and a PathResult object describing the results of model checking. """ results = [] for stmt in self.statements: result = self.check_statement(stmt, max_paths, max_path_length) results.append((stmt, result)) return results
python
def check_model(self, max_paths=1, max_path_length=5): """Check all the statements added to the ModelChecker. Parameters ---------- max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- list of (Statement, PathResult) Each tuple contains the Statement checked against the model and a PathResult object describing the results of model checking. """ results = [] for stmt in self.statements: result = self.check_statement(stmt, max_paths, max_path_length) results.append((stmt, result)) return results
[ "def", "check_model", "(", "self", ",", "max_paths", "=", "1", ",", "max_path_length", "=", "5", ")", ":", "results", "=", "[", "]", "for", "stmt", "in", "self", ".", "statements", ":", "result", "=", "self", ".", "check_statement", "(", "stmt", ",", "max_paths", ",", "max_path_length", ")", "results", ".", "append", "(", "(", "stmt", ",", "result", ")", ")", "return", "results" ]
Check all the statements added to the ModelChecker. Parameters ---------- max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- list of (Statement, PathResult) Each tuple contains the Statement checked against the model and a PathResult object describing the results of model checking.
[ "Check", "all", "the", "statements", "added", "to", "the", "ModelChecker", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L329-L350
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.check_statement
def check_statement(self, stmt, max_paths=1, max_path_length=5): """Check a single Statement against the model. Parameters ---------- stmt : indra.statements.Statement The Statement to check. max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- boolean True if the model satisfies the Statement. """ # Make sure the influence map is initialized self.get_im() # Check if this is one of the statement types that we can check if not isinstance(stmt, (Modification, RegulateAmount, RegulateActivity, Influence)): return PathResult(False, 'STATEMENT_TYPE_NOT_HANDLED', max_paths, max_path_length) # Get the polarity for the statement if isinstance(stmt, Modification): target_polarity = -1 if isinstance(stmt, RemoveModification) else 1 elif isinstance(stmt, RegulateActivity): target_polarity = 1 if stmt.is_activation else -1 elif isinstance(stmt, RegulateAmount): target_polarity = -1 if isinstance(stmt, DecreaseAmount) else 1 elif isinstance(stmt, Influence): target_polarity = -1 if stmt.overall_polarity() == -1 else 1 # Get the subject and object (works also for Modifications) subj, obj = stmt.agent_list() # Get a list of monomer patterns matching the subject FIXME Currently # this will match rules with the corresponding monomer pattern on it. 
# In future, this statement should (possibly) also match rules in which # 1) the agent is in its active form, or 2) the agent is tagged as the # enzyme in a rule of the appropriate activity (e.g., a phosphorylation # rule) FIXME if subj is not None: subj_mps = list(pa.grounded_monomer_patterns(self.model, subj, ignore_activities=True)) if not subj_mps: logger.debug('No monomers found corresponding to agent %s' % subj) return PathResult(False, 'SUBJECT_MONOMERS_NOT_FOUND', max_paths, max_path_length) else: subj_mps = [None] # Observables may not be found for an activation since there may be no # rule in the model activating the object, and the object may not have # an "active" site of the appropriate type obs_names = self.stmt_to_obs[stmt] if not obs_names: logger.debug("No observables for stmt %s, returning False" % stmt) return PathResult(False, 'OBSERVABLES_NOT_FOUND', max_paths, max_path_length) for subj_mp, obs_name in itertools.product(subj_mps, obs_names): # NOTE: Returns on the path found for the first enz_mp/obs combo result = self._find_im_paths(subj_mp, obs_name, target_polarity, max_paths, max_path_length) # If a path was found, then we return it; otherwise, that means # there was no path for this observable, so we have to try the next # one if result.path_found: return result # If we got here, then there was no path for any observable return PathResult(False, 'NO_PATHS_FOUND', max_paths, max_path_length)
python
def check_statement(self, stmt, max_paths=1, max_path_length=5): """Check a single Statement against the model. Parameters ---------- stmt : indra.statements.Statement The Statement to check. max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- boolean True if the model satisfies the Statement. """ # Make sure the influence map is initialized self.get_im() # Check if this is one of the statement types that we can check if not isinstance(stmt, (Modification, RegulateAmount, RegulateActivity, Influence)): return PathResult(False, 'STATEMENT_TYPE_NOT_HANDLED', max_paths, max_path_length) # Get the polarity for the statement if isinstance(stmt, Modification): target_polarity = -1 if isinstance(stmt, RemoveModification) else 1 elif isinstance(stmt, RegulateActivity): target_polarity = 1 if stmt.is_activation else -1 elif isinstance(stmt, RegulateAmount): target_polarity = -1 if isinstance(stmt, DecreaseAmount) else 1 elif isinstance(stmt, Influence): target_polarity = -1 if stmt.overall_polarity() == -1 else 1 # Get the subject and object (works also for Modifications) subj, obj = stmt.agent_list() # Get a list of monomer patterns matching the subject FIXME Currently # this will match rules with the corresponding monomer pattern on it. 
# In future, this statement should (possibly) also match rules in which # 1) the agent is in its active form, or 2) the agent is tagged as the # enzyme in a rule of the appropriate activity (e.g., a phosphorylation # rule) FIXME if subj is not None: subj_mps = list(pa.grounded_monomer_patterns(self.model, subj, ignore_activities=True)) if not subj_mps: logger.debug('No monomers found corresponding to agent %s' % subj) return PathResult(False, 'SUBJECT_MONOMERS_NOT_FOUND', max_paths, max_path_length) else: subj_mps = [None] # Observables may not be found for an activation since there may be no # rule in the model activating the object, and the object may not have # an "active" site of the appropriate type obs_names = self.stmt_to_obs[stmt] if not obs_names: logger.debug("No observables for stmt %s, returning False" % stmt) return PathResult(False, 'OBSERVABLES_NOT_FOUND', max_paths, max_path_length) for subj_mp, obs_name in itertools.product(subj_mps, obs_names): # NOTE: Returns on the path found for the first enz_mp/obs combo result = self._find_im_paths(subj_mp, obs_name, target_polarity, max_paths, max_path_length) # If a path was found, then we return it; otherwise, that means # there was no path for this observable, so we have to try the next # one if result.path_found: return result # If we got here, then there was no path for any observable return PathResult(False, 'NO_PATHS_FOUND', max_paths, max_path_length)
[ "def", "check_statement", "(", "self", ",", "stmt", ",", "max_paths", "=", "1", ",", "max_path_length", "=", "5", ")", ":", "# Make sure the influence map is initialized", "self", ".", "get_im", "(", ")", "# Check if this is one of the statement types that we can check", "if", "not", "isinstance", "(", "stmt", ",", "(", "Modification", ",", "RegulateAmount", ",", "RegulateActivity", ",", "Influence", ")", ")", ":", "return", "PathResult", "(", "False", ",", "'STATEMENT_TYPE_NOT_HANDLED'", ",", "max_paths", ",", "max_path_length", ")", "# Get the polarity for the statement", "if", "isinstance", "(", "stmt", ",", "Modification", ")", ":", "target_polarity", "=", "-", "1", "if", "isinstance", "(", "stmt", ",", "RemoveModification", ")", "else", "1", "elif", "isinstance", "(", "stmt", ",", "RegulateActivity", ")", ":", "target_polarity", "=", "1", "if", "stmt", ".", "is_activation", "else", "-", "1", "elif", "isinstance", "(", "stmt", ",", "RegulateAmount", ")", ":", "target_polarity", "=", "-", "1", "if", "isinstance", "(", "stmt", ",", "DecreaseAmount", ")", "else", "1", "elif", "isinstance", "(", "stmt", ",", "Influence", ")", ":", "target_polarity", "=", "-", "1", "if", "stmt", ".", "overall_polarity", "(", ")", "==", "-", "1", "else", "1", "# Get the subject and object (works also for Modifications)", "subj", ",", "obj", "=", "stmt", ".", "agent_list", "(", ")", "# Get a list of monomer patterns matching the subject FIXME Currently", "# this will match rules with the corresponding monomer pattern on it.", "# In future, this statement should (possibly) also match rules in which", "# 1) the agent is in its active form, or 2) the agent is tagged as the", "# enzyme in a rule of the appropriate activity (e.g., a phosphorylation", "# rule) FIXME", "if", "subj", "is", "not", "None", ":", "subj_mps", "=", "list", "(", "pa", ".", "grounded_monomer_patterns", "(", "self", ".", "model", ",", "subj", ",", "ignore_activities", "=", "True", ")", ")", "if", "not", "subj_mps", 
":", "logger", ".", "debug", "(", "'No monomers found corresponding to agent %s'", "%", "subj", ")", "return", "PathResult", "(", "False", ",", "'SUBJECT_MONOMERS_NOT_FOUND'", ",", "max_paths", ",", "max_path_length", ")", "else", ":", "subj_mps", "=", "[", "None", "]", "# Observables may not be found for an activation since there may be no", "# rule in the model activating the object, and the object may not have", "# an \"active\" site of the appropriate type", "obs_names", "=", "self", ".", "stmt_to_obs", "[", "stmt", "]", "if", "not", "obs_names", ":", "logger", ".", "debug", "(", "\"No observables for stmt %s, returning False\"", "%", "stmt", ")", "return", "PathResult", "(", "False", ",", "'OBSERVABLES_NOT_FOUND'", ",", "max_paths", ",", "max_path_length", ")", "for", "subj_mp", ",", "obs_name", "in", "itertools", ".", "product", "(", "subj_mps", ",", "obs_names", ")", ":", "# NOTE: Returns on the path found for the first enz_mp/obs combo", "result", "=", "self", ".", "_find_im_paths", "(", "subj_mp", ",", "obs_name", ",", "target_polarity", ",", "max_paths", ",", "max_path_length", ")", "# If a path was found, then we return it; otherwise, that means", "# there was no path for this observable, so we have to try the next", "# one", "if", "result", ".", "path_found", ":", "return", "result", "# If we got here, then there was no path for any observable", "return", "PathResult", "(", "False", ",", "'NO_PATHS_FOUND'", ",", "max_paths", ",", "max_path_length", ")" ]
Check a single Statement against the model. Parameters ---------- stmt : indra.statements.Statement The Statement to check. max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- boolean True if the model satisfies the Statement.
[ "Check", "a", "single", "Statement", "against", "the", "model", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L352-L423
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.score_paths
def score_paths(self, paths, agents_values, loss_of_function=False, sigma=0.15, include_final_node=False): """Return scores associated with a given set of paths. Parameters ---------- paths : list[list[tuple[str, int]]] A list of paths obtained from path finding. Each path is a list of tuples (which are edges in the path), with the first element of the tuple the name of a rule, and the second element its polarity in the path. agents_values : dict[indra.statements.Agent, float] A dictionary of INDRA Agents and their corresponding measured value in a given experimental condition. loss_of_function : Optional[boolean] If True, flip the polarity of the path. For instance, if the effect of an inhibitory drug is explained, set this to True. Default: False sigma : Optional[float] The estimated standard deviation for the normally distributed measurement error in the observation model used to score paths with respect to data. Default: 0.15 include_final_node : Optional[boolean] Determines whether the final node of the path is included in the score. Default: False """ obs_model = lambda x: scipy.stats.norm(x, sigma) # Build up dict mapping observables to values obs_dict = {} for ag, val in agents_values.items(): obs_list = self.agent_to_obs[ag] if obs_list is not None: for obs in obs_list: obs_dict[obs] = val # For every path... path_scores = [] for path in paths: logger.info('------') logger.info("Scoring path:") logger.info(path) # Look at every node in the path, excluding the final # observable... path_score = 0 last_path_node_index = -1 if include_final_node else -2 for node, sign in path[:last_path_node_index]: # ...and for each node check the sign to see if it matches the # data. 
So the first thing is to look at what's downstream # of the rule # affected_obs is a list of observable names alogn for affected_obs, rule_obs_sign in self.rule_obs_dict[node]: flip_polarity = -1 if loss_of_function else 1 pred_sign = sign * rule_obs_sign * flip_polarity # Check to see if this observable is in the data logger.info('%s %s: effect %s %s' % (node, sign, affected_obs, pred_sign)) measured_val = obs_dict.get(affected_obs) if measured_val: # For negative predictions use CDF (prob that given # measured value, true value lies below 0) if pred_sign <= 0: prob_correct = obs_model(measured_val).logcdf(0) # For positive predictions, use log survival function # (SF = 1 - CDF, i.e., prob that true value is # above 0) else: prob_correct = obs_model(measured_val).logsf(0) logger.info('Actual: %s, Log Probability: %s' % (measured_val, prob_correct)) path_score += prob_correct if not self.rule_obs_dict[node]: logger.info('%s %s' % (node, sign)) prob_correct = obs_model(0).logcdf(0) logger.info('Unmeasured node, Log Probability: %s' % (prob_correct)) path_score += prob_correct # Normalized path #path_score = path_score / len(path) logger.info("Path score: %s" % path_score) path_scores.append(path_score) path_tuples = list(zip(paths, path_scores)) # Sort first by path length sorted_by_length = sorted(path_tuples, key=lambda x: len(x[0])) # Sort by probability; sort in reverse order to large values # (higher probabilities) are ranked higher scored_paths = sorted(sorted_by_length, key=lambda x: x[1], reverse=True) return scored_paths
python
def score_paths(self, paths, agents_values, loss_of_function=False, sigma=0.15, include_final_node=False): """Return scores associated with a given set of paths. Parameters ---------- paths : list[list[tuple[str, int]]] A list of paths obtained from path finding. Each path is a list of tuples (which are edges in the path), with the first element of the tuple the name of a rule, and the second element its polarity in the path. agents_values : dict[indra.statements.Agent, float] A dictionary of INDRA Agents and their corresponding measured value in a given experimental condition. loss_of_function : Optional[boolean] If True, flip the polarity of the path. For instance, if the effect of an inhibitory drug is explained, set this to True. Default: False sigma : Optional[float] The estimated standard deviation for the normally distributed measurement error in the observation model used to score paths with respect to data. Default: 0.15 include_final_node : Optional[boolean] Determines whether the final node of the path is included in the score. Default: False """ obs_model = lambda x: scipy.stats.norm(x, sigma) # Build up dict mapping observables to values obs_dict = {} for ag, val in agents_values.items(): obs_list = self.agent_to_obs[ag] if obs_list is not None: for obs in obs_list: obs_dict[obs] = val # For every path... path_scores = [] for path in paths: logger.info('------') logger.info("Scoring path:") logger.info(path) # Look at every node in the path, excluding the final # observable... path_score = 0 last_path_node_index = -1 if include_final_node else -2 for node, sign in path[:last_path_node_index]: # ...and for each node check the sign to see if it matches the # data. 
So the first thing is to look at what's downstream # of the rule # affected_obs is a list of observable names alogn for affected_obs, rule_obs_sign in self.rule_obs_dict[node]: flip_polarity = -1 if loss_of_function else 1 pred_sign = sign * rule_obs_sign * flip_polarity # Check to see if this observable is in the data logger.info('%s %s: effect %s %s' % (node, sign, affected_obs, pred_sign)) measured_val = obs_dict.get(affected_obs) if measured_val: # For negative predictions use CDF (prob that given # measured value, true value lies below 0) if pred_sign <= 0: prob_correct = obs_model(measured_val).logcdf(0) # For positive predictions, use log survival function # (SF = 1 - CDF, i.e., prob that true value is # above 0) else: prob_correct = obs_model(measured_val).logsf(0) logger.info('Actual: %s, Log Probability: %s' % (measured_val, prob_correct)) path_score += prob_correct if not self.rule_obs_dict[node]: logger.info('%s %s' % (node, sign)) prob_correct = obs_model(0).logcdf(0) logger.info('Unmeasured node, Log Probability: %s' % (prob_correct)) path_score += prob_correct # Normalized path #path_score = path_score / len(path) logger.info("Path score: %s" % path_score) path_scores.append(path_score) path_tuples = list(zip(paths, path_scores)) # Sort first by path length sorted_by_length = sorted(path_tuples, key=lambda x: len(x[0])) # Sort by probability; sort in reverse order to large values # (higher probabilities) are ranked higher scored_paths = sorted(sorted_by_length, key=lambda x: x[1], reverse=True) return scored_paths
[ "def", "score_paths", "(", "self", ",", "paths", ",", "agents_values", ",", "loss_of_function", "=", "False", ",", "sigma", "=", "0.15", ",", "include_final_node", "=", "False", ")", ":", "obs_model", "=", "lambda", "x", ":", "scipy", ".", "stats", ".", "norm", "(", "x", ",", "sigma", ")", "# Build up dict mapping observables to values", "obs_dict", "=", "{", "}", "for", "ag", ",", "val", "in", "agents_values", ".", "items", "(", ")", ":", "obs_list", "=", "self", ".", "agent_to_obs", "[", "ag", "]", "if", "obs_list", "is", "not", "None", ":", "for", "obs", "in", "obs_list", ":", "obs_dict", "[", "obs", "]", "=", "val", "# For every path...", "path_scores", "=", "[", "]", "for", "path", "in", "paths", ":", "logger", ".", "info", "(", "'------'", ")", "logger", ".", "info", "(", "\"Scoring path:\"", ")", "logger", ".", "info", "(", "path", ")", "# Look at every node in the path, excluding the final", "# observable...", "path_score", "=", "0", "last_path_node_index", "=", "-", "1", "if", "include_final_node", "else", "-", "2", "for", "node", ",", "sign", "in", "path", "[", ":", "last_path_node_index", "]", ":", "# ...and for each node check the sign to see if it matches the", "# data. 
So the first thing is to look at what's downstream", "# of the rule", "# affected_obs is a list of observable names alogn", "for", "affected_obs", ",", "rule_obs_sign", "in", "self", ".", "rule_obs_dict", "[", "node", "]", ":", "flip_polarity", "=", "-", "1", "if", "loss_of_function", "else", "1", "pred_sign", "=", "sign", "*", "rule_obs_sign", "*", "flip_polarity", "# Check to see if this observable is in the data", "logger", ".", "info", "(", "'%s %s: effect %s %s'", "%", "(", "node", ",", "sign", ",", "affected_obs", ",", "pred_sign", ")", ")", "measured_val", "=", "obs_dict", ".", "get", "(", "affected_obs", ")", "if", "measured_val", ":", "# For negative predictions use CDF (prob that given", "# measured value, true value lies below 0)", "if", "pred_sign", "<=", "0", ":", "prob_correct", "=", "obs_model", "(", "measured_val", ")", ".", "logcdf", "(", "0", ")", "# For positive predictions, use log survival function", "# (SF = 1 - CDF, i.e., prob that true value is", "# above 0)", "else", ":", "prob_correct", "=", "obs_model", "(", "measured_val", ")", ".", "logsf", "(", "0", ")", "logger", ".", "info", "(", "'Actual: %s, Log Probability: %s'", "%", "(", "measured_val", ",", "prob_correct", ")", ")", "path_score", "+=", "prob_correct", "if", "not", "self", ".", "rule_obs_dict", "[", "node", "]", ":", "logger", ".", "info", "(", "'%s %s'", "%", "(", "node", ",", "sign", ")", ")", "prob_correct", "=", "obs_model", "(", "0", ")", ".", "logcdf", "(", "0", ")", "logger", ".", "info", "(", "'Unmeasured node, Log Probability: %s'", "%", "(", "prob_correct", ")", ")", "path_score", "+=", "prob_correct", "# Normalized path", "#path_score = path_score / len(path)", "logger", ".", "info", "(", "\"Path score: %s\"", "%", "path_score", ")", "path_scores", ".", "append", "(", "path_score", ")", "path_tuples", "=", "list", "(", "zip", "(", "paths", ",", "path_scores", ")", ")", "# Sort first by path length", "sorted_by_length", "=", "sorted", "(", "path_tuples", ",", "key", 
"=", "lambda", "x", ":", "len", "(", "x", "[", "0", "]", ")", ")", "# Sort by probability; sort in reverse order to large values", "# (higher probabilities) are ranked higher", "scored_paths", "=", "sorted", "(", "sorted_by_length", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "return", "scored_paths" ]
Return scores associated with a given set of paths. Parameters ---------- paths : list[list[tuple[str, int]]] A list of paths obtained from path finding. Each path is a list of tuples (which are edges in the path), with the first element of the tuple the name of a rule, and the second element its polarity in the path. agents_values : dict[indra.statements.Agent, float] A dictionary of INDRA Agents and their corresponding measured value in a given experimental condition. loss_of_function : Optional[boolean] If True, flip the polarity of the path. For instance, if the effect of an inhibitory drug is explained, set this to True. Default: False sigma : Optional[float] The estimated standard deviation for the normally distributed measurement error in the observation model used to score paths with respect to data. Default: 0.15 include_final_node : Optional[boolean] Determines whether the final node of the path is included in the score. Default: False
[ "Return", "scores", "associated", "with", "a", "given", "set", "of", "paths", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L642-L728
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.prune_influence_map
def prune_influence_map(self): """Remove edges between rules causing problematic non-transitivity. First, all self-loops are removed. After this initial step, edges are removed between rules when they share *all* child nodes except for each other; that is, they have a mutual relationship with each other and share all of the same children. Note that edges must be removed in batch at the end to prevent edge removal from affecting the lists of rule children during the comparison process. """ im = self.get_im() # First, remove all self-loops logger.info('Removing self loops') edges_to_remove = [] for e in im.edges(): if e[0] == e[1]: logger.info('Removing self loop: %s', e) edges_to_remove.append((e[0], e[1])) # Now remove all the edges to be removed with a single call im.remove_edges_from(edges_to_remove) # Remove parameter nodes from influence map remove_im_params(self.model, im) # Now compare nodes pairwise and look for overlap between child nodes logger.info('Get successorts of each node') succ_dict = {} for node in im.nodes(): succ_dict[node] = set(im.successors(node)) # Sort and then group nodes by number of successors logger.info('Compare combinations of successors') group_key_fun = lambda x: len(succ_dict[x]) nodes_sorted = sorted(im.nodes(), key=group_key_fun) groups = itertools.groupby(nodes_sorted, key=group_key_fun) # Now iterate over each group and then construct combinations # within the group to check for shared sucessors edges_to_remove = [] for gix, group in groups: combos = itertools.combinations(group, 2) for ix, (p1, p2) in enumerate(combos): # Children are identical except for mutual relationship if succ_dict[p1].difference(succ_dict[p2]) == set([p2]) and \ succ_dict[p2].difference(succ_dict[p1]) == set([p1]): for u, v in ((p1, p2), (p2, p1)): edges_to_remove.append((u, v)) logger.debug('Will remove edge (%s, %s)', u, v) logger.info('Removing %d edges from influence map' % len(edges_to_remove)) # Now remove all the edges to be removed with a single 
call im.remove_edges_from(edges_to_remove)
python
def prune_influence_map(self): """Remove edges between rules causing problematic non-transitivity. First, all self-loops are removed. After this initial step, edges are removed between rules when they share *all* child nodes except for each other; that is, they have a mutual relationship with each other and share all of the same children. Note that edges must be removed in batch at the end to prevent edge removal from affecting the lists of rule children during the comparison process. """ im = self.get_im() # First, remove all self-loops logger.info('Removing self loops') edges_to_remove = [] for e in im.edges(): if e[0] == e[1]: logger.info('Removing self loop: %s', e) edges_to_remove.append((e[0], e[1])) # Now remove all the edges to be removed with a single call im.remove_edges_from(edges_to_remove) # Remove parameter nodes from influence map remove_im_params(self.model, im) # Now compare nodes pairwise and look for overlap between child nodes logger.info('Get successorts of each node') succ_dict = {} for node in im.nodes(): succ_dict[node] = set(im.successors(node)) # Sort and then group nodes by number of successors logger.info('Compare combinations of successors') group_key_fun = lambda x: len(succ_dict[x]) nodes_sorted = sorted(im.nodes(), key=group_key_fun) groups = itertools.groupby(nodes_sorted, key=group_key_fun) # Now iterate over each group and then construct combinations # within the group to check for shared sucessors edges_to_remove = [] for gix, group in groups: combos = itertools.combinations(group, 2) for ix, (p1, p2) in enumerate(combos): # Children are identical except for mutual relationship if succ_dict[p1].difference(succ_dict[p2]) == set([p2]) and \ succ_dict[p2].difference(succ_dict[p1]) == set([p1]): for u, v in ((p1, p2), (p2, p1)): edges_to_remove.append((u, v)) logger.debug('Will remove edge (%s, %s)', u, v) logger.info('Removing %d edges from influence map' % len(edges_to_remove)) # Now remove all the edges to be removed with a single 
call im.remove_edges_from(edges_to_remove)
[ "def", "prune_influence_map", "(", "self", ")", ":", "im", "=", "self", ".", "get_im", "(", ")", "# First, remove all self-loops", "logger", ".", "info", "(", "'Removing self loops'", ")", "edges_to_remove", "=", "[", "]", "for", "e", "in", "im", ".", "edges", "(", ")", ":", "if", "e", "[", "0", "]", "==", "e", "[", "1", "]", ":", "logger", ".", "info", "(", "'Removing self loop: %s'", ",", "e", ")", "edges_to_remove", ".", "append", "(", "(", "e", "[", "0", "]", ",", "e", "[", "1", "]", ")", ")", "# Now remove all the edges to be removed with a single call", "im", ".", "remove_edges_from", "(", "edges_to_remove", ")", "# Remove parameter nodes from influence map", "remove_im_params", "(", "self", ".", "model", ",", "im", ")", "# Now compare nodes pairwise and look for overlap between child nodes", "logger", ".", "info", "(", "'Get successorts of each node'", ")", "succ_dict", "=", "{", "}", "for", "node", "in", "im", ".", "nodes", "(", ")", ":", "succ_dict", "[", "node", "]", "=", "set", "(", "im", ".", "successors", "(", "node", ")", ")", "# Sort and then group nodes by number of successors", "logger", ".", "info", "(", "'Compare combinations of successors'", ")", "group_key_fun", "=", "lambda", "x", ":", "len", "(", "succ_dict", "[", "x", "]", ")", "nodes_sorted", "=", "sorted", "(", "im", ".", "nodes", "(", ")", ",", "key", "=", "group_key_fun", ")", "groups", "=", "itertools", ".", "groupby", "(", "nodes_sorted", ",", "key", "=", "group_key_fun", ")", "# Now iterate over each group and then construct combinations", "# within the group to check for shared sucessors", "edges_to_remove", "=", "[", "]", "for", "gix", ",", "group", "in", "groups", ":", "combos", "=", "itertools", ".", "combinations", "(", "group", ",", "2", ")", "for", "ix", ",", "(", "p1", ",", "p2", ")", "in", "enumerate", "(", "combos", ")", ":", "# Children are identical except for mutual relationship", "if", "succ_dict", "[", "p1", "]", ".", "difference", "(", "succ_dict", "[", "p2", "]", 
")", "==", "set", "(", "[", "p2", "]", ")", "and", "succ_dict", "[", "p2", "]", ".", "difference", "(", "succ_dict", "[", "p1", "]", ")", "==", "set", "(", "[", "p1", "]", ")", ":", "for", "u", ",", "v", "in", "(", "(", "p1", ",", "p2", ")", ",", "(", "p2", ",", "p1", ")", ")", ":", "edges_to_remove", ".", "append", "(", "(", "u", ",", "v", ")", ")", "logger", ".", "debug", "(", "'Will remove edge (%s, %s)'", ",", "u", ",", "v", ")", "logger", ".", "info", "(", "'Removing %d edges from influence map'", "%", "len", "(", "edges_to_remove", ")", ")", "# Now remove all the edges to be removed with a single call", "im", ".", "remove_edges_from", "(", "edges_to_remove", ")" ]
Remove edges between rules causing problematic non-transitivity. First, all self-loops are removed. After this initial step, edges are removed between rules when they share *all* child nodes except for each other; that is, they have a mutual relationship with each other and share all of the same children. Note that edges must be removed in batch at the end to prevent edge removal from affecting the lists of rule children during the comparison process.
[ "Remove", "edges", "between", "rules", "causing", "problematic", "non", "-", "transitivity", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L730-L782
train
sorgerlab/indra
indra/explanation/model_checker.py
ModelChecker.prune_influence_map_subj_obj
def prune_influence_map_subj_obj(self): """Prune influence map to include only edges where the object of the upstream rule matches the subject of the downstream rule.""" def get_rule_info(r): result = {} for ann in self.model.annotations: if ann.subject == r: if ann.predicate == 'rule_has_subject': result['subject'] = ann.object elif ann.predicate == 'rule_has_object': result['object'] = ann.object return result im = self.get_im() rules = im.nodes() edges_to_prune = [] for r1, r2 in itertools.permutations(rules, 2): if (r1, r2) not in im.edges(): continue r1_info = get_rule_info(r1) r2_info = get_rule_info(r2) if 'object' not in r1_info or 'subject' not in r2_info: continue if r1_info['object'] != r2_info['subject']: logger.info("Removing edge %s --> %s" % (r1, r2)) edges_to_prune.append((r1, r2)) im.remove_edges_from(edges_to_prune)
python
def prune_influence_map_subj_obj(self): """Prune influence map to include only edges where the object of the upstream rule matches the subject of the downstream rule.""" def get_rule_info(r): result = {} for ann in self.model.annotations: if ann.subject == r: if ann.predicate == 'rule_has_subject': result['subject'] = ann.object elif ann.predicate == 'rule_has_object': result['object'] = ann.object return result im = self.get_im() rules = im.nodes() edges_to_prune = [] for r1, r2 in itertools.permutations(rules, 2): if (r1, r2) not in im.edges(): continue r1_info = get_rule_info(r1) r2_info = get_rule_info(r2) if 'object' not in r1_info or 'subject' not in r2_info: continue if r1_info['object'] != r2_info['subject']: logger.info("Removing edge %s --> %s" % (r1, r2)) edges_to_prune.append((r1, r2)) im.remove_edges_from(edges_to_prune)
[ "def", "prune_influence_map_subj_obj", "(", "self", ")", ":", "def", "get_rule_info", "(", "r", ")", ":", "result", "=", "{", "}", "for", "ann", "in", "self", ".", "model", ".", "annotations", ":", "if", "ann", ".", "subject", "==", "r", ":", "if", "ann", ".", "predicate", "==", "'rule_has_subject'", ":", "result", "[", "'subject'", "]", "=", "ann", ".", "object", "elif", "ann", ".", "predicate", "==", "'rule_has_object'", ":", "result", "[", "'object'", "]", "=", "ann", ".", "object", "return", "result", "im", "=", "self", ".", "get_im", "(", ")", "rules", "=", "im", ".", "nodes", "(", ")", "edges_to_prune", "=", "[", "]", "for", "r1", ",", "r2", "in", "itertools", ".", "permutations", "(", "rules", ",", "2", ")", ":", "if", "(", "r1", ",", "r2", ")", "not", "in", "im", ".", "edges", "(", ")", ":", "continue", "r1_info", "=", "get_rule_info", "(", "r1", ")", "r2_info", "=", "get_rule_info", "(", "r2", ")", "if", "'object'", "not", "in", "r1_info", "or", "'subject'", "not", "in", "r2_info", ":", "continue", "if", "r1_info", "[", "'object'", "]", "!=", "r2_info", "[", "'subject'", "]", ":", "logger", ".", "info", "(", "\"Removing edge %s --> %s\"", "%", "(", "r1", ",", "r2", ")", ")", "edges_to_prune", ".", "append", "(", "(", "r1", ",", "r2", ")", ")", "im", ".", "remove_edges_from", "(", "edges_to_prune", ")" ]
Prune influence map to include only edges where the object of the upstream rule matches the subject of the downstream rule.
[ "Prune", "influence", "map", "to", "include", "only", "edges", "where", "the", "object", "of", "the", "upstream", "rule", "matches", "the", "subject", "of", "the", "downstream", "rule", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L784-L809
train
sorgerlab/indra
indra/tools/reading/util/reporter.py
Reporter.add_section
def add_section(self, section_name): """Create a section of the report, to be headed by section_name Text and images can be added by using the `section` argument of the `add_text` and `add_image` methods. Sections can also be ordered by using the `set_section_order` method. By default, text and images that have no section will be placed after all the sections, in the order they were added. This behavior may be altered using the `sections_first` attribute of the `make_report` method. """ self.section_headings.append(section_name) if section_name in self.sections: raise ValueError("Section %s already exists." % section_name) self.sections[section_name] = [] return
python
def add_section(self, section_name): """Create a section of the report, to be headed by section_name Text and images can be added by using the `section` argument of the `add_text` and `add_image` methods. Sections can also be ordered by using the `set_section_order` method. By default, text and images that have no section will be placed after all the sections, in the order they were added. This behavior may be altered using the `sections_first` attribute of the `make_report` method. """ self.section_headings.append(section_name) if section_name in self.sections: raise ValueError("Section %s already exists." % section_name) self.sections[section_name] = [] return
[ "def", "add_section", "(", "self", ",", "section_name", ")", ":", "self", ".", "section_headings", ".", "append", "(", "section_name", ")", "if", "section_name", "in", "self", ".", "sections", ":", "raise", "ValueError", "(", "\"Section %s already exists.\"", "%", "section_name", ")", "self", ".", "sections", "[", "section_name", "]", "=", "[", "]", "return" ]
Create a section of the report, to be headed by section_name Text and images can be added by using the `section` argument of the `add_text` and `add_image` methods. Sections can also be ordered by using the `set_section_order` method. By default, text and images that have no section will be placed after all the sections, in the order they were added. This behavior may be altered using the `sections_first` attribute of the `make_report` method.
[ "Create", "a", "section", "of", "the", "report", "to", "be", "headed", "by", "section_name" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L31-L47
train
sorgerlab/indra
indra/tools/reading/util/reporter.py
Reporter.set_section_order
def set_section_order(self, section_name_list): """Set the order of the sections, which are by default unorderd. Any unlisted sections that exist will be placed at the end of the document in no particular order. """ self.section_headings = section_name_list[:] for section_name in self.sections.keys(): if section_name not in section_name_list: self.section_headings.append(section_name) return
python
def set_section_order(self, section_name_list): """Set the order of the sections, which are by default unorderd. Any unlisted sections that exist will be placed at the end of the document in no particular order. """ self.section_headings = section_name_list[:] for section_name in self.sections.keys(): if section_name not in section_name_list: self.section_headings.append(section_name) return
[ "def", "set_section_order", "(", "self", ",", "section_name_list", ")", ":", "self", ".", "section_headings", "=", "section_name_list", "[", ":", "]", "for", "section_name", "in", "self", ".", "sections", ".", "keys", "(", ")", ":", "if", "section_name", "not", "in", "section_name_list", ":", "self", ".", "section_headings", ".", "append", "(", "section_name", ")", "return" ]
Set the order of the sections, which are by default unorderd. Any unlisted sections that exist will be placed at the end of the document in no particular order.
[ "Set", "the", "order", "of", "the", "sections", "which", "are", "by", "default", "unorderd", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L49-L59
train
sorgerlab/indra
indra/tools/reading/util/reporter.py
Reporter.add_text
def add_text(self, text, *args, **kwargs): """Add text to the document. Text is shown on the final document in the order it is added, either within the given section or as part of the un-sectioned list of content. Parameters ---------- text : str The text to be added. style : str Choose the style of the text. Options include 'Normal', 'Code', 'Title', 'h1'. For others, see `getSampleStyleSheet` from `reportlab.lib.styles`. space : tuple (num spaces, font size) The number and size of spaces to follow this section of text. Default is (1, 12). fontsize : int The integer font size of the text (e.g. 12 for 12 point font). Default is 12. alignment : str The alignment of the text. Options include 'left', 'right', and 'center'. Default is 'left'. section : str (This must be a keyword) Select a section in which to place this text. Default is None, in which case the text will be simply be added to a default list of text and images. """ # Pull down some kwargs. section_name = kwargs.pop('section', None) # Actually do the formatting. para, sp = self._preformat_text(text, *args, **kwargs) # Select the appropriate list to update if section_name is None: relevant_list = self.story else: relevant_list = self.sections[section_name] # Add the new content to list. relevant_list.append(para) relevant_list.append(sp) return
python
def add_text(self, text, *args, **kwargs): """Add text to the document. Text is shown on the final document in the order it is added, either within the given section or as part of the un-sectioned list of content. Parameters ---------- text : str The text to be added. style : str Choose the style of the text. Options include 'Normal', 'Code', 'Title', 'h1'. For others, see `getSampleStyleSheet` from `reportlab.lib.styles`. space : tuple (num spaces, font size) The number and size of spaces to follow this section of text. Default is (1, 12). fontsize : int The integer font size of the text (e.g. 12 for 12 point font). Default is 12. alignment : str The alignment of the text. Options include 'left', 'right', and 'center'. Default is 'left'. section : str (This must be a keyword) Select a section in which to place this text. Default is None, in which case the text will be simply be added to a default list of text and images. """ # Pull down some kwargs. section_name = kwargs.pop('section', None) # Actually do the formatting. para, sp = self._preformat_text(text, *args, **kwargs) # Select the appropriate list to update if section_name is None: relevant_list = self.story else: relevant_list = self.sections[section_name] # Add the new content to list. relevant_list.append(para) relevant_list.append(sp) return
[ "def", "add_text", "(", "self", ",", "text", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Pull down some kwargs.", "section_name", "=", "kwargs", ".", "pop", "(", "'section'", ",", "None", ")", "# Actually do the formatting.", "para", ",", "sp", "=", "self", ".", "_preformat_text", "(", "text", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Select the appropriate list to update", "if", "section_name", "is", "None", ":", "relevant_list", "=", "self", ".", "story", "else", ":", "relevant_list", "=", "self", ".", "sections", "[", "section_name", "]", "# Add the new content to list.", "relevant_list", ".", "append", "(", "para", ")", "relevant_list", ".", "append", "(", "sp", ")", "return" ]
Add text to the document. Text is shown on the final document in the order it is added, either within the given section or as part of the un-sectioned list of content. Parameters ---------- text : str The text to be added. style : str Choose the style of the text. Options include 'Normal', 'Code', 'Title', 'h1'. For others, see `getSampleStyleSheet` from `reportlab.lib.styles`. space : tuple (num spaces, font size) The number and size of spaces to follow this section of text. Default is (1, 12). fontsize : int The integer font size of the text (e.g. 12 for 12 point font). Default is 12. alignment : str The alignment of the text. Options include 'left', 'right', and 'center'. Default is 'left'. section : str (This must be a keyword) Select a section in which to place this text. Default is None, in which case the text will be simply be added to a default list of text and images.
[ "Add", "text", "to", "the", "document", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L61-L104
train
sorgerlab/indra
indra/tools/reading/util/reporter.py
Reporter.add_image
def add_image(self, image_path, width=None, height=None, section=None): """Add an image to the document. Images are shown on the final document in the order they are added, either within the given section or as part of the un-sectioned list of content. Parameters ---------- image_path : str A path to the image on the local file system. width : int or float The width of the image in the document in inches. height : int or float The height of the image in the document in incehs. section : str (This must be a keyword) Select a section in which to place this image. Default is None, in which case the image will be simply be added to a default list of text and images. """ if width is not None: width = width*inch if height is not None: height = height*inch im = Image(image_path, width, height) if section is None: self.story.append(im) else: self.sections[section].append(im) return
python
def add_image(self, image_path, width=None, height=None, section=None): """Add an image to the document. Images are shown on the final document in the order they are added, either within the given section or as part of the un-sectioned list of content. Parameters ---------- image_path : str A path to the image on the local file system. width : int or float The width of the image in the document in inches. height : int or float The height of the image in the document in incehs. section : str (This must be a keyword) Select a section in which to place this image. Default is None, in which case the image will be simply be added to a default list of text and images. """ if width is not None: width = width*inch if height is not None: height = height*inch im = Image(image_path, width, height) if section is None: self.story.append(im) else: self.sections[section].append(im) return
[ "def", "add_image", "(", "self", ",", "image_path", ",", "width", "=", "None", ",", "height", "=", "None", ",", "section", "=", "None", ")", ":", "if", "width", "is", "not", "None", ":", "width", "=", "width", "*", "inch", "if", "height", "is", "not", "None", ":", "height", "=", "height", "*", "inch", "im", "=", "Image", "(", "image_path", ",", "width", ",", "height", ")", "if", "section", "is", "None", ":", "self", ".", "story", ".", "append", "(", "im", ")", "else", ":", "self", ".", "sections", "[", "section", "]", ".", "append", "(", "im", ")", "return" ]
Add an image to the document. Images are shown on the final document in the order they are added, either within the given section or as part of the un-sectioned list of content. Parameters ---------- image_path : str A path to the image on the local file system. width : int or float The width of the image in the document in inches. height : int or float The height of the image in the document in incehs. section : str (This must be a keyword) Select a section in which to place this image. Default is None, in which case the image will be simply be added to a default list of text and images.
[ "Add", "an", "image", "to", "the", "document", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L106-L135
train
sorgerlab/indra
indra/tools/reading/util/reporter.py
Reporter.make_report
def make_report(self, sections_first=True, section_header_params=None): """Create the pdf document with name `self.name + '.pdf'`. Parameters ---------- sections_first : bool If True (default), text and images with sections are presented first and un-sectioned content is appended afterword. If False, sectioned text and images will be placed before the sections. section_header_params : dict or None Optionally overwrite/extend the default formatting for the section headers. Default is None. """ full_story = list(self._preformat_text(self.title, style='Title', fontsize=18, alignment='center')) # Set the default section header parameters if section_header_params is None: section_header_params = {'style': 'h1', 'fontsize': 14, 'alignment': 'center'} # Merge the sections and the rest of the story. if sections_first: full_story += self._make_sections(**section_header_params) full_story += self.story else: full_story += self.story full_story += self._make_sections(**section_header_params) fname = self.name + '.pdf' doc = SimpleDocTemplate(fname, pagesize=letter, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18) doc.build(full_story) return fname
python
def make_report(self, sections_first=True, section_header_params=None): """Create the pdf document with name `self.name + '.pdf'`. Parameters ---------- sections_first : bool If True (default), text and images with sections are presented first and un-sectioned content is appended afterword. If False, sectioned text and images will be placed before the sections. section_header_params : dict or None Optionally overwrite/extend the default formatting for the section headers. Default is None. """ full_story = list(self._preformat_text(self.title, style='Title', fontsize=18, alignment='center')) # Set the default section header parameters if section_header_params is None: section_header_params = {'style': 'h1', 'fontsize': 14, 'alignment': 'center'} # Merge the sections and the rest of the story. if sections_first: full_story += self._make_sections(**section_header_params) full_story += self.story else: full_story += self.story full_story += self._make_sections(**section_header_params) fname = self.name + '.pdf' doc = SimpleDocTemplate(fname, pagesize=letter, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18) doc.build(full_story) return fname
[ "def", "make_report", "(", "self", ",", "sections_first", "=", "True", ",", "section_header_params", "=", "None", ")", ":", "full_story", "=", "list", "(", "self", ".", "_preformat_text", "(", "self", ".", "title", ",", "style", "=", "'Title'", ",", "fontsize", "=", "18", ",", "alignment", "=", "'center'", ")", ")", "# Set the default section header parameters", "if", "section_header_params", "is", "None", ":", "section_header_params", "=", "{", "'style'", ":", "'h1'", ",", "'fontsize'", ":", "14", ",", "'alignment'", ":", "'center'", "}", "# Merge the sections and the rest of the story.", "if", "sections_first", ":", "full_story", "+=", "self", ".", "_make_sections", "(", "*", "*", "section_header_params", ")", "full_story", "+=", "self", ".", "story", "else", ":", "full_story", "+=", "self", ".", "story", "full_story", "+=", "self", ".", "_make_sections", "(", "*", "*", "section_header_params", ")", "fname", "=", "self", ".", "name", "+", "'.pdf'", "doc", "=", "SimpleDocTemplate", "(", "fname", ",", "pagesize", "=", "letter", ",", "rightMargin", "=", "72", ",", "leftMargin", "=", "72", ",", "topMargin", "=", "72", ",", "bottomMargin", "=", "18", ")", "doc", ".", "build", "(", "full_story", ")", "return", "fname" ]
Create the pdf document with name `self.name + '.pdf'`. Parameters ---------- sections_first : bool If True (default), text and images with sections are presented first and un-sectioned content is appended afterword. If False, sectioned text and images will be placed before the sections. section_header_params : dict or None Optionally overwrite/extend the default formatting for the section headers. Default is None.
[ "Create", "the", "pdf", "document", "with", "name", "self", ".", "name", "+", ".", "pdf", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L137-L171
train
sorgerlab/indra
indra/tools/reading/util/reporter.py
Reporter._make_sections
def _make_sections(self, **section_hdr_params): """Flatten the sections into a single story list.""" sect_story = [] if not self.section_headings and len(self.sections): self.section_headings = self.sections.keys() for section_name in self.section_headings: section_story = self.sections[section_name] line = '-'*20 section_head_text = '%s %s %s' % (line, section_name, line) title, title_sp = self._preformat_text(section_head_text, **section_hdr_params) sect_story += [title, title_sp] + section_story return sect_story
python
def _make_sections(self, **section_hdr_params): """Flatten the sections into a single story list.""" sect_story = [] if not self.section_headings and len(self.sections): self.section_headings = self.sections.keys() for section_name in self.section_headings: section_story = self.sections[section_name] line = '-'*20 section_head_text = '%s %s %s' % (line, section_name, line) title, title_sp = self._preformat_text(section_head_text, **section_hdr_params) sect_story += [title, title_sp] + section_story return sect_story
[ "def", "_make_sections", "(", "self", ",", "*", "*", "section_hdr_params", ")", ":", "sect_story", "=", "[", "]", "if", "not", "self", ".", "section_headings", "and", "len", "(", "self", ".", "sections", ")", ":", "self", ".", "section_headings", "=", "self", ".", "sections", ".", "keys", "(", ")", "for", "section_name", "in", "self", ".", "section_headings", ":", "section_story", "=", "self", ".", "sections", "[", "section_name", "]", "line", "=", "'-'", "*", "20", "section_head_text", "=", "'%s %s %s'", "%", "(", "line", ",", "section_name", ",", "line", ")", "title", ",", "title_sp", "=", "self", ".", "_preformat_text", "(", "section_head_text", ",", "*", "*", "section_hdr_params", ")", "sect_story", "+=", "[", "title", ",", "title_sp", "]", "+", "section_story", "return", "sect_story" ]
Flatten the sections into a single story list.
[ "Flatten", "the", "sections", "into", "a", "single", "story", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L173-L186
train
sorgerlab/indra
indra/tools/reading/util/reporter.py
Reporter._preformat_text
def _preformat_text(self, text, style='Normal', space=None, fontsize=12, alignment='left'): """Format the text for addition to a story list.""" if space is None: space=(1,12) ptext = ('<para alignment=\"%s\"><font size=%d>%s</font></para>' % (alignment, fontsize, text)) para = Paragraph(ptext, self.styles[style]) sp = Spacer(*space) return para, sp
python
def _preformat_text(self, text, style='Normal', space=None, fontsize=12, alignment='left'): """Format the text for addition to a story list.""" if space is None: space=(1,12) ptext = ('<para alignment=\"%s\"><font size=%d>%s</font></para>' % (alignment, fontsize, text)) para = Paragraph(ptext, self.styles[style]) sp = Spacer(*space) return para, sp
[ "def", "_preformat_text", "(", "self", ",", "text", ",", "style", "=", "'Normal'", ",", "space", "=", "None", ",", "fontsize", "=", "12", ",", "alignment", "=", "'left'", ")", ":", "if", "space", "is", "None", ":", "space", "=", "(", "1", ",", "12", ")", "ptext", "=", "(", "'<para alignment=\\\"%s\\\"><font size=%d>%s</font></para>'", "%", "(", "alignment", ",", "fontsize", ",", "text", ")", ")", "para", "=", "Paragraph", "(", "ptext", ",", "self", ".", "styles", "[", "style", "]", ")", "sp", "=", "Spacer", "(", "*", "space", ")", "return", "para", ",", "sp" ]
Format the text for addition to a story list.
[ "Format", "the", "text", "for", "addition", "to", "a", "story", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L188-L197
train
sorgerlab/indra
indra/databases/mesh_client.py
get_mesh_name_from_web
def get_mesh_name_from_web(mesh_id): """Get the MESH label for the given MESH ID using the NLM REST API. Parameters ---------- mesh_id : str MESH Identifier, e.g. 'D003094'. Returns ------- str Label for the MESH ID, or None if the query failed or no label was found. """ url = MESH_URL + mesh_id + '.json' resp = requests.get(url) if resp.status_code != 200: return None mesh_json = resp.json() try: label = mesh_json['@graph'][0]['label']['@value'] except (KeyError, IndexError) as e: return None return label
python
def get_mesh_name_from_web(mesh_id): """Get the MESH label for the given MESH ID using the NLM REST API. Parameters ---------- mesh_id : str MESH Identifier, e.g. 'D003094'. Returns ------- str Label for the MESH ID, or None if the query failed or no label was found. """ url = MESH_URL + mesh_id + '.json' resp = requests.get(url) if resp.status_code != 200: return None mesh_json = resp.json() try: label = mesh_json['@graph'][0]['label']['@value'] except (KeyError, IndexError) as e: return None return label
[ "def", "get_mesh_name_from_web", "(", "mesh_id", ")", ":", "url", "=", "MESH_URL", "+", "mesh_id", "+", "'.json'", "resp", "=", "requests", ".", "get", "(", "url", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "return", "None", "mesh_json", "=", "resp", ".", "json", "(", ")", "try", ":", "label", "=", "mesh_json", "[", "'@graph'", "]", "[", "0", "]", "[", "'label'", "]", "[", "'@value'", "]", "except", "(", "KeyError", ",", "IndexError", ")", "as", "e", ":", "return", "None", "return", "label" ]
Get the MESH label for the given MESH ID using the NLM REST API. Parameters ---------- mesh_id : str MESH Identifier, e.g. 'D003094'. Returns ------- str Label for the MESH ID, or None if the query failed or no label was found.
[ "Get", "the", "MESH", "label", "for", "the", "given", "MESH", "ID", "using", "the", "NLM", "REST", "API", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/mesh_client.py#L28-L51
train
sorgerlab/indra
indra/databases/mesh_client.py
get_mesh_name
def get_mesh_name(mesh_id, offline=False): """Get the MESH label for the given MESH ID. Uses the mappings table in `indra/resources`; if the MESH ID is not listed there, falls back on the NLM REST API. Parameters ---------- mesh_id : str MESH Identifier, e.g. 'D003094'. offline : bool Whether to allow queries to the NLM REST API if the given MESH ID is not contained in INDRA's internal MESH mappings file. Default is False (allows REST API queries). Returns ------- str Label for the MESH ID, or None if the query failed or no label was found. """ indra_mesh_mapping = mesh_id_to_name.get(mesh_id) if offline or indra_mesh_mapping is not None: return indra_mesh_mapping # Look up the MESH mapping from NLM if we don't have it locally return get_mesh_name_from_web(mesh_id)
python
def get_mesh_name(mesh_id, offline=False): """Get the MESH label for the given MESH ID. Uses the mappings table in `indra/resources`; if the MESH ID is not listed there, falls back on the NLM REST API. Parameters ---------- mesh_id : str MESH Identifier, e.g. 'D003094'. offline : bool Whether to allow queries to the NLM REST API if the given MESH ID is not contained in INDRA's internal MESH mappings file. Default is False (allows REST API queries). Returns ------- str Label for the MESH ID, or None if the query failed or no label was found. """ indra_mesh_mapping = mesh_id_to_name.get(mesh_id) if offline or indra_mesh_mapping is not None: return indra_mesh_mapping # Look up the MESH mapping from NLM if we don't have it locally return get_mesh_name_from_web(mesh_id)
[ "def", "get_mesh_name", "(", "mesh_id", ",", "offline", "=", "False", ")", ":", "indra_mesh_mapping", "=", "mesh_id_to_name", ".", "get", "(", "mesh_id", ")", "if", "offline", "or", "indra_mesh_mapping", "is", "not", "None", ":", "return", "indra_mesh_mapping", "# Look up the MESH mapping from NLM if we don't have it locally", "return", "get_mesh_name_from_web", "(", "mesh_id", ")" ]
Get the MESH label for the given MESH ID. Uses the mappings table in `indra/resources`; if the MESH ID is not listed there, falls back on the NLM REST API. Parameters ---------- mesh_id : str MESH Identifier, e.g. 'D003094'. offline : bool Whether to allow queries to the NLM REST API if the given MESH ID is not contained in INDRA's internal MESH mappings file. Default is False (allows REST API queries). Returns ------- str Label for the MESH ID, or None if the query failed or no label was found.
[ "Get", "the", "MESH", "label", "for", "the", "given", "MESH", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/mesh_client.py#L54-L79
train
sorgerlab/indra
indra/databases/mesh_client.py
get_mesh_id_name
def get_mesh_id_name(mesh_term, offline=False): """Get the MESH ID and name for the given MESH term. Uses the mappings table in `indra/resources`; if the MESH term is not listed there, falls back on the NLM REST API. Parameters ---------- mesh_term : str MESH Descriptor or Concept name, e.g. 'Breast Cancer'. offline : bool Whether to allow queries to the NLM REST API if the given MESH term is not contained in INDRA's internal MESH mappings file. Default is False (allows REST API queries). Returns ------- tuple of strs Returns a 2-tuple of the form `(id, name)` with the ID of the descriptor corresponding to the MESH label, and the descriptor name (which may not exactly match the name provided as an argument if it is a Concept name). If the query failed, or no descriptor corresponding to the name was found, returns a tuple of (None, None). """ indra_mesh_id = mesh_name_to_id.get(mesh_term) if indra_mesh_id is not None: return indra_mesh_id, mesh_term indra_mesh_id, new_term = \ mesh_name_to_id_name.get(mesh_term, (None, None)) if indra_mesh_id is not None: return indra_mesh_id, new_term if offline: return None, None # Look up the MESH mapping from NLM if we don't have it locally return get_mesh_id_name_from_web(mesh_term)
python
def get_mesh_id_name(mesh_term, offline=False): """Get the MESH ID and name for the given MESH term. Uses the mappings table in `indra/resources`; if the MESH term is not listed there, falls back on the NLM REST API. Parameters ---------- mesh_term : str MESH Descriptor or Concept name, e.g. 'Breast Cancer'. offline : bool Whether to allow queries to the NLM REST API if the given MESH term is not contained in INDRA's internal MESH mappings file. Default is False (allows REST API queries). Returns ------- tuple of strs Returns a 2-tuple of the form `(id, name)` with the ID of the descriptor corresponding to the MESH label, and the descriptor name (which may not exactly match the name provided as an argument if it is a Concept name). If the query failed, or no descriptor corresponding to the name was found, returns a tuple of (None, None). """ indra_mesh_id = mesh_name_to_id.get(mesh_term) if indra_mesh_id is not None: return indra_mesh_id, mesh_term indra_mesh_id, new_term = \ mesh_name_to_id_name.get(mesh_term, (None, None)) if indra_mesh_id is not None: return indra_mesh_id, new_term if offline: return None, None # Look up the MESH mapping from NLM if we don't have it locally return get_mesh_id_name_from_web(mesh_term)
[ "def", "get_mesh_id_name", "(", "mesh_term", ",", "offline", "=", "False", ")", ":", "indra_mesh_id", "=", "mesh_name_to_id", ".", "get", "(", "mesh_term", ")", "if", "indra_mesh_id", "is", "not", "None", ":", "return", "indra_mesh_id", ",", "mesh_term", "indra_mesh_id", ",", "new_term", "=", "mesh_name_to_id_name", ".", "get", "(", "mesh_term", ",", "(", "None", ",", "None", ")", ")", "if", "indra_mesh_id", "is", "not", "None", ":", "return", "indra_mesh_id", ",", "new_term", "if", "offline", ":", "return", "None", ",", "None", "# Look up the MESH mapping from NLM if we don't have it locally", "return", "get_mesh_id_name_from_web", "(", "mesh_term", ")" ]
Get the MESH ID and name for the given MESH term. Uses the mappings table in `indra/resources`; if the MESH term is not listed there, falls back on the NLM REST API. Parameters ---------- mesh_term : str MESH Descriptor or Concept name, e.g. 'Breast Cancer'. offline : bool Whether to allow queries to the NLM REST API if the given MESH term is not contained in INDRA's internal MESH mappings file. Default is False (allows REST API queries). Returns ------- tuple of strs Returns a 2-tuple of the form `(id, name)` with the ID of the descriptor corresponding to the MESH label, and the descriptor name (which may not exactly match the name provided as an argument if it is a Concept name). If the query failed, or no descriptor corresponding to the name was found, returns a tuple of (None, None).
[ "Get", "the", "MESH", "ID", "and", "name", "for", "the", "given", "MESH", "term", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/mesh_client.py#L82-L119
train
sorgerlab/indra
indra/tools/machine/cli.py
make
def make(directory): """Makes a RAS Machine directory""" if os.path.exists(directory): if os.path.isdir(directory): click.echo('Directory already exists') else: click.echo('Path exists and is not a directory') sys.exit() os.makedirs(directory) os.mkdir(os.path.join(directory, 'jsons')) copy_default_config(os.path.join(directory, 'config.yaml'))
python
def make(directory): """Makes a RAS Machine directory""" if os.path.exists(directory): if os.path.isdir(directory): click.echo('Directory already exists') else: click.echo('Path exists and is not a directory') sys.exit() os.makedirs(directory) os.mkdir(os.path.join(directory, 'jsons')) copy_default_config(os.path.join(directory, 'config.yaml'))
[ "def", "make", "(", "directory", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "click", ".", "echo", "(", "'Directory already exists'", ")", "else", ":", "click", ".", "echo", "(", "'Path exists and is not a directory'", ")", "sys", ".", "exit", "(", ")", "os", ".", "makedirs", "(", "directory", ")", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'jsons'", ")", ")", "copy_default_config", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'config.yaml'", ")", ")" ]
Makes a RAS Machine directory
[ "Makes", "a", "RAS", "Machine", "directory" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/machine/cli.py#L30-L42
train
sorgerlab/indra
indra/tools/machine/cli.py
run_with_search
def run_with_search(model_path, config, num_days): """Run with PubMed search for new papers.""" from indra.tools.machine.machine import run_with_search_helper run_with_search_helper(model_path, config, num_days=num_days)
python
def run_with_search(model_path, config, num_days): """Run with PubMed search for new papers.""" from indra.tools.machine.machine import run_with_search_helper run_with_search_helper(model_path, config, num_days=num_days)
[ "def", "run_with_search", "(", "model_path", ",", "config", ",", "num_days", ")", ":", "from", "indra", ".", "tools", ".", "machine", ".", "machine", "import", "run_with_search_helper", "run_with_search_helper", "(", "model_path", ",", "config", ",", "num_days", "=", "num_days", ")" ]
Run with PubMed search for new papers.
[ "Run", "with", "PubMed", "search", "for", "new", "papers", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/machine/cli.py#L50-L53
train
sorgerlab/indra
indra/tools/machine/cli.py
run_with_pmids
def run_with_pmids(model_path, pmids): """Run with given list of PMIDs.""" from indra.tools.machine.machine import run_with_pmids_helper run_with_pmids_helper(model_path, pmids)
python
def run_with_pmids(model_path, pmids): """Run with given list of PMIDs.""" from indra.tools.machine.machine import run_with_pmids_helper run_with_pmids_helper(model_path, pmids)
[ "def", "run_with_pmids", "(", "model_path", ",", "pmids", ")", ":", "from", "indra", ".", "tools", ".", "machine", ".", "machine", "import", "run_with_pmids_helper", "run_with_pmids_helper", "(", "model_path", ",", "pmids", ")" ]
Run with given list of PMIDs.
[ "Run", "with", "given", "list", "of", "PMIDs", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/machine/cli.py#L68-L71
train
sorgerlab/indra
indra/literature/pmc_client.py
id_lookup
def id_lookup(paper_id, idtype=None): """This function takes a Pubmed ID, Pubmed Central ID, or DOI and use the Pubmed ID mapping service and looks up all other IDs from one of these. The IDs are returned in a dictionary.""" if idtype is not None and idtype not in ('pmid', 'pmcid', 'doi'): raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', " "or 'doi'." % idtype) if paper_id.upper().startswith('PMC'): idtype = 'pmcid' # Strip off any prefix if paper_id.upper().startswith('PMID'): paper_id = paper_id[4:] elif paper_id.upper().startswith('DOI'): paper_id = paper_id[3:] data = {'ids': paper_id} if idtype is not None: data['idtype'] = idtype try: tree = pubmed_client.send_request(pmid_convert_url, data) except Exception as e: logger.error('Error looking up PMID in PMC: %s' % e) return {} if tree is None: return {} record = tree.find('record') if record is None: return {} doi = record.attrib.get('doi') pmid = record.attrib.get('pmid') pmcid = record.attrib.get('pmcid') ids = {'doi': doi, 'pmid': pmid, 'pmcid': pmcid} return ids
python
def id_lookup(paper_id, idtype=None): """This function takes a Pubmed ID, Pubmed Central ID, or DOI and use the Pubmed ID mapping service and looks up all other IDs from one of these. The IDs are returned in a dictionary.""" if idtype is not None and idtype not in ('pmid', 'pmcid', 'doi'): raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', " "or 'doi'." % idtype) if paper_id.upper().startswith('PMC'): idtype = 'pmcid' # Strip off any prefix if paper_id.upper().startswith('PMID'): paper_id = paper_id[4:] elif paper_id.upper().startswith('DOI'): paper_id = paper_id[3:] data = {'ids': paper_id} if idtype is not None: data['idtype'] = idtype try: tree = pubmed_client.send_request(pmid_convert_url, data) except Exception as e: logger.error('Error looking up PMID in PMC: %s' % e) return {} if tree is None: return {} record = tree.find('record') if record is None: return {} doi = record.attrib.get('doi') pmid = record.attrib.get('pmid') pmcid = record.attrib.get('pmcid') ids = {'doi': doi, 'pmid': pmid, 'pmcid': pmcid} return ids
[ "def", "id_lookup", "(", "paper_id", ",", "idtype", "=", "None", ")", ":", "if", "idtype", "is", "not", "None", "and", "idtype", "not", "in", "(", "'pmid'", ",", "'pmcid'", ",", "'doi'", ")", ":", "raise", "ValueError", "(", "\"Invalid idtype %s; must be 'pmid', 'pmcid', \"", "\"or 'doi'.\"", "%", "idtype", ")", "if", "paper_id", ".", "upper", "(", ")", ".", "startswith", "(", "'PMC'", ")", ":", "idtype", "=", "'pmcid'", "# Strip off any prefix", "if", "paper_id", ".", "upper", "(", ")", ".", "startswith", "(", "'PMID'", ")", ":", "paper_id", "=", "paper_id", "[", "4", ":", "]", "elif", "paper_id", ".", "upper", "(", ")", ".", "startswith", "(", "'DOI'", ")", ":", "paper_id", "=", "paper_id", "[", "3", ":", "]", "data", "=", "{", "'ids'", ":", "paper_id", "}", "if", "idtype", "is", "not", "None", ":", "data", "[", "'idtype'", "]", "=", "idtype", "try", ":", "tree", "=", "pubmed_client", ".", "send_request", "(", "pmid_convert_url", ",", "data", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "'Error looking up PMID in PMC: %s'", "%", "e", ")", "return", "{", "}", "if", "tree", "is", "None", ":", "return", "{", "}", "record", "=", "tree", ".", "find", "(", "'record'", ")", "if", "record", "is", "None", ":", "return", "{", "}", "doi", "=", "record", ".", "attrib", ".", "get", "(", "'doi'", ")", "pmid", "=", "record", ".", "attrib", ".", "get", "(", "'pmid'", ")", "pmcid", "=", "record", ".", "attrib", ".", "get", "(", "'pmcid'", ")", "ids", "=", "{", "'doi'", ":", "doi", ",", "'pmid'", ":", "pmid", ",", "'pmcid'", ":", "pmcid", "}", "return", "ids" ]
This function takes a Pubmed ID, Pubmed Central ID, or DOI and use the Pubmed ID mapping service and looks up all other IDs from one of these. The IDs are returned in a dictionary.
[ "This", "function", "takes", "a", "Pubmed", "ID", "Pubmed", "Central", "ID", "or", "DOI", "and", "use", "the", "Pubmed", "ID", "mapping", "service", "and", "looks", "up", "all", "other", "IDs", "from", "one", "of", "these", ".", "The", "IDs", "are", "returned", "in", "a", "dictionary", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pmc_client.py#L40-L74
train
sorgerlab/indra
indra/literature/pmc_client.py
get_xml
def get_xml(pmc_id): """Returns XML for the article corresponding to a PMC ID.""" if pmc_id.upper().startswith('PMC'): pmc_id = pmc_id[3:] # Request params params = {} params['verb'] = 'GetRecord' params['identifier'] = 'oai:pubmedcentral.nih.gov:%s' % pmc_id params['metadataPrefix'] = 'pmc' # Submit the request res = requests.get(pmc_url, params) if not res.status_code == 200: logger.warning("Couldn't download %s" % pmc_id) return None # Read the bytestream xml_bytes = res.content # Check for any XML errors; xml_str should still be bytes tree = ET.XML(xml_bytes, parser=UTB()) xmlns = "http://www.openarchives.org/OAI/2.0/" err_tag = tree.find('{%s}error' % xmlns) if err_tag is not None: err_code = err_tag.attrib['code'] err_text = err_tag.text logger.warning('PMC client returned with error %s: %s' % (err_code, err_text)) return None # If no error, return the XML as a unicode string else: return xml_bytes.decode('utf-8')
python
def get_xml(pmc_id): """Returns XML for the article corresponding to a PMC ID.""" if pmc_id.upper().startswith('PMC'): pmc_id = pmc_id[3:] # Request params params = {} params['verb'] = 'GetRecord' params['identifier'] = 'oai:pubmedcentral.nih.gov:%s' % pmc_id params['metadataPrefix'] = 'pmc' # Submit the request res = requests.get(pmc_url, params) if not res.status_code == 200: logger.warning("Couldn't download %s" % pmc_id) return None # Read the bytestream xml_bytes = res.content # Check for any XML errors; xml_str should still be bytes tree = ET.XML(xml_bytes, parser=UTB()) xmlns = "http://www.openarchives.org/OAI/2.0/" err_tag = tree.find('{%s}error' % xmlns) if err_tag is not None: err_code = err_tag.attrib['code'] err_text = err_tag.text logger.warning('PMC client returned with error %s: %s' % (err_code, err_text)) return None # If no error, return the XML as a unicode string else: return xml_bytes.decode('utf-8')
[ "def", "get_xml", "(", "pmc_id", ")", ":", "if", "pmc_id", ".", "upper", "(", ")", ".", "startswith", "(", "'PMC'", ")", ":", "pmc_id", "=", "pmc_id", "[", "3", ":", "]", "# Request params", "params", "=", "{", "}", "params", "[", "'verb'", "]", "=", "'GetRecord'", "params", "[", "'identifier'", "]", "=", "'oai:pubmedcentral.nih.gov:%s'", "%", "pmc_id", "params", "[", "'metadataPrefix'", "]", "=", "'pmc'", "# Submit the request", "res", "=", "requests", ".", "get", "(", "pmc_url", ",", "params", ")", "if", "not", "res", ".", "status_code", "==", "200", ":", "logger", ".", "warning", "(", "\"Couldn't download %s\"", "%", "pmc_id", ")", "return", "None", "# Read the bytestream", "xml_bytes", "=", "res", ".", "content", "# Check for any XML errors; xml_str should still be bytes", "tree", "=", "ET", ".", "XML", "(", "xml_bytes", ",", "parser", "=", "UTB", "(", ")", ")", "xmlns", "=", "\"http://www.openarchives.org/OAI/2.0/\"", "err_tag", "=", "tree", ".", "find", "(", "'{%s}error'", "%", "xmlns", ")", "if", "err_tag", "is", "not", "None", ":", "err_code", "=", "err_tag", ".", "attrib", "[", "'code'", "]", "err_text", "=", "err_tag", ".", "text", "logger", ".", "warning", "(", "'PMC client returned with error %s: %s'", "%", "(", "err_code", ",", "err_text", ")", ")", "return", "None", "# If no error, return the XML as a unicode string", "else", ":", "return", "xml_bytes", ".", "decode", "(", "'utf-8'", ")" ]
Returns XML for the article corresponding to a PMC ID.
[ "Returns", "XML", "for", "the", "article", "corresponding", "to", "a", "PMC", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pmc_client.py#L81-L109
train
sorgerlab/indra
indra/literature/pmc_client.py
extract_paragraphs
def extract_paragraphs(xml_string): """Returns list of paragraphs in an NLM XML. Parameters ---------- xml_string : str String containing valid NLM XML. Returns ------- list of str List of extracted paragraphs in an NLM XML """ tree = etree.fromstring(xml_string.encode('utf-8')) paragraphs = [] # In NLM xml, all plaintext is within <p> tags, and is the only thing # that can be contained in <p> tags. To handle to possibility of namespaces # uses regex to search for tags either of the form 'p' or '{<namespace>}p' for element in tree.iter(): if isinstance(element.tag, basestring) and \ re.search('(^|})[p|title]$', element.tag) and element.text: paragraph = ' '.join(element.itertext()) paragraphs.append(paragraph) return paragraphs
python
def extract_paragraphs(xml_string): """Returns list of paragraphs in an NLM XML. Parameters ---------- xml_string : str String containing valid NLM XML. Returns ------- list of str List of extracted paragraphs in an NLM XML """ tree = etree.fromstring(xml_string.encode('utf-8')) paragraphs = [] # In NLM xml, all plaintext is within <p> tags, and is the only thing # that can be contained in <p> tags. To handle to possibility of namespaces # uses regex to search for tags either of the form 'p' or '{<namespace>}p' for element in tree.iter(): if isinstance(element.tag, basestring) and \ re.search('(^|})[p|title]$', element.tag) and element.text: paragraph = ' '.join(element.itertext()) paragraphs.append(paragraph) return paragraphs
[ "def", "extract_paragraphs", "(", "xml_string", ")", ":", "tree", "=", "etree", ".", "fromstring", "(", "xml_string", ".", "encode", "(", "'utf-8'", ")", ")", "paragraphs", "=", "[", "]", "# In NLM xml, all plaintext is within <p> tags, and is the only thing", "# that can be contained in <p> tags. To handle to possibility of namespaces", "# uses regex to search for tags either of the form 'p' or '{<namespace>}p'", "for", "element", "in", "tree", ".", "iter", "(", ")", ":", "if", "isinstance", "(", "element", ".", "tag", ",", "basestring", ")", "and", "re", ".", "search", "(", "'(^|})[p|title]$'", ",", "element", ".", "tag", ")", "and", "element", ".", "text", ":", "paragraph", "=", "' '", ".", "join", "(", "element", ".", "itertext", "(", ")", ")", "paragraphs", ".", "append", "(", "paragraph", ")", "return", "paragraphs" ]
Returns list of paragraphs in an NLM XML. Parameters ---------- xml_string : str String containing valid NLM XML. Returns ------- list of str List of extracted paragraphs in an NLM XML
[ "Returns", "list", "of", "paragraphs", "in", "an", "NLM", "XML", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pmc_client.py#L132-L156
train