Dataset schema (column name, feature type, and observed value range or class count):

column            type            range / classes
----------------  --------------  ---------------
repo              stringlengths   7 - 55
path              stringlengths   4 - 127
func_name         stringlengths   1 - 88
original_string   stringlengths   75 - 19.8k
language          stringclasses   1 value
code              stringlengths   75 - 19.8k
code_tokens       list
docstring         stringlengths   3 - 17.3k
docstring_tokens  list
sha               stringlengths   40 - 40
url               stringlengths   87 - 242
partition         stringclasses   1 value
sorgerlab/indra
indra/tools/incremental_model.py
IncrementalModel.save
def save(self, model_fname='model.pkl'):
    """Save the state of the IncrementalModel in a pickle file.

    Parameters
    ----------
    model_fname : Optional[str]
        The name of the pickle file to save the state of the
        IncrementalModel in. Default: model.pkl
    """
    with open(model_fname, 'wb') as fh:
        pickle.dump(self.stmts, fh, protocol=4)
python
def save(self, model_fname='model.pkl'):
    """Save the state of the IncrementalModel in a pickle file.

    Parameters
    ----------
    model_fname : Optional[str]
        The name of the pickle file to save the state of the
        IncrementalModel in. Default: model.pkl
    """
    with open(model_fname, 'wb') as fh:
        pickle.dump(self.stmts, fh, protocol=4)
[ "def", "save", "(", "self", ",", "model_fname", "=", "'model.pkl'", ")", ":", "with", "open", "(", "model_fname", ",", "'wb'", ")", "as", "fh", ":", "pickle", ".", "dump", "(", "self", ".", "stmts", ",", "fh", ",", "protocol", "=", "4", ")" ]
Save the state of the IncrementalModel in a pickle file.

Parameters
----------
model_fname : Optional[str]
    The name of the pickle file to save the state of the
    IncrementalModel in. Default: model.pkl
[ "Save", "the", "state", "of", "the", "IncrementalModel", "in", "a", "pickle", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/incremental_model.py#L45-L55
train
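A minimal usage sketch for the method above; constructing the model without arguments and the output file name are illustrative assumptions, not part of this record:

from indra.tools.incremental_model import IncrementalModel

im = IncrementalModel()   # assumed: an empty model can be constructed
im.save('model.pkl')      # pickles im.stmts with protocol 4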
sorgerlab/indra
indra/tools/incremental_model.py
IncrementalModel.add_statements
def add_statements(self, pmid, stmts):
    """Add INDRA Statements to the incremental model indexed by PMID.

    Parameters
    ----------
    pmid : str
        The PMID of the paper from which statements were extracted.
    stmts : list[indra.statements.Statement]
        A list of INDRA Statements to be added to the model.
    """
    if pmid not in self.stmts:
        self.stmts[pmid] = stmts
    else:
        self.stmts[pmid] += stmts
python
def add_statements(self, pmid, stmts):
    """Add INDRA Statements to the incremental model indexed by PMID.

    Parameters
    ----------
    pmid : str
        The PMID of the paper from which statements were extracted.
    stmts : list[indra.statements.Statement]
        A list of INDRA Statements to be added to the model.
    """
    if pmid not in self.stmts:
        self.stmts[pmid] = stmts
    else:
        self.stmts[pmid] += stmts
[ "def", "add_statements", "(", "self", ",", "pmid", ",", "stmts", ")", ":", "if", "pmid", "not", "in", "self", ".", "stmts", ":", "self", ".", "stmts", "[", "pmid", "]", "=", "stmts", "else", ":", "self", ".", "stmts", "[", "pmid", "]", "+=", "stmts" ]
Add INDRA Statements to the incremental model indexed by PMID.

Parameters
----------
pmid : str
    The PMID of the paper from which statements were extracted.
stmts : list[indra.statements.Statement]
    A list of INDRA Statements to be added to the model.
[ "Add", "INDRA", "Statements", "to", "the", "incremental", "model", "indexed", "by", "PMID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/incremental_model.py#L57-L70
train
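A sketch of indexing extracted Statements by PMID, continuing the example above; the statement and PMID are hypothetical:

from indra.statements import Agent, Phosphorylation

stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))  # hypothetical
im.add_statements('12345678', [stmt])   # creates the PMID key
im.add_statements('12345678', [stmt])   # extends the existing list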
sorgerlab/indra
indra/tools/incremental_model.py
IncrementalModel.preassemble
def preassemble(self, filters=None, grounding_map=None):
    """Preassemble the Statements collected in the model.

    Use INDRA's GroundingMapper, Preassembler and BeliefEngine
    on the IncrementalModel and save the unique statements and
    the top level statements in class attributes.

    Currently the following filter options are implemented:
    - grounding: require that all Agents in statements are grounded
    - human_only: require that all proteins are human proteins
    - prior_one: require that at least one Agent is in the prior model
    - prior_all: require that all Agents are in the prior model

    Parameters
    ----------
    filters : Optional[list[str]]
        A list of filter options to apply when choosing the statements.
        See description above for more details. Default: None
    grounding_map : Optional[dict]
        A user supplied grounding map which maps a string to a
        dictionary of database IDs (in the format used by Agents'
        db_refs).
    """
    stmts = self.get_statements()
    # Filter out hypotheses
    stmts = ac.filter_no_hypothesis(stmts)
    # Fix grounding
    if grounding_map is not None:
        stmts = ac.map_grounding(stmts, grounding_map=grounding_map)
    else:
        stmts = ac.map_grounding(stmts)
    if filters and ('grounding' in filters):
        stmts = ac.filter_grounded_only(stmts)
    # Fix sites
    stmts = ac.map_sequence(stmts)
    if filters and 'human_only' in filters:
        stmts = ac.filter_human_only(stmts)
    # Run preassembly
    stmts = ac.run_preassembly(stmts, return_toplevel=False)
    # Run relevance filter
    stmts = self._relevance_filter(stmts, filters)
    # Save Statements
    self.assembled_stmts = stmts
python
def preassemble(self, filters=None, grounding_map=None):
    """Preassemble the Statements collected in the model.

    Use INDRA's GroundingMapper, Preassembler and BeliefEngine
    on the IncrementalModel and save the unique statements and
    the top level statements in class attributes.

    Currently the following filter options are implemented:
    - grounding: require that all Agents in statements are grounded
    - human_only: require that all proteins are human proteins
    - prior_one: require that at least one Agent is in the prior model
    - prior_all: require that all Agents are in the prior model

    Parameters
    ----------
    filters : Optional[list[str]]
        A list of filter options to apply when choosing the statements.
        See description above for more details. Default: None
    grounding_map : Optional[dict]
        A user supplied grounding map which maps a string to a
        dictionary of database IDs (in the format used by Agents'
        db_refs).
    """
    stmts = self.get_statements()
    # Filter out hypotheses
    stmts = ac.filter_no_hypothesis(stmts)
    # Fix grounding
    if grounding_map is not None:
        stmts = ac.map_grounding(stmts, grounding_map=grounding_map)
    else:
        stmts = ac.map_grounding(stmts)
    if filters and ('grounding' in filters):
        stmts = ac.filter_grounded_only(stmts)
    # Fix sites
    stmts = ac.map_sequence(stmts)
    if filters and 'human_only' in filters:
        stmts = ac.filter_human_only(stmts)
    # Run preassembly
    stmts = ac.run_preassembly(stmts, return_toplevel=False)
    # Run relevance filter
    stmts = self._relevance_filter(stmts, filters)
    # Save Statements
    self.assembled_stmts = stmts
[ "def", "preassemble", "(", "self", ",", "filters", "=", "None", ",", "grounding_map", "=", "None", ")", ":", "stmts", "=", "self", ".", "get_statements", "(", ")", "# Filter out hypotheses", "stmts", "=", "ac", ".", "filter_no_hypothesis", "(", "stmts", ")", "# Fix grounding", "if", "grounding_map", "is", "not", "None", ":", "stmts", "=", "ac", ".", "map_grounding", "(", "stmts", ",", "grounding_map", "=", "grounding_map", ")", "else", ":", "stmts", "=", "ac", ".", "map_grounding", "(", "stmts", ")", "if", "filters", "and", "(", "'grounding'", "in", "filters", ")", ":", "stmts", "=", "ac", ".", "filter_grounded_only", "(", "stmts", ")", "# Fix sites", "stmts", "=", "ac", ".", "map_sequence", "(", "stmts", ")", "if", "filters", "and", "'human_only'", "in", "filters", ":", "stmts", "=", "ac", ".", "filter_human_only", "(", "stmts", ")", "# Run preassembly", "stmts", "=", "ac", ".", "run_preassembly", "(", "stmts", ",", "return_toplevel", "=", "False", ")", "# Run relevance filter", "stmts", "=", "self", ".", "_relevance_filter", "(", "stmts", ",", "filters", ")", "# Save Statements", "self", ".", "assembled_stmts", "=", "stmts" ]
Preassemble the Statements collected in the model.

Use INDRA's GroundingMapper, Preassembler and BeliefEngine
on the IncrementalModel and save the unique statements and
the top level statements in class attributes.

Currently the following filter options are implemented:
- grounding: require that all Agents in statements are grounded
- human_only: require that all proteins are human proteins
- prior_one: require that at least one Agent is in the prior model
- prior_all: require that all Agents are in the prior model

Parameters
----------
filters : Optional[list[str]]
    A list of filter options to apply when choosing the statements.
    See description above for more details. Default: None
grounding_map : Optional[dict]
    A user supplied grounding map which maps a string to a
    dictionary of database IDs (in the format used by Agents' db_refs).
[ "Preassemble", "the", "Statements", "collected", "in", "the", "model", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/incremental_model.py#L84-L134
train
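A hedged sketch of running preassembly with two of the documented filter options; it assumes the model from the earlier examples already holds statements:

im.preassemble(filters=['grounding', 'human_only'])
assembled = im.assembled_stmts   # unique statements after filtering and preassembly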
sorgerlab/indra
indra/tools/incremental_model.py
IncrementalModel.get_model_agents
def get_model_agents(self):
    """Return a list of all Agents from all Statements.

    Returns
    -------
    agents : list[indra.statements.Agent]
        A list of Agents that are in the model.
    """
    model_stmts = self.get_statements()
    agents = []
    for stmt in model_stmts:
        for a in stmt.agent_list():
            if a is not None:
                agents.append(a)
    return agents
python
def get_model_agents(self):
    """Return a list of all Agents from all Statements.

    Returns
    -------
    agents : list[indra.statements.Agent]
        A list of Agents that are in the model.
    """
    model_stmts = self.get_statements()
    agents = []
    for stmt in model_stmts:
        for a in stmt.agent_list():
            if a is not None:
                agents.append(a)
    return agents
[ "def", "get_model_agents", "(", "self", ")", ":", "model_stmts", "=", "self", ".", "get_statements", "(", ")", "agents", "=", "[", "]", "for", "stmt", "in", "model_stmts", ":", "for", "a", "in", "stmt", ".", "agent_list", "(", ")", ":", "if", "a", "is", "not", "None", ":", "agents", ".", "append", "(", "a", ")", "return", "agents" ]
Return a list of all Agents from all Statements.

Returns
-------
agents : list[indra.statements.Agent]
    A list of Agents that are in the model.
[ "Return", "a", "list", "of", "all", "Agents", "from", "all", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/incremental_model.py#L149-L163
train
sorgerlab/indra
indra/tools/incremental_model.py
IncrementalModel.get_statements
def get_statements(self):
    """Return a list of all Statements in a single list.

    Returns
    -------
    stmts : list[indra.statements.Statement]
        A list of all the INDRA Statements in the model.
    """
    stmt_lists = [v for k, v in self.stmts.items()]
    stmts = []
    for s in stmt_lists:
        stmts += s
    return stmts
python
def get_statements(self):
    """Return a list of all Statements in a single list.

    Returns
    -------
    stmts : list[indra.statements.Statement]
        A list of all the INDRA Statements in the model.
    """
    stmt_lists = [v for k, v in self.stmts.items()]
    stmts = []
    for s in stmt_lists:
        stmts += s
    return stmts
[ "def", "get_statements", "(", "self", ")", ":", "stmt_lists", "=", "[", "v", "for", "k", ",", "v", "in", "self", ".", "stmts", ".", "items", "(", ")", "]", "stmts", "=", "[", "]", "for", "s", "in", "stmt_lists", ":", "stmts", "+=", "s", "return", "stmts" ]
Return a list of all Statements in a single list.

Returns
-------
stmts : list[indra.statements.Statement]
    A list of all the INDRA Statements in the model.
[ "Return", "a", "list", "of", "all", "Statements", "in", "a", "single", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/incremental_model.py#L165-L177
train
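A short sketch of the two accessors above, reusing the model from the earlier examples:

all_stmts = im.get_statements()      # flattens the per-PMID statement lists
all_agents = im.get_model_agents()   # non-None Agents across all statements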
sorgerlab/indra
indra/tools/incremental_model.py
IncrementalModel.get_statements_noprior
def get_statements_noprior(self):
    """Return a list of all non-prior Statements in a single list.

    Returns
    -------
    stmts : list[indra.statements.Statement]
        A list of all the INDRA Statements in the model (excluding
        the prior).
    """
    stmt_lists = [v for k, v in self.stmts.items() if k != 'prior']
    stmts = []
    for s in stmt_lists:
        stmts += s
    return stmts
python
def get_statements_noprior(self):
    """Return a list of all non-prior Statements in a single list.

    Returns
    -------
    stmts : list[indra.statements.Statement]
        A list of all the INDRA Statements in the model (excluding
        the prior).
    """
    stmt_lists = [v for k, v in self.stmts.items() if k != 'prior']
    stmts = []
    for s in stmt_lists:
        stmts += s
    return stmts
[ "def", "get_statements_noprior", "(", "self", ")", ":", "stmt_lists", "=", "[", "v", "for", "k", ",", "v", "in", "self", ".", "stmts", ".", "items", "(", ")", "if", "k", "!=", "'prior'", "]", "stmts", "=", "[", "]", "for", "s", "in", "stmt_lists", ":", "stmts", "+=", "s", "return", "stmts" ]
Return a list of all non-prior Statements in a single list.

Returns
-------
stmts : list[indra.statements.Statement]
    A list of all the INDRA Statements in the model (excluding
    the prior).
[ "Return", "a", "list", "of", "all", "non", "-", "prior", "Statements", "in", "a", "single", "list", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/incremental_model.py#L179-L192
train
sorgerlab/indra
indra/sources/bel/api.py
process_ndex_neighborhood
def process_ndex_neighborhood(gene_names, network_id=None,
                              rdf_out='bel_output.rdf', print_output=True):
    """Return a BelRdfProcessor for an NDEx network neighborhood.

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search the neighborhood of.
        Example: ['BRAF', 'MAP2K1']
    network_id : Optional[str]
        The UUID of the network in NDEx. By default, the BEL Large Corpus
        network is used.
    rdf_out : Optional[str]
        Name of the output file to save the RDF returned by the web
        service. This is useful for debugging purposes or to repeat
        the same query on an offline RDF file later.
        Default: bel_output.rdf

    Returns
    -------
    bp : BelRdfProcessor
        A BelRdfProcessor object which contains INDRA Statements in
        bp.statements.

    Notes
    -----
    This function calls process_belrdf on the RDF string returned by
    the web service.
    """
    logger.warning('This method is deprecated and the results are not '
                   'guaranteed to be correct. Please use '
                   'process_pybel_neighborhood instead.')
    if network_id is None:
        network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb'
    url = ndex_bel2rdf + '/network/%s/asBELRDF/query' % network_id
    params = {'searchString': ' '.join(gene_names)}
    # The ndex_client returns the rdf as the content of a json dict
    res_json = ndex_client.send_request(url, params, is_json=True)
    if not res_json:
        logger.error('No response for NDEx neighborhood query.')
        return None
    if res_json.get('error'):
        error_msg = res_json.get('message')
        logger.error('BEL/RDF response contains error: %s' % error_msg)
        return None
    rdf = res_json.get('content')
    if not rdf:
        logger.error('BEL/RDF response is empty.')
        return None
    with open(rdf_out, 'wb') as fh:
        fh.write(rdf.encode('utf-8'))
    bp = process_belrdf(rdf, print_output=print_output)
    return bp
python
def process_ndex_neighborhood(gene_names, network_id=None,
                              rdf_out='bel_output.rdf', print_output=True):
    """Return a BelRdfProcessor for an NDEx network neighborhood.

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search the neighborhood of.
        Example: ['BRAF', 'MAP2K1']
    network_id : Optional[str]
        The UUID of the network in NDEx. By default, the BEL Large Corpus
        network is used.
    rdf_out : Optional[str]
        Name of the output file to save the RDF returned by the web
        service. This is useful for debugging purposes or to repeat
        the same query on an offline RDF file later.
        Default: bel_output.rdf

    Returns
    -------
    bp : BelRdfProcessor
        A BelRdfProcessor object which contains INDRA Statements in
        bp.statements.

    Notes
    -----
    This function calls process_belrdf on the RDF string returned by
    the web service.
    """
    logger.warning('This method is deprecated and the results are not '
                   'guaranteed to be correct. Please use '
                   'process_pybel_neighborhood instead.')
    if network_id is None:
        network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb'
    url = ndex_bel2rdf + '/network/%s/asBELRDF/query' % network_id
    params = {'searchString': ' '.join(gene_names)}
    # The ndex_client returns the rdf as the content of a json dict
    res_json = ndex_client.send_request(url, params, is_json=True)
    if not res_json:
        logger.error('No response for NDEx neighborhood query.')
        return None
    if res_json.get('error'):
        error_msg = res_json.get('message')
        logger.error('BEL/RDF response contains error: %s' % error_msg)
        return None
    rdf = res_json.get('content')
    if not rdf:
        logger.error('BEL/RDF response is empty.')
        return None
    with open(rdf_out, 'wb') as fh:
        fh.write(rdf.encode('utf-8'))
    bp = process_belrdf(rdf, print_output=print_output)
    return bp
[ "def", "process_ndex_neighborhood", "(", "gene_names", ",", "network_id", "=", "None", ",", "rdf_out", "=", "'bel_output.rdf'", ",", "print_output", "=", "True", ")", ":", "logger", ".", "warning", "(", "'This method is deprecated and the results are not '", "'guaranteed to be correct. Please use '", "'process_pybel_neighborhood instead.'", ")", "if", "network_id", "is", "None", ":", "network_id", "=", "'9ea3c170-01ad-11e5-ac0f-000c29cb28fb'", "url", "=", "ndex_bel2rdf", "+", "'/network/%s/asBELRDF/query'", "%", "network_id", "params", "=", "{", "'searchString'", ":", "' '", ".", "join", "(", "gene_names", ")", "}", "# The ndex_client returns the rdf as the content of a json dict", "res_json", "=", "ndex_client", ".", "send_request", "(", "url", ",", "params", ",", "is_json", "=", "True", ")", "if", "not", "res_json", ":", "logger", ".", "error", "(", "'No response for NDEx neighborhood query.'", ")", "return", "None", "if", "res_json", ".", "get", "(", "'error'", ")", ":", "error_msg", "=", "res_json", ".", "get", "(", "'message'", ")", "logger", ".", "error", "(", "'BEL/RDF response contains error: %s'", "%", "error_msg", ")", "return", "None", "rdf", "=", "res_json", ".", "get", "(", "'content'", ")", "if", "not", "rdf", ":", "logger", ".", "error", "(", "'BEL/RDF response is empty.'", ")", "return", "None", "with", "open", "(", "rdf_out", ",", "'wb'", ")", "as", "fh", ":", "fh", ".", "write", "(", "rdf", ".", "encode", "(", "'utf-8'", ")", ")", "bp", "=", "process_belrdf", "(", "rdf", ",", "print_output", "=", "print_output", ")", "return", "bp" ]
Return a BelRdfProcessor for an NDEx network neighborhood.

Parameters
----------
gene_names : list
    A list of HGNC gene symbols to search the neighborhood of.
    Example: ['BRAF', 'MAP2K1']
network_id : Optional[str]
    The UUID of the network in NDEx. By default, the BEL Large Corpus
    network is used.
rdf_out : Optional[str]
    Name of the output file to save the RDF returned by the web service.
    This is useful for debugging purposes or to repeat the same query
    on an offline RDF file later. Default: bel_output.rdf

Returns
-------
bp : BelRdfProcessor
    A BelRdfProcessor object which contains INDRA Statements in
    bp.statements.

Notes
-----
This function calls process_belrdf on the RDF string returned by the
web service.
[ "Return", "a", "BelRdfProcessor", "for", "an", "NDEx", "network", "neighborhood", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/api.py#L20-L71
train
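A sketch of the neighborhood query; note the function itself logs that it is deprecated in favor of process_pybel_neighborhood, and the import path is inferred from the file path above:

from indra.sources.bel import api as bel_api

bp = bel_api.process_ndex_neighborhood(['BRAF', 'MAP2K1'])
if bp is not None:             # None on any web-service error
    print(len(bp.statements))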
sorgerlab/indra
indra/sources/bel/api.py
process_pybel_neighborhood
def process_pybel_neighborhood(gene_names, network_file=None,
                               network_type='belscript', **kwargs):
    """Return PybelProcessor around neighborhood of given genes in a network.

    This function processes the given network file and filters the returned
    Statements to ones that contain genes in the given list.

    Parameters
    ----------
    network_file : Optional[str]
        Path to the network file to process. If not given, by default, the
        BEL Large Corpus is used.
    network_type : Optional[str]
        This function allows processing both BEL Script files and JSON
        files. This argument controls which type is assumed to be
        processed, and the value can be either 'belscript' or 'json'.
        Default: 'belscript'

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    if network_file is None:
        # Use large corpus as base network
        network_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    os.path.pardir, os.path.pardir,
                                    os.path.pardir, 'data',
                                    'large_corpus.bel')
    if network_type == 'belscript':
        bp = process_belscript(network_file, **kwargs)
    elif network_type == 'json':
        bp = process_json_file(network_file)
    filtered_stmts = []
    for stmt in bp.statements:
        found = False
        for agent in stmt.agent_list():
            if agent is not None:
                if agent.name in gene_names:
                    found = True
        if found:
            filtered_stmts.append(stmt)
    bp.statements = filtered_stmts
    return bp
python
def process_pybel_neighborhood(gene_names, network_file=None,
                               network_type='belscript', **kwargs):
    """Return PybelProcessor around neighborhood of given genes in a network.

    This function processes the given network file and filters the returned
    Statements to ones that contain genes in the given list.

    Parameters
    ----------
    network_file : Optional[str]
        Path to the network file to process. If not given, by default, the
        BEL Large Corpus is used.
    network_type : Optional[str]
        This function allows processing both BEL Script files and JSON
        files. This argument controls which type is assumed to be
        processed, and the value can be either 'belscript' or 'json'.
        Default: 'belscript'

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    if network_file is None:
        # Use large corpus as base network
        network_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    os.path.pardir, os.path.pardir,
                                    os.path.pardir, 'data',
                                    'large_corpus.bel')
    if network_type == 'belscript':
        bp = process_belscript(network_file, **kwargs)
    elif network_type == 'json':
        bp = process_json_file(network_file)
    filtered_stmts = []
    for stmt in bp.statements:
        found = False
        for agent in stmt.agent_list():
            if agent is not None:
                if agent.name in gene_names:
                    found = True
        if found:
            filtered_stmts.append(stmt)
    bp.statements = filtered_stmts
    return bp
[ "def", "process_pybel_neighborhood", "(", "gene_names", ",", "network_file", "=", "None", ",", "network_type", "=", "'belscript'", ",", "*", "*", "kwargs", ")", ":", "if", "network_file", "is", "None", ":", "# Use large corpus as base network", "network_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "os", ".", "path", ".", "pardir", ",", "os", ".", "path", ".", "pardir", ",", "os", ".", "path", ".", "pardir", ",", "'data'", ",", "'large_corpus.bel'", ")", "if", "network_type", "==", "'belscript'", ":", "bp", "=", "process_belscript", "(", "network_file", ",", "*", "*", "kwargs", ")", "elif", "network_type", "==", "'json'", ":", "bp", "=", "process_json_file", "(", "network_file", ")", "filtered_stmts", "=", "[", "]", "for", "stmt", "in", "bp", ".", "statements", ":", "found", "=", "False", "for", "agent", "in", "stmt", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", ":", "if", "agent", ".", "name", "in", "gene_names", ":", "found", "=", "True", "if", "found", ":", "filtered_stmts", ".", "append", "(", "stmt", ")", "bp", ".", "statements", "=", "filtered_stmts", "return", "bp" ]
Return PybelProcessor around neighborhood of given genes in a network.

This function processes the given network file and filters the returned
Statements to ones that contain genes in the given list.

Parameters
----------
network_file : Optional[str]
    Path to the network file to process. If not given, by default, the
    BEL Large Corpus is used.
network_type : Optional[str]
    This function allows processing both BEL Script files and JSON files.
    This argument controls which type is assumed to be processed, and
    the value can be either 'belscript' or 'json'. Default: 'belscript'

Returns
-------
bp : PybelProcessor
    A PybelProcessor object which contains INDRA Statements in
    bp.statements.
[ "Return", "PybelProcessor", "around", "neighborhood", "of", "given", "genes", "in", "a", "network", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/api.py#L74-L119
train
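A sketch of filtering the bundled BEL Large Corpus to a gene neighborhood; it assumes the packaged large_corpus.bel data file is available:

bp = bel_api.process_pybel_neighborhood(['BRAF', 'MAP2K1'])
print(len(bp.statements))   # only statements mentioning the given genes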
sorgerlab/indra
indra/sources/bel/api.py
process_pybel_graph
def process_pybel_graph(graph):
    """Return a PybelProcessor by processing a PyBEL graph.

    Parameters
    ----------
    graph : pybel.struct.BELGraph
        A PyBEL graph to process

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    bp = PybelProcessor(graph)
    bp.get_statements()
    if bp.annot_manager.failures:
        logger.warning('missing %d annotation pairs',
                       sum(len(v)
                           for v in bp.annot_manager.failures.values()))
    return bp
python
def process_pybel_graph(graph):
    """Return a PybelProcessor by processing a PyBEL graph.

    Parameters
    ----------
    graph : pybel.struct.BELGraph
        A PyBEL graph to process

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    bp = PybelProcessor(graph)
    bp.get_statements()
    if bp.annot_manager.failures:
        logger.warning('missing %d annotation pairs',
                       sum(len(v)
                           for v in bp.annot_manager.failures.values()))
    return bp
[ "def", "process_pybel_graph", "(", "graph", ")", ":", "bp", "=", "PybelProcessor", "(", "graph", ")", "bp", ".", "get_statements", "(", ")", "if", "bp", ".", "annot_manager", ".", "failures", ":", "logger", ".", "warning", "(", "'missing %d annotation pairs'", ",", "sum", "(", "len", "(", "v", ")", "for", "v", "in", "bp", ".", "annot_manager", ".", "failures", ".", "values", "(", ")", ")", ")", "return", "bp" ]
Return a PybelProcessor by processing a PyBEL graph.

Parameters
----------
graph : pybel.struct.BELGraph
    A PyBEL graph to process

Returns
-------
bp : PybelProcessor
    A PybelProcessor object which contains INDRA Statements in
    bp.statements.
[ "Return", "a", "PybelProcessor", "by", "processing", "a", "PyBEL", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/api.py#L167-L187
train
sorgerlab/indra
indra/sources/bel/api.py
process_belscript
def process_belscript(file_name, **kwargs):
    """Return a PybelProcessor by processing a BEL script file.

    Keyword arguments are passed directly to pybel.from_path; for further
    information, see pybel.readthedocs.io/en/latest/io.html#pybel.from_path
    Some keyword arguments we use here differ from the defaults of PyBEL,
    namely we set `citation_clearing` to False
    and `no_identifier_validation` to True.

    Parameters
    ----------
    file_name : str
        The path to a BEL script file.

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    if 'citation_clearing' not in kwargs:
        kwargs['citation_clearing'] = False
    if 'no_identifier_validation' not in kwargs:
        kwargs['no_identifier_validation'] = True
    pybel_graph = pybel.from_path(file_name, **kwargs)
    return process_pybel_graph(pybel_graph)
python
def process_belscript(file_name, **kwargs):
    """Return a PybelProcessor by processing a BEL script file.

    Keyword arguments are passed directly to pybel.from_path; for further
    information, see pybel.readthedocs.io/en/latest/io.html#pybel.from_path
    Some keyword arguments we use here differ from the defaults of PyBEL,
    namely we set `citation_clearing` to False
    and `no_identifier_validation` to True.

    Parameters
    ----------
    file_name : str
        The path to a BEL script file.

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    if 'citation_clearing' not in kwargs:
        kwargs['citation_clearing'] = False
    if 'no_identifier_validation' not in kwargs:
        kwargs['no_identifier_validation'] = True
    pybel_graph = pybel.from_path(file_name, **kwargs)
    return process_pybel_graph(pybel_graph)
[ "def", "process_belscript", "(", "file_name", ",", "*", "*", "kwargs", ")", ":", "if", "'citation_clearing'", "not", "in", "kwargs", ":", "kwargs", "[", "'citation_clearing'", "]", "=", "False", "if", "'no_identifier_validation'", "not", "in", "kwargs", ":", "kwargs", "[", "'no_identifier_validation'", "]", "=", "True", "pybel_graph", "=", "pybel", ".", "from_path", "(", "file_name", ",", "*", "*", "kwargs", ")", "return", "process_pybel_graph", "(", "pybel_graph", ")" ]
Return a PybelProcessor by processing a BEL script file.

Keyword arguments are passed directly to pybel.from_path; for further
information, see pybel.readthedocs.io/en/latest/io.html#pybel.from_path
Some keyword arguments we use here differ from the defaults of PyBEL,
namely we set `citation_clearing` to False and
`no_identifier_validation` to True.

Parameters
----------
file_name : str
    The path to a BEL script file.

Returns
-------
bp : PybelProcessor
    A PybelProcessor object which contains INDRA Statements in
    bp.statements.
[ "Return", "a", "PybelProcessor", "by", "processing", "a", "BEL", "script", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/api.py#L190-L216
train
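A sketch with a hypothetical file name, showing the implied defaults and an explicit override:

bp = bel_api.process_belscript('my_network.bel')   # citation_clearing=False implied
bp = bel_api.process_belscript('my_network.bel', citation_clearing=True)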
sorgerlab/indra
indra/sources/bel/api.py
process_json_file
def process_json_file(file_name):
    """Return a PybelProcessor by processing a Node-Link JSON file.

    For more information on this format, see:
    http://pybel.readthedocs.io/en/latest/io.html#node-link-json

    Parameters
    ----------
    file_name : str
        The path to a Node-Link JSON file.

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    with open(file_name, 'rt') as fh:
        pybel_graph = pybel.from_json_file(fh, False)
    return process_pybel_graph(pybel_graph)
python
def process_json_file(file_name):
    """Return a PybelProcessor by processing a Node-Link JSON file.

    For more information on this format, see:
    http://pybel.readthedocs.io/en/latest/io.html#node-link-json

    Parameters
    ----------
    file_name : str
        The path to a Node-Link JSON file.

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    with open(file_name, 'rt') as fh:
        pybel_graph = pybel.from_json_file(fh, False)
    return process_pybel_graph(pybel_graph)
[ "def", "process_json_file", "(", "file_name", ")", ":", "with", "open", "(", "file_name", ",", "'rt'", ")", "as", "fh", ":", "pybel_graph", "=", "pybel", ".", "from_json_file", "(", "fh", ",", "False", ")", "return", "process_pybel_graph", "(", "pybel_graph", ")" ]
Return a PybelProcessor by processing a Node-Link JSON file.

For more information on this format, see:
http://pybel.readthedocs.io/en/latest/io.html#node-link-json

Parameters
----------
file_name : str
    The path to a Node-Link JSON file.

Returns
-------
bp : PybelProcessor
    A PybelProcessor object which contains INDRA Statements in
    bp.statements.
[ "Return", "a", "PybelProcessor", "by", "processing", "a", "Node", "-", "Link", "JSON", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/api.py#L219-L238
train
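The JSON-based loaders follow the same pattern; the file names here are hypothetical:

bp = bel_api.process_json_file('network.json')       # PyBEL Node-Link JSON
bp = bel_api.process_cbn_jgif_file('network.jgif')   # CBN JGIF JSON (next entry)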
sorgerlab/indra
indra/sources/bel/api.py
process_cbn_jgif_file
def process_cbn_jgif_file(file_name):
    """Return a PybelProcessor by processing a CBN JGIF JSON file.

    Parameters
    ----------
    file_name : str
        The path to a CBN JGIF JSON file.

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    with open(file_name, 'r') as jgf:
        return process_pybel_graph(pybel.from_cbn_jgif(json.load(jgf)))
python
def process_cbn_jgif_file(file_name):
    """Return a PybelProcessor by processing a CBN JGIF JSON file.

    Parameters
    ----------
    file_name : str
        The path to a CBN JGIF JSON file.

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        bp.statements.
    """
    with open(file_name, 'r') as jgf:
        return process_pybel_graph(pybel.from_cbn_jgif(json.load(jgf)))
[ "def", "process_cbn_jgif_file", "(", "file_name", ")", ":", "with", "open", "(", "file_name", ",", "'r'", ")", "as", "jgf", ":", "return", "process_pybel_graph", "(", "pybel", ".", "from_cbn_jgif", "(", "json", ".", "load", "(", "jgf", ")", ")", ")" ]
Return a PybelProcessor by processing a CBN JGIF JSON file.

Parameters
----------
file_name : str
    The path to a CBN JGIF JSON file.

Returns
-------
bp : PybelProcessor
    A PybelProcessor object which contains INDRA Statements in
    bp.statements.
[ "Return", "a", "PybelProcessor", "by", "processing", "a", "CBN", "JGIF", "JSON", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/api.py#L241-L256
train
sorgerlab/indra
indra/resources/update_resources.py
update_famplex
def update_famplex():
    """Update all the CSV files that form the FamPlex resource."""
    famplex_url_pattern = \
        'https://raw.githubusercontent.com/sorgerlab/famplex/master/%s.csv'
    csv_names = ['entities', 'equivalences', 'gene_prefixes',
                 'grounding_map', 'relations']
    for csv_name in csv_names:
        url = famplex_url_pattern % csv_name
        save_from_http(url, os.path.join(path, 'famplex/%s.csv' % csv_name))
python
def update_famplex():
    """Update all the CSV files that form the FamPlex resource."""
    famplex_url_pattern = \
        'https://raw.githubusercontent.com/sorgerlab/famplex/master/%s.csv'
    csv_names = ['entities', 'equivalences', 'gene_prefixes',
                 'grounding_map', 'relations']
    for csv_name in csv_names:
        url = famplex_url_pattern % csv_name
        save_from_http(url, os.path.join(path, 'famplex/%s.csv' % csv_name))
[ "def", "update_famplex", "(", ")", ":", "famplex_url_pattern", "=", "'https://raw.githubusercontent.com/sorgerlab/famplex/master/%s.csv'", "csv_names", "=", "[", "'entities'", ",", "'equivalences'", ",", "'gene_prefixes'", ",", "'grounding_map'", ",", "'relations'", "]", "for", "csv_name", "in", "csv_names", ":", "url", "=", "famplex_url_pattern", "%", "csv_name", "save_from_http", "(", "url", ",", "os", ".", "path", ".", "join", "(", "path", ",", "'famplex/%s.csv'", "%", "csv_name", ")", ")" ]
Update all the CSV files that form the FamPlex resource.
[ "Update", "all", "the", "CSV", "files", "that", "form", "the", "FamPlex", "resource", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/resources/update_resources.py#L421-L429
train
sorgerlab/indra
indra/resources/update_resources.py
update_lincs_small_molecules
def update_lincs_small_molecules():
    """Load the csv of LINCS small molecule metadata into a dict.

    Produces a dict keyed by HMS LINCS small molecule ids, with the
    metadata contained in a dict of row values keyed by the column
    headers extracted from the csv.
    """
    url = 'http://lincs.hms.harvard.edu/db/sm/'
    sm_data = load_lincs_csv(url)
    sm_dict = {d['HMS LINCS ID']: d.copy() for d in sm_data}
    assert len(sm_dict) == len(sm_data), "We lost data."
    fname = os.path.join(path, 'lincs_small_molecules.json')
    with open(fname, 'w') as fh:
        json.dump(sm_dict, fh, indent=1)
python
def update_lincs_small_molecules():
    """Load the csv of LINCS small molecule metadata into a dict.

    Produces a dict keyed by HMS LINCS small molecule ids, with the
    metadata contained in a dict of row values keyed by the column
    headers extracted from the csv.
    """
    url = 'http://lincs.hms.harvard.edu/db/sm/'
    sm_data = load_lincs_csv(url)
    sm_dict = {d['HMS LINCS ID']: d.copy() for d in sm_data}
    assert len(sm_dict) == len(sm_data), "We lost data."
    fname = os.path.join(path, 'lincs_small_molecules.json')
    with open(fname, 'w') as fh:
        json.dump(sm_dict, fh, indent=1)
[ "def", "update_lincs_small_molecules", "(", ")", ":", "url", "=", "'http://lincs.hms.harvard.edu/db/sm/'", "sm_data", "=", "load_lincs_csv", "(", "url", ")", "sm_dict", "=", "{", "d", "[", "'HMS LINCS ID'", "]", ":", "d", ".", "copy", "(", ")", "for", "d", "in", "sm_data", "}", "assert", "len", "(", "sm_dict", ")", "==", "len", "(", "sm_data", ")", ",", "\"We lost data.\"", "fname", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'lincs_small_molecules.json'", ")", "with", "open", "(", "fname", ",", "'w'", ")", "as", "fh", ":", "json", ".", "dump", "(", "sm_dict", ",", "fh", ",", "indent", "=", "1", ")" ]
Load the csv of LINCS small molecule metadata into a dict.

Produces a dict keyed by HMS LINCS small molecule ids, with the metadata
contained in a dict of row values keyed by the column headers extracted
from the csv.
[ "Load", "the", "csv", "of", "LINCS", "small", "molecule", "metadata", "into", "a", "dict", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/resources/update_resources.py#L439-L452
train
sorgerlab/indra
indra/resources/update_resources.py
update_lincs_proteins
def update_lincs_proteins():
    """Load the csv of LINCS protein metadata into a dict.

    Produces a dict keyed by HMS LINCS protein ids, with the metadata
    contained in a dict of row values keyed by the column headers
    extracted from the csv.
    """
    url = 'http://lincs.hms.harvard.edu/db/proteins/'
    prot_data = load_lincs_csv(url)
    prot_dict = {d['HMS LINCS ID']: d.copy() for d in prot_data}
    assert len(prot_dict) == len(prot_data), "We lost data."
    fname = os.path.join(path, 'lincs_proteins.json')
    with open(fname, 'w') as fh:
        json.dump(prot_dict, fh, indent=1)
python
def update_lincs_proteins():
    """Load the csv of LINCS protein metadata into a dict.

    Produces a dict keyed by HMS LINCS protein ids, with the metadata
    contained in a dict of row values keyed by the column headers
    extracted from the csv.
    """
    url = 'http://lincs.hms.harvard.edu/db/proteins/'
    prot_data = load_lincs_csv(url)
    prot_dict = {d['HMS LINCS ID']: d.copy() for d in prot_data}
    assert len(prot_dict) == len(prot_data), "We lost data."
    fname = os.path.join(path, 'lincs_proteins.json')
    with open(fname, 'w') as fh:
        json.dump(prot_dict, fh, indent=1)
[ "def", "update_lincs_proteins", "(", ")", ":", "url", "=", "'http://lincs.hms.harvard.edu/db/proteins/'", "prot_data", "=", "load_lincs_csv", "(", "url", ")", "prot_dict", "=", "{", "d", "[", "'HMS LINCS ID'", "]", ":", "d", ".", "copy", "(", ")", "for", "d", "in", "prot_data", "}", "assert", "len", "(", "prot_dict", ")", "==", "len", "(", "prot_data", ")", ",", "\"We lost data.\"", "fname", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'lincs_proteins.json'", ")", "with", "open", "(", "fname", ",", "'w'", ")", "as", "fh", ":", "json", ".", "dump", "(", "prot_dict", ",", "fh", ",", "indent", "=", "1", ")" ]
Load the csv of LINCS protein metadata into a dict.

Produces a dict keyed by HMS LINCS protein ids, with the metadata
contained in a dict of row values keyed by the column headers extracted
from the csv.
[ "Load", "the", "csv", "of", "LINCS", "protein", "metadata", "into", "a", "dict", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/resources/update_resources.py#L455-L468
train
sorgerlab/indra
indra/assemblers/index_card/assembler.py
_get_is_direct
def _get_is_direct(stmt):
    '''Returns true if there is evidence that the statement is a direct
    interaction.

    If any of the evidences associated with the statement
    indicates a direct interaction then we assume the interaction
    is direct. If there is no evidence for the interaction being indirect
    then we default to direct.
    '''
    any_indirect = False
    for ev in stmt.evidence:
        if ev.epistemics.get('direct') is True:
            return True
        elif ev.epistemics.get('direct') is False:
            # This guarantees that we have seen at least
            # some evidence that the statement is indirect
            any_indirect = True
    if any_indirect:
        return False
    return True
python
def _get_is_direct(stmt):
    '''Returns true if there is evidence that the statement is a direct
    interaction.

    If any of the evidences associated with the statement
    indicates a direct interaction then we assume the interaction
    is direct. If there is no evidence for the interaction being indirect
    then we default to direct.
    '''
    any_indirect = False
    for ev in stmt.evidence:
        if ev.epistemics.get('direct') is True:
            return True
        elif ev.epistemics.get('direct') is False:
            # This guarantees that we have seen at least
            # some evidence that the statement is indirect
            any_indirect = True
    if any_indirect:
        return False
    return True
[ "def", "_get_is_direct", "(", "stmt", ")", ":", "any_indirect", "=", "False", "for", "ev", "in", "stmt", ".", "evidence", ":", "if", "ev", ".", "epistemics", ".", "get", "(", "'direct'", ")", "is", "True", ":", "return", "True", "elif", "ev", ".", "epistemics", ".", "get", "(", "'direct'", ")", "is", "False", ":", "# This guarantees that we have seen at least", "# some evidence that the statement is indirect", "any_indirect", "=", "True", "if", "any_indirect", ":", "return", "False", "return", "True" ]
Returns true if there is evidence that the statement is a direct
interaction. If any of the evidences associated with the statement
indicates a direct interaction then we assume the interaction is direct.
If there is no evidence for the interaction being indirect then we
default to direct.
[ "Returns", "true", "if", "there", "is", "evidence", "that", "the", "statement", "is", "a", "direct", "interaction", ".", "If", "any", "of", "the", "evidences", "associated", "with", "the", "statement", "indicates", "a", "direct", "interatcion", "then", "we", "assume", "the", "interaction", "is", "direct", ".", "If", "there", "is", "no", "evidence", "for", "the", "interaction", "being", "indirect", "then", "we", "default", "to", "direct", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/index_card/assembler.py#L418-L434
train
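A minimal sketch of the precedence this helper implements; constructing Evidence with only epistemics is an illustrative assumption:

from indra.statements import Agent, Complex, Evidence

direct_ev = Evidence(epistemics={'direct': True})
indirect_ev = Evidence(epistemics={'direct': False})
stmt = Complex([Agent('A'), Agent('B')], evidence=[indirect_ev, direct_ev])
# Any direct evidence returns True; only indirect evidence returns False;
# no evidence either way defaults to True.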
sorgerlab/indra
indra/assemblers/index_card/assembler.py
IndexCardAssembler.make_model
def make_model(self):
    """Assemble statements into index cards."""
    for stmt in self.statements:
        if isinstance(stmt, Modification):
            card = assemble_modification(stmt)
        elif isinstance(stmt, SelfModification):
            card = assemble_selfmodification(stmt)
        elif isinstance(stmt, Complex):
            card = assemble_complex(stmt)
        elif isinstance(stmt, Translocation):
            card = assemble_translocation(stmt)
        elif isinstance(stmt, RegulateActivity):
            card = assemble_regulate_activity(stmt)
        elif isinstance(stmt, RegulateAmount):
            card = assemble_regulate_amount(stmt)
        else:
            continue
        if card is not None:
            card.card['meta'] = {'id': stmt.uuid, 'belief': stmt.belief}
            if self.pmc_override is not None:
                card.card['pmc_id'] = self.pmc_override
            else:
                card.card['pmc_id'] = get_pmc_id(stmt)
            self.cards.append(card)
python
def make_model(self):
    """Assemble statements into index cards."""
    for stmt in self.statements:
        if isinstance(stmt, Modification):
            card = assemble_modification(stmt)
        elif isinstance(stmt, SelfModification):
            card = assemble_selfmodification(stmt)
        elif isinstance(stmt, Complex):
            card = assemble_complex(stmt)
        elif isinstance(stmt, Translocation):
            card = assemble_translocation(stmt)
        elif isinstance(stmt, RegulateActivity):
            card = assemble_regulate_activity(stmt)
        elif isinstance(stmt, RegulateAmount):
            card = assemble_regulate_amount(stmt)
        else:
            continue
        if card is not None:
            card.card['meta'] = {'id': stmt.uuid, 'belief': stmt.belief}
            if self.pmc_override is not None:
                card.card['pmc_id'] = self.pmc_override
            else:
                card.card['pmc_id'] = get_pmc_id(stmt)
            self.cards.append(card)
[ "def", "make_model", "(", "self", ")", ":", "for", "stmt", "in", "self", ".", "statements", ":", "if", "isinstance", "(", "stmt", ",", "Modification", ")", ":", "card", "=", "assemble_modification", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "SelfModification", ")", ":", "card", "=", "assemble_selfmodification", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "Complex", ")", ":", "card", "=", "assemble_complex", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "Translocation", ")", ":", "card", "=", "assemble_translocation", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "RegulateActivity", ")", ":", "card", "=", "assemble_regulate_activity", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "RegulateAmount", ")", ":", "card", "=", "assemble_regulate_amount", "(", "stmt", ")", "else", ":", "continue", "if", "card", "is", "not", "None", ":", "card", ".", "card", "[", "'meta'", "]", "=", "{", "'id'", ":", "stmt", ".", "uuid", ",", "'belief'", ":", "stmt", ".", "belief", "}", "if", "self", ".", "pmc_override", "is", "not", "None", ":", "card", ".", "card", "[", "'pmc_id'", "]", "=", "self", ".", "pmc_override", "else", ":", "card", ".", "card", "[", "'pmc_id'", "]", "=", "get_pmc_id", "(", "stmt", ")", "self", ".", "cards", ".", "append", "(", "card", ")" ]
Assemble statements into index cards.
[ "Assemble", "statements", "into", "index", "cards", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/index_card/assembler.py#L48-L71
train
sorgerlab/indra
indra/assemblers/index_card/assembler.py
IndexCardAssembler.print_model
def print_model(self):
    """Return the assembled cards as a JSON string.

    Returns
    -------
    cards_json : str
        The JSON string representing the assembled cards.
    """
    cards = [c.card for c in self.cards]
    # If there is only one card, print it as a single
    # card not as a list
    if len(cards) == 1:
        cards = cards[0]
    cards_json = json.dumps(cards, indent=1)
    return cards_json
python
def print_model(self):
    """Return the assembled cards as a JSON string.

    Returns
    -------
    cards_json : str
        The JSON string representing the assembled cards.
    """
    cards = [c.card for c in self.cards]
    # If there is only one card, print it as a single
    # card not as a list
    if len(cards) == 1:
        cards = cards[0]
    cards_json = json.dumps(cards, indent=1)
    return cards_json
[ "def", "print_model", "(", "self", ")", ":", "cards", "=", "[", "c", ".", "card", "for", "c", "in", "self", ".", "cards", "]", "# If there is only one card, print it as a single", "# card not as a list", "if", "len", "(", "cards", ")", "==", "1", ":", "cards", "=", "cards", "[", "0", "]", "cards_json", "=", "json", ".", "dumps", "(", "cards", ",", "indent", "=", "1", ")", "return", "cards_json" ]
Return the assembled cards as a JSON string.

Returns
-------
cards_json : str
    The JSON string representing the assembled cards.
[ "Return", "the", "assembled", "cards", "as", "a", "JSON", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/index_card/assembler.py#L73-L87
train
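A combined sketch of the index card workflow above; passing a statement list to the constructor is an assumption about its signature:

from indra.assemblers.index_card.assembler import IndexCardAssembler

ica = IndexCardAssembler([stmt])   # stmt from an earlier example
ica.make_model()                   # fills ica.cards
print(ica.print_model())           # a single card is emitted unwrapped, not in a list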
sorgerlab/indra
indra/sources/geneways/processor.py
geneways_action_to_indra_statement_type
def geneways_action_to_indra_statement_type(actiontype, plo):
    """Return INDRA Statement corresponding to Geneways action type.

    Parameters
    ----------
    actiontype : str
        The verb extracted by the Geneways processor
    plo : str
        A one character string designating whether Geneways classifies
        this verb as a physical, logical, or other interaction

    Returns
    -------
    statement_generator :
        If there is no mapping to INDRA statements from this action type
        the return value is None.
        If there is such a mapping, statement_generator is an anonymous
        function that takes in the subject agent, object agent, and
        evidence, in that order, and returns an INDRA statement object.
    """
    actiontype = actiontype.lower()

    statement_generator = None
    is_direct = (plo == 'P')

    if actiontype == 'bind':
        statement_generator = lambda substance1, substance2, evidence: \
            Complex([substance1, substance2], evidence=evidence)
        is_direct = True
    elif actiontype == 'phosphorylate':
        statement_generator = lambda substance1, substance2, evidence: \
            Phosphorylation(substance1, substance2, evidence=evidence)
        is_direct = True

    return (statement_generator, is_direct)
python
def geneways_action_to_indra_statement_type(actiontype, plo):
    """Return INDRA Statement corresponding to Geneways action type.

    Parameters
    ----------
    actiontype : str
        The verb extracted by the Geneways processor
    plo : str
        A one character string designating whether Geneways classifies
        this verb as a physical, logical, or other interaction

    Returns
    -------
    statement_generator :
        If there is no mapping to INDRA statements from this action type
        the return value is None.
        If there is such a mapping, statement_generator is an anonymous
        function that takes in the subject agent, object agent, and
        evidence, in that order, and returns an INDRA statement object.
    """
    actiontype = actiontype.lower()

    statement_generator = None
    is_direct = (plo == 'P')

    if actiontype == 'bind':
        statement_generator = lambda substance1, substance2, evidence: \
            Complex([substance1, substance2], evidence=evidence)
        is_direct = True
    elif actiontype == 'phosphorylate':
        statement_generator = lambda substance1, substance2, evidence: \
            Phosphorylation(substance1, substance2, evidence=evidence)
        is_direct = True

    return (statement_generator, is_direct)
[ "def", "geneways_action_to_indra_statement_type", "(", "actiontype", ",", "plo", ")", ":", "actiontype", "=", "actiontype", ".", "lower", "(", ")", "statement_generator", "=", "None", "is_direct", "=", "(", "plo", "==", "'P'", ")", "if", "actiontype", "==", "'bind'", ":", "statement_generator", "=", "lambda", "substance1", ",", "substance2", ",", "evidence", ":", "Complex", "(", "[", "substance1", ",", "substance2", "]", ",", "evidence", "=", "evidence", ")", "is_direct", "=", "True", "elif", "actiontype", "==", "'phosphorylate'", ":", "statement_generator", "=", "lambda", "substance1", ",", "substance2", ",", "evidence", ":", "Phosphorylation", "(", "substance1", ",", "substance2", ",", "evidence", "=", "evidence", ")", "is_direct", "=", "True", "return", "(", "statement_generator", ",", "is_direct", ")" ]
Return INDRA Statement corresponding to Geneways action type.

Parameters
----------
actiontype : str
    The verb extracted by the Geneways processor
plo : str
    A one character string designating whether Geneways classifies
    this verb as a physical, logical, or other interaction

Returns
-------
statement_generator :
    If there is no mapping to INDRA statements from this action type
    the return value is None.
    If there is such a mapping, statement_generator is an anonymous
    function that takes in the subject agent, object agent, and
    evidence, in that order, and returns an INDRA statement object.
[ "Return", "INDRA", "Statement", "corresponding", "to", "Geneways", "action", "type", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/processor.py#L155-L189
train
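A sketch of consuming the returned (generator, is_direct) pair; the agents are hypothetical and evidence is omitted:

from indra.sources.geneways.processor import \
    geneways_action_to_indra_statement_type
from indra.statements import Agent

gen, is_direct = geneways_action_to_indra_statement_type('Bind', 'P')
if gen is not None:
    stmt = gen(Agent('GRB2'), Agent('EGFR'), None)  # builds a Complex
    print(type(stmt).__name__, is_direct)           # Complex True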
sorgerlab/indra
indra/sources/geneways/processor.py
GenewaysProcessor.make_statement
def make_statement(self, action, mention):
    """Makes an INDRA statement from a Geneways action and action mention.

    Parameters
    ----------
    action : GenewaysAction
        The mechanism that the Geneways mention maps to. Note that
        several text mentions can correspond to the same action if they
        are referring to the same relationship - there may be multiple
        Geneways action mentions corresponding to each action.
    mention : GenewaysActionMention
        The Geneways action mention object corresponding to a single
        mention of a mechanism in a specific text. We make a new INDRA
        statement corresponding to each action mention.

    Returns
    -------
    statement : indra.statements.Statement
        An INDRA statement corresponding to the provided Geneways action
        mention, or None if the action mention's type does not map onto
        any INDRA statement type in geneways_action_type_mapper.
    """
    (statement_generator, is_direct) = \
        geneways_action_to_indra_statement_type(mention.actiontype,
                                                action.plo)

    if statement_generator is None:
        # Geneways statement does not map onto an indra statement
        return None

    # Try to find the full-text sentence
    # Unfortunately, the sentence numbers in the Geneways dataset
    # don't correspond to an obvious sentence segmentation.
    # This code looks for sentences with the subject, object, and verb
    # listed by the Geneways action mention table and only includes
    # it in the evidence if there is exactly one such sentence
    text = None
    if self.get_ft_mention:
        try:
            content, content_type = get_full_text(mention.pmid, 'pmid')
            if content is not None:
                ftm = FullTextMention(mention, content)
                sentences = ftm.find_matching_sentences()
                if len(sentences) == 1:
                    text = sentences[0]
        except Exception:
            logger.warning('Could not fetch full text for PMID ' +
                           mention.pmid)

    # Make an evidence object
    epistemics = dict()
    epistemics['direct'] = is_direct
    annotations = mention.make_annotation()
    annotations['plo'] = action.plo  # plo only in action table
    evidence = Evidence(source_api='geneways',
                        source_id=mention.actionmentionid,
                        pmid=mention.pmid, text=text,
                        epistemics=epistemics,
                        annotations=annotations)

    # Construct the grounded and name standardized agents
    # Note that this involves grounding the agent by
    # converting the Entrez ID listed in the Geneways data with
    # HGNC and UniProt
    upstream_agent = get_agent(mention.upstream, action.up)
    downstream_agent = get_agent(mention.downstream, action.dn)

    # Make the statement
    return statement_generator(upstream_agent, downstream_agent, evidence)
python
def make_statement(self, action, mention):
    """Makes an INDRA statement from a Geneways action and action mention.

    Parameters
    ----------
    action : GenewaysAction
        The mechanism that the Geneways mention maps to. Note that
        several text mentions can correspond to the same action if they
        are referring to the same relationship - there may be multiple
        Geneways action mentions corresponding to each action.
    mention : GenewaysActionMention
        The Geneways action mention object corresponding to a single
        mention of a mechanism in a specific text. We make a new INDRA
        statement corresponding to each action mention.

    Returns
    -------
    statement : indra.statements.Statement
        An INDRA statement corresponding to the provided Geneways action
        mention, or None if the action mention's type does not map onto
        any INDRA statement type in geneways_action_type_mapper.
    """
    (statement_generator, is_direct) = \
        geneways_action_to_indra_statement_type(mention.actiontype,
                                                action.plo)

    if statement_generator is None:
        # Geneways statement does not map onto an indra statement
        return None

    # Try to find the full-text sentence
    # Unfortunately, the sentence numbers in the Geneways dataset
    # don't correspond to an obvious sentence segmentation.
    # This code looks for sentences with the subject, object, and verb
    # listed by the Geneways action mention table and only includes
    # it in the evidence if there is exactly one such sentence
    text = None
    if self.get_ft_mention:
        try:
            content, content_type = get_full_text(mention.pmid, 'pmid')
            if content is not None:
                ftm = FullTextMention(mention, content)
                sentences = ftm.find_matching_sentences()
                if len(sentences) == 1:
                    text = sentences[0]
        except Exception:
            logger.warning('Could not fetch full text for PMID ' +
                           mention.pmid)

    # Make an evidence object
    epistemics = dict()
    epistemics['direct'] = is_direct
    annotations = mention.make_annotation()
    annotations['plo'] = action.plo  # plo only in action table
    evidence = Evidence(source_api='geneways',
                        source_id=mention.actionmentionid,
                        pmid=mention.pmid, text=text,
                        epistemics=epistemics,
                        annotations=annotations)

    # Construct the grounded and name standardized agents
    # Note that this involves grounding the agent by
    # converting the Entrez ID listed in the Geneways data with
    # HGNC and UniProt
    upstream_agent = get_agent(mention.upstream, action.up)
    downstream_agent = get_agent(mention.downstream, action.dn)

    # Make the statement
    return statement_generator(upstream_agent, downstream_agent, evidence)
[ "def", "make_statement", "(", "self", ",", "action", ",", "mention", ")", ":", "(", "statement_generator", ",", "is_direct", ")", "=", "geneways_action_to_indra_statement_type", "(", "mention", ".", "actiontype", ",", "action", ".", "plo", ")", "if", "statement_generator", "is", "None", ":", "# Geneways statement does not map onto an indra statement", "return", "None", "# Try to find the full-text sentence", "# Unfortunately, the sentence numbers in the Geneways dataset", "# don't correspond to an obvious sentence segmentation.", "# This code looks for sentences with the subject, object, and verb", "# listed by the Geneways action mention table and only includes", "# it in the evidence if there is exactly one such sentence", "text", "=", "None", "if", "self", ".", "get_ft_mention", ":", "try", ":", "content", ",", "content_type", "=", "get_full_text", "(", "mention", ".", "pmid", ",", "'pmid'", ")", "if", "content", "is", "not", "None", ":", "ftm", "=", "FullTextMention", "(", "mention", ",", "content", ")", "sentences", "=", "ftm", ".", "find_matching_sentences", "(", ")", "if", "len", "(", "sentences", ")", "==", "1", ":", "text", "=", "sentences", "[", "0", "]", "except", "Exception", ":", "logger", ".", "warning", "(", "'Could not fetch full text for PMID '", "+", "mention", ".", "pmid", ")", "# Make an evidence object", "epistemics", "=", "dict", "(", ")", "epistemics", "[", "'direct'", "]", "=", "is_direct", "annotations", "=", "mention", ".", "make_annotation", "(", ")", "annotations", "[", "'plo'", "]", "=", "action", ".", "plo", "# plo only in action table", "evidence", "=", "Evidence", "(", "source_api", "=", "'geneways'", ",", "source_id", "=", "mention", ".", "actionmentionid", ",", "pmid", "=", "mention", ".", "pmid", ",", "text", "=", "text", ",", "epistemics", "=", "epistemics", ",", "annotations", "=", "annotations", ")", "# Construct the grounded and name standardized agents", "# Note that this involves grounding the agent by", "# converting the Entrez ID listed in the Geneways data with", "# HGNC and UniProt", "upstream_agent", "=", "get_agent", "(", "mention", ".", "upstream", ",", "action", ".", "up", ")", "downstream_agent", "=", "get_agent", "(", "mention", ".", "downstream", ",", "action", ".", "dn", ")", "# Make the statement", "return", "statement_generator", "(", "upstream_agent", ",", "downstream_agent", ",", "evidence", ")" ]
Makes an INDRA statement from a Geneways action and action mention. Parameters ---------- action : GenewaysAction The mechanism that the Geneways mention maps to. Note that several text mentions can correspond to the same action if they are referring to the same relationship - there may be multiple Geneways action mentions corresponding to each action. mention : GenewaysActionMention The Geneways action mention object corresponding to a single mention of a mechanism in a specific text. We make a new INDRA statement corresponding to each action mention. Returns ------- statement : indra.statements.Statement An INDRA statement corresponding to the provided Geneways action mention, or None if the action mention's type does not map onto any INDRA statement type in geneways_action_type_mapper.
[ "Makes", "an", "INDRA", "statement", "from", "a", "Geneways", "action", "and", "action", "mention", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/processor.py#L71-L139
train
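The Evidence construction in this record can be exercised on its own. A minimal sketch, using only keyword arguments that appear in the make_statement code above; all field values are illustrative stand-ins, not real Geneways data:

from indra.statements import Evidence

# Mirrors the Evidence call in make_statement; IDs and text are made up.
ev = Evidence(source_api='geneways',
              source_id='12345',
              pmid='9000000',
              text='MEK phosphorylates ERK.',
              epistemics={'direct': True},
              annotations={'plo': 'P'})  # 'plo' is only present in the action table
print(ev.source_api, ev.pmid)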
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.load_from_rdf_file
def load_from_rdf_file(self, rdf_file): """Initialize given an RDF input file representing the hierarchy. Parameters ---------- rdf_file : str Path to an RDF file. """ self.graph = rdflib.Graph() self.graph.parse(os.path.abspath(rdf_file), format='nt') self.initialize()
python
def load_from_rdf_file(self, rdf_file): """Initialize given an RDF input file representing the hierarchy. Parameters ---------- rdf_file : str Path to an RDF file. """ self.graph = rdflib.Graph() self.graph.parse(os.path.abspath(rdf_file), format='nt') self.initialize()
[ "def", "load_from_rdf_file", "(", "self", ",", "rdf_file", ")", ":", "self", ".", "graph", "=", "rdflib", ".", "Graph", "(", ")", "self", ".", "graph", ".", "parse", "(", "os", ".", "path", ".", "abspath", "(", "rdf_file", ")", ",", "format", "=", "'nt'", ")", "self", ".", "initialize", "(", ")" ]
Initialize given an RDF input file representing the hierarchy. Parameters ---------- rdf_file : str Path to an RDF file.
[ "Initialize", "given", "an", "RDF", "input", "file", "representing", "the", "hierarchy", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L62-L72
train
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.load_from_rdf_string
def load_from_rdf_string(self, rdf_str): """Initialize given an RDF string representing the hierarchy. Parameters ---------- rdf_str : str An RDF string. """ self.graph = rdflib.Graph() self.graph.parse(data=rdf_str, format='nt') self.initialize()
python
def load_from_rdf_string(self, rdf_str): """Initialize given an RDF string representing the hierarchy. Parameters ---------- rdf_str : str An RDF string. """ self.graph = rdflib.Graph() self.graph.parse(data=rdf_str, format='nt') self.initialize()
[ "def", "load_from_rdf_string", "(", "self", ",", "rdf_str", ")", ":", "self", ".", "graph", "=", "rdflib", ".", "Graph", "(", ")", "self", ".", "graph", ".", "parse", "(", "data", "=", "rdf_str", ",", "format", "=", "'nt'", ")", "self", ".", "initialize", "(", ")" ]
Initialize given an RDF string representing the hierarchy. Parameters ---------- rdf_str : str An RDF string.
[ "Initialize", "given", "an", "RDF", "string", "representing", "the", "hierarchy", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L74-L84
train
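What the two loaders above do can be reproduced directly with rdflib. A small sketch, assuming a one-triple N-Triples string with hypothetical URIs:

import rdflib

# Hypothetical one-triple hierarchy in N-Triples ('nt') format.
rdf_str = ('<http://example.org/entity/A> '
           '<http://example.org/relations#isa> '
           '<http://example.org/entity/B> .\n')
graph = rdflib.Graph()
graph.parse(data=rdf_str, format='nt')
print(len(graph))  # 1 triple loaded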
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.extend_with
def extend_with(self, rdf_file): """Extend the RDF graph of this HierarchyManager with another RDF file. Parameters ---------- rdf_file : str An RDF file which is parsed such that the current graph and the graph described by the file are merged. """ self.graph.parse(os.path.abspath(rdf_file), format='nt') self.initialize()
python
def extend_with(self, rdf_file): """Extend the RDF graph of this HierarchyManager with another RDF file. Parameters ---------- rdf_file : str An RDF file which is parsed such that the current graph and the graph described by the file are merged. """ self.graph.parse(os.path.abspath(rdf_file), format='nt') self.initialize()
[ "def", "extend_with", "(", "self", ",", "rdf_file", ")", ":", "self", ".", "graph", ".", "parse", "(", "os", ".", "path", ".", "abspath", "(", "rdf_file", ")", ",", "format", "=", "'nt'", ")", "self", ".", "initialize", "(", ")" ]
Extend the RDF graph of this HierarchyManager with another RDF file. Parameters ---------- rdf_file : str An RDF file which is parsed such that the current graph and the graph described by the file are merged.
[ "Extend", "the", "RDF", "graph", "of", "this", "HierarchyManager", "with", "another", "RDF", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L116-L126
train
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.build_transitive_closures
def build_transitive_closures(self): """Build the transitive closures of the hierarchy. This method constructs dictionaries which contain terms in the hierarchy as keys and either all the "isa+" or "partof+" related terms as values. """ self.component_counter = 0 for rel, tc_dict in ((self.isa_objects, self.isa_closure), (self.partof_objects, self.partof_closure), (self.isa_or_partof_objects, self.isa_or_partof_closure)): self.build_transitive_closure(rel, tc_dict)
python
def build_transitive_closures(self): """Build the transitive closures of the hierarchy. This method constructs dictionaries which contain terms in the hierarchy as keys and either all the "isa+" or "partof+" related terms as values. """ self.component_counter = 0 for rel, tc_dict in ((self.isa_objects, self.isa_closure), (self.partof_objects, self.partof_closure), (self.isa_or_partof_objects, self.isa_or_partof_closure)): self.build_transitive_closure(rel, tc_dict)
[ "def", "build_transitive_closures", "(", "self", ")", ":", "self", ".", "component_counter", "=", "0", "for", "rel", ",", "tc_dict", "in", "(", "(", "self", ".", "isa_objects", ",", "self", ".", "isa_closure", ")", ",", "(", "self", ".", "partof_objects", ",", "self", ".", "partof_closure", ")", ",", "(", "self", ".", "isa_or_partof_objects", ",", "self", ".", "isa_or_partof_closure", ")", ")", ":", "self", ".", "build_transitive_closure", "(", "rel", ",", "tc_dict", ")" ]
Build the transitive closures of the hierarchy. This method constructs dictionaries which contain terms in the hierarchy as keys and either all the "isa+" or "partof+" related terms as values.
[ "Build", "the", "transitive", "closures", "of", "the", "hierarchy", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L128-L140
train
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.build_transitive_closure
def build_transitive_closure(self, rel, tc_dict): """Build a transitive closure for a given relation in a given dict.""" # Make a function with the right argument structure rel_fun = lambda node, graph: rel(node) for x in self.graph.all_nodes(): rel_closure = self.graph.transitiveClosure(rel_fun, x) xs = x.toPython() for y in rel_closure: ys = y.toPython() if xs == ys: continue try: tc_dict[xs].append(ys) except KeyError: tc_dict[xs] = [ys] if rel == self.isa_or_partof_objects: self._add_component(xs, ys)
python
def build_transitive_closure(self, rel, tc_dict): """Build a transitive closure for a given relation in a given dict.""" # Make a function with the right argument structure rel_fun = lambda node, graph: rel(node) for x in self.graph.all_nodes(): rel_closure = self.graph.transitiveClosure(rel_fun, x) xs = x.toPython() for y in rel_closure: ys = y.toPython() if xs == ys: continue try: tc_dict[xs].append(ys) except KeyError: tc_dict[xs] = [ys] if rel == self.isa_or_partof_objects: self._add_component(xs, ys)
[ "def", "build_transitive_closure", "(", "self", ",", "rel", ",", "tc_dict", ")", ":", "# Make a function with the righ argument structure", "rel_fun", "=", "lambda", "node", ",", "graph", ":", "rel", "(", "node", ")", "for", "x", "in", "self", ".", "graph", ".", "all_nodes", "(", ")", ":", "rel_closure", "=", "self", ".", "graph", ".", "transitiveClosure", "(", "rel_fun", ",", "x", ")", "xs", "=", "x", ".", "toPython", "(", ")", "for", "y", "in", "rel_closure", ":", "ys", "=", "y", ".", "toPython", "(", ")", "if", "xs", "==", "ys", ":", "continue", "try", ":", "tc_dict", "[", "xs", "]", ".", "append", "(", "ys", ")", "except", "KeyError", ":", "tc_dict", "[", "xs", "]", "=", "[", "ys", "]", "if", "rel", "==", "self", ".", "isa_or_partof_objects", ":", "self", ".", "_add_component", "(", "xs", ",", "ys", ")" ]
Build a transitive closure for a given relation in a given dict.
[ "Build", "a", "transitive", "closure", "for", "a", "given", "relation", "in", "a", "given", "dict", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L142-L158
train
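The closure computed above maps each node to everything reachable through repeated application of the relation. A pure-Python sketch of the same idea, independent of rdflib:

# One-step relation, keyed by node; a toy stand-in for the RDF graph.
isa = {'a': ['b'], 'b': ['c'], 'c': []}

def transitive_closure(rel):
    closure = {}
    for node in rel:
        reachable = []
        stack = list(rel[node])
        while stack:
            cur = stack.pop()
            if cur not in reachable:
                reachable.append(cur)
                stack.extend(rel.get(cur, []))
        if reachable:
            closure[node] = reachable
    return closure

print(transitive_closure(isa))  # {'a': ['b', 'c'], 'b': ['c']}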
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.directly_or_indirectly_related
def directly_or_indirectly_related(self, ns1, id1, ns2, id2, closure_dict, relation_func): """Return True if two entities have the specified relationship. This relation is constructed possibly through multiple links connecting the two entities directly or indirectly. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. closure_dict: dict A dictionary mapping node names to nodes that have the specified relationship, directly or indirectly. Empty if this has not been precomputed. relation_func: function Function with arguments (node, graph) that generates objects with some relationship with node on the given graph. Returns ------- bool True if t1 has the specified relationship with t2, either directly or through a series of intermediates; False otherwise. """ # if id2 is None, or both are None, then it's by definition isa: if id2 is None or (id2 is None and id1 is None): return True # If only id1 is None, then it cannot be isa elif id1 is None: return False if closure_dict: term1 = self.get_uri(ns1, id1) term2 = self.get_uri(ns2, id2) ec = closure_dict.get(term1) if ec is not None and term2 in ec: return True else: return False else: if not self.uri_as_name: e1 = self.find_entity(id1) e2 = self.find_entity(id2) if e1 is None or e2 is None: return False t1 = rdflib.term.URIRef(e1) t2 = rdflib.term.URIRef(e2) else: u1 = self.get_uri(ns1, id1) u2 = self.get_uri(ns2, id2) t1 = rdflib.term.URIRef(u1) t2 = rdflib.term.URIRef(u2) to = self.graph.transitiveClosure(relation_func, t1) if t2 in to: return True else: return False
python
def directly_or_indirectly_related(self, ns1, id1, ns2, id2, closure_dict, relation_func): """Return True if two entities have the specified relationship. This relation is constructed possibly through multiple links connecting the two entities directly or indirectly. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. closure_dict: dict A dictionary mapping node names to nodes that have the specified relationship, directly or indirectly. Empty if this has not been precomputed. relation_func: function Function with arguments (node, graph) that generates objects with some relationship with node on the given graph. Returns ------- bool True if t1 has the specified relationship with t2, either directly or through a series of intermediates; False otherwise. """ # if id2 is None, or both are None, then it's by definition isa: if id2 is None or (id2 is None and id1 is None): return True # If only id1 is None, then it cannot be isa elif id1 is None: return False if closure_dict: term1 = self.get_uri(ns1, id1) term2 = self.get_uri(ns2, id2) ec = closure_dict.get(term1) if ec is not None and term2 in ec: return True else: return False else: if not self.uri_as_name: e1 = self.find_entity(id1) e2 = self.find_entity(id2) if e1 is None or e2 is None: return False t1 = rdflib.term.URIRef(e1) t2 = rdflib.term.URIRef(e2) else: u1 = self.get_uri(ns1, id1) u2 = self.get_uri(ns2, id2) t1 = rdflib.term.URIRef(u1) t2 = rdflib.term.URIRef(u2) to = self.graph.transitiveClosure(relation_func, t1) if t2 in to: return True else: return False
[ "def", "directly_or_indirectly_related", "(", "self", ",", "ns1", ",", "id1", ",", "ns2", ",", "id2", ",", "closure_dict", ",", "relation_func", ")", ":", "# if id2 is None, or both are None, then it's by definition isa:", "if", "id2", "is", "None", "or", "(", "id2", "is", "None", "and", "id1", "is", "None", ")", ":", "return", "True", "# If only id1 is None, then it cannot be isa", "elif", "id1", "is", "None", ":", "return", "False", "if", "closure_dict", ":", "term1", "=", "self", ".", "get_uri", "(", "ns1", ",", "id1", ")", "term2", "=", "self", ".", "get_uri", "(", "ns2", ",", "id2", ")", "ec", "=", "closure_dict", ".", "get", "(", "term1", ")", "if", "ec", "is", "not", "None", "and", "term2", "in", "ec", ":", "return", "True", "else", ":", "return", "False", "else", ":", "if", "not", "self", ".", "uri_as_name", ":", "e1", "=", "self", ".", "find_entity", "(", "id1", ")", "e2", "=", "self", ".", "find_entity", "(", "id2", ")", "if", "e1", "is", "None", "or", "e2", "is", "None", ":", "return", "False", "t1", "=", "rdflib", ".", "term", ".", "URIRef", "(", "e1", ")", "t2", "=", "rdflib", ".", "term", ".", "URIRef", "(", "e2", ")", "else", ":", "u1", "=", "self", ".", "get_uri", "(", "ns1", ",", "id1", ")", "u2", "=", "self", ".", "get_uri", "(", "ns2", ",", "id2", ")", "t1", "=", "rdflib", ".", "term", ".", "URIRef", "(", "u1", ")", "t2", "=", "rdflib", ".", "term", ".", "URIRef", "(", "u2", ")", "to", "=", "self", ".", "graph", ".", "transitiveClosure", "(", "relation_func", ",", "t1", ")", "if", "t2", "in", "to", ":", "return", "True", "else", ":", "return", "False" ]
Return True if two entities have the specified relationship. This relation is constructed possibly through multiple links connecting the two entities directly or indirectly. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. closure_dict: dict A dictionary mapping node names to nodes that have the specified relationship, directly or indirectly. Empty if this has not been precomputed. relation_func: function Function with arguments (node, graph) that generates objects with some relationship with node on the given graph. Returns ------- bool True if t1 has the specified relationship with t2, either directly or through a series of intermediates; False otherwise.
[ "Return", "True", "if", "two", "entities", "have", "the", "speicified", "relationship", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L240-L304
train
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.isa
def isa(self, ns1, id1, ns2, id2): """Return True if one entity has an "isa" relationship to another. Parameters ---------- ns1 : str Namespace code for an entity. id1 : string URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "isa" relationship with t2, either directly or through a series of intermediates; False otherwise. """ rel_fun = lambda node, graph: self.isa_objects(node) return self.directly_or_indirectly_related(ns1, id1, ns2, id2, self.isa_closure, rel_fun)
python
def isa(self, ns1, id1, ns2, id2): """Return True if one entity has an "isa" relationship to another. Parameters ---------- ns1 : str Namespace code for an entity. id1 : string URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "isa" relationship with t2, either directly or through a series of intermediates; False otherwise. """ rel_fun = lambda node, graph: self.isa_objects(node) return self.directly_or_indirectly_related(ns1, id1, ns2, id2, self.isa_closure, rel_fun)
[ "def", "isa", "(", "self", ",", "ns1", ",", "id1", ",", "ns2", ",", "id2", ")", ":", "rel_fun", "=", "lambda", "node", ",", "graph", ":", "self", ".", "isa_objects", "(", "node", ")", "return", "self", ".", "directly_or_indirectly_related", "(", "ns1", ",", "id1", ",", "ns2", ",", "id2", ",", "self", ".", "isa_closure", ",", "rel_fun", ")" ]
Return True if one entity has an "isa" relationship to another. Parameters ---------- ns1 : str Namespace code for an entity. id1 : string URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "isa" relationship with t2, either directly or through a series of intermediates; False otherwise.
[ "Return", "True", "if", "one", "entity", "has", "an", "isa", "relationship", "to", "another", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L306-L329
train
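A hedged usage sketch for the isa check: INDRA conventionally exposes preconstructed managers through a hierarchies dict in this module, and FamPlex defines ERK isa MAPK, but treat both the import and the IDs as assumptions rather than guaranteed API:

from indra.preassembler.hierarchy_manager import hierarchies  # assumed entry point

ent_hierarchy = hierarchies['entity']
# Illustrative namespace/ID pair; expected to print True if the entity
# hierarchy contains the FamPlex relation ERK isa MAPK.
print(ent_hierarchy.isa('FPLX', 'ERK', 'FPLX', 'MAPK'))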
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.partof
def partof(self, ns1, id1, ns2, id2): """Return True if one entity is "partof" another. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has a "partof" relationship with t2, either directly or through a series of intermediates; False otherwise. """ rel_fun = lambda node, graph: self.partof_objects(node) return self.directly_or_indirectly_related(ns1, id1, ns2, id2, self.partof_closure, rel_fun)
python
def partof(self, ns1, id1, ns2, id2): """Return True if one entity is "partof" another. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has a "partof" relationship with t2, either directly or through a series of intermediates; False otherwise. """ rel_fun = lambda node, graph: self.partof_objects(node) return self.directly_or_indirectly_related(ns1, id1, ns2, id2, self.partof_closure, rel_fun)
[ "def", "partof", "(", "self", ",", "ns1", ",", "id1", ",", "ns2", ",", "id2", ")", ":", "rel_fun", "=", "lambda", "node", ",", "graph", ":", "self", ".", "partof_objects", "(", "node", ")", "return", "self", ".", "directly_or_indirectly_related", "(", "ns1", ",", "id1", ",", "ns2", ",", "id2", ",", "self", ".", "partof_closure", ",", "rel_fun", ")" ]
Return True if one entity is "partof" another. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has a "partof" relationship with t2, either directly or through a series of intermediates; False otherwise.
[ "Return", "True", "if", "one", "entity", "is", "partof", "another", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L331-L354
train
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.isa_or_partof
def isa_or_partof(self, ns1, id1, ns2, id2): """Return True if two entities are in an "isa" or "partof" relationship. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "isa" or "partof" relationship with t2, either directly or through a series of intermediates; False otherwise. """ rel_fun = lambda node, graph: self.isa_or_partof_objects(node) return self.directly_or_indirectly_related(ns1, id1, ns2, id2, self.isa_or_partof_closure, rel_fun)
python
def isa_or_partof(self, ns1, id1, ns2, id2): """Return True if two entities are in an "isa" or "partof" relationship. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "isa" or "partof" relationship with t2, either directly or through a series of intermediates; False otherwise. """ rel_fun = lambda node, graph: self.isa_or_partof_objects(node) return self.directly_or_indirectly_related(ns1, id1, ns2, id2, self.isa_or_partof_closure, rel_fun)
[ "def", "isa_or_partof", "(", "self", ",", "ns1", ",", "id1", ",", "ns2", ",", "id2", ")", ":", "rel_fun", "=", "lambda", "node", ",", "graph", ":", "self", ".", "isa_or_partof_objects", "(", "node", ")", "return", "self", ".", "directly_or_indirectly_related", "(", "ns1", ",", "id1", ",", "ns2", ",", "id2", ",", "self", ".", "isa_or_partof_closure", ",", "rel_fun", ")" ]
Return True if two entities are in an "isa" or "partof" relationship Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has a "isa" or "partof" relationship with t2, either directly or through a series of intermediates; False otherwise.
[ "Return", "True", "if", "two", "entities", "are", "in", "an", "isa", "or", "partof", "relationship" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L356-L379
train
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.is_opposite
def is_opposite(self, ns1, id1, ns2, id2): """Return True if two entities are in an "is_opposite" relationship. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "is_opposite" relationship with t2. """ u1 = self.get_uri(ns1, id1) u2 = self.get_uri(ns2, id2) t1 = rdflib.term.URIRef(u1) t2 = rdflib.term.URIRef(u2) rel = rdflib.term.URIRef(self.relations_prefix + 'is_opposite') to = self.graph.objects(t1, rel) if t2 in to: return True return False
python
def is_opposite(self, ns1, id1, ns2, id2): """Return True if two entities are in an "is_opposite" relationship. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "is_opposite" relationship with t2. """ u1 = self.get_uri(ns1, id1) u2 = self.get_uri(ns2, id2) t1 = rdflib.term.URIRef(u1) t2 = rdflib.term.URIRef(u2) rel = rdflib.term.URIRef(self.relations_prefix + 'is_opposite') to = self.graph.objects(t1, rel) if t2 in to: return True return False
[ "def", "is_opposite", "(", "self", ",", "ns1", ",", "id1", ",", "ns2", ",", "id2", ")", ":", "u1", "=", "self", ".", "get_uri", "(", "ns1", ",", "id1", ")", "u2", "=", "self", ".", "get_uri", "(", "ns2", ",", "id2", ")", "t1", "=", "rdflib", ".", "term", ".", "URIRef", "(", "u1", ")", "t2", "=", "rdflib", ".", "term", ".", "URIRef", "(", "u2", ")", "rel", "=", "rdflib", ".", "term", ".", "URIRef", "(", "self", ".", "relations_prefix", "+", "'is_opposite'", ")", "to", "=", "self", ".", "graph", ".", "objects", "(", "t1", ",", "rel", ")", "if", "t2", "in", "to", ":", "return", "True", "return", "False" ]
Return True if two entities are in an "is_opposite" relationship Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has an "is_opposite" relationship with t2.
[ "Return", "True", "if", "two", "entities", "are", "in", "an", "is_opposite", "relationship" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L381-L409
train
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.get_parents
def get_parents(self, uri, type='all'): """Return parents of a given entry. Parameters ---------- uri : str The URI of the entry whose parents are to be returned. See the get_uri method to construct this URI from a name space and id. type : str 'all': return all parents irrespective of level; 'immediate': return only the immediate parents; 'top': return only the highest level parents """ # First do a quick dict lookup to see if there are any parents all_parents = set(self.isa_or_partof_closure.get(uri, [])) # If there are no parents or we are looking for all, we can return here if not all_parents or type == 'all': return all_parents # If we need immediate parents, we search again, this time knowing that # the uri is definitely in the graph since it has some parents if type == 'immediate': node = rdflib.term.URIRef(uri) immediate_parents = list(set(self.isa_or_partof_objects(node))) return [p.toPython() for p in immediate_parents] elif type == 'top': top_parents = [p for p in all_parents if not self.isa_or_partof_closure.get(p)] return top_parents
python
def get_parents(self, uri, type='all'): """Return parents of a given entry. Parameters ---------- uri : str The URI of the entry whose parents are to be returned. See the get_uri method to construct this URI from a name space and id. type : str 'all': return all parents irrespective of level; 'immediate': return only the immediate parents; 'top': return only the highest level parents """ # First do a quick dict lookup to see if there are any parents all_parents = set(self.isa_or_partof_closure.get(uri, [])) # If there are no parents or we are looking for all, we can return here if not all_parents or type == 'all': return all_parents # If we need immediate parents, we search again, this time knowing that # the uri is definitely in the graph since it has some parents if type == 'immediate': node = rdflib.term.URIRef(uri) immediate_parents = list(set(self.isa_or_partof_objects(node))) return [p.toPython() for p in immediate_parents] elif type == 'top': top_parents = [p for p in all_parents if not self.isa_or_partof_closure.get(p)] return top_parents
[ "def", "get_parents", "(", "self", ",", "uri", ",", "type", "=", "'all'", ")", ":", "# First do a quick dict lookup to see if there are any parents", "all_parents", "=", "set", "(", "self", ".", "isa_or_partof_closure", ".", "get", "(", "uri", ",", "[", "]", ")", ")", "# If there are no parents or we are looking for all, we can return here", "if", "not", "all_parents", "or", "type", "==", "'all'", ":", "return", "all_parents", "# If we need immediate parents, we search again, this time knowing that", "# the uri is definitely in the graph since it has some parents", "if", "type", "==", "'immediate'", ":", "node", "=", "rdflib", ".", "term", ".", "URIRef", "(", "uri", ")", "immediate_parents", "=", "list", "(", "set", "(", "self", ".", "isa_or_partof_objects", "(", "node", ")", ")", ")", "return", "[", "p", ".", "toPython", "(", ")", "for", "p", "in", "immediate_parents", "]", "elif", "type", "==", "'top'", ":", "top_parents", "=", "[", "p", "for", "p", "in", "all_parents", "if", "not", "self", ".", "isa_or_partof_closure", ".", "get", "(", "p", ")", "]", "return", "top_parents" ]
Return parents of a given entry. Parameters ---------- uri : str The URI of the entry whose parents are to be returned. See the get_uri method to construct this URI from a name space and id. type : str 'all': return all parents irrespective of level; 'immediate': return only the immediate parents; 'top': return only the highest level parents
[ "Return", "parents", "of", "a", "given", "entry", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L411-L439
train
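Sketch of walking up the hierarchy with get_parents, using get_uri (the key constructor referenced in the docstring) to build the lookup key; the hierarchies import, namespace, and ID are illustrative assumptions:

from indra.preassembler.hierarchy_manager import hierarchies  # assumed entry point

ent_hierarchy = hierarchies['entity']
uri = ent_hierarchy.get_uri('FPLX', 'ERK')  # URI key used by the closure dicts
print(ent_hierarchy.get_parents(uri, type='all'))
print(ent_hierarchy.get_parents(uri, type='immediate'))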
sorgerlab/indra
indra/sources/trips/drum_reader.py
_get_perf
def _get_perf(text, msg_id): """Return a request message for a given text.""" msg = KQMLPerformative('REQUEST') msg.set('receiver', 'READER') content = KQMLList('run-text') content.sets('text', text) msg.set('content', content) msg.set('reply-with', msg_id) return msg
python
def _get_perf(text, msg_id): """Return a request message for a given text.""" msg = KQMLPerformative('REQUEST') msg.set('receiver', 'READER') content = KQMLList('run-text') content.sets('text', text) msg.set('content', content) msg.set('reply-with', msg_id) return msg
[ "def", "_get_perf", "(", "text", ",", "msg_id", ")", ":", "msg", "=", "KQMLPerformative", "(", "'REQUEST'", ")", "msg", ".", "set", "(", "'receiver'", ",", "'READER'", ")", "content", "=", "KQMLList", "(", "'run-text'", ")", "content", ".", "sets", "(", "'text'", ",", "text", ")", "msg", ".", "set", "(", "'content'", ",", "content", ")", "msg", ".", "set", "(", "'reply-with'", ",", "msg_id", ")", "return", "msg" ]
Return a request message for a given text.
[ "Return", "a", "request", "message", "for", "a", "given", "text", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/drum_reader.py#L156-L164
train
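For orientation, the KQML request this helper builds can be constructed and printed directly with pykqml (the package this module's classes come from); the message ID and text are made up:

from kqml import KQMLPerformative, KQMLList

msg = KQMLPerformative('REQUEST')
msg.set('receiver', 'READER')
content = KQMLList('run-text')
content.sets('text', 'MEK phosphorylates ERK.')
msg.set('content', content)
msg.set('reply-with', 'RT0001')
# Prints roughly: (REQUEST :receiver READER :content (run-text :text "...") ...)
print(msg)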
sorgerlab/indra
indra/sources/trips/drum_reader.py
DrumReader.read_pmc
def read_pmc(self, pmcid): """Read a given PMC article. Parameters ---------- pmcid : str The PMC ID of the article to read. Note that only articles in the open-access subset of PMC will work. """ msg = KQMLPerformative('REQUEST') msg.set('receiver', 'READER') content = KQMLList('run-pmcid') content.sets('pmcid', pmcid) content.set('reply-when-done', 'true') msg.set('content', content) msg.set('reply-with', 'P-%s' % pmcid) self.reply_counter += 1 self.send(msg)
python
def read_pmc(self, pmcid): """Read a given PMC article. Parameters ---------- pmcid : str The PMC ID of the article to read. Note that only articles in the open-access subset of PMC will work. """ msg = KQMLPerformative('REQUEST') msg.set('receiver', 'READER') content = KQMLList('run-pmcid') content.sets('pmcid', pmcid) content.set('reply-when-done', 'true') msg.set('content', content) msg.set('reply-with', 'P-%s' % pmcid) self.reply_counter += 1 self.send(msg)
[ "def", "read_pmc", "(", "self", ",", "pmcid", ")", ":", "msg", "=", "KQMLPerformative", "(", "'REQUEST'", ")", "msg", ".", "set", "(", "'receiver'", ",", "'READER'", ")", "content", "=", "KQMLList", "(", "'run-pmcid'", ")", "content", ".", "sets", "(", "'pmcid'", ",", "pmcid", ")", "content", ".", "set", "(", "'reply-when-done'", ",", "'true'", ")", "msg", ".", "set", "(", "'content'", ",", "content", ")", "msg", ".", "set", "(", "'reply-with'", ",", "'P-%s'", "%", "pmcid", ")", "self", ".", "reply_counter", "+=", "1", "self", ".", "send", "(", "msg", ")" ]
Read a given PMC article. Parameters ---------- pmcid : str The PMC ID of the article to read. Note that only articles in the open-access subset of PMC will work.
[ "Read", "a", "given", "PMC", "article", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/drum_reader.py#L87-L104
train
sorgerlab/indra
indra/sources/trips/drum_reader.py
DrumReader.read_text
def read_text(self, text): """Read a given text phrase. Parameters ---------- text : str The text to read. Typically a sentence or a paragraph. """ logger.info('Reading: "%s"' % text) msg_id = 'RT000%s' % self.msg_counter kqml_perf = _get_perf(text, msg_id) self.reply_counter += 1 self.msg_counter += 1 self.send(kqml_perf)
python
def read_text(self, text): """Read a given text phrase. Parameters ---------- text : str The text to read. Typically a sentence or a paragraph. """ logger.info('Reading: "%s"' % text) msg_id = 'RT000%s' % self.msg_counter kqml_perf = _get_perf(text, msg_id) self.reply_counter += 1 self.msg_counter += 1 self.send(kqml_perf)
[ "def", "read_text", "(", "self", ",", "text", ")", ":", "logger", ".", "info", "(", "'Reading: \"%s\"'", "%", "text", ")", "msg_id", "=", "'RT000%s'", "%", "self", ".", "msg_counter", "kqml_perf", "=", "_get_perf", "(", "text", ",", "msg_id", ")", "self", ".", "reply_counter", "+=", "1", "self", ".", "msg_counter", "+=", "1", "self", ".", "send", "(", "kqml_perf", ")" ]
Read a given text phrase. Parameters ---------- text : str The text to read. Typically a sentence or a paragraph.
[ "Read", "a", "given", "text", "phrase", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/drum_reader.py#L106-L119
train
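A hedged end-to-end sketch for the reader: the zero-argument constructor and the blocking start() call reflect typical usage of this class, but they assume a TRIPS/DRUM system is already running and reachable locally.

from indra.sources.trips.drum_reader import DrumReader

dr = DrumReader()  # assumes a local DRUM instance is listening
dr.read_text('MEK phosphorylates ERK.')
dr.start()  # blocks until reply_counter reaches zero, then exits
# dr.extractions now holds the EKB XML strings collected by receive_reply.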
sorgerlab/indra
indra/sources/trips/drum_reader.py
DrumReader.receive_reply
def receive_reply(self, msg, content): """Handle replies with reading results.""" reply_head = content.head() if reply_head == 'error': comment = content.gets('comment') logger.error('Got error reply: "%s"' % comment) else: extractions = content.gets('ekb') self.extractions.append(extractions) self.reply_counter -= 1 if self.reply_counter == 0: self.exit(0)
python
def receive_reply(self, msg, content): """Handle replies with reading results.""" reply_head = content.head() if reply_head == 'error': comment = content.gets('comment') logger.error('Got error reply: "%s"' % comment) else: extractions = content.gets('ekb') self.extractions.append(extractions) self.reply_counter -= 1 if self.reply_counter == 0: self.exit(0)
[ "def", "receive_reply", "(", "self", ",", "msg", ",", "content", ")", ":", "reply_head", "=", "content", ".", "head", "(", ")", "if", "reply_head", "==", "'error'", ":", "comment", "=", "content", ".", "gets", "(", "'comment'", ")", "logger", ".", "error", "(", "'Got error reply: \"%s\"'", "%", "comment", ")", "else", ":", "extractions", "=", "content", ".", "gets", "(", "'ekb'", ")", "self", ".", "extractions", ".", "append", "(", "extractions", ")", "self", ".", "reply_counter", "-=", "1", "if", "self", ".", "reply_counter", "==", "0", ":", "self", ".", "exit", "(", "0", ")" ]
Handle replies with reading results.
[ "Handle", "replies", "with", "reading", "results", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/drum_reader.py#L121-L132
train
sorgerlab/indra
indra/sources/hume/visualize_causal.py
split_long_sentence
def split_long_sentence(sentence, words_per_line): """Takes a sentence and adds a newline every "words_per_line" words. Parameters ---------- sentence: str Sentence to split words_per_line: int Add a newline every this many words """ words = sentence.split(' ') split_sentence = '' for i in range(len(words)): split_sentence = split_sentence + words[i] if (i+1) % words_per_line == 0: split_sentence = split_sentence + '\n' elif i != len(words) - 1: split_sentence = split_sentence + " " return split_sentence
python
def split_long_sentence(sentence, words_per_line): """Takes a sentence and adds a newline every "words_per_line" words. Parameters ---------- sentence: str Sentence to split words_per_line: int Add a newline every this many words """ words = sentence.split(' ') split_sentence = '' for i in range(len(words)): split_sentence = split_sentence + words[i] if (i+1) % words_per_line == 0: split_sentence = split_sentence + '\n' elif i != len(words) - 1: split_sentence = split_sentence + " " return split_sentence
[ "def", "split_long_sentence", "(", "sentence", ",", "words_per_line", ")", ":", "words", "=", "sentence", ".", "split", "(", "' '", ")", "split_sentence", "=", "''", "for", "i", "in", "range", "(", "len", "(", "words", ")", ")", ":", "split_sentence", "=", "split_sentence", "+", "words", "[", "i", "]", "if", "(", "i", "+", "1", ")", "%", "words_per_line", "==", "0", ":", "split_sentence", "=", "split_sentence", "+", "'\\n'", "elif", "i", "!=", "len", "(", "words", ")", "-", "1", ":", "split_sentence", "=", "split_sentence", "+", "\" \"", "return", "split_sentence" ]
Takes a sentence and adds a newline every "words_per_line" words. Parameters ---------- sentence: str Sentence to split words_per_line: int Add a newline every this many words
[ "Takes", "a", "sentence", "and", "adds", "a", "newline", "every", "words_per_line", "words", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/visualize_causal.py#L7-L25
train
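Quick usage check for the helper above; the import path follows this record's file path and assumes the module imports cleanly:

from indra.sources.hume.visualize_causal import split_long_sentence

print(split_long_sentence('the quick brown fox jumps over the lazy dog', 3))
# the quick brown
# fox jumps over
# the lazy dog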
sorgerlab/indra
indra/sources/hume/visualize_causal.py
shorter_name
def shorter_name(key): """Return a shorter name for an id. Does this by only taking the last part of the URI, after the last / and the last #. Also replaces - and . with _. Parameters ---------- key: str Some URI Returns ------- key_short: str A shortened, but more ambiguous, identifier """ key_short = key for sep in ['#', '/']: # rfind returns -1 (not None) when the separator is missing, so # check for a non-negative index before slicing. ind = key_short.rfind(sep) if ind >= 0: key_short = key_short[ind+1:] return key_short.replace('-', '_').replace('.', '_')
python
def shorter_name(key): """Return a shorter name for an id. Does this by only taking the last part of the URI, after the last / and the last #. Also replaces - and . with _. Parameters ---------- key: str Some URI Returns ------- key_short: str A shortened, but more ambiguous, identifier """ key_short = key for sep in ['#', '/']: # rfind returns -1 (not None) when the separator is missing, so # check for a non-negative index before slicing. ind = key_short.rfind(sep) if ind >= 0: key_short = key_short[ind+1:] return key_short.replace('-', '_').replace('.', '_')
[ "def", "shorter_name", "(", "key", ")", ":", "key_short", "=", "key", "for", "sep", "in", "[", "'#'", ",", "'/'", "]", ":", "ind", "=", "key_short", ".", "rfind", "(", "sep", ")", "if", "ind", "is", "not", "None", ":", "key_short", "=", "key_short", "[", "ind", "+", "1", ":", "]", "else", ":", "key_short", "=", "key_short", "return", "key_short", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "replace", "(", "'.'", ",", "'_'", ")" ]
Return a shorter name for an id. Does this by only taking the last part of the URI, after the last / and the last #. Also replaces - and . with _. Parameters ---------- key: str Some URI Returns ------- key_short: str A shortened, but more ambiguous, identifier
[ "Return", "a", "shorter", "name", "for", "an", "id", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/visualize_causal.py#L28-L51
train
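Usage check for shorter_name, again importing from this record's module; the example URI comes from the do_not_log list used elsewhere in the same file:

from indra.sources.hume.visualize_causal import shorter_name

uri = 'http://worldmodelers.com/DataProvenance#sourced_from'
print(shorter_name(uri))  # sourced_from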
sorgerlab/indra
indra/sources/hume/visualize_causal.py
add_event_property_edges
def add_event_property_edges(event_entity, entries): """Adds edges to the graph for event properties.""" do_not_log = ['@type', '@id', 'http://worldmodelers.com/DataProvenance#sourced_from'] for prop in event_entity: if prop not in do_not_log: value = event_entity[prop] value_str = None if '@id' in value[0]: value = value[0]['@id'] if value in entries: value_str = get_entry_compact_text_repr(entries[value], entries) if value_str is not None: edges.append([shorter_name(event_entity['@id']), shorter_name(value), shorter_name(prop)]) node_labels[shorter_name(value)] = value_str
python
def add_event_property_edges(event_entity, entries): """Adds edges to the graph for event properties.""" do_not_log = ['@type', '@id', 'http://worldmodelers.com/DataProvenance#sourced_from'] for prop in event_entity: if prop not in do_not_log: value = event_entity[prop] value_str = None if '@id' in value[0]: value = value[0]['@id'] if value in entries: value_str = get_entry_compact_text_repr(entries[value], entries) if value_str is not None: edges.append([shorter_name(event_entity['@id']), shorter_name(value), shorter_name(prop)]) node_labels[shorter_name(value)] = value_str
[ "def", "add_event_property_edges", "(", "event_entity", ",", "entries", ")", ":", "do_not_log", "=", "[", "'@type'", ",", "'@id'", ",", "'http://worldmodelers.com/DataProvenance#sourced_from'", "]", "for", "prop", "in", "event_entity", ":", "if", "prop", "not", "in", "do_not_log", ":", "value", "=", "event_entity", "[", "prop", "]", "value_entry", "=", "None", "value_str", "=", "None", "if", "'@id'", "in", "value", "[", "0", "]", ":", "value", "=", "value", "[", "0", "]", "[", "'@id'", "]", "if", "value", "in", "entries", ":", "value_str", "=", "get_entry_compact_text_repr", "(", "entries", "[", "value", "]", ",", "entries", ")", "#get_entry_compact_text_repr(entry, entries)", "if", "value_str", "is", "not", "None", ":", "edges", ".", "append", "(", "[", "shorter_name", "(", "event_entity", "[", "'@id'", "]", ")", ",", "shorter_name", "(", "value", ")", ",", "shorter_name", "(", "prop", ")", "]", ")", "node_labels", "[", "shorter_name", "(", "value", ")", "]", "=", "value_str" ]
Adds edges to the graph for event properties.
[ "Adds", "edges", "to", "the", "graph", "for", "event", "properties", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/visualize_causal.py#L54-L76
train
sorgerlab/indra
indra/sources/hume/visualize_causal.py
get_sourced_from
def get_sourced_from(entry): """Get a list of values from the sourced_from attribute""" sourced_from = 'http://worldmodelers.com/DataProvenance#sourced_from' if sourced_from in entry: values = entry[sourced_from] values = [i['@id'] for i in values] return values
python
def get_sourced_from(entry): """Get a list of values from the sourced_from attribute""" sourced_from = 'http://worldmodelers.com/DataProvenance#sourced_from' if sourced_from in entry: values = entry[sourced_from] values = [i['@id'] for i in values] return values
[ "def", "get_sourced_from", "(", "entry", ")", ":", "sourced_from", "=", "'http://worldmodelers.com/DataProvenance#sourced_from'", "if", "sourced_from", "in", "entry", ":", "values", "=", "entry", "[", "sourced_from", "]", "values", "=", "[", "i", "[", "'@id'", "]", "for", "i", "in", "values", "]", "return", "values" ]
Get a list of values from the sourced_from attribute
[ "Get", "a", "list", "of", "values", "from", "the", "source_from", "attribute" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/visualize_causal.py#L127-L134
train
sorgerlab/indra
indra/sources/hume/visualize_causal.py
get_entry_compact_text_repr
def get_entry_compact_text_repr(entry, entries): """If the entry has a text value, return that. If the entry has a sourced_from value, return the text value of the source. Otherwise, return None.""" text = get_shortest_text_value(entry) if text is not None: return text else: sources = get_sourced_from(entry) # There are a lot of references to this entity, each of which refers # to it by a different text label. For the sake of visualization, # let's pick one of these labels (in this case, the shortest one) if sources is not None: texts = [] for source in sources: source_entry = entries[source] texts.append(get_shortest_text_value(source_entry)) return get_shortest_string(texts)
python
def get_entry_compact_text_repr(entry, entries): """If the entry has a text value, return that. If the entry has a sourced_from value, return the text value of the source. Otherwise, return None.""" text = get_shortest_text_value(entry) if text is not None: return text else: sources = get_sourced_from(entry) # There are a lot of references to this entity, each of which refers # to it by a different text label. For the sake of visualization, # let's pick one of these labels (in this case, the shortest one) if sources is not None: texts = [] for source in sources: source_entry = entries[source] texts.append(get_shortest_text_value(source_entry)) return get_shortest_string(texts)
[ "def", "get_entry_compact_text_repr", "(", "entry", ",", "entries", ")", ":", "text", "=", "get_shortest_text_value", "(", "entry", ")", "if", "text", "is", "not", "None", ":", "return", "text", "else", ":", "sources", "=", "get_sourced_from", "(", "entry", ")", "# There are a lot of references to this entity, each of which refer", "# to it by a different text label. For the sake of visualization,", "# let's pick one of these labels (in this case, the shortest one)", "if", "sources", "is", "not", "None", ":", "texts", "=", "[", "]", "for", "source", "in", "sources", ":", "source_entry", "=", "entries", "[", "source", "]", "texts", ".", "append", "(", "get_shortest_text_value", "(", "source_entry", ")", ")", "return", "get_shortest_string", "(", "texts", ")" ]
If the entry has a text value, return that. If the entry has a sourced_from value, return the text value of the source. Otherwise, return None.
[ "If", "the", "entry", "has", "a", "text", "value", "return", "that", ".", "If", "the", "entry", "has", "a", "source_from", "value", "return", "the", "text", "value", "of", "the", "source", ".", "Otherwise", "return", "None", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/visualize_causal.py#L137-L154
train
sorgerlab/indra
indra/sources/sparser/api.py
process_text
def process_text(text, output_fmt='json', outbuf=None, cleanup=True, key='', **kwargs): """Return processor with Statements extracted by reading text with Sparser. Parameters ---------- text : str The text to be processed output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ nxml_str = make_nxml_from_text(text) return process_nxml_str(nxml_str, output_fmt, outbuf, cleanup, key, **kwargs)
python
def process_text(text, output_fmt='json', outbuf=None, cleanup=True, key='', **kwargs): """Return processor with Statements extracted by reading text with Sparser. Parameters ---------- text : str The text to be processed output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ nxml_str = make_nxml_from_text(text) return process_nxml_str(nxml_str, output_fmt, outbuf, cleanup, key, **kwargs)
[ "def", "process_text", "(", "text", ",", "output_fmt", "=", "'json'", ",", "outbuf", "=", "None", ",", "cleanup", "=", "True", ",", "key", "=", "''", ",", "*", "*", "kwargs", ")", ":", "nxml_str", "=", "make_nxml_from_text", "(", "text", ")", "return", "process_nxml_str", "(", "nxml_str", ",", "output_fmt", ",", "outbuf", ",", "cleanup", ",", "key", ",", "*", "*", "kwargs", ")" ]
Return processor with Statements extracted by reading text with Sparser. Parameters ---------- text : str The text to be processed output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen.
[ "Return", "processor", "with", "Statements", "extracted", "by", "reading", "text", "with", "Sparser", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L31-L59
train
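Typical entry-point usage, as a hedged sketch: it assumes a local Sparser installation that INDRA can invoke, and that the returned processor exposes a statements list (the usual INDRA processor convention):

from indra.sources.sparser import api as sparser_api

sp = sparser_api.process_text('MEK1 phosphorylates ERK2.')
if sp is not None:
    for stmt in sp.statements:  # assumed processor attribute
        print(stmt)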
sorgerlab/indra
indra/sources/sparser/api.py
process_nxml_str
def process_nxml_str(nxml_str, output_fmt='json', outbuf=None, cleanup=True, key='', **kwargs): """Return processor with Statements extracted by reading an NXML string. Parameters ---------- nxml_str : str The string value of the NXML-formatted paper to be read. output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created in this function, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ tmp_fname = 'PMC%s_%d.nxml' % (key, mp.current_process().pid) with open(tmp_fname, 'wb') as fh: fh.write(nxml_str.encode('utf-8')) try: sp = process_nxml_file(tmp_fname, output_fmt, outbuf, cleanup, **kwargs) finally: if cleanup and os.path.exists(tmp_fname): os.remove(tmp_fname) return sp
python
def process_nxml_str(nxml_str, output_fmt='json', outbuf=None, cleanup=True, key='', **kwargs): """Return processor with Statements extracted by reading an NXML string. Parameters ---------- nxml_str : str The string value of the NXML-formatted paper to be read. output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created in this function, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ tmp_fname = 'PMC%s_%d.nxml' % (key, mp.current_process().pid) with open(tmp_fname, 'wb') as fh: fh.write(nxml_str.encode('utf-8')) try: sp = process_nxml_file(tmp_fname, output_fmt, outbuf, cleanup, **kwargs) finally: if cleanup and os.path.exists(tmp_fname): os.remove(tmp_fname) return sp
[ "def", "process_nxml_str", "(", "nxml_str", ",", "output_fmt", "=", "'json'", ",", "outbuf", "=", "None", ",", "cleanup", "=", "True", ",", "key", "=", "''", ",", "*", "*", "kwargs", ")", ":", "tmp_fname", "=", "'PMC%s_%d.nxml'", "%", "(", "key", ",", "mp", ".", "current_process", "(", ")", ".", "pid", ")", "with", "open", "(", "tmp_fname", ",", "'wb'", ")", "as", "fh", ":", "fh", ".", "write", "(", "nxml_str", ".", "encode", "(", "'utf-8'", ")", ")", "try", ":", "sp", "=", "process_nxml_file", "(", "tmp_fname", ",", "output_fmt", ",", "outbuf", ",", "cleanup", ",", "*", "*", "kwargs", ")", "finally", ":", "if", "cleanup", "and", "os", ".", "path", ".", "exists", "(", "tmp_fname", ")", ":", "os", ".", "remove", "(", "tmp_fname", ")", "return", "sp" ]
Return processor with Statements extracted by reading an NXML string. Parameters ---------- nxml_str : str The string value of the NXML-formatted paper to be read. output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created in this function, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen.
[ "Return", "processor", "with", "Statements", "extracted", "by", "reading", "an", "NXML", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L62-L97
train
sorgerlab/indra
indra/sources/sparser/api.py
process_nxml_file
def process_nxml_file(fname, output_fmt='json', outbuf=None, cleanup=True, **kwargs): """Return processor with Statements extracted by reading an NXML file. Parameters ---------- fname : str The path to the NXML file to be read. output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the output file created by Sparser is removed. Default: True Returns ------- sp : SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ sp = None out_fname = None try: out_fname = run_sparser(fname, output_fmt, outbuf, **kwargs) sp = process_sparser_output(out_fname, output_fmt) except Exception as e: logger.error("Sparser failed to run on %s." % fname) logger.exception(e) finally: if out_fname is not None and os.path.exists(out_fname) and cleanup: os.remove(out_fname) return sp
python
def process_nxml_file(fname, output_fmt='json', outbuf=None, cleanup=True, **kwargs): """Return processor with Statements extracted by reading an NXML file. Parameters ---------- fname : str The path to the NXML file to be read. output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the output file created by Sparser is removed. Default: True Returns ------- sp : SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ sp = None out_fname = None try: out_fname = run_sparser(fname, output_fmt, outbuf, **kwargs) sp = process_sparser_output(out_fname, output_fmt) except Exception as e: logger.error("Sparser failed to run on %s." % fname) logger.exception(e) finally: if out_fname is not None and os.path.exists(out_fname) and cleanup: os.remove(out_fname) return sp
[ "def", "process_nxml_file", "(", "fname", ",", "output_fmt", "=", "'json'", ",", "outbuf", "=", "None", ",", "cleanup", "=", "True", ",", "*", "*", "kwargs", ")", ":", "sp", "=", "None", "out_fname", "=", "None", "try", ":", "out_fname", "=", "run_sparser", "(", "fname", ",", "output_fmt", ",", "outbuf", ",", "*", "*", "kwargs", ")", "sp", "=", "process_sparser_output", "(", "out_fname", ",", "output_fmt", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Sparser failed to run on %s.\"", "%", "fname", ")", "logger", ".", "exception", "(", "e", ")", "finally", ":", "if", "out_fname", "is", "not", "None", "and", "os", ".", "path", ".", "exists", "(", "out_fname", ")", "and", "cleanup", ":", "os", ".", "remove", "(", "out_fname", ")", "return", "sp" ]
Return processor with Statements extracted by reading an NXML file. Parameters ---------- fname : str The path to the NXML file to be read. output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the output file created by Sparser is removed. Default: True Returns ------- sp : SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen.
[ "Return", "processor", "with", "Statements", "extracted", "by", "reading", "an", "NXML", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L100-L134
train
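Usage sketch (editorial addition, not part of the record): a minimal call to the function above, assuming INDRA's usual `from indra.sources import sparser` import path; the PMC-style file name is a hypothetical placeholder.

# Hypothetical sketch: extract Statements from one NXML file with Sparser.
from indra.sources import sparser

sp = sparser.process_nxml_file('PMC1234567.nxml', output_fmt='json')
# The function returns None if Sparser fails, so guard before use.
if sp is not None:
    print('%d statements extracted' % len(sp.statements))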
sorgerlab/indra
indra/sources/sparser/api.py
process_sparser_output
def process_sparser_output(output_fname, output_fmt='json'):
    """Return a processor with Statements extracted from Sparser XML or JSON

    Parameters
    ----------
    output_fname : str
        The path to the Sparser output file to be processed. The file can
        either be JSON or XML output from Sparser, with the output_fmt
        parameter defining what format is assumed to be processed.
    output_fmt : Optional[str]
        The format of the Sparser output to be processed, can either be
        'json' or 'xml'. Default: 'json'

    Returns
    -------
    sp : SparserXMLProcessor or SparserJSONProcessor depending on what output
    format was chosen.
    """
    if output_fmt not in ['json', 'xml']:
        logger.error("Unrecognized output format '%s'." % output_fmt)
        return None
    sp = None
    with open(output_fname, 'rt') as fh:
        if output_fmt == 'json':
            json_dict = json.load(fh)
            sp = process_json_dict(json_dict)
        else:
            xml_str = fh.read()
            sp = process_xml(xml_str)
    return sp
python
def process_sparser_output(output_fname, output_fmt='json'):
    """Return a processor with Statements extracted from Sparser XML or JSON

    Parameters
    ----------
    output_fname : str
        The path to the Sparser output file to be processed. The file can
        either be JSON or XML output from Sparser, with the output_fmt
        parameter defining what format is assumed to be processed.
    output_fmt : Optional[str]
        The format of the Sparser output to be processed, can either be
        'json' or 'xml'. Default: 'json'

    Returns
    -------
    sp : SparserXMLProcessor or SparserJSONProcessor depending on what output
    format was chosen.
    """
    if output_fmt not in ['json', 'xml']:
        logger.error("Unrecognized output format '%s'." % output_fmt)
        return None
    sp = None
    with open(output_fname, 'rt') as fh:
        if output_fmt == 'json':
            json_dict = json.load(fh)
            sp = process_json_dict(json_dict)
        else:
            xml_str = fh.read()
            sp = process_xml(xml_str)
    return sp
[ "def", "process_sparser_output", "(", "output_fname", ",", "output_fmt", "=", "'json'", ")", ":", "if", "output_fmt", "not", "in", "[", "'json'", ",", "'xml'", "]", ":", "logger", ".", "error", "(", "\"Unrecognized output format '%s'.\"", "%", "output_fmt", ")", "return", "None", "sp", "=", "None", "with", "open", "(", "output_fname", ",", "'rt'", ")", "as", "fh", ":", "if", "output_fmt", "==", "'json'", ":", "json_dict", "=", "json", ".", "load", "(", "fh", ")", "sp", "=", "process_json_dict", "(", "json_dict", ")", "else", ":", "xml_str", "=", "fh", ".", "read", "(", ")", "sp", "=", "process_xml", "(", "xml_str", ")", "return", "sp" ]
Return a processor with Statements extracted from Sparser XML or JSON

Parameters
----------
output_fname : str
    The path to the Sparser output file to be processed. The file can
    either be JSON or XML output from Sparser, with the output_fmt
    parameter defining what format is assumed to be processed.
output_fmt : Optional[str]
    The format of the Sparser output to be processed, can either be
    'json' or 'xml'. Default: 'json'

Returns
-------
sp : SparserXMLProcessor or SparserJSONProcessor depending on what output
format was chosen.
[ "Return", "a", "processor", "with", "Statements", "extracted", "from", "Sparser", "XML", "or", "JSON" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L137-L167
train
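Usage sketch (editorial addition): processing a previously saved Sparser output file; the '-semantics.json' file name mirrors the naming convention used by run_sparser later in this set.

# Hypothetical sketch: process a saved Sparser JSON output file.
from indra.sources import sparser

sp = sparser.process_sparser_output('PMC1234567-semantics.json',
                                    output_fmt='json')
stmts = sp.statements if sp is not None else []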
sorgerlab/indra
indra/sources/sparser/api.py
process_xml
def process_xml(xml_str):
    """Return processor with Statements extracted from a Sparser XML.

    Parameters
    ----------
    xml_str : str
        The XML string obtained by reading content with Sparser, using the
        'xml' output mode.

    Returns
    -------
    sp : SparserXMLProcessor
        A SparserXMLProcessor which has extracted Statements as its
        statements attribute.
    """
    try:
        tree = ET.XML(xml_str, parser=UTB())
    except ET.ParseError as e:
        logger.error('Could not parse XML string')
        logger.error(e)
        return None
    sp = _process_elementtree(tree)
    return sp
python
def process_xml(xml_str):
    """Return processor with Statements extracted from a Sparser XML.

    Parameters
    ----------
    xml_str : str
        The XML string obtained by reading content with Sparser, using the
        'xml' output mode.

    Returns
    -------
    sp : SparserXMLProcessor
        A SparserXMLProcessor which has extracted Statements as its
        statements attribute.
    """
    try:
        tree = ET.XML(xml_str, parser=UTB())
    except ET.ParseError as e:
        logger.error('Could not parse XML string')
        logger.error(e)
        return None
    sp = _process_elementtree(tree)
    return sp
[ "def", "process_xml", "(", "xml_str", ")", ":", "try", ":", "tree", "=", "ET", ".", "XML", "(", "xml_str", ",", "parser", "=", "UTB", "(", ")", ")", "except", "ET", ".", "ParseError", "as", "e", ":", "logger", ".", "error", "(", "'Could not parse XML string'", ")", "logger", ".", "error", "(", "e", ")", "return", "None", "sp", "=", "_process_elementtree", "(", "tree", ")", "return", "sp" ]
Return processor with Statements extracted from a Sparser XML.

Parameters
----------
xml_str : str
    The XML string obtained by reading content with Sparser, using the
    'xml' output mode.

Returns
-------
sp : SparserXMLProcessor
    A SparserXMLProcessor which has extracted Statements as its
    statements attribute.
[ "Return", "processor", "with", "Statements", "extracted", "from", "a", "Sparser", "XML", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L190-L212
train
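Usage sketch (editorial addition): feeding an in-memory XML string to the function above; the file name is a placeholder.

# Hypothetical sketch: process Sparser XML already loaded into memory.
from indra.sources import sparser

with open('PMC1234567-semantics.xml', 'rt') as fh:
    xml_str = fh.read()
# Returns None if the XML cannot be parsed.
sp = sparser.process_xml(xml_str)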
sorgerlab/indra
indra/sources/sparser/api.py
run_sparser
def run_sparser(fname, output_fmt, outbuf=None, timeout=600):
    """Return the path to reading output after running Sparser reading.

    Parameters
    ----------
    fname : str
        The path to an input file to be processed. Due to the Sparser
        executable's assumptions, the file name needs to start with PMC
        and should be an NXML formatted file.
    output_fmt : Optional[str]
        The format in which Sparser should produce its output, can either
        be 'json' or 'xml'.
    outbuf : Optional[file]
        A file like object that the Sparser output is written to.
    timeout : int
        The number of seconds to wait until giving up on this one reading.
        The default is 600 seconds (i.e. 10 minutes). Sparser is a fast
        reader and the typical time to read a single full text is a matter
        of seconds.

    Returns
    -------
    output_path : str
        The path to the output file created by Sparser.
    """
    if not sparser_path or not os.path.exists(sparser_path):
        logger.error('Sparser executable not set in %s' % sparser_path_var)
        return None
    if output_fmt == 'xml':
        format_flag = '-x'
        suffix = '.xml'
    elif output_fmt == 'json':
        format_flag = '-j'
        suffix = '.json'
    else:
        logger.error('Unknown output format: %s' % output_fmt)
        return None
    sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')
    output_path = fname.split('.')[0] + '-semantics' + suffix
    for fpath in [sparser_exec_path, fname]:
        if not os.path.exists(fpath):
            raise Exception("'%s' is not a valid path." % fpath)
    cmd_list = [sparser_exec_path, format_flag, fname]
    # This is mostly a copy of the code found in subprocess.run, with the
    # key change that proc.kill is replaced with os.killpg. This allows the
    # process to be killed even if it has children. Solution developed from:
    # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure
    with sp.Popen(cmd_list, stdout=sp.PIPE) as proc:
        try:
            stdout, stderr = proc.communicate(timeout=timeout)
        except sp.TimeoutExpired:
            # Yes, this is about as bad as it looks. But it is the only way
            # to be sure the script actually dies.
            sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname])
            stdout, stderr = proc.communicate()
            raise sp.TimeoutExpired(proc.args, timeout, output=stdout,
                                    stderr=stderr)
        except BaseException:
            # See comment on above instance.
            sp.check_call(['pkill', '-f', fname])
            proc.wait()
            raise
        retcode = proc.poll()
        if retcode:
            raise sp.CalledProcessError(retcode, proc.args, output=stdout,
                                        stderr=stderr)
    if outbuf is not None:
        outbuf.write(stdout)
        outbuf.flush()
    assert os.path.exists(output_path),\
        'No output file \"%s\" created by sparser.' % output_path
    return output_path
python
def run_sparser(fname, output_fmt, outbuf=None, timeout=600):
    """Return the path to reading output after running Sparser reading.

    Parameters
    ----------
    fname : str
        The path to an input file to be processed. Due to the Sparser
        executable's assumptions, the file name needs to start with PMC
        and should be an NXML formatted file.
    output_fmt : Optional[str]
        The format in which Sparser should produce its output, can either
        be 'json' or 'xml'.
    outbuf : Optional[file]
        A file like object that the Sparser output is written to.
    timeout : int
        The number of seconds to wait until giving up on this one reading.
        The default is 600 seconds (i.e. 10 minutes). Sparser is a fast
        reader and the typical time to read a single full text is a matter
        of seconds.

    Returns
    -------
    output_path : str
        The path to the output file created by Sparser.
    """
    if not sparser_path or not os.path.exists(sparser_path):
        logger.error('Sparser executable not set in %s' % sparser_path_var)
        return None
    if output_fmt == 'xml':
        format_flag = '-x'
        suffix = '.xml'
    elif output_fmt == 'json':
        format_flag = '-j'
        suffix = '.json'
    else:
        logger.error('Unknown output format: %s' % output_fmt)
        return None
    sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')
    output_path = fname.split('.')[0] + '-semantics' + suffix
    for fpath in [sparser_exec_path, fname]:
        if not os.path.exists(fpath):
            raise Exception("'%s' is not a valid path." % fpath)
    cmd_list = [sparser_exec_path, format_flag, fname]
    # This is mostly a copy of the code found in subprocess.run, with the
    # key change that proc.kill is replaced with os.killpg. This allows the
    # process to be killed even if it has children. Solution developed from:
    # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure
    with sp.Popen(cmd_list, stdout=sp.PIPE) as proc:
        try:
            stdout, stderr = proc.communicate(timeout=timeout)
        except sp.TimeoutExpired:
            # Yes, this is about as bad as it looks. But it is the only way
            # to be sure the script actually dies.
            sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname])
            stdout, stderr = proc.communicate()
            raise sp.TimeoutExpired(proc.args, timeout, output=stdout,
                                    stderr=stderr)
        except BaseException:
            # See comment on above instance.
            sp.check_call(['pkill', '-f', fname])
            proc.wait()
            raise
        retcode = proc.poll()
        if retcode:
            raise sp.CalledProcessError(retcode, proc.args, output=stdout,
                                        stderr=stderr)
    if outbuf is not None:
        outbuf.write(stdout)
        outbuf.flush()
    assert os.path.exists(output_path),\
        'No output file \"%s\" created by sparser.' % output_path
    return output_path
[ "def", "run_sparser", "(", "fname", ",", "output_fmt", ",", "outbuf", "=", "None", ",", "timeout", "=", "600", ")", ":", "if", "not", "sparser_path", "or", "not", "os", ".", "path", ".", "exists", "(", "sparser_path", ")", ":", "logger", ".", "error", "(", "'Sparser executable not set in %s'", "%", "sparser_path_var", ")", "return", "None", "if", "output_fmt", "==", "'xml'", ":", "format_flag", "=", "'-x'", "suffix", "=", "'.xml'", "elif", "output_fmt", "==", "'json'", ":", "format_flag", "=", "'-j'", "suffix", "=", "'.json'", "else", ":", "logger", ".", "error", "(", "'Unknown output format: %s'", "%", "output_fmt", ")", "return", "None", "sparser_exec_path", "=", "os", ".", "path", ".", "join", "(", "sparser_path", ",", "'save-semantics.sh'", ")", "output_path", "=", "fname", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "'-semantics'", "+", "suffix", "for", "fpath", "in", "[", "sparser_exec_path", ",", "fname", "]", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fpath", ")", ":", "raise", "Exception", "(", "\"'%s' is not a valid path.\"", "%", "fpath", ")", "cmd_list", "=", "[", "sparser_exec_path", ",", "format_flag", ",", "fname", "]", "# This is mostly a copy of the code found in subprocess.run, with the", "# key change that proc.kill is replaced with os.killpg. This allows the", "# process to be killed even if it has children. Solution developed from:", "# https://stackoverflow.com/questions/36952245/subprocess-timeout-failure", "with", "sp", ".", "Popen", "(", "cmd_list", ",", "stdout", "=", "sp", ".", "PIPE", ")", "as", "proc", ":", "try", ":", "stdout", ",", "stderr", "=", "proc", ".", "communicate", "(", "timeout", "=", "timeout", ")", "except", "sp", ".", "TimeoutExpired", ":", "# Yes, this is about as bad as it looks. But it is the only way to", "# be sure the script actually dies.", "sp", ".", "check_call", "(", "[", "'pkill'", ",", "'-f'", ",", "'r3.core.*%s'", "%", "fname", "]", ")", "stdout", ",", "stderr", "=", "proc", ".", "communicate", "(", ")", "raise", "sp", ".", "TimeoutExpired", "(", "proc", ".", "args", ",", "timeout", ",", "output", "=", "stdout", ",", "stderr", "=", "stderr", ")", "except", "BaseException", ":", "# See comment on above instance.", "sp", ".", "check_call", "(", "[", "'pkill'", ",", "'-f'", ",", "fname", "]", ")", "proc", ".", "wait", "(", ")", "raise", "retcode", "=", "proc", ".", "poll", "(", ")", "if", "retcode", ":", "raise", "sp", ".", "CalledProcessError", "(", "retcode", ",", "proc", ".", "args", ",", "output", "=", "stdout", ",", "stderr", "=", "stderr", ")", "if", "outbuf", "is", "not", "None", ":", "outbuf", ".", "write", "(", "stdout", ")", "outbuf", ".", "flush", "(", ")", "assert", "os", ".", "path", ".", "exists", "(", "output_path", ")", ",", "'No output file \\\"%s\\\" created by sparser.'", "%", "output_path", "return", "output_path" ]
Return the path to reading output after running Sparser reading.

Parameters
----------
fname : str
    The path to an input file to be processed. Due to the Sparser
    executable's assumptions, the file name needs to start with PMC
    and should be an NXML formatted file.
output_fmt : Optional[str]
    The format in which Sparser should produce its output, can either
    be 'json' or 'xml'.
outbuf : Optional[file]
    A file like object that the Sparser output is written to.
timeout : int
    The number of seconds to wait until giving up on this one reading.
    The default is 600 seconds (i.e. 10 minutes). Sparser is a fast
    reader and the typical time to read a single full text is a matter
    of seconds.

Returns
-------
output_path : str
    The path to the output file created by Sparser.
[ "Return", "the", "path", "to", "reading", "output", "after", "running", "Sparser", "reading", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L215-L287
train
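Usage sketch (editorial addition): calling the lower-level runner directly with a shorter timeout, assuming the module-level sparser_path configuration that the function checks is set; the file name is a placeholder.

# Hypothetical sketch: run Sparser directly and keep the raw output path.
from indra.sources.sparser.api import run_sparser

out_path = run_sparser('PMC1234567.nxml', 'json', timeout=120)
print('Sparser output written to %s' % out_path)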
sorgerlab/indra
indra/sources/sparser/api.py
get_version
def get_version():
    """Return the version of the Sparser executable on the path.

    Returns
    -------
    version : str
        The version of Sparser that is found on the Sparser path.
    """
    assert sparser_path is not None, "Sparser path is not defined."
    with open(os.path.join(sparser_path, 'version.txt'), 'r') as f:
        version = f.read().strip()
    return version
python
def get_version():
    """Return the version of the Sparser executable on the path.

    Returns
    -------
    version : str
        The version of Sparser that is found on the Sparser path.
    """
    assert sparser_path is not None, "Sparser path is not defined."
    with open(os.path.join(sparser_path, 'version.txt'), 'r') as f:
        version = f.read().strip()
    return version
[ "def", "get_version", "(", ")", ":", "assert", "sparser_path", "is", "not", "None", ",", "\"Sparser path is not defined.\"", "with", "open", "(", "os", ".", "path", ".", "join", "(", "sparser_path", ",", "'version.txt'", ")", ",", "'r'", ")", "as", "f", ":", "version", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "return", "version" ]
Return the version of the Sparser executable on the path.

Returns
-------
version : str
    The version of Sparser that is found on the Sparser path.
[ "Return", "the", "version", "of", "the", "Sparser", "executable", "on", "the", "path", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L290-L301
train
sorgerlab/indra
indra/sources/sparser/api.py
make_nxml_from_text
def make_nxml_from_text(text):
    """Return raw text wrapped in NXML structure.

    Parameters
    ----------
    text : str
        The raw text content to be wrapped in an NXML structure.

    Returns
    -------
    nxml_str : str
        The NXML string wrapping the raw text input.
    """
    text = _escape_xml(text)
    header = '<?xml version="1.0" encoding="UTF-8" ?>' + \
        '<OAI-PMH><article><body><sec id="s1"><p>'
    footer = '</p></sec></body></article></OAI-PMH>'
    nxml_str = header + text + footer
    return nxml_str
python
def make_nxml_from_text(text):
    """Return raw text wrapped in NXML structure.

    Parameters
    ----------
    text : str
        The raw text content to be wrapped in an NXML structure.

    Returns
    -------
    nxml_str : str
        The NXML string wrapping the raw text input.
    """
    text = _escape_xml(text)
    header = '<?xml version="1.0" encoding="UTF-8" ?>' + \
        '<OAI-PMH><article><body><sec id="s1"><p>'
    footer = '</p></sec></body></article></OAI-PMH>'
    nxml_str = header + text + footer
    return nxml_str
[ "def", "make_nxml_from_text", "(", "text", ")", ":", "text", "=", "_escape_xml", "(", "text", ")", "header", "=", "'<?xml version=\"1.0\" encoding=\"UTF-8\" ?>'", "+", "'<OAI-PMH><article><body><sec id=\"s1\"><p>'", "footer", "=", "'</p></sec></body></article></OAI-PMH>'", "nxml_str", "=", "header", "+", "text", "+", "footer", "return", "nxml_str" ]
Return raw text wrapped in NXML structure.

Parameters
----------
text : str
    The raw text content to be wrapped in an NXML structure.

Returns
-------
nxml_str : str
    The NXML string wrapping the raw text input.
[ "Return", "raw", "text", "wrapped", "in", "NXML", "structure", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L304-L322
train
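Usage sketch (editorial addition): wrapping raw text so it can be passed to process_nxml_file; the PMC-style file name is an assumption based on run_sparser's documented requirement.

# Hypothetical sketch: wrap raw text in the NXML scaffold and save it.
from indra.sources.sparser.api import make_nxml_from_text

nxml_str = make_nxml_from_text('MEK1 phosphorylates ERK2.')
with open('PMC0000001.nxml', 'wt') as fh:
    fh.write(nxml_str)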
sorgerlab/indra
indra/databases/hgnc_client.py
get_hgnc_name
def get_hgnc_name(hgnc_id):
    """Return the HGNC symbol corresponding to the given HGNC ID.

    Parameters
    ----------
    hgnc_id : str
        The HGNC ID to be converted.

    Returns
    -------
    hgnc_name : str
        The HGNC symbol corresponding to the given HGNC ID.
    """
    try:
        hgnc_name = hgnc_names[hgnc_id]
    except KeyError:
        xml_tree = get_hgnc_entry(hgnc_id)
        if xml_tree is None:
            return None
        hgnc_name_tag = \
            xml_tree.find("result/doc/str[@name='symbol']")
        if hgnc_name_tag is None:
            return None
        hgnc_name = hgnc_name_tag.text.strip()
    return hgnc_name
python
def get_hgnc_name(hgnc_id):
    """Return the HGNC symbol corresponding to the given HGNC ID.

    Parameters
    ----------
    hgnc_id : str
        The HGNC ID to be converted.

    Returns
    -------
    hgnc_name : str
        The HGNC symbol corresponding to the given HGNC ID.
    """
    try:
        hgnc_name = hgnc_names[hgnc_id]
    except KeyError:
        xml_tree = get_hgnc_entry(hgnc_id)
        if xml_tree is None:
            return None
        hgnc_name_tag = \
            xml_tree.find("result/doc/str[@name='symbol']")
        if hgnc_name_tag is None:
            return None
        hgnc_name = hgnc_name_tag.text.strip()
    return hgnc_name
[ "def", "get_hgnc_name", "(", "hgnc_id", ")", ":", "try", ":", "hgnc_name", "=", "hgnc_names", "[", "hgnc_id", "]", "except", "KeyError", ":", "xml_tree", "=", "get_hgnc_entry", "(", "hgnc_id", ")", "if", "xml_tree", "is", "None", ":", "return", "None", "hgnc_name_tag", "=", "xml_tree", ".", "find", "(", "\"result/doc/str[@name='symbol']\"", ")", "if", "hgnc_name_tag", "is", "None", ":", "return", "None", "hgnc_name", "=", "hgnc_name_tag", ".", "text", ".", "strip", "(", ")", "return", "hgnc_name" ]
Return the HGNC symbol corresponding to the given HGNC ID.

Parameters
----------
hgnc_id : str
    The HGNC ID to be converted.

Returns
-------
hgnc_name : str
    The HGNC symbol corresponding to the given HGNC ID.
[ "Return", "the", "HGNC", "symbol", "corresponding", "to", "the", "given", "HGNC", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/hgnc_client.py#L83-L107
train
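Usage sketch (editorial addition): a cache-backed lookup; '1097' is the HGNC ID of BRAF.

# Sketch: map an HGNC ID to its symbol; falls back to the web service
# on a cache miss.
from indra.databases import hgnc_client

symbol = hgnc_client.get_hgnc_name('1097')  # expected: 'BRAF'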
sorgerlab/indra
indra/databases/hgnc_client.py
get_hgnc_entry
def get_hgnc_entry(hgnc_id):
    """Return the HGNC entry for the given HGNC ID from the web service.

    Parameters
    ----------
    hgnc_id : str
        The HGNC ID to be converted.

    Returns
    -------
    xml_tree : ElementTree
        The XML ElementTree corresponding to the entry for the given
        HGNC ID.
    """
    url = hgnc_url + 'hgnc_id/%s' % hgnc_id
    headers = {'Accept': '*/*'}
    res = requests.get(url, headers=headers)
    if not res.status_code == 200:
        return None
    xml_tree = ET.XML(res.content, parser=UTB())
    return xml_tree
python
def get_hgnc_entry(hgnc_id):
    """Return the HGNC entry for the given HGNC ID from the web service.

    Parameters
    ----------
    hgnc_id : str
        The HGNC ID to be converted.

    Returns
    -------
    xml_tree : ElementTree
        The XML ElementTree corresponding to the entry for the given
        HGNC ID.
    """
    url = hgnc_url + 'hgnc_id/%s' % hgnc_id
    headers = {'Accept': '*/*'}
    res = requests.get(url, headers=headers)
    if not res.status_code == 200:
        return None
    xml_tree = ET.XML(res.content, parser=UTB())
    return xml_tree
[ "def", "get_hgnc_entry", "(", "hgnc_id", ")", ":", "url", "=", "hgnc_url", "+", "'hgnc_id/%s'", "%", "hgnc_id", "headers", "=", "{", "'Accept'", ":", "'*/*'", "}", "res", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "if", "not", "res", ".", "status_code", "==", "200", ":", "return", "None", "xml_tree", "=", "ET", ".", "XML", "(", "res", ".", "content", ",", "parser", "=", "UTB", "(", ")", ")", "return", "xml_tree" ]
Return the HGNC entry for the given HGNC ID from the web service.

Parameters
----------
hgnc_id : str
    The HGNC ID to be converted.

Returns
-------
xml_tree : ElementTree
    The XML ElementTree corresponding to the entry for the given
    HGNC ID.
[ "Return", "the", "HGNC", "entry", "for", "the", "given", "HGNC", "ID", "from", "the", "web", "service", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/hgnc_client.py#L224-L244
train
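Usage sketch (editorial addition): fetching the raw entry and reading the symbol out of the XML tree, mirroring what get_hgnc_name does on a cache miss.

# Sketch: query the HGNC web service directly for ID '1097'.
from indra.databases.hgnc_client import get_hgnc_entry

xml_tree = get_hgnc_entry('1097')
if xml_tree is not None:
    tag = xml_tree.find("result/doc/str[@name='symbol']")
    print(tag.text.strip() if tag is not None else 'symbol not found')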
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
analyze_reach_log
def analyze_reach_log(log_fname=None, log_str=None):
    """Return unfinished PMIDs given a log file name."""
    assert bool(log_fname) ^ bool(log_str), 'Must specify log_fname OR log_str'
    started_patt = re.compile('Starting ([\d]+)')
    # TODO: it might be interesting to get the time it took to read
    # each paper here
    finished_patt = re.compile('Finished ([\d]+)')

    def get_content_nums(txt):
        pat = 'Retrieved content for ([\d]+) / ([\d]+) papers to be read'
        res = re.match(pat, txt)
        # Parenthesize the fallback so both names are set when there is
        # no match (the unparenthesized conditional bound only the first).
        has_content, total = res.groups() if res else (None, None)
        return has_content, total

    if log_fname:
        with open(log_fname, 'r') as fh:
            log_str = fh.read()
    # has_content, total = get_content_nums(log_str)  # unused
    pmids = {}
    pmids['started'] = started_patt.findall(log_str)
    pmids['finished'] = finished_patt.findall(log_str)
    pmids['not_done'] = set(pmids['started']) - set(pmids['finished'])
    return pmids
python
def analyze_reach_log(log_fname=None, log_str=None):
    """Return unfinished PMIDs given a log file name."""
    assert bool(log_fname) ^ bool(log_str), 'Must specify log_fname OR log_str'
    started_patt = re.compile('Starting ([\d]+)')
    # TODO: it might be interesting to get the time it took to read
    # each paper here
    finished_patt = re.compile('Finished ([\d]+)')

    def get_content_nums(txt):
        pat = 'Retrieved content for ([\d]+) / ([\d]+) papers to be read'
        res = re.match(pat, txt)
        # Parenthesize the fallback so both names are set when there is
        # no match (the unparenthesized conditional bound only the first).
        has_content, total = res.groups() if res else (None, None)
        return has_content, total

    if log_fname:
        with open(log_fname, 'r') as fh:
            log_str = fh.read()
    # has_content, total = get_content_nums(log_str)  # unused
    pmids = {}
    pmids['started'] = started_patt.findall(log_str)
    pmids['finished'] = finished_patt.findall(log_str)
    pmids['not_done'] = set(pmids['started']) - set(pmids['finished'])
    return pmids
[ "def", "analyze_reach_log", "(", "log_fname", "=", "None", ",", "log_str", "=", "None", ")", ":", "assert", "bool", "(", "log_fname", ")", "^", "bool", "(", "log_str", ")", ",", "'Must specify log_fname OR log_str'", "started_patt", "=", "re", ".", "compile", "(", "'Starting ([\\d]+)'", ")", "# TODO: it might be interesting to get the time it took to read", "# each paper here", "finished_patt", "=", "re", ".", "compile", "(", "'Finished ([\\d]+)'", ")", "def", "get_content_nums", "(", "txt", ")", ":", "pat", "=", "'Retrieved content for ([\\d]+) / ([\\d]+) papers to be read'", "res", "=", "re", ".", "match", "(", "pat", ",", "txt", ")", "has_content", ",", "total", "=", "res", ".", "groups", "(", ")", "if", "res", "else", "None", ",", "None", "return", "has_content", ",", "total", "if", "log_fname", ":", "with", "open", "(", "log_fname", ",", "'r'", ")", "as", "fh", ":", "log_str", "=", "fh", ".", "read", "(", ")", "# has_content, total = get_content_nums(log_str) # unused", "pmids", "=", "{", "}", "pmids", "[", "'started'", "]", "=", "started_patt", ".", "findall", "(", "log_str", ")", "pmids", "[", "'finished'", "]", "=", "finished_patt", ".", "findall", "(", "log_str", ")", "pmids", "[", "'not_done'", "]", "=", "set", "(", "pmids", "[", "'started'", "]", ")", "-", "set", "(", "pmids", "[", "'finished'", "]", ")", "return", "pmids" ]
Return unfinished PMIDs given a log file name.
[ "Return", "unifinished", "PMIDs", "given", "a", "log", "file", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L6-L28
train
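Usage sketch (editorial addition) on a synthetic log string: two papers started, one finished, so one PMID is reported as not done.

# Sketch: analyze a synthetic REACH log held in memory.
from indra.tools.reading.util.log_analysis_tools import analyze_reach_log

log = 'Starting 123\nStarting 456\nFinished 123\n'
pmids = analyze_reach_log(log_str=log)
assert pmids['not_done'] == {'456'}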
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
get_logs_from_db_reading
def get_logs_from_db_reading(job_prefix, reading_queue='run_db_reading_queue'):
    """Get the logs stashed on s3 for a particular reading."""
    s3 = boto3.client('s3')
    gen_prefix = 'reading_results/%s/logs/%s' % (job_prefix, reading_queue)
    job_log_data = s3.list_objects_v2(Bucket='bigmech',
                                      Prefix=join(gen_prefix, job_prefix))
    # TODO: Track success/failure
    log_strs = []
    for fdict in job_log_data['Contents']:
        resp = s3.get_object(Bucket='bigmech', Key=fdict['Key'])
        log_strs.append(resp['Body'].read().decode('utf-8'))
    return log_strs
python
def get_logs_from_db_reading(job_prefix, reading_queue='run_db_reading_queue'):
    """Get the logs stashed on s3 for a particular reading."""
    s3 = boto3.client('s3')
    gen_prefix = 'reading_results/%s/logs/%s' % (job_prefix, reading_queue)
    job_log_data = s3.list_objects_v2(Bucket='bigmech',
                                      Prefix=join(gen_prefix, job_prefix))
    # TODO: Track success/failure
    log_strs = []
    for fdict in job_log_data['Contents']:
        resp = s3.get_object(Bucket='bigmech', Key=fdict['Key'])
        log_strs.append(resp['Body'].read().decode('utf-8'))
    return log_strs
[ "def", "get_logs_from_db_reading", "(", "job_prefix", ",", "reading_queue", "=", "'run_db_reading_queue'", ")", ":", "s3", "=", "boto3", ".", "client", "(", "'s3'", ")", "gen_prefix", "=", "'reading_results/%s/logs/%s'", "%", "(", "job_prefix", ",", "reading_queue", ")", "job_log_data", "=", "s3", ".", "list_objects_v2", "(", "Bucket", "=", "'bigmech'", ",", "Prefix", "=", "join", "(", "gen_prefix", ",", "job_prefix", ")", ")", "# TODO: Track success/failure", "log_strs", "=", "[", "]", "for", "fdict", "in", "job_log_data", "[", "'Contents'", "]", ":", "resp", "=", "s3", ".", "get_object", "(", "Bucket", "=", "'bigmech'", ",", "Key", "=", "fdict", "[", "'Key'", "]", ")", "log_strs", ".", "append", "(", "resp", "[", "'Body'", "]", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "return", "log_strs" ]
Get the logs stashed on s3 for a particular reading.
[ "Get", "the", "logs", "stashed", "on", "s3", "for", "a", "particular", "reading", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L36-L47
train
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
separate_reach_logs
def separate_reach_logs(log_str):
    """Get the list of reach logs from the overall logs."""
    log_lines = log_str.splitlines()
    reach_logs = []
    reach_lines = []
    adding_reach_lines = False
    for l in log_lines[:]:
        if not adding_reach_lines and 'Beginning reach' in l:
            adding_reach_lines = True
        elif adding_reach_lines and 'Reach finished' in l:
            adding_reach_lines = False
            reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines)))
            reach_lines = []
        elif adding_reach_lines:
            reach_lines.append(l.split('readers - ')[1])
            log_lines.remove(l)
    if adding_reach_lines:
        reach_logs.append(('FAILURE', '\n'.join(reach_lines)))
    return '\n'.join(log_lines), reach_logs
python
def separate_reach_logs(log_str):
    """Get the list of reach logs from the overall logs."""
    log_lines = log_str.splitlines()
    reach_logs = []
    reach_lines = []
    adding_reach_lines = False
    for l in log_lines[:]:
        if not adding_reach_lines and 'Beginning reach' in l:
            adding_reach_lines = True
        elif adding_reach_lines and 'Reach finished' in l:
            adding_reach_lines = False
            reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines)))
            reach_lines = []
        elif adding_reach_lines:
            reach_lines.append(l.split('readers - ')[1])
            log_lines.remove(l)
    if adding_reach_lines:
        reach_logs.append(('FAILURE', '\n'.join(reach_lines)))
    return '\n'.join(log_lines), reach_logs
[ "def", "separate_reach_logs", "(", "log_str", ")", ":", "log_lines", "=", "log_str", ".", "splitlines", "(", ")", "reach_logs", "=", "[", "]", "reach_lines", "=", "[", "]", "adding_reach_lines", "=", "False", "for", "l", "in", "log_lines", "[", ":", "]", ":", "if", "not", "adding_reach_lines", "and", "'Beginning reach'", "in", "l", ":", "adding_reach_lines", "=", "True", "elif", "adding_reach_lines", "and", "'Reach finished'", "in", "l", ":", "adding_reach_lines", "=", "False", "reach_logs", ".", "append", "(", "(", "'SUCCEEDED'", ",", "'\\n'", ".", "join", "(", "reach_lines", ")", ")", ")", "reach_lines", "=", "[", "]", "elif", "adding_reach_lines", ":", "reach_lines", ".", "append", "(", "l", ".", "split", "(", "'readers - '", ")", "[", "1", "]", ")", "log_lines", ".", "remove", "(", "l", ")", "if", "adding_reach_lines", ":", "reach_logs", ".", "append", "(", "(", "'FAILURE'", ",", "'\\n'", ".", "join", "(", "reach_lines", ")", ")", ")", "return", "'\\n'", ".", "join", "(", "log_lines", ")", ",", "reach_logs" ]
Get the list of reach logs from the overall logs.
[ "Get", "the", "list", "of", "reach", "logs", "from", "the", "overall", "logs", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L50-L68
train
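Usage sketch (editorial addition) on a synthetic log: one complete reach block is split out as SUCCEEDED; note that the inner lines must contain 'readers - ' for the split to work.

# Sketch: separate a reach block from the surrounding log lines.
from indra.tools.reading.util.log_analysis_tools import separate_reach_logs

log = ('Beginning reach\n'
       '... readers - Starting 123\n'
       'Reach finished\n')
rest, reach_logs = separate_reach_logs(log)
assert reach_logs == [('SUCCEEDED', 'Starting 123')]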
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
get_unyielding_tcids
def get_unyielding_tcids(log_str):
    """Extract the set of tcids for which no statements were created."""
    tcid_strs = re.findall('INFO: \[.*?\].*? - Got no statements for (\d+).*',
                           log_str)
    return {int(tcid_str) for tcid_str in tcid_strs}
python
def get_unyielding_tcids(log_str):
    """Extract the set of tcids for which no statements were created."""
    tcid_strs = re.findall('INFO: \[.*?\].*? - Got no statements for (\d+).*',
                           log_str)
    return {int(tcid_str) for tcid_str in tcid_strs}
[ "def", "get_unyielding_tcids", "(", "log_str", ")", ":", "tcid_strs", "=", "re", ".", "findall", "(", "'INFO: \\[.*?\\].*? - Got no statements for (\\d+).*'", ",", "log_str", ")", "return", "{", "int", "(", "tcid_str", ")", "for", "tcid_str", "in", "tcid_strs", "}" ]
Extract the set of tcids for which no statements were created.
[ "Extract", "the", "set", "of", "tcids", "for", "which", "no", "statements", "were", "created", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L115-L119
train
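Usage sketch (editorial addition): the bracketed field stands in for a timestamp; the log line is synthetic but matches the pattern above.

# Sketch: pull tcids that yielded no statements out of a log string.
from indra.tools.reading.util.log_analysis_tools import get_unyielding_tcids

log = 'INFO: [2019-01-01] reader - Got no statements for 42.\n'
assert get_unyielding_tcids(log) == {42}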
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
analyze_db_reading
def analyze_db_reading(job_prefix, reading_queue='run_db_reading_queue'):
    """Run various analyses on a particular reading job."""
    # Analyze reach failures
    log_strs = get_logs_from_db_reading(job_prefix, reading_queue)
    indra_log_strs = []
    all_reach_logs = []
    log_stats = []
    for log_str in log_strs:
        log_str, reach_logs = separate_reach_logs(log_str)
        all_reach_logs.extend(reach_logs)
        indra_log_strs.append(log_str)
        log_stats.append(get_reading_stats(log_str))

    # Analyze the reach failures.
    failed_reach_logs = [reach_log_str
                         for result, reach_log_str in all_reach_logs
                         if result == 'FAILURE']
    failed_id_dicts = [analyze_reach_log(log_str=reach_log)
                       for reach_log in failed_reach_logs
                       if bool(reach_log)]
    # Take the union of the 'not_done' sets; a set of sets (as originally
    # written) would raise a TypeError because sets are unhashable.
    tcids_unfinished = set().union(*[id_dict['not_done']
                                     for id_dict in failed_id_dicts])
    print("Found %d unfinished tcids." % len(tcids_unfinished))

    # Summarize the global stats. Start from an empty dict so the
    # membership checks below initialize each key before accumulating
    # (dict.fromkeys would pre-fill the keys with None values).
    if log_stats:
        sum_dict = {}
        for log_stat in log_stats:
            for k in log_stat.keys():
                if isinstance(log_stat[k], list):
                    if k not in sum_dict.keys():
                        sum_dict[k] = [0]*len(log_stat[k])
                    sum_dict[k] = [sum_dict[k][i] + log_stat[k][i]
                                   for i in range(len(log_stat[k]))]
                else:
                    if k not in sum_dict.keys():
                        sum_dict[k] = 0
                    sum_dict[k] += log_stat[k]
    else:
        sum_dict = {}
    return tcids_unfinished, sum_dict, log_stats
python
def analyze_db_reading(job_prefix, reading_queue='run_db_reading_queue'):
    """Run various analyses on a particular reading job."""
    # Analyze reach failures
    log_strs = get_logs_from_db_reading(job_prefix, reading_queue)
    indra_log_strs = []
    all_reach_logs = []
    log_stats = []
    for log_str in log_strs:
        log_str, reach_logs = separate_reach_logs(log_str)
        all_reach_logs.extend(reach_logs)
        indra_log_strs.append(log_str)
        log_stats.append(get_reading_stats(log_str))

    # Analyze the reach failures.
    failed_reach_logs = [reach_log_str
                         for result, reach_log_str in all_reach_logs
                         if result == 'FAILURE']
    failed_id_dicts = [analyze_reach_log(log_str=reach_log)
                       for reach_log in failed_reach_logs
                       if bool(reach_log)]
    # Take the union of the 'not_done' sets; a set of sets (as originally
    # written) would raise a TypeError because sets are unhashable.
    tcids_unfinished = set().union(*[id_dict['not_done']
                                     for id_dict in failed_id_dicts])
    print("Found %d unfinished tcids." % len(tcids_unfinished))

    # Summarize the global stats. Start from an empty dict so the
    # membership checks below initialize each key before accumulating
    # (dict.fromkeys would pre-fill the keys with None values).
    if log_stats:
        sum_dict = {}
        for log_stat in log_stats:
            for k in log_stat.keys():
                if isinstance(log_stat[k], list):
                    if k not in sum_dict.keys():
                        sum_dict[k] = [0]*len(log_stat[k])
                    sum_dict[k] = [sum_dict[k][i] + log_stat[k][i]
                                   for i in range(len(log_stat[k]))]
                else:
                    if k not in sum_dict.keys():
                        sum_dict[k] = 0
                    sum_dict[k] += log_stat[k]
    else:
        sum_dict = {}
    return tcids_unfinished, sum_dict, log_stats
[ "def", "analyze_db_reading", "(", "job_prefix", ",", "reading_queue", "=", "'run_db_reading_queue'", ")", ":", "# Analyze reach failures", "log_strs", "=", "get_logs_from_db_reading", "(", "job_prefix", ",", "reading_queue", ")", "indra_log_strs", "=", "[", "]", "all_reach_logs", "=", "[", "]", "log_stats", "=", "[", "]", "for", "log_str", "in", "log_strs", ":", "log_str", ",", "reach_logs", "=", "separate_reach_logs", "(", "log_str", ")", "all_reach_logs", ".", "extend", "(", "reach_logs", ")", "indra_log_strs", ".", "append", "(", "log_str", ")", "log_stats", ".", "append", "(", "get_reading_stats", "(", "log_str", ")", ")", "# Analayze the reach failures.", "failed_reach_logs", "=", "[", "reach_log_str", "for", "result", ",", "reach_log_str", "in", "all_reach_logs", "if", "result", "==", "'FAILURE'", "]", "failed_id_dicts", "=", "[", "analyze_reach_log", "(", "log_str", "=", "reach_log", ")", "for", "reach_log", "in", "failed_reach_logs", "if", "bool", "(", "reach_log", ")", "]", "tcids_unfinished", "=", "{", "id_dict", "[", "'not_done'", "]", "for", "id_dict", "in", "failed_id_dicts", "}", "print", "(", "\"Found %d unfinished tcids.\"", "%", "len", "(", "tcids_unfinished", ")", ")", "# Summarize the global stats", "if", "log_stats", ":", "sum_dict", "=", "dict", ".", "fromkeys", "(", "log_stats", "[", "0", "]", ".", "keys", "(", ")", ")", "for", "log_stat", "in", "log_stats", ":", "for", "k", "in", "log_stat", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "log_stat", "[", "k", "]", ",", "list", ")", ":", "if", "k", "not", "in", "sum_dict", ".", "keys", "(", ")", ":", "sum_dict", "[", "k", "]", "=", "[", "0", "]", "*", "len", "(", "log_stat", "[", "k", "]", ")", "sum_dict", "[", "k", "]", "=", "[", "sum_dict", "[", "k", "]", "[", "i", "]", "+", "log_stat", "[", "k", "]", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "log_stat", "[", "k", "]", ")", ")", "]", "else", ":", "if", "k", "not", "in", "sum_dict", ".", "keys", "(", ")", ":", "sum_dict", "[", "k", "]", "=", "0", "sum_dict", "[", "k", "]", "+=", "log_stat", "[", "k", "]", "else", ":", "sum_dict", "=", "{", "}", "return", "tcids_unfinished", ",", "sum_dict", ",", "log_stats" ]
Run various analyses on a particular reading job.
[ "Run", "various", "analysis", "on", "a", "particular", "reading", "job", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L156-L195
train
sorgerlab/indra
indra/sources/biopax/api.py
process_pc_neighborhood
def process_pc_neighborhood(gene_names, neighbor_limit=1,
                            database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons neighborhood query.

    The neighborhood query finds the neighborhood around a set of source
    genes.

    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search the neighborhood of.
        Examples: ['BRAF'], ['BRAF', 'MAP2K1']
    neighbor_limit : Optional[int]
        The number of steps to limit the size of the neighborhood around
        the gene names being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    model = pcc.graph_query('neighborhood', gene_names,
                            neighbor_limit=neighbor_limit,
                            database_filter=database_filter)
    if model is not None:
        return process_model(model)
python
def process_pc_neighborhood(gene_names, neighbor_limit=1,
                            database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons neighborhood query.

    The neighborhood query finds the neighborhood around a set of source
    genes.

    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search the neighborhood of.
        Examples: ['BRAF'], ['BRAF', 'MAP2K1']
    neighbor_limit : Optional[int]
        The number of steps to limit the size of the neighborhood around
        the gene names being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    model = pcc.graph_query('neighborhood', gene_names,
                            neighbor_limit=neighbor_limit,
                            database_filter=database_filter)
    if model is not None:
        return process_model(model)
[ "def", "process_pc_neighborhood", "(", "gene_names", ",", "neighbor_limit", "=", "1", ",", "database_filter", "=", "None", ")", ":", "model", "=", "pcc", ".", "graph_query", "(", "'neighborhood'", ",", "gene_names", ",", "neighbor_limit", "=", "neighbor_limit", ",", "database_filter", "=", "database_filter", ")", "if", "model", "is", "not", "None", ":", "return", "process_model", "(", "model", ")" ]
Returns a BiopaxProcessor for a PathwayCommons neighborhood query.

The neighborhood query finds the neighborhood around a set of source
genes.

http://www.pathwaycommons.org/pc2/#graph
http://www.pathwaycommons.org/pc2/#graph_kind

Parameters
----------
gene_names : list
    A list of HGNC gene symbols to search the neighborhood of.
    Examples: ['BRAF'], ['BRAF', 'MAP2K1']
neighbor_limit : Optional[int]
    The number of steps to limit the size of the neighborhood around
    the gene names being queried. Default: 1
database_filter : Optional[list]
    A list of database identifiers to which the query is restricted.
    Examples: ['reactome'], ['biogrid', 'pid', 'psp']
    If not given, all databases are used in the query. For a full
    list of databases see http://www.pathwaycommons.org/pc2/datasources

Returns
-------
bp : BiopaxProcessor
    A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "PathwayCommons", "neighborhood", "query", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L8-L41
train
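Usage sketch (editorial addition): a one-step neighborhood around BRAF restricted to Reactome, matching the docstring's examples; this requires network access to the Pathway Commons web service.

# Sketch: query the one-step neighborhood of BRAF in Reactome.
from indra.sources import biopax

bp = biopax.process_pc_neighborhood(['BRAF'], neighbor_limit=1,
                                    database_filter=['reactome'])
stmts = bp.statements if bp is not None else []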
sorgerlab/indra
indra/sources/biopax/api.py
process_pc_pathsbetween
def process_pc_pathsbetween(gene_names, neighbor_limit=1,
                            database_filter=None, block_size=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-between query.

    The paths-between query finds the paths between a set of genes. Here
    source gene names are given in a single list and all directions of
    paths between these genes are considered.

    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search for paths between.
        Examples: ['BRAF', 'MAP2K1']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths between the
        gene names being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources
    block_size : Optional[int]
        Large paths-between queries (above ~60 genes) can error on the
        server side. In this case, the query can be replaced by a series
        of smaller paths-between and paths-from-to queries each of which
        contains block_size genes.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    if not block_size:
        model = pcc.graph_query('pathsbetween', gene_names,
                                neighbor_limit=neighbor_limit,
                                database_filter=database_filter)
        if model is not None:
            return process_model(model)
    else:
        gene_blocks = [gene_names[i:i + block_size] for i in
                       range(0, len(gene_names), block_size)]
        stmts = []
        # Run pathsfromto between pairs of blocks and pathsbetween
        # within each block. This breaks up a single call with N genes into
        # (N/block_size)*(N/blocksize) calls with block_size genes
        for genes1, genes2 in itertools.product(gene_blocks, repeat=2):
            if genes1 == genes2:
                bp = process_pc_pathsbetween(genes1,
                                             database_filter=database_filter,
                                             block_size=None)
            else:
                bp = process_pc_pathsfromto(genes1, genes2,
                                            database_filter=database_filter)
            stmts += bp.statements
python
def process_pc_pathsbetween(gene_names, neighbor_limit=1,
                            database_filter=None, block_size=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-between query.

    The paths-between query finds the paths between a set of genes. Here
    source gene names are given in a single list and all directions of
    paths between these genes are considered.

    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    gene_names : list
        A list of HGNC gene symbols to search for paths between.
        Examples: ['BRAF', 'MAP2K1']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths between the
        gene names being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources
    block_size : Optional[int]
        Large paths-between queries (above ~60 genes) can error on the
        server side. In this case, the query can be replaced by a series
        of smaller paths-between and paths-from-to queries each of which
        contains block_size genes.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    if not block_size:
        model = pcc.graph_query('pathsbetween', gene_names,
                                neighbor_limit=neighbor_limit,
                                database_filter=database_filter)
        if model is not None:
            return process_model(model)
    else:
        gene_blocks = [gene_names[i:i + block_size] for i in
                       range(0, len(gene_names), block_size)]
        stmts = []
        # Run pathsfromto between pairs of blocks and pathsbetween
        # within each block. This breaks up a single call with N genes into
        # (N/block_size)*(N/blocksize) calls with block_size genes
        for genes1, genes2 in itertools.product(gene_blocks, repeat=2):
            if genes1 == genes2:
                bp = process_pc_pathsbetween(genes1,
                                             database_filter=database_filter,
                                             block_size=None)
            else:
                bp = process_pc_pathsfromto(genes1, genes2,
                                            database_filter=database_filter)
            stmts += bp.statements
[ "def", "process_pc_pathsbetween", "(", "gene_names", ",", "neighbor_limit", "=", "1", ",", "database_filter", "=", "None", ",", "block_size", "=", "None", ")", ":", "if", "not", "block_size", ":", "model", "=", "pcc", ".", "graph_query", "(", "'pathsbetween'", ",", "gene_names", ",", "neighbor_limit", "=", "neighbor_limit", ",", "database_filter", "=", "database_filter", ")", "if", "model", "is", "not", "None", ":", "return", "process_model", "(", "model", ")", "else", ":", "gene_blocks", "=", "[", "gene_names", "[", "i", ":", "i", "+", "block_size", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "gene_names", ")", ",", "block_size", ")", "]", "stmts", "=", "[", "]", "# Run pathsfromto between pairs of blocks and pathsbetween", "# within each block. This breaks up a single call with N genes into", "# (N/block_size)*(N/blocksize) calls with block_size genes", "for", "genes1", ",", "genes2", "in", "itertools", ".", "product", "(", "gene_blocks", ",", "repeat", "=", "2", ")", ":", "if", "genes1", "==", "genes2", ":", "bp", "=", "process_pc_pathsbetween", "(", "genes1", ",", "database_filter", "=", "database_filter", ",", "block_size", "=", "None", ")", "else", ":", "bp", "=", "process_pc_pathsfromto", "(", "genes1", ",", "genes2", ",", "database_filter", "=", "database_filter", ")", "stmts", "+=", "bp", ".", "statements" ]
Returns a BiopaxProcessor for a PathwayCommons paths-between query.

The paths-between query finds the paths between a set of genes. Here
source gene names are given in a single list and all directions of
paths between these genes are considered.

http://www.pathwaycommons.org/pc2/#graph
http://www.pathwaycommons.org/pc2/#graph_kind

Parameters
----------
gene_names : list
    A list of HGNC gene symbols to search for paths between.
    Examples: ['BRAF', 'MAP2K1']
neighbor_limit : Optional[int]
    The number of steps to limit the length of the paths between the
    gene names being queried. Default: 1
database_filter : Optional[list]
    A list of database identifiers to which the query is restricted.
    Examples: ['reactome'], ['biogrid', 'pid', 'psp']
    If not given, all databases are used in the query. For a full
    list of databases see http://www.pathwaycommons.org/pc2/datasources
block_size : Optional[int]
    Large paths-between queries (above ~60 genes) can error on the
    server side. In this case, the query can be replaced by a series
    of smaller paths-between and paths-from-to queries each of which
    contains block_size genes.

Returns
-------
bp : BiopaxProcessor
    A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "PathwayCommons", "paths", "-", "between", "query", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L44-L101
train
sorgerlab/indra
indra/sources/biopax/api.py
process_pc_pathsfromto
def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1,
                           database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.

    The paths-from-to query finds the paths from a set of source genes to
    a set of target genes.

    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    source_genes : list
        A list of HGNC gene symbols that are the sources of paths being
        searched for.
        Examples: ['BRAF', 'RAF1', 'ARAF']
    target_genes : list
        A list of HGNC gene symbols that are the targets of paths being
        searched for.
        Examples: ['MAP2K1', 'MAP2K2']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths between the
        source genes and target genes being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    model = pcc.graph_query('pathsfromto', source_genes,
                            target_genes,
                            neighbor_limit=neighbor_limit,
                            database_filter=database_filter)
    if model is not None:
        return process_model(model)
python
def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1,
                           database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.

    The paths-from-to query finds the paths from a set of source genes to
    a set of target genes.

    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind

    Parameters
    ----------
    source_genes : list
        A list of HGNC gene symbols that are the sources of paths being
        searched for.
        Examples: ['BRAF', 'RAF1', 'ARAF']
    target_genes : list
        A list of HGNC gene symbols that are the targets of paths being
        searched for.
        Examples: ['MAP2K1', 'MAP2K2']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths between the
        source genes and target genes being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    model = pcc.graph_query('pathsfromto', source_genes,
                            target_genes,
                            neighbor_limit=neighbor_limit,
                            database_filter=database_filter)
    if model is not None:
        return process_model(model)
[ "def", "process_pc_pathsfromto", "(", "source_genes", ",", "target_genes", ",", "neighbor_limit", "=", "1", ",", "database_filter", "=", "None", ")", ":", "model", "=", "pcc", ".", "graph_query", "(", "'pathsfromto'", ",", "source_genes", ",", "target_genes", ",", "neighbor_limit", "=", "neighbor_limit", ",", "database_filter", "=", "database_filter", ")", "if", "model", "is", "not", "None", ":", "return", "process_model", "(", "model", ")" ]
Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.

The paths-from-to query finds the paths from a set of source genes to
a set of target genes.

http://www.pathwaycommons.org/pc2/#graph
http://www.pathwaycommons.org/pc2/#graph_kind

Parameters
----------
source_genes : list
    A list of HGNC gene symbols that are the sources of paths being
    searched for.
    Examples: ['BRAF', 'RAF1', 'ARAF']
target_genes : list
    A list of HGNC gene symbols that are the targets of paths being
    searched for.
    Examples: ['MAP2K1', 'MAP2K2']
neighbor_limit : Optional[int]
    The number of steps to limit the length of the paths between the
    source genes and target genes being queried. Default: 1
database_filter : Optional[list]
    A list of database identifiers to which the query is restricted.
    Examples: ['reactome'], ['biogrid', 'pid', 'psp']
    If not given, all databases are used in the query. For a full
    list of databases see http://www.pathwaycommons.org/pc2/datasources

Returns
-------
bp : BiopaxProcessor
    A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "PathwayCommons", "paths", "-", "from", "-", "to", "query", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L104-L143
train
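Usage sketch (editorial addition): directed paths from RAF-family kinases to MEK genes, reusing the docstring's example gene lists; requires the Pathway Commons web service.

# Sketch: find paths from RAF kinases to MEK genes.
from indra.sources import biopax

bp = biopax.process_pc_pathsfromto(['BRAF', 'RAF1'], ['MAP2K1', 'MAP2K2'])
stmts = bp.statements if bp is not None else []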
sorgerlab/indra
indra/sources/biopax/api.py
process_model
def process_model(model):
    """Returns a BiopaxProcessor for a BioPAX model object.

    Parameters
    ----------
    model : org.biopax.paxtools.model.Model
        A BioPAX model object.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    bp = BiopaxProcessor(model)
    bp.get_modifications()
    bp.get_regulate_activities()
    bp.get_regulate_amounts()
    bp.get_activity_modification()
    bp.get_gef()
    bp.get_gap()
    bp.get_conversions()
    # bp.get_complexes()
    bp.eliminate_exact_duplicates()
    return bp
python
def process_model(model):
    """Returns a BiopaxProcessor for a BioPAX model object.

    Parameters
    ----------
    model : org.biopax.paxtools.model.Model
        A BioPAX model object.

    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    bp = BiopaxProcessor(model)
    bp.get_modifications()
    bp.get_regulate_activities()
    bp.get_regulate_amounts()
    bp.get_activity_modification()
    bp.get_gef()
    bp.get_gap()
    bp.get_conversions()
    # bp.get_complexes()
    bp.eliminate_exact_duplicates()
    return bp
[ "def", "process_model", "(", "model", ")", ":", "bp", "=", "BiopaxProcessor", "(", "model", ")", "bp", ".", "get_modifications", "(", ")", "bp", ".", "get_regulate_activities", "(", ")", "bp", ".", "get_regulate_amounts", "(", ")", "bp", ".", "get_activity_modification", "(", ")", "bp", ".", "get_gef", "(", ")", "bp", ".", "get_gap", "(", ")", "bp", ".", "get_conversions", "(", ")", "# bp.get_complexes()", "bp", ".", "eliminate_exact_duplicates", "(", ")", "return", "bp" ]
Returns a BiopaxProcessor for a BioPAX model object.

Parameters
----------
model : org.biopax.paxtools.model.Model
    A BioPAX model object.

Returns
-------
bp : BiopaxProcessor
    A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "BioPAX", "model", "object", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L163-L186
train
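Usage sketch (editorial addition): process_model is what the query functions above call internally; applying it to a model loaded from a local OWL file assumes a process_owl helper in the same api module, which is not shown in these records.

# Sketch: process a locally stored BioPAX OWL model end to end.
from indra.sources import biopax

bp = biopax.process_owl('model.owl')  # process_owl is assumed, see note
stmts = bp.statements if bp is not None else []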
sorgerlab/indra
indra/benchmarks/assembly_eval/batch4/assembly_eval.py
is_background_knowledge
def is_background_knowledge(stmt):
    '''Return True if Statement is only supported by background knowledge.'''
    any_background = False
    # Iterate over all evidence for the statement
    for ev in stmt.evidence:
        epi = ev.epistemics
        if epi is not None:
            sec = epi.get('section_type')
            # If there is at least one evidence not from a
            # background section then we consider this to be
            # a non-background knowledge finding.
            if sec is not None and sec not in background_secs:
                return False
            # If there is at least one evidence that is explicitly
            # from a background section then we keep track of that.
            elif sec in background_secs:
                any_background = True
    # If there is any explicit evidence for this statement being
    # background info (and no evidence otherwise) then we return
    # True, otherwise (for instance if there is no section info at all)
    # we return False.
    return any_background
python
def is_background_knowledge(stmt):
    '''Return True if Statement is only supported by background knowledge.'''
    any_background = False
    # Iterate over all evidence for the statement
    for ev in stmt.evidence:
        epi = ev.epistemics
        if epi is not None:
            sec = epi.get('section_type')
            # If there is at least one evidence not from a
            # background section then we consider this to be
            # a non-background knowledge finding.
            if sec is not None and sec not in background_secs:
                return False
            # If there is at least one evidence that is explicitly
            # from a background section then we keep track of that.
            elif sec in background_secs:
                any_background = True
    # If there is any explicit evidence for this statement being
    # background info (and no evidence otherwise) then we return
    # True, otherwise (for instance if there is no section info at all)
    # we return False.
    return any_background
[ "def", "is_background_knowledge", "(", "stmt", ")", ":", "any_background", "=", "False", "# Iterate over all evidence for the statement", "for", "ev", "in", "stmt", ".", "evidence", ":", "epi", "=", "ev", ".", "epistemics", "if", "epi", "is", "not", "None", ":", "sec", "=", "epi", ".", "get", "(", "'section_type'", ")", "# If there is at least one evidence not from a ", "# background section then we consider this to be", "# a non-background knowledge finding.", "if", "sec", "is", "not", "None", "and", "sec", "not", "in", "background_secs", ":", "return", "False", "# If there is at least one evidence that is explicitly", "# from a background section then we keep track of that.", "elif", "sec", "in", "background_secs", ":", "any_background", "=", "True", "# If there is any explicit evidence for this statement being", "# background info (and no evidence otherwise) then we return", "# True, otherwise (for instnace of there is no section info at all)", "# we return False.", "return", "any_background" ]
Return True if Statement is only supported by background knowledge.
[ "Return", "True", "if", "Statement", "is", "only", "supported", "by", "background", "knowledge", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/benchmarks/assembly_eval/batch4/assembly_eval.py#L45-L66
train
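A small self-contained check of the section-based filter above, using stand-in Evidence and Statement objects; background_secs is assumed to be a module-level list along these lines.

background_secs = ['abstract', 'introduction', 'background']

class Ev:
    def __init__(self, section_type):
        self.epistemics = {'section_type': section_type}

class Stmt:
    def __init__(self, evidence):
        self.evidence = evidence

# All evidence from background sections -> True
assert is_background_knowledge(Stmt([Ev('introduction'), Ev('background')]))
# Any evidence from a non-background section -> False
assert not is_background_knowledge(Stmt([Ev('background'), Ev('results')]))
# No section information at all -> False
assert not is_background_knowledge(Stmt([Ev(None)]))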
sorgerlab/indra
indra/benchmarks/assembly_eval/batch4/assembly_eval.py
multiple_sources
def multiple_sources(stmt): '''Return True if statement is supported by multiple sources. Note: this is currently not used; it has been replaced by the BeliefEngine score cutoff. ''' sources = list(set([e.source_api for e in stmt.evidence])) if len(sources) > 1: return True return False
python
def multiple_sources(stmt): '''Return True if statement is supported by multiple sources. Note: this is currently not used; it has been replaced by the BeliefEngine score cutoff. ''' sources = list(set([e.source_api for e in stmt.evidence])) if len(sources) > 1: return True return False
[ "def", "multiple_sources", "(", "stmt", ")", ":", "sources", "=", "list", "(", "set", "(", "[", "e", ".", "source_api", "for", "e", "in", "stmt", ".", "evidence", "]", ")", ")", "if", "len", "(", "sources", ")", ">", "1", ":", "return", "True", "return", "False" ]
Return True if statement is supported by multiple sources. Note: this is currently not used; it has been replaced by the BeliefEngine score cutoff.
[ "Return", "True", "if", "statement", "is", "supported", "by", "multiple", "sources", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/benchmarks/assembly_eval/batch4/assembly_eval.py#L68-L76
train
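The same stand-in pattern illustrates the (deprecated) multi-source filter: only statements whose evidence spans more than one source API pass.

class Ev:
    def __init__(self, source_api):
        self.source_api = source_api

class Stmt:
    def __init__(self, evidence):
        self.evidence = evidence

assert multiple_sources(Stmt([Ev('reach'), Ev('sparser')]))
assert not multiple_sources(Stmt([Ev('reach'), Ev('reach')]))  # one source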
sorgerlab/indra
indra/sources/geneways/symbols_parser.py
GenewaysSymbols.id_to_symbol
def id_to_symbol(self, entrez_id): """Gives the symbol for a given Entrez ID.""" entrez_id = str(entrez_id) if entrez_id not in self.ids_to_symbols: m = 'Could not look up symbol for Entrez ID ' + entrez_id raise Exception(m) return self.ids_to_symbols[entrez_id]
python
def id_to_symbol(self, entrez_id): """Gives the symbol for a given Entrez ID.""" entrez_id = str(entrez_id) if entrez_id not in self.ids_to_symbols: m = 'Could not look up symbol for Entrez ID ' + entrez_id raise Exception(m) return self.ids_to_symbols[entrez_id]
[ "def", "id_to_symbol", "(", "self", ",", "entrez_id", ")", ":", "entrez_id", "=", "str", "(", "entrez_id", ")", "if", "entrez_id", "not", "in", "self", ".", "ids_to_symbols", ":", "m", "=", "'Could not look up symbol for Entrez ID '", "+", "entrez_id", "raise", "Exception", "(", "m", ")", "return", "self", ".", "ids_to_symbols", "[", "entrez_id", "]" ]
Gives the symbol for a given Entrez ID.
[ "Gives", "the", "symbol", "for", "a", "given", "entrez", "id", ")" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/symbols_parser.py#L59-L66
train
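The essential behavior of id_to_symbol, sketched standalone: the ID is coerced to str before lookup, and a missing ID raises with a readable message. The example mapping is illustrative only.

ids_to_symbols = {'1956': 'EGFR', '3845': 'KRAS'}

def id_to_symbol(entrez_id):
    entrez_id = str(entrez_id)  # accept int or str IDs
    if entrez_id not in ids_to_symbols:
        raise Exception('Could not look up symbol for Entrez ID '
                        + entrez_id)
    return ids_to_symbols[entrez_id]

assert id_to_symbol(1956) == 'EGFR'
assert id_to_symbol('3845') == 'KRAS'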
sorgerlab/indra
indra/assemblers/tsv/assembler.py
TsvAssembler.make_model
def make_model(self, output_file, add_curation_cols=False, up_only=False): """Export the statements into a tab-separated text file. Parameters ---------- output_file : str Name of the output file. add_curation_cols : bool Whether to add columns to facilitate statement curation. Default is False (no additional columns). up_only : bool Whether to include identifiers.org links *only* for the Uniprot grounding of an agent when one is available. Because most spreadsheets allow only a single hyperlink per cell, this can make it easier to link to Uniprot information pages for curation purposes. Default is False. """ stmt_header = ['INDEX', 'UUID', 'TYPE', 'STR', 'AG_A_TEXT', 'AG_A_LINKS', 'AG_A_STR', 'AG_B_TEXT', 'AG_B_LINKS', 'AG_B_STR', 'PMID', 'TEXT', 'IS_HYP', 'IS_DIRECT'] if add_curation_cols: stmt_header = stmt_header + \ ['AG_A_IDS_CORRECT', 'AG_A_STATE_CORRECT', 'AG_B_IDS_CORRECT', 'AG_B_STATE_CORRECT', 'EVENT_CORRECT', 'RES_CORRECT', 'POS_CORRECT', 'SUBJ_ACT_CORRECT', 'OBJ_ACT_CORRECT', 'HYP_CORRECT', 'DIRECT_CORRECT'] rows = [stmt_header] for ix, stmt in enumerate(self.statements): # Complexes if len(stmt.agent_list()) > 2: logger.info("Skipping statement with more than two members: %s" % stmt) continue # Self-modifications, ActiveForms elif len(stmt.agent_list()) == 1: ag_a = stmt.agent_list()[0] ag_b = None # All others else: (ag_a, ag_b) = stmt.agent_list() # Put together the data row row = [ix+1, stmt.uuid, stmt.__class__.__name__, str(stmt)] + \ _format_agent_entries(ag_a, up_only) + \ _format_agent_entries(ag_b, up_only) + \ [stmt.evidence[0].pmid, stmt.evidence[0].text, stmt.evidence[0].epistemics.get('hypothesis', ''), stmt.evidence[0].epistemics.get('direct', '')] if add_curation_cols: row = row + ([''] * 11) rows.append(row) # Write to file write_unicode_csv(output_file, rows, delimiter='\t')
python
def make_model(self, output_file, add_curation_cols=False, up_only=False): """Export the statements into a tab-separated text file. Parameters ---------- output_file : str Name of the output file. add_curation_cols : bool Whether to add columns to facilitate statement curation. Default is False (no additional columns). up_only : bool Whether to include identifiers.org links *only* for the Uniprot grounding of an agent when one is available. Because most spreadsheets allow only a single hyperlink per cell, this can make it easier to link to Uniprot information pages for curation purposes. Default is False. """ stmt_header = ['INDEX', 'UUID', 'TYPE', 'STR', 'AG_A_TEXT', 'AG_A_LINKS', 'AG_A_STR', 'AG_B_TEXT', 'AG_B_LINKS', 'AG_B_STR', 'PMID', 'TEXT', 'IS_HYP', 'IS_DIRECT'] if add_curation_cols: stmt_header = stmt_header + \ ['AG_A_IDS_CORRECT', 'AG_A_STATE_CORRECT', 'AG_B_IDS_CORRECT', 'AG_B_STATE_CORRECT', 'EVENT_CORRECT', 'RES_CORRECT', 'POS_CORRECT', 'SUBJ_ACT_CORRECT', 'OBJ_ACT_CORRECT', 'HYP_CORRECT', 'DIRECT_CORRECT'] rows = [stmt_header] for ix, stmt in enumerate(self.statements): # Complexes if len(stmt.agent_list()) > 2: logger.info("Skipping statement with more than two members: %s" % stmt) continue # Self-modifications, ActiveForms elif len(stmt.agent_list()) == 1: ag_a = stmt.agent_list()[0] ag_b = None # All others else: (ag_a, ag_b) = stmt.agent_list() # Put together the data row row = [ix+1, stmt.uuid, stmt.__class__.__name__, str(stmt)] + \ _format_agent_entries(ag_a, up_only) + \ _format_agent_entries(ag_b, up_only) + \ [stmt.evidence[0].pmid, stmt.evidence[0].text, stmt.evidence[0].epistemics.get('hypothesis', ''), stmt.evidence[0].epistemics.get('direct', '')] if add_curation_cols: row = row + ([''] * 11) rows.append(row) # Write to file write_unicode_csv(output_file, rows, delimiter='\t')
[ "def", "make_model", "(", "self", ",", "output_file", ",", "add_curation_cols", "=", "False", ",", "up_only", "=", "False", ")", ":", "stmt_header", "=", "[", "'INDEX'", ",", "'UUID'", ",", "'TYPE'", ",", "'STR'", ",", "'AG_A_TEXT'", ",", "'AG_A_LINKS'", ",", "'AG_A_STR'", ",", "'AG_B_TEXT'", ",", "'AG_B_LINKS'", ",", "'AG_B_STR'", ",", "'PMID'", ",", "'TEXT'", ",", "'IS_HYP'", ",", "'IS_DIRECT'", "]", "if", "add_curation_cols", ":", "stmt_header", "=", "stmt_header", "+", "[", "'AG_A_IDS_CORRECT'", ",", "'AG_A_STATE_CORRECT'", ",", "'AG_B_IDS_CORRECT'", ",", "'AG_B_STATE_CORRECT'", ",", "'EVENT_CORRECT'", ",", "'RES_CORRECT'", ",", "'POS_CORRECT'", ",", "'SUBJ_ACT_CORRECT'", ",", "'OBJ_ACT_CORRECT'", ",", "'HYP_CORRECT'", ",", "'DIRECT_CORRECT'", "]", "rows", "=", "[", "stmt_header", "]", "for", "ix", ",", "stmt", "in", "enumerate", "(", "self", ".", "statements", ")", ":", "# Complexes", "if", "len", "(", "stmt", ".", "agent_list", "(", ")", ")", ">", "2", ":", "logger", ".", "info", "(", "\"Skipping statement with more than two members: %s\"", "%", "stmt", ")", "continue", "# Self-modifications, ActiveForms", "elif", "len", "(", "stmt", ".", "agent_list", "(", ")", ")", "==", "1", ":", "ag_a", "=", "stmt", ".", "agent_list", "(", ")", "[", "0", "]", "ag_b", "=", "None", "# All others", "else", ":", "(", "ag_a", ",", "ag_b", ")", "=", "stmt", ".", "agent_list", "(", ")", "# Put together the data row", "row", "=", "[", "ix", "+", "1", ",", "stmt", ".", "uuid", ",", "stmt", ".", "__class__", ".", "__name__", ",", "str", "(", "stmt", ")", "]", "+", "_format_agent_entries", "(", "ag_a", ",", "up_only", ")", "+", "_format_agent_entries", "(", "ag_b", ",", "up_only", ")", "+", "[", "stmt", ".", "evidence", "[", "0", "]", ".", "pmid", ",", "stmt", ".", "evidence", "[", "0", "]", ".", "text", ",", "stmt", ".", "evidence", "[", "0", "]", ".", "epistemics", ".", "get", "(", "'hypothesis'", ",", "''", ")", ",", "stmt", ".", "evidence", "[", "0", "]", ".", "epistemics", ".", "get", "(", "'direct'", ",", "''", ")", "]", "if", "add_curation_cols", ":", "row", "=", "row", "+", "(", "[", "''", "]", "*", "11", ")", "rows", ".", "append", "(", "row", ")", "# Write to file", "write_unicode_csv", "(", "output_file", ",", "rows", ",", "delimiter", "=", "'\\t'", ")" ]
Export the statements into a tab-separated text file. Parameters ---------- output_file : str Name of the output file. add_curation_cols : bool Whether to add columns to facilitate statement curation. Default is False (no additional columns). up_only : bool Whether to include identifiers.org links *only* for the Uniprot grounding of an agent when one is available. Because most spreadsheets allow only a single hyperlink per cell, this can make it easier to link to Uniprot information pages for curation purposes. Default is False.
[ "Export", "the", "statements", "into", "a", "tab", "-", "separated", "text", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/tsv/assembler.py#L109-L163
train
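A hedged usage sketch for the curation export; it assumes, as with other INDRA assemblers, that TsvAssembler can be constructed from a list of INDRA Statements (here called stmts).

from indra.assemblers.tsv import TsvAssembler

ta = TsvAssembler(stmts)
# One row per two-agent statement; extra blank curation columns and
# Uniprot-only links make the TSV easier to review in a spreadsheet.
ta.make_model('statements.tsv', add_curation_cols=True, up_only=True)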
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgentSet.get_create_base_agent
def get_create_base_agent(self, agent): """Return base agent with given name, creating it if needed.""" try: base_agent = self.agents[_n(agent.name)] except KeyError: base_agent = BaseAgent(_n(agent.name)) self.agents[_n(agent.name)] = base_agent # If it's a molecular agent if isinstance(agent, Agent): # Handle bound conditions for bc in agent.bound_conditions: bound_base_agent = self.get_create_base_agent(bc.agent) bound_base_agent.create_site(get_binding_site_name(agent)) base_agent.create_site(get_binding_site_name(bc.agent)) # Handle modification conditions for mc in agent.mods: base_agent.create_mod_site(mc) # Handle mutation conditions for mc in agent.mutations: res_from = mc.residue_from if mc.residue_from else 'mut' res_to = mc.residue_to if mc.residue_to else 'X' if mc.position is None: mut_site_name = res_from else: mut_site_name = res_from + mc.position base_agent.create_site(mut_site_name, states=['WT', res_to]) # Handle location condition if agent.location is not None: base_agent.create_site('loc', [_n(agent.location)]) # Handle activity if agent.activity is not None: site_name = agent.activity.activity_type base_agent.create_site(site_name, ['inactive', 'active']) # There might be overwrites here for db_name, db_ref in agent.db_refs.items(): base_agent.db_refs[db_name] = db_ref return base_agent
python
def get_create_base_agent(self, agent): """Return base agent with given name, creating it if needed.""" try: base_agent = self.agents[_n(agent.name)] except KeyError: base_agent = BaseAgent(_n(agent.name)) self.agents[_n(agent.name)] = base_agent # If it's a molecular agent if isinstance(agent, Agent): # Handle bound conditions for bc in agent.bound_conditions: bound_base_agent = self.get_create_base_agent(bc.agent) bound_base_agent.create_site(get_binding_site_name(agent)) base_agent.create_site(get_binding_site_name(bc.agent)) # Handle modification conditions for mc in agent.mods: base_agent.create_mod_site(mc) # Handle mutation conditions for mc in agent.mutations: res_from = mc.residue_from if mc.residue_from else 'mut' res_to = mc.residue_to if mc.residue_to else 'X' if mc.position is None: mut_site_name = res_from else: mut_site_name = res_from + mc.position base_agent.create_site(mut_site_name, states=['WT', res_to]) # Handle location condition if agent.location is not None: base_agent.create_site('loc', [_n(agent.location)]) # Handle activity if agent.activity is not None: site_name = agent.activity.activity_type base_agent.create_site(site_name, ['inactive', 'active']) # There might be overwrites here for db_name, db_ref in agent.db_refs.items(): base_agent.db_refs[db_name] = db_ref return base_agent
[ "def", "get_create_base_agent", "(", "self", ",", "agent", ")", ":", "try", ":", "base_agent", "=", "self", ".", "agents", "[", "_n", "(", "agent", ".", "name", ")", "]", "except", "KeyError", ":", "base_agent", "=", "BaseAgent", "(", "_n", "(", "agent", ".", "name", ")", ")", "self", ".", "agents", "[", "_n", "(", "agent", ".", "name", ")", "]", "=", "base_agent", "# If it's a molecular agent", "if", "isinstance", "(", "agent", ",", "Agent", ")", ":", "# Handle bound conditions", "for", "bc", "in", "agent", ".", "bound_conditions", ":", "bound_base_agent", "=", "self", ".", "get_create_base_agent", "(", "bc", ".", "agent", ")", "bound_base_agent", ".", "create_site", "(", "get_binding_site_name", "(", "agent", ")", ")", "base_agent", ".", "create_site", "(", "get_binding_site_name", "(", "bc", ".", "agent", ")", ")", "# Handle modification conditions", "for", "mc", "in", "agent", ".", "mods", ":", "base_agent", ".", "create_mod_site", "(", "mc", ")", "# Handle mutation conditions", "for", "mc", "in", "agent", ".", "mutations", ":", "res_from", "=", "mc", ".", "residue_from", "if", "mc", ".", "residue_from", "else", "'mut'", "res_to", "=", "mc", ".", "residue_to", "if", "mc", ".", "residue_to", "else", "'X'", "if", "mc", ".", "position", "is", "None", ":", "mut_site_name", "=", "res_from", "else", ":", "mut_site_name", "=", "res_from", "+", "mc", ".", "position", "base_agent", ".", "create_site", "(", "mut_site_name", ",", "states", "=", "[", "'WT'", ",", "res_to", "]", ")", "# Handle location condition", "if", "agent", ".", "location", "is", "not", "None", ":", "base_agent", ".", "create_site", "(", "'loc'", ",", "[", "_n", "(", "agent", ".", "location", ")", "]", ")", "# Handle activity", "if", "agent", ".", "activity", "is", "not", "None", ":", "site_name", "=", "agent", ".", "activity", ".", "activity_type", "base_agent", ".", "create_site", "(", "site_name", ",", "[", "'inactive'", ",", "'active'", "]", ")", "# There might be overwrites here", "for", "db_name", ",", "db_ref", "in", "agent", ".", "db_refs", ".", "items", "(", ")", ":", "base_agent", ".", "db_refs", "[", "db_name", "]", "=", "db_ref", "return", "base_agent" ]
Return base agent with given name, creating it if needed.
[ "Return", "base", "agent", "with", "given", "name", "creating", "it", "if", "needed", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L13-L57
train
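At its core, get_create_base_agent is a get-or-create lookup keyed by a normalized agent name; a minimal standalone version of that pattern, with a stand-in for the _n() normalization:

registry = {}

def get_create(name):
    key = name.replace('-', '_')  # stand-in normalization
    if key not in registry:
        registry[key] = {'name': key, 'sites': [], 'site_states': {}}
    return registry[key]

a = get_create('MAP2K1')
b = get_create('MAP2K1')
assert a is b  # both calls return the same underlying record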
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.create_site
def create_site(self, site, states=None): """Create a new site on an agent if it doesn't already exist.""" if site not in self.sites: self.sites.append(site) if states is not None: self.site_states.setdefault(site, []) try: states = list(states) except TypeError: return self.add_site_states(site, states)
python
def create_site(self, site, states=None): """Create a new site on an agent if it doesn't already exist.""" if site not in self.sites: self.sites.append(site) if states is not None: self.site_states.setdefault(site, []) try: states = list(states) except TypeError: return self.add_site_states(site, states)
[ "def", "create_site", "(", "self", ",", "site", ",", "states", "=", "None", ")", ":", "if", "site", "not", "in", "self", ".", "sites", ":", "self", ".", "sites", ".", "append", "(", "site", ")", "if", "states", "is", "not", "None", ":", "self", ".", "site_states", ".", "setdefault", "(", "site", ",", "[", "]", ")", "try", ":", "states", "=", "list", "(", "states", ")", "except", "TypeError", ":", "return", "self", ".", "add_site_states", "(", "site", ",", "states", ")" ]
Create a new site on an agent if it doesn't already exist.
[ "Create", "a", "new", "site", "on", "an", "agent", "if", "it", "doesn", "t", "already", "exist", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L90-L100
train
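A behavior sketch of create_site with plain data structures mirroring BaseAgent.sites and BaseAgent.site_states: the site is appended once, and states passed on later calls are merged without duplicates (the TypeError guard for non-iterable states is omitted here).

sites, site_states = [], {}

def create_site(site, states=None):
    if site not in sites:
        sites.append(site)
    if states is not None:
        site_states.setdefault(site, [])
        for state in states:
            if state not in site_states[site]:
                site_states[site].append(state)

create_site('T185', ['u', 'p'])
create_site('T185', ['p'])  # neither site nor state is duplicated
assert sites == ['T185'] and site_states['T185'] == ['u', 'p']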
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.create_mod_site
def create_mod_site(self, mc): """Create modification site for the BaseAgent from a ModCondition.""" site_name = get_mod_site_name(mc) (unmod_site_state, mod_site_state) = states[mc.mod_type] self.create_site(site_name, (unmod_site_state, mod_site_state)) site_anns = [Annotation((site_name, mod_site_state), mc.mod_type, 'is_modification')] if mc.residue: site_anns.append(Annotation(site_name, mc.residue, 'is_residue')) if mc.position: site_anns.append(Annotation(site_name, mc.position, 'is_position')) self.site_annotations += site_anns
python
def create_mod_site(self, mc): """Create modification site for the BaseAgent from a ModCondition.""" site_name = get_mod_site_name(mc) (unmod_site_state, mod_site_state) = states[mc.mod_type] self.create_site(site_name, (unmod_site_state, mod_site_state)) site_anns = [Annotation((site_name, mod_site_state), mc.mod_type, 'is_modification')] if mc.residue: site_anns.append(Annotation(site_name, mc.residue, 'is_residue')) if mc.position: site_anns.append(Annotation(site_name, mc.position, 'is_position')) self.site_annotations += site_anns
[ "def", "create_mod_site", "(", "self", ",", "mc", ")", ":", "site_name", "=", "get_mod_site_name", "(", "mc", ")", "(", "unmod_site_state", ",", "mod_site_state", ")", "=", "states", "[", "mc", ".", "mod_type", "]", "self", ".", "create_site", "(", "site_name", ",", "(", "unmod_site_state", ",", "mod_site_state", ")", ")", "site_anns", "=", "[", "Annotation", "(", "(", "site_name", ",", "mod_site_state", ")", ",", "mc", ".", "mod_type", ",", "'is_modification'", ")", "]", "if", "mc", ".", "residue", ":", "site_anns", ".", "append", "(", "Annotation", "(", "site_name", ",", "mc", ".", "residue", ",", "'is_residue'", ")", ")", "if", "mc", ".", "position", ":", "site_anns", ".", "append", "(", "Annotation", "(", "site_name", ",", "mc", ".", "position", ",", "'is_position'", ")", ")", "self", ".", "site_annotations", "+=", "site_anns" ]
Create modification site for the BaseAgent from a ModCondition.
[ "Create", "modification", "site", "for", "the", "BaseAgent", "from", "a", "ModCondition", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L102-L113
train
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.add_site_states
def add_site_states(self, site, states): """Create new states on an agent site if the state doesn't exist.""" for state in states: if state not in self.site_states[site]: self.site_states[site].append(state)
python
def add_site_states(self, site, states): """Create new states on an agent site if the state doesn't exist.""" for state in states: if state not in self.site_states[site]: self.site_states[site].append(state)
[ "def", "add_site_states", "(", "self", ",", "site", ",", "states", ")", ":", "for", "state", "in", "states", ":", "if", "state", "not", "in", "self", ".", "site_states", "[", "site", "]", ":", "self", ".", "site_states", "[", "site", "]", ".", "append", "(", "state", ")" ]
Create new states on an agent site if the state doesn't exist.
[ "Create", "new", "states", "on", "an", "agent", "site", "if", "the", "state", "doesn", "t", "exist", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L115-L119
train
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.add_activity_form
def add_activity_form(self, activity_pattern, is_active): """Adds the pattern as an active or inactive form to an Agent. Parameters ---------- activity_pattern : dict A dictionary of site names and their states. is_active : bool Is True if the given pattern corresponds to an active state. """ if is_active: if activity_pattern not in self.active_forms: self.active_forms.append(activity_pattern) else: if activity_pattern not in self.inactive_forms: self.inactive_forms.append(activity_pattern)
python
def add_activity_form(self, activity_pattern, is_active): """Adds the pattern as an active or inactive form to an Agent. Parameters ---------- activity_pattern : dict A dictionary of site names and their states. is_active : bool Is True if the given pattern corresponds to an active state. """ if is_active: if activity_pattern not in self.active_forms: self.active_forms.append(activity_pattern) else: if activity_pattern not in self.inactive_forms: self.inactive_forms.append(activity_pattern)
[ "def", "add_activity_form", "(", "self", ",", "activity_pattern", ",", "is_active", ")", ":", "if", "is_active", ":", "if", "activity_pattern", "not", "in", "self", ".", "active_forms", ":", "self", ".", "active_forms", ".", "append", "(", "activity_pattern", ")", "else", ":", "if", "activity_pattern", "not", "in", "self", ".", "inactive_forms", ":", "self", ".", "inactive_forms", ".", "append", "(", "activity_pattern", ")" ]
Adds the pattern as an active or inactive form to an Agent. Parameters ---------- activity_pattern : dict A dictionary of site names and their states. is_active : bool Is True if the given pattern corresponds to an active state.
[ "Adds", "the", "pattern", "as", "an", "active", "or", "inactive", "form", "to", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L121-L136
train
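How activity patterns accumulate without duplicates, sketched with plain lists; patterns are dicts of site names to states, as in the docstring.

active_forms, inactive_forms = [], []

def add_activity_form(pattern, is_active):
    forms = active_forms if is_active else inactive_forms
    if pattern not in forms:
        forms.append(pattern)

add_activity_form({'T185': 'p', 'Y187': 'p'}, True)
add_activity_form({'T185': 'p', 'Y187': 'p'}, True)  # duplicate ignored
add_activity_form({'T185': 'u'}, False)
assert len(active_forms) == 1 and len(inactive_forms) == 1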
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.add_activity_type
def add_activity_type(self, activity_type): """Adds an activity type to an Agent. Parameters ---------- activity_type : str The type of activity to add such as 'activity', 'kinase', 'gtpbound' """ if activity_type not in self.activity_types: self.activity_types.append(activity_type)
python
def add_activity_type(self, activity_type): """Adds an activity type to an Agent. Parameters ---------- activity_type : str The type of activity to add such as 'activity', 'kinase', 'gtpbound' """ if activity_type not in self.activity_types: self.activity_types.append(activity_type)
[ "def", "add_activity_type", "(", "self", ",", "activity_type", ")", ":", "if", "activity_type", "not", "in", "self", ".", "activity_types", ":", "self", ".", "activity_types", ".", "append", "(", "activity_type", ")" ]
Adds an activity type to an Agent. Parameters ---------- activity_type : str The type of activity to add such as 'activity', 'kinase', 'gtpbound'
[ "Adds", "an", "activity", "type", "to", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L138-L148
train
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysAction.make_annotation
def make_annotation(self): """Returns a dictionary with all properties of the action and each of its action mentions.""" annotation = dict() # Put all properties of the action object into the annotation for item in dir(self): if len(item) > 0 and item[0] != '_' and \ not inspect.ismethod(getattr(self, item)): annotation[item] = getattr(self, item) # Add properties of each action mention annotation['action_mentions'] = list() for action_mention in self.action_mentions: annotation_mention = action_mention.make_annotation() annotation['action_mentions'].append(annotation_mention) return annotation
python
def make_annotation(self): """Returns a dictionary with all properties of the action and each of its action mentions.""" annotation = dict() # Put all properties of the action object into the annotation for item in dir(self): if len(item) > 0 and item[0] != '_' and \ not inspect.ismethod(getattr(self, item)): annotation[item] = getattr(self, item) # Add properties of each action mention annotation['action_mentions'] = list() for action_mention in self.action_mentions: annotation_mention = action_mention.make_annotation() annotation['action_mentions'].append(annotation_mention) return annotation
[ "def", "make_annotation", "(", "self", ")", ":", "annotation", "=", "dict", "(", ")", "# Put all properties of the action object into the annotation", "for", "item", "in", "dir", "(", "self", ")", ":", "if", "len", "(", "item", ")", ">", "0", "and", "item", "[", "0", "]", "!=", "'_'", "and", "not", "inspect", ".", "ismethod", "(", "getattr", "(", "self", ",", "item", ")", ")", ":", "annotation", "[", "item", "]", "=", "getattr", "(", "self", ",", "item", ")", "# Add properties of each action mention", "annotation", "[", "'action_mentions'", "]", "=", "list", "(", ")", "for", "action_mention", "in", "self", ".", "action_mentions", ":", "annotation_mention", "=", "action_mention", ".", "make_annotation", "(", ")", "annotation", "[", "'action_mentions'", "]", ".", "append", "(", "annotation_mention", ")", "return", "annotation" ]
Returns a dictionary with all properties of the action and each of its action mentions.
[ "Returns", "a", "dictionary", "with", "all", "properties", "of", "the", "action", "and", "each", "of", "its", "action", "mentions", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L35-L52
train
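A self-contained look at the reflection pattern make_annotation relies on: copy every public, non-method attribute of an object into a dict.

import inspect

class Toy:
    def __init__(self):
        self.up = '1956'
        self.dn = '3845'
        self._private = 'hidden'

    def method(self):
        pass

toy = Toy()
annotation = {item: getattr(toy, item) for item in dir(toy)
              if item and not item.startswith('_')
              and not inspect.ismethod(getattr(toy, item))}
assert annotation == {'up': '1956', 'dn': '3845'}  # methods and _private excluded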
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._search_path
def _search_path(self, directory_name, filename): """Searches for a given file in the specified directory.""" full_path = path.join(directory_name, filename) if path.exists(full_path): return full_path # Could not find the requested file in the given directory return None
python
def _search_path(self, directory_name, filename): """Searches for a given file in the specified directory.""" full_path = path.join(directory_name, filename) if path.exists(full_path): return full_path # Could not find the requested file in the given directory return None
[ "def", "_search_path", "(", "self", ",", "directory_name", ",", "filename", ")", ":", "full_path", "=", "path", ".", "join", "(", "directory_name", ",", "filename", ")", "if", "path", ".", "exists", "(", "full_path", ")", ":", "return", "full_path", "# Could not find the requested file in any of the directories", "return", "None" ]
Searches for a given file in the specified directory.
[ "Searches", "for", "a", "given", "file", "in", "the", "specified", "directory", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L96-L103
train
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._init_action_list
def _init_action_list(self, action_filename): """Parses the file and populates the data.""" self.actions = list() self.hiid_to_action_index = dict() f = codecs.open(action_filename, 'r', encoding='latin-1') first_line = True for line in f: line = line.rstrip() if first_line: # Ignore the first line first_line = False else: self.actions.append(GenewaysAction(line)) latestInd = len(self.actions)-1 hiid = self.actions[latestInd].hiid if hiid in self.hiid_to_action_index: raise Exception('action hiid not unique: %d' % hiid) self.hiid_to_action_index[hiid] = latestInd
python
def _init_action_list(self, action_filename): """Parses the file and populates the data.""" self.actions = list() self.hiid_to_action_index = dict() f = codecs.open(action_filename, 'r', encoding='latin-1') first_line = True for line in f: line = line.rstrip() if first_line: # Ignore the first line first_line = False else: self.actions.append(GenewaysAction(line)) latestInd = len(self.actions)-1 hiid = self.actions[latestInd].hiid if hiid in self.hiid_to_action_index: raise Exception('action hiid not unique: %d' % hiid) self.hiid_to_action_index[hiid] = latestInd
[ "def", "_init_action_list", "(", "self", ",", "action_filename", ")", ":", "self", ".", "actions", "=", "list", "(", ")", "self", ".", "hiid_to_action_index", "=", "dict", "(", ")", "f", "=", "codecs", ".", "open", "(", "action_filename", ",", "'r'", ",", "encoding", "=", "'latin-1'", ")", "first_line", "=", "True", "for", "line", "in", "f", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "first_line", ":", "# Ignore the first line", "first_line", "=", "False", "else", ":", "self", ".", "actions", ".", "append", "(", "GenewaysAction", "(", "line", ")", ")", "latestInd", "=", "len", "(", "self", ".", "actions", ")", "-", "1", "hiid", "=", "self", ".", "actions", "[", "latestInd", "]", ".", "hiid", "if", "hiid", "in", "self", ".", "hiid_to_action_index", ":", "raise", "Exception", "(", "'action hiid not unique: %d'", "%", "hiid", ")", "self", ".", "hiid_to_action_index", "[", "hiid", "]", "=", "latestInd" ]
Parses the file and populates the data.
[ "Parses", "the", "file", "and", "populates", "the", "data", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L105-L125
train
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._link_to_action_mentions
def _link_to_action_mentions(self, actionmention_filename): """Add action mentions""" parser = GenewaysActionMentionParser(actionmention_filename) self.action_mentions = parser.action_mentions for action_mention in self.action_mentions: hiid = action_mention.hiid if hiid not in self.hiid_to_action_index: m1 = 'Parsed action mention has hiid %d, which does not exist' m2 = ' in table of action hiids' raise Exception((m1 + m2) % hiid) else: idx = self.hiid_to_action_index[hiid] self.actions[idx].action_mentions.append(action_mention)
python
def _link_to_action_mentions(self, actionmention_filename): """Add action mentions""" parser = GenewaysActionMentionParser(actionmention_filename) self.action_mentions = parser.action_mentions for action_mention in self.action_mentions: hiid = action_mention.hiid if hiid not in self.hiid_to_action_index: m1 = 'Parsed action mention has hiid %d, which does not exist' m2 = ' in table of action hiids' raise Exception((m1 + m2) % hiid) else: idx = self.hiid_to_action_index[hiid] self.actions[idx].action_mentions.append(action_mention)
[ "def", "_link_to_action_mentions", "(", "self", ",", "actionmention_filename", ")", ":", "parser", "=", "GenewaysActionMentionParser", "(", "actionmention_filename", ")", "self", ".", "action_mentions", "=", "parser", ".", "action_mentions", "for", "action_mention", "in", "self", ".", "action_mentions", ":", "hiid", "=", "action_mention", ".", "hiid", "if", "hiid", "not", "in", "self", ".", "hiid_to_action_index", ":", "m1", "=", "'Parsed action mention has hiid %d, which does not exist'", "m2", "=", "' in table of action hiids'", "raise", "Exception", "(", "(", "m1", "+", "m2", ")", "%", "hiid", ")", "else", ":", "idx", "=", "self", ".", "hiid_to_action_index", "[", "hiid", "]", "self", ".", "actions", "[", "idx", "]", ".", "action_mentions", ".", "append", "(", "action_mention", ")" ]
Add action mentions
[ "Add", "action", "mentions" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L127-L140
train
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._lookup_symbols
def _lookup_symbols(self, symbols_filename): """Look up symbols for actions and action mentions""" symbol_lookup = GenewaysSymbols(symbols_filename) for action in self.actions: action.up_symbol = symbol_lookup.id_to_symbol(action.up) action.dn_symbol = symbol_lookup.id_to_symbol(action.dn)
python
def _lookup_symbols(self, symbols_filename): """Look up symbols for actions and action mentions""" symbol_lookup = GenewaysSymbols(symbols_filename) for action in self.actions: action.up_symbol = symbol_lookup.id_to_symbol(action.up) action.dn_symbol = symbol_lookup.id_to_symbol(action.dn)
[ "def", "_lookup_symbols", "(", "self", ",", "symbols_filename", ")", ":", "symbol_lookup", "=", "GenewaysSymbols", "(", "symbols_filename", ")", "for", "action", "in", "self", ".", "actions", ":", "action", ".", "up_symbol", "=", "symbol_lookup", ".", "id_to_symbol", "(", "action", ".", "up", ")", "action", ".", "dn_symbol", "=", "symbol_lookup", ".", "id_to_symbol", "(", "action", ".", "dn", ")" ]
Look up symbols for actions and action mentions
[ "Look", "up", "symbols", "for", "actions", "and", "action", "mentions" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L142-L147
train
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser.get_top_n_action_types
def get_top_n_action_types(self, top_n): """Returns the top N action types by count.""" # Count action types action_type_to_counts = dict() for action in self.actions: actiontype = action.actiontype if actiontype not in action_type_to_counts: action_type_to_counts[actiontype] = 1 else: action_type_to_counts[actiontype] = \ action_type_to_counts[actiontype] + 1 # Convert the dictionary representation into a pair of lists action_types = list() counts = list() for actiontype in action_type_to_counts.keys(): action_types.append(actiontype) counts.append(action_type_to_counts[actiontype]) # How many actions in total? num_actions = len(self.actions) num_actions2 = 0 for count in counts: num_actions2 = num_actions2 + count if num_actions != num_actions2: raise(Exception('Problem counting everything up!')) # Sort action types by count (lowest to highest) sorted_inds = np.argsort(counts) last_ind = len(sorted_inds)-1 # Return the top N actions top_actions = list() if top_n > len(sorted_inds): raise Exception('Asked for top %d action types, ' 'but there are only %d action types' % (top_n, len(sorted_inds))) for i in range(top_n): top_actions.append(action_types[sorted_inds[last_ind-i]]) return top_actions
python
def get_top_n_action_types(self, top_n): """Returns the top N action types by count.""" # Count action types action_type_to_counts = dict() for action in self.actions: actiontype = action.actiontype if actiontype not in action_type_to_counts: action_type_to_counts[actiontype] = 1 else: action_type_to_counts[actiontype] = \ action_type_to_counts[actiontype] + 1 # Convert the dictionary representation into a pair of lists action_types = list() counts = list() for actiontype in action_type_to_counts.keys(): action_types.append(actiontype) counts.append(action_type_to_counts[actiontype]) # How many actions in total? num_actions = len(self.actions) num_actions2 = 0 for count in counts: num_actions2 = num_actions2 + count if num_actions != num_actions2: raise(Exception('Problem counting everything up!')) # Sort action types by count (lowest to highest) sorted_inds = np.argsort(counts) last_ind = len(sorted_inds)-1 # Return the top N actions top_actions = list() if top_n > len(sorted_inds): raise Exception('Asked for top %d action types, ' 'but there are only %d action types' % (top_n, len(sorted_inds))) for i in range(top_n): top_actions.append(action_types[sorted_inds[last_ind-i]]) return top_actions
[ "def", "get_top_n_action_types", "(", "self", ",", "top_n", ")", ":", "# Count action types", "action_type_to_counts", "=", "dict", "(", ")", "for", "action", "in", "self", ".", "actions", ":", "actiontype", "=", "action", ".", "actiontype", "if", "actiontype", "not", "in", "action_type_to_counts", ":", "action_type_to_counts", "[", "actiontype", "]", "=", "1", "else", ":", "action_type_to_counts", "[", "actiontype", "]", "=", "action_type_to_counts", "[", "actiontype", "]", "+", "1", "# Convert the dictionary representation into a pair of lists", "action_types", "=", "list", "(", ")", "counts", "=", "list", "(", ")", "for", "actiontype", "in", "action_type_to_counts", ".", "keys", "(", ")", ":", "action_types", ".", "append", "(", "actiontype", ")", "counts", ".", "append", "(", "action_type_to_counts", "[", "actiontype", "]", ")", "# How many actions in total?", "num_actions", "=", "len", "(", "self", ".", "actions", ")", "num_actions2", "=", "0", "for", "count", "in", "counts", ":", "num_actions2", "=", "num_actions2", "+", "count", "if", "num_actions", "!=", "num_actions2", ":", "raise", "(", "Exception", "(", "'Problem counting everything up!'", ")", ")", "# Sort action types by count (lowest to highest)", "sorted_inds", "=", "np", ".", "argsort", "(", "counts", ")", "last_ind", "=", "len", "(", "sorted_inds", ")", "-", "1", "# Return the top N actions", "top_actions", "=", "list", "(", ")", "if", "top_n", ">", "len", "(", "sorted_inds", ")", ":", "raise", "Exception", "(", "'Asked for top %d action types, '", "+", "'but there are only %d action types'", "%", "(", "top_n", ",", "len", "(", "sorted_inds", ")", ")", ")", "for", "i", "in", "range", "(", "top_n", ")", ":", "top_actions", ".", "append", "(", "action_types", "[", "sorted_inds", "[", "last_ind", "-", "i", "]", "]", ")", "return", "top_actions" ]
Returns the top N action types by count.
[ "Returns", "the", "top", "N", "actions", "by", "count", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L149-L188
train
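The counting-and-argsort logic above is equivalent to a Counter-based top-N; a quick standalone check of that equivalence on toy data:

from collections import Counter

import numpy as np

actiontypes = ['bind', 'phosphorylate', 'bind', 'inhibit', 'bind',
               'phosphorylate']
counts = Counter(actiontypes)
assert [t for t, _ in counts.most_common(2)] == ['bind', 'phosphorylate']

# np.argsort mirrors the manual lowest-to-highest sort used in the parser
types, cnts = zip(*counts.items())
order = np.argsort(cnts)
assert types[order[-1]] == 'bind'  # highest-count type comes last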
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler.get_string
def get_string(self): """Return the assembled graph as a string. Returns ------- graph_string : str The assembled graph as a string. """ graph_string = self.graph.to_string() graph_string = graph_string.replace('\\N', '\\n') return graph_string
python
def get_string(self): """Return the assembled graph as a string. Returns ------- graph_string : str The assembled graph as a string. """ graph_string = self.graph.to_string() graph_string = graph_string.replace('\\N', '\\n') return graph_string
[ "def", "get_string", "(", "self", ")", ":", "graph_string", "=", "self", ".", "graph", ".", "to_string", "(", ")", "graph_string", "=", "graph_string", ".", "replace", "(", "'\\\\N'", ",", "'\\\\n'", ")", "return", "graph_string" ]
Return the assembled graph as a string. Returns ------- graph_string : str The assembled graph as a string.
[ "Return", "the", "assembled", "graph", "as", "a", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L146-L156
train
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler.save_dot
def save_dot(self, file_name='graph.dot'): """Save the graph in a graphviz dot file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph dot string to. """ s = self.get_string() with open(file_name, 'wt') as fh: fh.write(s)
python
def save_dot(self, file_name='graph.dot'): """Save the graph in a graphviz dot file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph dot string to. """ s = self.get_string() with open(file_name, 'wt') as fh: fh.write(s)
[ "def", "save_dot", "(", "self", ",", "file_name", "=", "'graph.dot'", ")", ":", "s", "=", "self", ".", "get_string", "(", ")", "with", "open", "(", "file_name", ",", "'wt'", ")", "as", "fh", ":", "fh", ".", "write", "(", "s", ")" ]
Save the graph in a graphviz dot file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph dot string to.
[ "Save", "the", "graph", "in", "a", "graphviz", "dot", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L158-L168
train
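A hedged usage sketch for the Graphviz export methods, assuming a GraphAssembler built from a list of INDRA Statements (stmts) as in indra.assemblers.graph:

from indra.assemblers.graph import GraphAssembler

ga = GraphAssembler(stmts)
ga.make_model()
ga.save_dot('graph.dot')              # textual Graphviz source
ga.save_pdf('graph.pdf', prog='dot')  # rendered layout via pygraphviz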
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler.save_pdf
def save_pdf(self, file_name='graph.pdf', prog='dot'): """Draw the graph and save as an image or pdf file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph as. Default: graph.pdf prog : Optional[str] The graphviz program to use for graph layout. Default: dot """ self.graph.draw(file_name, prog=prog)
python
def save_pdf(self, file_name='graph.pdf', prog='dot'): """Draw the graph and save as an image or pdf file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph as. Default: graph.pdf prog : Optional[str] The graphviz program to use for graph layout. Default: dot """ self.graph.draw(file_name, prog=prog)
[ "def", "save_pdf", "(", "self", ",", "file_name", "=", "'graph.pdf'", ",", "prog", "=", "'dot'", ")", ":", "self", ".", "graph", ".", "draw", "(", "file_name", ",", "prog", "=", "prog", ")" ]
Draw the graph and save as an image or pdf file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph as. Default: graph.pdf prog : Optional[str] The graphviz program to use for graph layout. Default: dot
[ "Draw", "the", "graph", "and", "save", "as", "an", "image", "or", "pdf", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L170-L180
train
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_edge
def _add_edge(self, source, target, **kwargs): """Add an edge to the graph.""" # Start with a copy of the default edge properties so the shared defaults are not mutated edge_properties = self.edge_properties.copy() # Overwrite ones that are given in function call explicitly for k, v in kwargs.items(): edge_properties[k] = v self.graph.add_edge(source, target, **edge_properties)
python
def _add_edge(self, source, target, **kwargs): """Add an edge to the graph.""" # Start with a copy of the default edge properties so the shared defaults are not mutated edge_properties = self.edge_properties.copy() # Overwrite ones that are given in function call explicitly for k, v in kwargs.items(): edge_properties[k] = v self.graph.add_edge(source, target, **edge_properties)
[ "def", "_add_edge", "(", "self", ",", "source", ",", "target", ",", "*", "*", "kwargs", ")", ":", "# Start with default edge properties", "edge_properties", "=", "self", ".", "edge_properties", "# Overwrite ones that are given in function call explicitly", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "edge_properties", "[", "k", "]", "=", "v", "self", ".", "graph", ".", "add_edge", "(", "source", ",", "target", ",", "*", "*", "edge_properties", ")" ]
Add an edge to the graph.
[ "Add", "an", "edge", "to", "the", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L182-L189
train
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_node
def _add_node(self, agent): """Add an Agent as a node to the graph.""" if agent is None: return node_label = _get_node_label(agent) if isinstance(agent, Agent) and agent.bound_conditions: bound_agents = [bc.agent for bc in agent.bound_conditions if bc.is_bound] if bound_agents: bound_names = [_get_node_label(a) for a in bound_agents] node_label = _get_node_label(agent) + '/' + \ '/'.join(bound_names) self._complex_nodes.append([agent] + bound_agents) else: node_label = _get_node_label(agent) node_key = _get_node_key(agent) if node_key in self.existing_nodes: return self.existing_nodes.append(node_key) self.graph.add_node(node_key, label=node_label, **self.node_properties)
python
def _add_node(self, agent): """Add an Agent as a node to the graph.""" if agent is None: return node_label = _get_node_label(agent) if isinstance(agent, Agent) and agent.bound_conditions: bound_agents = [bc.agent for bc in agent.bound_conditions if bc.is_bound] if bound_agents: bound_names = [_get_node_label(a) for a in bound_agents] node_label = _get_node_label(agent) + '/' + \ '/'.join(bound_names) self._complex_nodes.append([agent] + bound_agents) else: node_label = _get_node_label(agent) node_key = _get_node_key(agent) if node_key in self.existing_nodes: return self.existing_nodes.append(node_key) self.graph.add_node(node_key, label=node_label, **self.node_properties)
[ "def", "_add_node", "(", "self", ",", "agent", ")", ":", "if", "agent", "is", "None", ":", "return", "node_label", "=", "_get_node_label", "(", "agent", ")", "if", "isinstance", "(", "agent", ",", "Agent", ")", "and", "agent", ".", "bound_conditions", ":", "bound_agents", "=", "[", "bc", ".", "agent", "for", "bc", "in", "agent", ".", "bound_conditions", "if", "bc", ".", "is_bound", "]", "if", "bound_agents", ":", "bound_names", "=", "[", "_get_node_label", "(", "a", ")", "for", "a", "in", "bound_agents", "]", "node_label", "=", "_get_node_label", "(", "agent", ")", "+", "'/'", "+", "'/'", ".", "join", "(", "bound_names", ")", "self", ".", "_complex_nodes", ".", "append", "(", "[", "agent", "]", "+", "bound_agents", ")", "else", ":", "node_label", "=", "_get_node_label", "(", "agent", ")", "node_key", "=", "_get_node_key", "(", "agent", ")", "if", "node_key", "in", "self", ".", "existing_nodes", ":", "return", "self", ".", "existing_nodes", ".", "append", "(", "node_key", ")", "self", ".", "graph", ".", "add_node", "(", "node_key", ",", "label", "=", "node_label", ",", "*", "*", "self", ".", "node_properties", ")" ]
Add an Agent as a node to the graph.
[ "Add", "an", "Agent", "as", "a", "node", "to", "the", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L191-L212
train
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_stmt_edge
def _add_stmt_edge(self, stmt): """Assemble a two-agent Statement as a directed edge.""" # Get the node keys for the subject and object agents source = _get_node_key(stmt.agent_list()[0]) target = _get_node_key(stmt.agent_list()[1]) edge_key = (source, target, stmt.__class__.__name__) if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) if isinstance(stmt, RemoveModification) or \ isinstance(stmt, Inhibition) or \ isinstance(stmt, DecreaseAmount) or \ isinstance(stmt, Gap) or \ (isinstance(stmt, Influence) and stmt.overall_polarity() == -1): color = '#ff0000' else: color = '#000000' params = {'color': color, 'arrowhead': 'normal', 'dir': 'forward'} self._add_edge(source, target, **params)
python
def _add_stmt_edge(self, stmt): """Assemble a two-agent Statement as a directed edge.""" # Get the node keys for the subject and object agents source = _get_node_key(stmt.agent_list()[0]) target = _get_node_key(stmt.agent_list()[1]) edge_key = (source, target, stmt.__class__.__name__) if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) if isinstance(stmt, RemoveModification) or \ isinstance(stmt, Inhibition) or \ isinstance(stmt, DecreaseAmount) or \ isinstance(stmt, Gap) or \ (isinstance(stmt, Influence) and stmt.overall_polarity() == -1): color = '#ff0000' else: color = '#000000' params = {'color': color, 'arrowhead': 'normal', 'dir': 'forward'} self._add_edge(source, target, **params)
[ "def", "_add_stmt_edge", "(", "self", ",", "stmt", ")", ":", "# Skip statements with None in the subject position", "source", "=", "_get_node_key", "(", "stmt", ".", "agent_list", "(", ")", "[", "0", "]", ")", "target", "=", "_get_node_key", "(", "stmt", ".", "agent_list", "(", ")", "[", "1", "]", ")", "edge_key", "=", "(", "source", ",", "target", ",", "stmt", ".", "__class__", ".", "__name__", ")", "if", "edge_key", "in", "self", ".", "existing_edges", ":", "return", "self", ".", "existing_edges", ".", "append", "(", "edge_key", ")", "if", "isinstance", "(", "stmt", ",", "RemoveModification", ")", "or", "isinstance", "(", "stmt", ",", "Inhibition", ")", "or", "isinstance", "(", "stmt", ",", "DecreaseAmount", ")", "or", "isinstance", "(", "stmt", ",", "Gap", ")", "or", "(", "isinstance", "(", "stmt", ",", "Influence", ")", "and", "stmt", ".", "overall_polarity", "(", ")", "==", "-", "1", ")", ":", "color", "=", "'#ff0000'", "else", ":", "color", "=", "'#000000'", "params", "=", "{", "'color'", ":", "color", ",", "'arrowhead'", ":", "'normal'", ",", "'dir'", ":", "'forward'", "}", "self", ".", "_add_edge", "(", "source", ",", "target", ",", "*", "*", "params", ")" ]
Assemble a two-agent Statement as a directed edge.
[ "Assemble", "a", "Modification", "statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L214-L234
train
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_complex
def _add_complex(self, members, is_association=False): """Assemble a Complex statement.""" params = {'color': '#0000ff', 'arrowhead': 'dot', 'arrowtail': 'dot', 'dir': 'both'} for m1, m2 in itertools.combinations(members, 2): if self._has_complex_node(m1, m2): continue if is_association: m1_key = _get_node_key(m1.concept) m2_key = _get_node_key(m2.concept) else: m1_key = _get_node_key(m1) m2_key = _get_node_key(m2) edge_key = (set([m1_key, m2_key]), 'complex') if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) self._add_edge(m1_key, m2_key, **params)
python
def _add_complex(self, members, is_association=False): """Assemble a Complex statement.""" params = {'color': '#0000ff', 'arrowhead': 'dot', 'arrowtail': 'dot', 'dir': 'both'} for m1, m2 in itertools.combinations(members, 2): if self._has_complex_node(m1, m2): continue if is_association: m1_key = _get_node_key(m1.concept) m2_key = _get_node_key(m2.concept) else: m1_key = _get_node_key(m1) m2_key = _get_node_key(m2) edge_key = (set([m1_key, m2_key]), 'complex') if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) self._add_edge(m1_key, m2_key, **params)
[ "def", "_add_complex", "(", "self", ",", "members", ",", "is_association", "=", "False", ")", ":", "params", "=", "{", "'color'", ":", "'#0000ff'", ",", "'arrowhead'", ":", "'dot'", ",", "'arrowtail'", ":", "'dot'", ",", "'dir'", ":", "'both'", "}", "for", "m1", ",", "m2", "in", "itertools", ".", "combinations", "(", "members", ",", "2", ")", ":", "if", "self", ".", "_has_complex_node", "(", "m1", ",", "m2", ")", ":", "continue", "if", "is_association", ":", "m1_key", "=", "_get_node_key", "(", "m1", ".", "concept", ")", "m2_key", "=", "_get_node_key", "(", "m2", ".", "concept", ")", "else", ":", "m1_key", "=", "_get_node_key", "(", "m1", ")", "m2_key", "=", "_get_node_key", "(", "m2", ")", "edge_key", "=", "(", "set", "(", "[", "m1_key", ",", "m2_key", "]", ")", ",", "'complex'", ")", "if", "edge_key", "in", "self", ".", "existing_edges", ":", "return", "self", ".", "existing_edges", ".", "append", "(", "edge_key", ")", "self", ".", "_add_edge", "(", "m1_key", ",", "m2_key", ",", "*", "*", "params", ")" ]
Assemble a Complex statement.
[ "Assemble", "a", "Complex", "statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L236-L255
train
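The pairwise expansion of complex members rests on itertools.combinations; a standalone look at the edges it yields for a three-member complex:

import itertools

members = ['BRAF', 'RAF1', 'YWHAB']
pairs = list(itertools.combinations(members, 2))
assert pairs == [('BRAF', 'RAF1'), ('BRAF', 'YWHAB'), ('RAF1', 'YWHAB')]
# Each pair becomes one undirected (dir='both') complex edge in the graph.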
sorgerlab/indra
indra/sources/signor/api.py
process_from_file
def process_from_file(signor_data_file, signor_complexes_file=None): """Process Signor interaction data from CSV files. Parameters ---------- signor_data_file : str Path to the Signor interaction data file in CSV format. signor_complexes_file : str Path to the Signor complexes data in CSV format. If unspecified, Signor complexes will not be expanded to their constituents. Returns ------- indra.sources.signor.SignorProcessor SignorProcessor containing Statements extracted from the Signor data. """ # Get generator over the CSV file data_iter = read_unicode_csv(signor_data_file, delimiter=';', skiprows=1) complexes_iter = None if signor_complexes_file: complexes_iter = read_unicode_csv(signor_complexes_file, delimiter=';', skiprows=1) else: logger.warning('Signor complex mapping file not provided, Statements ' 'involving complexes will not be expanded to members.') return _processor_from_data(data_iter, complexes_iter)
python
def process_from_file(signor_data_file, signor_complexes_file=None): """Process Signor interaction data from CSV files. Parameters ---------- signor_data_file : str Path to the Signor interaction data file in CSV format. signor_complexes_file : str Path to the Signor complexes data in CSV format. If unspecified, Signor complexes will not be expanded to their constituents. Returns ------- indra.sources.signor.SignorProcessor SignorProcessor containing Statements extracted from the Signor data. """ # Get generator over the CSV file data_iter = read_unicode_csv(signor_data_file, delimiter=';', skiprows=1) complexes_iter = None if signor_complexes_file: complexes_iter = read_unicode_csv(signor_complexes_file, delimiter=';', skiprows=1) else: logger.warning('Signor complex mapping file not provided, Statements ' 'involving complexes will not be expanded to members.') return _processor_from_data(data_iter, complexes_iter)
[ "def", "process_from_file", "(", "signor_data_file", ",", "signor_complexes_file", "=", "None", ")", ":", "# Get generator over the CSV file", "data_iter", "=", "read_unicode_csv", "(", "signor_data_file", ",", "delimiter", "=", "';'", ",", "skiprows", "=", "1", ")", "complexes_iter", "=", "None", "if", "signor_complexes_file", ":", "complexes_iter", "=", "read_unicode_csv", "(", "signor_complexes_file", ",", "delimiter", "=", "';'", ",", "skiprows", "=", "1", ")", "else", ":", "logger", ".", "warning", "(", "'Signor complex mapping file not provided, Statements '", "'involving complexes will not be expanded to members.'", ")", "return", "_processor_from_data", "(", "data_iter", ",", "complexes_iter", ")" ]
Process Signor interaction data from CSV files. Parameters ---------- signor_data_file : str Path to the Signor interaction data file in CSV format. signor_complexes_file : str Path to the Signor complexes data in CSV format. If unspecified, Signor complexes will not be expanded to their constituents. Returns ------- indra.sources.signor.SignorProcessor SignorProcessor containing Statements extracted from the Signor data.
[ "Process", "Signor", "interaction", "data", "from", "CSV", "files", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/signor/api.py#L47-L72
train
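A hedged usage sketch with illustrative local file names; it assumes process_from_file is importable from the signor api module at the path shown above.

from indra.sources.signor.api import process_from_file

sp = process_from_file('signor_all_data.csv', 'signor_complexes.csv')
print(len(sp.statements))  # Statements extracted from the Signor dump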
sorgerlab/indra
indra/sources/signor/api.py
_handle_response
def _handle_response(res, delimiter): """Get an iterator over the CSV data from the response.""" if res.status_code == 200: # Python 2 -- csv.reader will need bytes if sys.version_info[0] < 3: csv_io = BytesIO(res.content) # Python 3 -- csv.reader needs str else: csv_io = StringIO(res.text) data_iter = read_unicode_csv_fileobj(csv_io, delimiter=delimiter, skiprows=1) else: raise Exception('Could not download Signor data.') return data_iter
python
def _handle_response(res, delimiter): """Get an iterator over the CSV data from the response.""" if res.status_code == 200: # Python 2 -- csv.reader will need bytes if sys.version_info[0] < 3: csv_io = BytesIO(res.content) # Python 3 -- csv.reader needs str else: csv_io = StringIO(res.text) data_iter = read_unicode_csv_fileobj(csv_io, delimiter=delimiter, skiprows=1) else: raise Exception('Could not download Signor data.') return data_iter
[ "def", "_handle_response", "(", "res", ",", "delimiter", ")", ":", "if", "res", ".", "status_code", "==", "200", ":", "# Python 2 -- csv.reader will need bytes", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "csv_io", "=", "BytesIO", "(", "res", ".", "content", ")", "# Python 3 -- csv.reader needs str", "else", ":", "csv_io", "=", "StringIO", "(", "res", ".", "text", ")", "data_iter", "=", "read_unicode_csv_fileobj", "(", "csv_io", ",", "delimiter", "=", "delimiter", ",", "skiprows", "=", "1", ")", "else", ":", "raise", "Exception", "(", "'Could not download Signor data.'", ")", "return", "data_iter" ]
Get an iterator over the CSV data from the response.
[ "Get", "an", "iterator", "over", "the", "CSV", "data", "from", "the", "response", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/signor/api.py#L89-L102
train
sorgerlab/indra
indra/databases/context_client.py
get_protein_expression
def get_protein_expression(gene_names, cell_types): """Return the protein expression levels of genes in cell types. Parameters ---------- gene_names : list HGNC gene symbols for which expression levels are queried. cell_types : list List of cell type names in which expression levels are queried. The cell type names follow the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST Returns ------- res : dict[dict[float]] A dictionary keyed by cell line, which contains another dictionary that is keyed by gene name, with estimated protein amounts as values. """ A = 0.2438361 B = 3.0957627 mrna_amounts = cbio_client.get_ccle_mrna(gene_names, cell_types) protein_amounts = copy(mrna_amounts) for cell_type in cell_types: amounts = mrna_amounts.get(cell_type) if amounts is None: continue for gene_name, amount in amounts.items(): if amount is not None: protein_amount = 10**(A * amount + B) protein_amounts[cell_type][gene_name] = protein_amount return protein_amounts
python
def get_protein_expression(gene_names, cell_types): """Return the protein expression levels of genes in cell types. Parameters ---------- gene_names : list HGNC gene symbols for which expression levels are queried. cell_types : list List of cell type names in which expression levels are queried. The cell type names follow the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST Returns ------- res : dict[dict[float]] A dictionary keyed by cell line, which contains another dictionary that is keyed by gene name, with estimated protein amounts as values. """ A = 0.2438361 B = 3.0957627 mrna_amounts = cbio_client.get_ccle_mrna(gene_names, cell_types) protein_amounts = copy(mrna_amounts) for cell_type in cell_types: amounts = mrna_amounts.get(cell_type) if amounts is None: continue for gene_name, amount in amounts.items(): if amount is not None: protein_amount = 10**(A * amount + B) protein_amounts[cell_type][gene_name] = protein_amount return protein_amounts
[ "def", "get_protein_expression", "(", "gene_names", ",", "cell_types", ")", ":", "A", "=", "0.2438361", "B", "=", "3.0957627", "mrna_amounts", "=", "cbio_client", ".", "get_ccle_mrna", "(", "gene_names", ",", "cell_types", ")", "protein_amounts", "=", "copy", "(", "mrna_amounts", ")", "for", "cell_type", "in", "cell_types", ":", "amounts", "=", "mrna_amounts", ".", "get", "(", "cell_type", ")", "if", "amounts", "is", "None", ":", "continue", "for", "gene_name", ",", "amount", "in", "amounts", ".", "items", "(", ")", ":", "if", "amount", "is", "not", "None", ":", "protein_amount", "=", "10", "**", "(", "A", "*", "amount", "+", "B", ")", "protein_amounts", "[", "cell_type", "]", "[", "gene_name", "]", "=", "protein_amount", "return", "protein_amounts" ]
Return the protein expression levels of genes in cell types. Parameters ---------- gene_names : list HGNC gene symbols for which expression levels are queried. cell_types : list List of cell type names in which expression levels are queried. The cell type names follow the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST Returns ------- res : dict[dict[float]] A dictionary keyed by cell line, which contains another dictionary that is keyed by gene name, with estimated protein amounts as values.
[ "Return", "the", "protein", "expression", "levels", "of", "genes", "in", "cell", "types", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/context_client.py#L13-L44
train
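A usage sketch of the expression query above; it needs network access to the CCLE service via cbio_client, and the estimated protein amount follows the log-linear calibration 10**(0.2438361 * mrna + 3.0957627) hard-coded in the function.

from indra.databases import context_client

amounts = context_client.get_protein_expression(['EGFR', 'KRAS'],
                                                ['BT20_BREAST'])
# amounts['BT20_BREAST']['EGFR'] is 10**(A * mrna + B) for that cell line,
# or None if no mRNA value was returned for the gene.
print(amounts)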
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_aspect
def get_aspect(cx, aspect_name): """Return an aspect given the name of the aspect""" if isinstance(cx, dict): return cx.get(aspect_name) for entry in cx: if list(entry.keys())[0] == aspect_name: return entry[aspect_name]
python
def get_aspect(cx, aspect_name): """Return an aspect given the name of the aspect""" if isinstance(cx, dict): return cx.get(aspect_name) for entry in cx: if list(entry.keys())[0] == aspect_name: return entry[aspect_name]
[ "def", "get_aspect", "(", "cx", ",", "aspect_name", ")", ":", "if", "isinstance", "(", "cx", ",", "dict", ")", ":", "return", "cx", ".", "get", "(", "aspect_name", ")", "for", "entry", "in", "cx", ":", "if", "list", "(", "entry", ".", "keys", "(", ")", ")", "[", "0", "]", "==", "aspect_name", ":", "return", "entry", "[", "aspect_name", "]" ]
Return an aspect given the name of the aspect
[ "Return", "an", "aspect", "given", "the", "name", "of", "the", "aspect" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L13-L19
train
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
classify_nodes
def classify_nodes(graph, hub): """Classify each node based on its type and relationship to the hub.""" node_stats = defaultdict(lambda: defaultdict(list)) for u, v, data in graph.edges(data=True): # This means the node is downstream of the hub if hub == u: h, o = u, v if data['i'] != 'Complex': node_stats[o]['up'].append(-1) else: node_stats[o]['up'].append(0) # This means the node is upstream of the hub elif hub == v: h, o = v, u if data['i'] != 'Complex': node_stats[o]['up'].append(1) else: node_stats[o]['up'].append(0) else: continue node_stats[o]['interaction'].append(edge_type_to_class(data['i'])) node_classes = {} for node_id, stats in node_stats.items(): up = max(set(stats['up']), key=stats['up'].count) # Special case: if up is not 0 then we should exclude complexes # from the edge_type states so that we don't end up with # (-1, complex, ...) or (1, complex, ...) as the node class interactions = [i for i in stats['interaction'] if not (up != 0 and i == 'complex')] edge_type = max(set(interactions), key=interactions.count) node_type = graph.nodes[node_id]['type'] node_classes[node_id] = (up, edge_type, node_type) return node_classes
python
def classify_nodes(graph, hub): """Classify each node based on its type and relationship to the hub.""" node_stats = defaultdict(lambda: defaultdict(list)) for u, v, data in graph.edges(data=True): # This means the node is downstream of the hub if hub == u: h, o = u, v if data['i'] != 'Complex': node_stats[o]['up'].append(-1) else: node_stats[o]['up'].append(0) # This means the node is upstream of the hub elif hub == v: h, o = v, u if data['i'] != 'Complex': node_stats[o]['up'].append(1) else: node_stats[o]['up'].append(0) else: continue node_stats[o]['interaction'].append(edge_type_to_class(data['i'])) node_classes = {} for node_id, stats in node_stats.items(): up = max(set(stats['up']), key=stats['up'].count) # Special case: if up is not 0 then we should exclude complexes # from the edge_type states so that we don't end up with # (-1, complex, ...) or (1, complex, ...) as the node class interactions = [i for i in stats['interaction'] if not (up != 0 and i == 'complex')] edge_type = max(set(interactions), key=interactions.count) node_type = graph.nodes[node_id]['type'] node_classes[node_id] = (up, edge_type, node_type) return node_classes
[ "def", "classify_nodes", "(", "graph", ",", "hub", ")", ":", "node_stats", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "list", ")", ")", "for", "u", ",", "v", ",", "data", "in", "graph", ".", "edges", "(", "data", "=", "True", ")", ":", "# This means the node is downstream of the hub", "if", "hub", "==", "u", ":", "h", ",", "o", "=", "u", ",", "v", "if", "data", "[", "'i'", "]", "!=", "'Complex'", ":", "node_stats", "[", "o", "]", "[", "'up'", "]", ".", "append", "(", "-", "1", ")", "else", ":", "node_stats", "[", "o", "]", "[", "'up'", "]", ".", "append", "(", "0", ")", "# This means the node is upstream of the hub", "elif", "hub", "==", "v", ":", "h", ",", "o", "=", "v", ",", "u", "if", "data", "[", "'i'", "]", "!=", "'Complex'", ":", "node_stats", "[", "o", "]", "[", "'up'", "]", ".", "append", "(", "1", ")", "else", ":", "node_stats", "[", "o", "]", "[", "'up'", "]", ".", "append", "(", "0", ")", "else", ":", "continue", "node_stats", "[", "o", "]", "[", "'interaction'", "]", ".", "append", "(", "edge_type_to_class", "(", "data", "[", "'i'", "]", ")", ")", "node_classes", "=", "{", "}", "for", "node_id", ",", "stats", "in", "node_stats", ".", "items", "(", ")", ":", "up", "=", "max", "(", "set", "(", "stats", "[", "'up'", "]", ")", ",", "key", "=", "stats", "[", "'up'", "]", ".", "count", ")", "# Special case: if up is not 0 then we should exclude complexes", "# from the edge_type states so that we don't end up with", "# (-1, complex, ...) or (1, complex, ...) as the node class", "interactions", "=", "[", "i", "for", "i", "in", "stats", "[", "'interaction'", "]", "if", "not", "(", "up", "!=", "0", "and", "i", "==", "'complex'", ")", "]", "edge_type", "=", "max", "(", "set", "(", "interactions", ")", ",", "key", "=", "interactions", ".", "count", ")", "node_type", "=", "graph", ".", "nodes", "[", "node_id", "]", "[", "'type'", "]", "node_classes", "[", "node_id", "]", "=", "(", "up", ",", "edge_type", ",", "node_type", ")", "return", "node_classes" ]
Classify each node based on its type and relationship to the hub.
[ "Classify", "each", "node", "based", "on", "its", "type", "and", "relationship", "to", "the", "hub", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L34-L67
train
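A toy illustration of the classifier above, assuming edge_type_to_class (defined elsewhere in this module) maps 'Phosphorylation' to 'modification'. With a single non-Complex edge out of the hub, the neighbor is classified as downstream (-1).

import networkx

g = networkx.MultiDiGraph()
g.add_node(1, n='EGFR', type='protein')
g.add_node(2, n='MAPK1', type='protein')
g.add_edge(1, 2, i='Phosphorylation')  # hub -> neighbor, non-Complex
print(classify_nodes(g, 1))  # expected: {2: (-1, 'modification', 'protein')}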
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_attributes
def get_attributes(aspect, id): """Return the attributes pointing to a given ID in a given aspect.""" attributes = {} for entry in aspect: if entry['po'] == id: attributes[entry['n']] = entry['v'] return attributes
python
def get_attributes(aspect, id): """Return the attributes pointing to a given ID in a given aspect.""" attributes = {} for entry in aspect: if entry['po'] == id: attributes[entry['n']] = entry['v'] return attributes
[ "def", "get_attributes", "(", "aspect", ",", "id", ")", ":", "attributes", "=", "{", "}", "for", "entry", "in", "aspect", ":", "if", "entry", "[", "'po'", "]", "==", "id", ":", "attributes", "[", "entry", "[", "'n'", "]", "]", "=", "entry", "[", "'v'", "]", "return", "attributes" ]
Return the attributes pointing to a given ID in a given aspect.
[ "Return", "the", "attributes", "pointing", "to", "a", "given", "ID", "in", "a", "given", "aspect", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L70-L76
train
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
cx_to_networkx
def cx_to_networkx(cx): """Return a MultiDiGraph representation of a CX network.""" graph = networkx.MultiDiGraph() for node_entry in get_aspect(cx, 'nodes'): id = node_entry['@id'] attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id) attrs['n'] = node_entry['n'] graph.add_node(id, **attrs) for edge_entry in get_aspect(cx, 'edges'): id = edge_entry['@id'] attrs = get_attributes(get_aspect(cx, 'edgeAttributes'), id) attrs['i'] = edge_entry['i'] graph.add_edge(edge_entry['s'], edge_entry['t'], key=id, **attrs) return graph
python
def cx_to_networkx(cx): """Return a MultiDiGraph representation of a CX network.""" graph = networkx.MultiDiGraph() for node_entry in get_aspect(cx, 'nodes'): id = node_entry['@id'] attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id) attrs['n'] = node_entry['n'] graph.add_node(id, **attrs) for edge_entry in get_aspect(cx, 'edges'): id = edge_entry['@id'] attrs = get_attributes(get_aspect(cx, 'edgeAttributes'), id) attrs['i'] = edge_entry['i'] graph.add_edge(edge_entry['s'], edge_entry['t'], key=id, **attrs) return graph
[ "def", "cx_to_networkx", "(", "cx", ")", ":", "graph", "=", "networkx", ".", "MultiDiGraph", "(", ")", "for", "node_entry", "in", "get_aspect", "(", "cx", ",", "'nodes'", ")", ":", "id", "=", "node_entry", "[", "'@id'", "]", "attrs", "=", "get_attributes", "(", "get_aspect", "(", "cx", ",", "'nodeAttributes'", ")", ",", "id", ")", "attrs", "[", "'n'", "]", "=", "node_entry", "[", "'n'", "]", "graph", ".", "add_node", "(", "id", ",", "*", "*", "attrs", ")", "for", "edge_entry", "in", "get_aspect", "(", "cx", ",", "'edges'", ")", ":", "id", "=", "edge_entry", "[", "'@id'", "]", "attrs", "=", "get_attributes", "(", "get_aspect", "(", "cx", ",", "'edgeAttributes'", ")", ",", "id", ")", "attrs", "[", "'i'", "]", "=", "edge_entry", "[", "'i'", "]", "graph", ".", "add_edge", "(", "edge_entry", "[", "'s'", "]", ",", "edge_entry", "[", "'t'", "]", ",", "key", "=", "id", ",", "*", "*", "attrs", ")", "return", "graph" ]
Return a MultiDiGraph representation of a CX network.
[ "Return", "a", "MultiDiGraph", "representation", "of", "a", "CX", "network", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L79-L92
train
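A minimal round-trip sketch with a hand-built CX dict (illustrative only); get_aspect accepts either the dict form used here or the list-of-aspects form.

cx = {'nodes': [{'@id': 0, 'n': 'EGFR'}, {'@id': 1, 'n': 'GRB2'}],
      'nodeAttributes': [{'po': 0, 'n': 'type', 'v': 'protein'}],
      'edges': [{'@id': 2, 's': 0, 't': 1, 'i': 'Complex'}],
      'edgeAttributes': []}
graph = cx_to_networkx(cx)
assert graph.nodes[0]['n'] == 'EGFR'
assert graph[0][1][2]['i'] == 'Complex'  # edge keyed by its CX @id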
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_quadrant_from_class
def get_quadrant_from_class(node_class): """Return the ID of the segment of the plane corresponding to a class.""" up, edge_type, _ = node_class if up == 0: return 0 if random.random() < 0.5 else 7 mappings = {(-1, 'modification'): 1, (-1, 'amount'): 2, (-1, 'activity'): 3, (1, 'activity'): 4, (1, 'amount'): 5, (1, 'modification'): 6} return mappings[(up, edge_type)]
python
def get_quadrant_from_class(node_class): """Return the ID of the segment of the plane corresponding to a class.""" up, edge_type, _ = node_class if up == 0: return 0 if random.random() < 0.5 else 7 mappings = {(-1, 'modification'): 1, (-1, 'amount'): 2, (-1, 'activity'): 3, (1, 'activity'): 4, (1, 'amount'): 5, (1, 'modification'): 6} return mappings[(up, edge_type)]
[ "def", "get_quadrant_from_class", "(", "node_class", ")", ":", "up", ",", "edge_type", ",", "_", "=", "node_class", "if", "up", "==", "0", ":", "return", "0", "if", "random", ".", "random", "(", ")", "<", "0.5", "else", "7", "mappings", "=", "{", "(", "-", "1", ",", "'modification'", ")", ":", "1", ",", "(", "-", "1", ",", "'amount'", ")", ":", "2", ",", "(", "-", "1", ",", "'activity'", ")", ":", "3", ",", "(", "1", ",", "'activity'", ")", ":", "4", ",", "(", "1", ",", "'amount'", ")", ":", "5", ",", "(", "1", ",", "'modification'", ")", ":", "6", "}", "return", "mappings", "[", "(", "up", ",", "edge_type", ")", "]" ]
Return the ID of the segment of the plane corresponding to a class.
[ "Return", "the", "ID", "of", "the", "segment", "of", "the", "plane", "corresponding", "to", "a", "class", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L95-L106
train
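Worked examples of the class-to-segment mapping; these follow directly from the mappings table above, and the up == 0 case lands in segment 0 or 7 at random.

assert get_quadrant_from_class((-1, 'amount', 'protein')) == 2
assert get_quadrant_from_class((1, 'modification', 'protein')) == 6
assert get_quadrant_from_class((0, 'complex', 'protein')) in (0, 7)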
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_coordinates
def get_coordinates(node_class): """Generate coordinates for a node in a given class.""" quadrant_size = (2 * math.pi / 8.0) quadrant = get_quadrant_from_class(node_class) begin_angle = quadrant_size * quadrant r = 200 + 800*random.random() alpha = begin_angle + random.random() * quadrant_size x = r * math.cos(alpha) y = r * math.sin(alpha) return x, y
python
def get_coordinates(node_class): """Generate coordinates for a node in a given class.""" quadrant_size = (2 * math.pi / 8.0) quadrant = get_quadrant_from_class(node_class) begin_angle = quadrant_size * quadrant r = 200 + 800*random.random() alpha = begin_angle + random.random() * quadrant_size x = r * math.cos(alpha) y = r * math.sin(alpha) return x, y
[ "def", "get_coordinates", "(", "node_class", ")", ":", "quadrant_size", "=", "(", "2", "*", "math", ".", "pi", "/", "8.0", ")", "quadrant", "=", "get_quadrant_from_class", "(", "node_class", ")", "begin_angle", "=", "quadrant_size", "*", "quadrant", "r", "=", "200", "+", "800", "*", "random", ".", "random", "(", ")", "alpha", "=", "begin_angle", "+", "random", ".", "random", "(", ")", "*", "quadrant_size", "x", "=", "r", "*", "math", ".", "cos", "(", "alpha", ")", "y", "=", "r", "*", "math", ".", "sin", "(", "alpha", ")", "return", "x", ",", "y" ]
Generate coordinates for a node in a given class.
[ "Generate", "coordinates", "for", "a", "node", "in", "a", "given", "class", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L109-L118
train
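A quick sanity check of the polar sampling above: the radius falls in [200, 1000) and the angle stays inside the chosen eighth of the circle (segment 1 spans [pi/4, pi/2)).

import math

x, y = get_coordinates((-1, 'modification', 'protein'))  # segment 1
r = math.hypot(x, y)
theta = math.atan2(y, x) % (2 * math.pi)
assert 200 <= r < 1000
assert math.pi / 4 <= theta < math.pi / 2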
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_layout_aspect
def get_layout_aspect(hub, node_classes): """Get the full layout aspect with coordinates for each node.""" aspect = [{'node': hub, 'x': 0.0, 'y': 0.0}] for node, node_class in node_classes.items(): if node == hub: continue x, y = get_coordinates(node_class) aspect.append({'node': node, 'x': x, 'y': y}) return aspect
python
def get_layout_aspect(hub, node_classes): """Get the full layout aspect with coordinates for each node.""" aspect = [{'node': hub, 'x': 0.0, 'y': 0.0}] for node, node_class in node_classes.items(): if node == hub: continue x, y = get_coordinates(node_class) aspect.append({'node': node, 'x': x, 'y': y}) return aspect
[ "def", "get_layout_aspect", "(", "hub", ",", "node_classes", ")", ":", "aspect", "=", "[", "{", "'node'", ":", "hub", ",", "'x'", ":", "0.0", ",", "'y'", ":", "0.0", "}", "]", "for", "node", ",", "node_class", "in", "node_classes", ".", "items", "(", ")", ":", "if", "node", "==", "hub", ":", "continue", "x", ",", "y", "=", "get_coordinates", "(", "node_class", ")", "aspect", ".", "append", "(", "{", "'node'", ":", "node", ",", "'x'", ":", "x", ",", "'y'", ":", "y", "}", ")", "return", "aspect" ]
Get the full layout aspect with coordinates for each node.
[ "Get", "the", "full", "layout", "aspect", "with", "coordinates", "for", "each", "node", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L121-L129
train
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_node_by_name
def get_node_by_name(graph, name): """Return a node ID given its name.""" for id, attrs in graph.nodes(data=True): if attrs['n'] == name: return id
python
def get_node_by_name(graph, name): """Return a node ID given its name.""" for id, attrs in graph.nodes(data=True): if attrs['n'] == name: return id
[ "def", "get_node_by_name", "(", "graph", ",", "name", ")", ":", "for", "id", ",", "attrs", "in", "graph", ".", "nodes", "(", "data", "=", "True", ")", ":", "if", "attrs", "[", "'n'", "]", "==", "name", ":", "return", "id" ]
Return a node ID given its name.
[ "Return", "a", "node", "ID", "given", "its", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L132-L136
train
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
add_semantic_hub_layout
def add_semantic_hub_layout(cx, hub): """Attach a layout aspect to a CX network given a hub node.""" graph = cx_to_networkx(cx) hub_node = get_node_by_name(graph, hub) node_classes = classify_nodes(graph, hub_node) layout_aspect = get_layout_aspect(hub_node, node_classes) cx['cartesianLayout'] = layout_aspect
python
def add_semantic_hub_layout(cx, hub): """Attach a layout aspect to a CX network given a hub node.""" graph = cx_to_networkx(cx) hub_node = get_node_by_name(graph, hub) node_classes = classify_nodes(graph, hub_node) layout_aspect = get_layout_aspect(hub_node, node_classes) cx['cartesianLayout'] = layout_aspect
[ "def", "add_semantic_hub_layout", "(", "cx", ",", "hub", ")", ":", "graph", "=", "cx_to_networkx", "(", "cx", ")", "hub_node", "=", "get_node_by_name", "(", "graph", ",", "hub", ")", "node_classes", "=", "classify_nodes", "(", "graph", ",", "hub_node", ")", "layout_aspect", "=", "get_layout_aspect", "(", "hub_node", ",", "node_classes", ")", "cx", "[", "'cartesianLayout'", "]", "=", "layout_aspect" ]
Attach a layout aspect to a CX network given a hub node.
[ "Attach", "a", "layout", "aspect", "to", "a", "CX", "network", "given", "a", "hub", "node", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L139-L145
train
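End-to-end usage sketch for the layout functions above: load a CX network from disk, pick a hub by node name, and attach the computed cartesianLayout aspect. The file name and hub name are placeholders.

import json

with open('network.cx') as fh:  # placeholder file in the dict-style CX format
    cx = json.load(fh)
add_semantic_hub_layout(cx, 'EGFR')  # hub must match a node's 'n' attribute
print(cx['cartesianLayout'][0])  # the hub itself sits at (0.0, 0.0)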
sorgerlab/indra
indra/literature/crossref_client.py
get_metadata
def get_metadata(doi): """Returns the metadata of an article given its DOI from CrossRef as a JSON dict""" url = crossref_url + 'works/' + doi res = requests.get(url) if res.status_code != 200: logger.info('Could not get CrossRef metadata for DOI %s, code %d' % (doi, res.status_code)) return None raw_message = res.json() metadata = raw_message.get('message') return metadata
python
def get_metadata(doi): """Returns the metadata of an article given its DOI from CrossRef as a JSON dict""" url = crossref_url + 'works/' + doi res = requests.get(url) if res.status_code != 200: logger.info('Could not get CrossRef metadata for DOI %s, code %d' % (doi, res.status_code)) return None raw_message = res.json() metadata = raw_message.get('message') return metadata
[ "def", "get_metadata", "(", "doi", ")", ":", "url", "=", "crossref_url", "+", "'works/'", "+", "doi", "res", "=", "requests", ".", "get", "(", "url", ")", "if", "res", ".", "status_code", "!=", "200", ":", "logger", ".", "info", "(", "'Could not get CrossRef metadata for DOI %s, code %d'", "%", "(", "doi", ",", "res", ".", "status_code", ")", ")", "return", "None", "raw_message", "=", "res", ".", "json", "(", ")", "metadata", "=", "raw_message", ".", "get", "(", "'message'", ")", "return", "metadata" ]
Returns the metadata of an article given its DOI from CrossRef as a JSON dict
[ "Returns", "the", "metadata", "of", "an", "article", "given", "its", "DOI", "from", "CrossRef", "as", "a", "JSON", "dict" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/crossref_client.py#L30-L41
train
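Usage sketch: get_metadata hits the CrossRef works endpoint and returns the parsed 'message' payload, or None with an info log on a non-200 response. The DOI below is just an example.

from indra.literature import crossref_client

meta = crossref_client.get_metadata('10.1038/nature12912')  # example DOI
if meta is not None:
    print(meta.get('title'), meta.get('ISSN'))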
sorgerlab/indra
indra/literature/crossref_client.py
doi_query
def doi_query(pmid, search_limit=10): """Get the DOI for a PMID by matching CrossRef and Pubmed metadata. Searches CrossRef using the article title and then accepts search hits only if they have a matching journal ISSN and page number with what is obtained from the Pubmed database. """ # Get article metadata from PubMed pubmed_meta_dict = pubmed_client.get_metadata_for_ids([pmid], get_issns_from_nlm=True) if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None: logger.warning('No metadata found in Pubmed for PMID%s' % pmid) return None # The test above ensures we've got this now pubmed_meta = pubmed_meta_dict[pmid] # Check if we already got a DOI from Pubmed itself! if pubmed_meta.get('doi'): return pubmed_meta.get('doi') # Check for the title, which we'll need for the CrossRef search pm_article_title = pubmed_meta.get('title') if pm_article_title is None: logger.warning('No article title found in Pubmed for PMID%s' % pmid) return None # Get the ISSN list pm_issn_list = pubmed_meta.get('issn_list') if not pm_issn_list: logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid) return None # Get the page number pm_page = pubmed_meta.get('page') if not pm_page: logger.debug('No page number found in Pubmed for PMID%s' % pmid) return None # Now query CrossRef using the title we've got url = crossref_search_url params = {'q': pm_article_title, 'sort': 'score'} try: res = requests.get(crossref_search_url, params) except requests.exceptions.ConnectionError as e: logger.error('CrossRef service could not be reached.') logger.error(e) return None except Exception as e: logger.error('Error accessing CrossRef service: %s' % str(e)) return None if res.status_code != 200: logger.info('PMID%s: no search results from CrossRef, code %d' % (pmid, res.status_code)) return None raw_message = res.json() mapped_doi = None # Iterate over the search results, looking up XREF metadata for result_ix, result in enumerate(raw_message): if result_ix > search_limit: logger.info('PMID%s: No match found within first %s results, ' 'giving up!' % (pmid, search_limit)) break xref_doi_url = result['doi'] # Strip the URL prefix off of the DOI m = re.match('^http://dx.doi.org/(.*)$', xref_doi_url) xref_doi = m.groups()[0] # Get the XREF metadata using the DOI xref_meta = get_metadata(xref_doi) if xref_meta is None: continue xref_issn_list = xref_meta.get('ISSN') xref_page = xref_meta.get('page') # If there's no ISSN info for this article, skip to the next result if not xref_issn_list: logger.debug('No ISSN found for DOI %s, skipping' % xref_doi_url) continue # If there's no page info for this article, skip to the next result if not xref_page: logger.debug('No page number found for DOI %s, skipping' % xref_doi_url) continue # Now check for an ISSN match by looking for the set intersection # between the Pubmed ISSN list and the CrossRef ISSN list. matching_issns = set(pm_issn_list).intersection(set(xref_issn_list)) # Before comparing page numbers, regularize the page numbers a bit. # Note that we only compare the first page number, since frequently # the final page number will simply be missing in one of the data # sources. We also canonicalize page numbers of the form '14E' to # 'E14' (which is the format used by Pubmed). pm_start_page = pm_page.split('-')[0].upper() xr_start_page = xref_page.split('-')[0].upper() if xr_start_page.endswith('E'): xr_start_page = 'E' + xr_start_page[:-1] # Now compare the ISSN list and page numbers if matching_issns and pm_start_page == xr_start_page: # We found a match! mapped_doi = xref_doi break # Otherwise, keep looking through the results... # Return a DOI, or None if we didn't find one that met our matching # criteria return mapped_doi
python
def doi_query(pmid, search_limit=10): """Get the DOI for a PMID by matching CrossRef and Pubmed metadata. Searches CrossRef using the article title and then accepts search hits only if they have a matching journal ISSN and page number with what is obtained from the Pubmed database. """ # Get article metadata from PubMed pubmed_meta_dict = pubmed_client.get_metadata_for_ids([pmid], get_issns_from_nlm=True) if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None: logger.warning('No metadata found in Pubmed for PMID%s' % pmid) return None # The test above ensures we've got this now pubmed_meta = pubmed_meta_dict[pmid] # Check if we already got a DOI from Pubmed itself! if pubmed_meta.get('doi'): return pubmed_meta.get('doi') # Check for the title, which we'll need for the CrossRef search pm_article_title = pubmed_meta.get('title') if pm_article_title is None: logger.warning('No article title found in Pubmed for PMID%s' % pmid) return None # Get the ISSN list pm_issn_list = pubmed_meta.get('issn_list') if not pm_issn_list: logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid) return None # Get the page number pm_page = pubmed_meta.get('page') if not pm_page: logger.debug('No page number found in Pubmed for PMID%s' % pmid) return None # Now query CrossRef using the title we've got url = crossref_search_url params = {'q': pm_article_title, 'sort': 'score'} try: res = requests.get(crossref_search_url, params) except requests.exceptions.ConnectionError as e: logger.error('CrossRef service could not be reached.') logger.error(e) return None except Exception as e: logger.error('Error accessing CrossRef service: %s' % str(e)) return None if res.status_code != 200: logger.info('PMID%s: no search results from CrossRef, code %d' % (pmid, res.status_code)) return None raw_message = res.json() mapped_doi = None # Iterate over the search results, looking up XREF metadata for result_ix, result in enumerate(raw_message): if result_ix > search_limit: logger.info('PMID%s: No match found within first %s results, ' 'giving up!' % (pmid, search_limit)) break xref_doi_url = result['doi'] # Strip the URL prefix off of the DOI m = re.match('^http://dx.doi.org/(.*)$', xref_doi_url) xref_doi = m.groups()[0] # Get the XREF metadata using the DOI xref_meta = get_metadata(xref_doi) if xref_meta is None: continue xref_issn_list = xref_meta.get('ISSN') xref_page = xref_meta.get('page') # If there's no ISSN info for this article, skip to the next result if not xref_issn_list: logger.debug('No ISSN found for DOI %s, skipping' % xref_doi_url) continue # If there's no page info for this article, skip to the next result if not xref_page: logger.debug('No page number found for DOI %s, skipping' % xref_doi_url) continue # Now check for an ISSN match by looking for the set intersection # between the Pubmed ISSN list and the CrossRef ISSN list. matching_issns = set(pm_issn_list).intersection(set(xref_issn_list)) # Before comparing page numbers, regularize the page numbers a bit. # Note that we only compare the first page number, since frequently # the final page number will simply be missing in one of the data # sources. We also canonicalize page numbers of the form '14E' to # 'E14' (which is the format used by Pubmed). pm_start_page = pm_page.split('-')[0].upper() xr_start_page = xref_page.split('-')[0].upper() if xr_start_page.endswith('E'): xr_start_page = 'E' + xr_start_page[:-1] # Now compare the ISSN list and page numbers if matching_issns and pm_start_page == xr_start_page: # We found a match! mapped_doi = xref_doi break # Otherwise, keep looking through the results... # Return a DOI, or None if we didn't find one that met our matching # criteria return mapped_doi
[ "def", "doi_query", "(", "pmid", ",", "search_limit", "=", "10", ")", ":", "# Get article metadata from PubMed", "pubmed_meta_dict", "=", "pubmed_client", ".", "get_metadata_for_ids", "(", "[", "pmid", "]", ",", "get_issns_from_nlm", "=", "True", ")", "if", "pubmed_meta_dict", "is", "None", "or", "pubmed_meta_dict", ".", "get", "(", "pmid", ")", "is", "None", ":", "logger", ".", "warning", "(", "'No metadata found in Pubmed for PMID%s'", "%", "pmid", ")", "return", "None", "# The test above ensures we've got this now", "pubmed_meta", "=", "pubmed_meta_dict", "[", "pmid", "]", "# Check if we already got a DOI from Pubmed itself!", "if", "pubmed_meta", ".", "get", "(", "'doi'", ")", ":", "return", "pubmed_meta", ".", "get", "(", "'doi'", ")", "# Check for the title, which we'll need for the CrossRef search", "pm_article_title", "=", "pubmed_meta", ".", "get", "(", "'title'", ")", "if", "pm_article_title", "is", "None", ":", "logger", ".", "warning", "(", "'No article title found in Pubmed for PMID%s'", "%", "pmid", ")", "return", "None", "# Get the ISSN list", "pm_issn_list", "=", "pubmed_meta", ".", "get", "(", "'issn_list'", ")", "if", "not", "pm_issn_list", ":", "logger", ".", "warning", "(", "'No ISSNs found in Pubmed for PMID%s'", "%", "pmid", ")", "return", "None", "# Get the page number", "pm_page", "=", "pubmed_meta", ".", "get", "(", "'page'", ")", "if", "not", "pm_page", ":", "logger", ".", "debug", "(", "'No page number found in Pubmed for PMID%s'", "%", "pmid", ")", "return", "None", "# Now query CrossRef using the title we've got", "url", "=", "crossref_search_url", "params", "=", "{", "'q'", ":", "pm_article_title", ",", "'sort'", ":", "'score'", "}", "try", ":", "res", "=", "requests", ".", "get", "(", "crossref_search_url", ",", "params", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "logger", ".", "error", "(", "'CrossRef service could not be reached.'", ")", "logger", ".", "error", "(", "e", ")", "return", "None", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "'Error accessing CrossRef service: %s'", "%", "str", "(", "e", ")", ")", "return", "None", "if", "res", ".", "status_code", "!=", "200", ":", "logger", ".", "info", "(", "'PMID%s: no search results from CrossRef, code %d'", "%", "(", "pmid", ",", "res", ".", "status_code", ")", ")", "return", "None", "raw_message", "=", "res", ".", "json", "(", ")", "mapped_doi", "=", "None", "# Iterate over the search results, looking up XREF metadata", "for", "result_ix", ",", "result", "in", "enumerate", "(", "raw_message", ")", ":", "if", "result_ix", ">", "search_limit", ":", "logger", ".", "info", "(", "'PMID%s: No match found within first %s results, '", "'giving up!'", "%", "(", "pmid", ",", "search_limit", ")", ")", "break", "xref_doi_url", "=", "result", "[", "'doi'", "]", "# Strip the URL prefix off of the DOI", "m", "=", "re", ".", "match", "(", "'^http://dx.doi.org/(.*)$'", ",", "xref_doi_url", ")", "xref_doi", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "# Get the XREF metadata using the DOI", "xref_meta", "=", "get_metadata", "(", "xref_doi", ")", "if", "xref_meta", "is", "None", ":", "continue", "xref_issn_list", "=", "xref_meta", ".", "get", "(", "'ISSN'", ")", "xref_page", "=", "xref_meta", ".", "get", "(", "'page'", ")", "# If there's no ISSN info for this article, skip to the next result", "if", "not", "xref_issn_list", ":", "logger", ".", "debug", "(", "'No ISSN found for DOI %s, skipping'", "%", "xref_doi_url", ")", "continue", "# If there's no 
page info for this article, skip to the next result", "if", "not", "xref_page", ":", "logger", ".", "debug", "(", "'No page number found for DOI %s, skipping'", "%", "xref_doi_url", ")", "continue", "# Now check for an ISSN match by looking for the set intersection", "# between the Pubmed ISSN list and the CrossRef ISSN list.", "matching_issns", "=", "set", "(", "pm_issn_list", ")", ".", "intersection", "(", "set", "(", "xref_issn_list", ")", ")", "# Before comparing page numbers, regularize the page numbers a bit.", "# Note that we only compare the first page number, since frequently", "# the final page number will simply be missing in one of the data", "# sources. We also canonicalize page numbers of the form '14E' to", "# 'E14' (which is the format used by Pubmed).", "pm_start_page", "=", "pm_page", ".", "split", "(", "'-'", ")", "[", "0", "]", ".", "upper", "(", ")", "xr_start_page", "=", "xref_page", ".", "split", "(", "'-'", ")", "[", "0", "]", ".", "upper", "(", ")", "if", "xr_start_page", ".", "endswith", "(", "'E'", ")", ":", "xr_start_page", "=", "'E'", "+", "xr_start_page", "[", ":", "-", "1", "]", "# Now compare the ISSN list and page numbers", "if", "matching_issns", "and", "pm_start_page", "==", "xr_start_page", ":", "# We found a match!", "mapped_doi", "=", "xref_doi", "break", "# Otherwise, keep looking through the results...", "# Return a DOI, or None if we didn't find one that met our matching", "# criteria", "return", "mapped_doi" ]
Get the DOI for a PMID by matching CrossRef and Pubmed metadata. Searches CrossRef using the article title and then accepts search hits only if they have a matching journal ISSN and page number with what is obtained from the Pubmed database.
[ "Get", "the", "DOI", "for", "a", "PMID", "by", "matching", "CrossRef", "and", "Pubmed", "metadata", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/crossref_client.py#L81-L177
train
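Usage sketch for the PMID-to-DOI matching above; it needs live access to both Pubmed and CrossRef, and the PMID is a placeholder.

doi = doi_query('27906130')  # placeholder PMID
if doi is None:
    print('No CrossRef hit passed the ISSN and start-page checks')
else:
    print('Mapped DOI:', doi)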
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_agent_rule_str
def get_agent_rule_str(agent): """Construct a string from an Agent as part of a PySB rule name.""" rule_str_list = [_n(agent.name)] # If it's a molecular agent if isinstance(agent, ist.Agent): for mod in agent.mods: mstr = abbrevs[mod.mod_type] if mod.residue is not None: mstr += mod.residue if mod.position is not None: mstr += mod.position rule_str_list.append('%s' % mstr) for mut in agent.mutations: res_from = mut.residue_from if mut.residue_from else 'mut' res_to = mut.residue_to if mut.residue_to else 'X' if mut.position is None: mut_site_name = res_from else: mut_site_name = res_from + mut.position mstr = mut_site_name + res_to rule_str_list.append(mstr) if agent.bound_conditions: for b in agent.bound_conditions: if b.is_bound: rule_str_list.append(_n(b.agent.name)) else: rule_str_list.append('n' + _n(b.agent.name)) if agent.location is not None: rule_str_list.append(_n(agent.location)) if agent.activity is not None: if agent.activity.is_active: rule_str_list.append(agent.activity.activity_type[:3]) else: rule_str_list.append(agent.activity.activity_type[:3] + '_inact') rule_str = '_'.join(rule_str_list) return rule_str
python
def get_agent_rule_str(agent): """Construct a string from an Agent as part of a PySB rule name.""" rule_str_list = [_n(agent.name)] # If it's a molecular agent if isinstance(agent, ist.Agent): for mod in agent.mods: mstr = abbrevs[mod.mod_type] if mod.residue is not None: mstr += mod.residue if mod.position is not None: mstr += mod.position rule_str_list.append('%s' % mstr) for mut in agent.mutations: res_from = mut.residue_from if mut.residue_from else 'mut' res_to = mut.residue_to if mut.residue_to else 'X' if mut.position is None: mut_site_name = res_from else: mut_site_name = res_from + mut.position mstr = mut_site_name + res_to rule_str_list.append(mstr) if agent.bound_conditions: for b in agent.bound_conditions: if b.is_bound: rule_str_list.append(_n(b.agent.name)) else: rule_str_list.append('n' + _n(b.agent.name)) if agent.location is not None: rule_str_list.append(_n(agent.location)) if agent.activity is not None: if agent.activity.is_active: rule_str_list.append(agent.activity.activity_type[:3]) else: rule_str_list.append(agent.activity.activity_type[:3] + '_inact') rule_str = '_'.join(rule_str_list) return rule_str
[ "def", "get_agent_rule_str", "(", "agent", ")", ":", "rule_str_list", "=", "[", "_n", "(", "agent", ".", "name", ")", "]", "# If it's a molecular agent", "if", "isinstance", "(", "agent", ",", "ist", ".", "Agent", ")", ":", "for", "mod", "in", "agent", ".", "mods", ":", "mstr", "=", "abbrevs", "[", "mod", ".", "mod_type", "]", "if", "mod", ".", "residue", "is", "not", "None", ":", "mstr", "+=", "mod", ".", "residue", "if", "mod", ".", "position", "is", "not", "None", ":", "mstr", "+=", "mod", ".", "position", "rule_str_list", ".", "append", "(", "'%s'", "%", "mstr", ")", "for", "mut", "in", "agent", ".", "mutations", ":", "res_from", "=", "mut", ".", "residue_from", "if", "mut", ".", "residue_from", "else", "'mut'", "res_to", "=", "mut", ".", "residue_to", "if", "mut", ".", "residue_to", "else", "'X'", "if", "mut", ".", "position", "is", "None", ":", "mut_site_name", "=", "res_from", "else", ":", "mut_site_name", "=", "res_from", "+", "mut", ".", "position", "mstr", "=", "mut_site_name", "+", "res_to", "rule_str_list", ".", "append", "(", "mstr", ")", "if", "agent", ".", "bound_conditions", ":", "for", "b", "in", "agent", ".", "bound_conditions", ":", "if", "b", ".", "is_bound", ":", "rule_str_list", ".", "append", "(", "_n", "(", "b", ".", "agent", ".", "name", ")", ")", "else", ":", "rule_str_list", ".", "append", "(", "'n'", "+", "_n", "(", "b", ".", "agent", ".", "name", ")", ")", "if", "agent", ".", "location", "is", "not", "None", ":", "rule_str_list", ".", "append", "(", "_n", "(", "agent", ".", "location", ")", ")", "if", "agent", ".", "activity", "is", "not", "None", ":", "if", "agent", ".", "activity", ".", "is_active", ":", "rule_str_list", ".", "append", "(", "agent", ".", "activity", ".", "activity_type", "[", ":", "3", "]", ")", "else", ":", "rule_str_list", ".", "append", "(", "agent", ".", "activity", ".", "activity_type", "[", ":", "3", "]", "+", "'_inact'", ")", "rule_str", "=", "'_'", ".", "join", "(", "rule_str_list", ")", "return", "rule_str" ]
Construct a string from an Agent as part of a PySB rule name.
[ "Construct", "a", "string", "from", "an", "Agent", "as", "part", "of", "a", "PySB", "rule", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L55-L90
train
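A small sketch of rule-name construction using the standard INDRA statements API; the exact abbreviation ('phospho' here) depends on this module's abbrevs table, so the expected output is indicative rather than guaranteed.

from indra.statements import Agent, ModCondition

mapk1 = Agent('MAPK1', mods=[ModCondition('phosphorylation', 'T', '185')])
print(get_agent_rule_str(mapk1))  # e.g. 'MAPK1_phosphoT185'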
sorgerlab/indra
indra/assemblers/pysb/assembler.py
add_rule_to_model
def add_rule_to_model(model, rule, annotations=None): """Add a Rule to a PySB model and handle duplicate component errors.""" try: model.add_component(rule) # If the rule was actually added, also add the annotations if annotations: model.annotations += annotations # If this rule is already in the model, issue a warning and continue except ComponentDuplicateNameError: msg = "Rule %s already in model! Skipping." % rule.name logger.debug(msg)
python
def add_rule_to_model(model, rule, annotations=None): """Add a Rule to a PySB model and handle duplicate component errors.""" try: model.add_component(rule) # If the rule was actually added, also add the annotations if annotations: model.annotations += annotations # If this rule is already in the model, issue a warning and continue except ComponentDuplicateNameError: msg = "Rule %s already in model! Skipping." % rule.name logger.debug(msg)
[ "def", "add_rule_to_model", "(", "model", ",", "rule", ",", "annotations", "=", "None", ")", ":", "try", ":", "model", ".", "add_component", "(", "rule", ")", "# If the rule was actually added, also add the annotations", "if", "annotations", ":", "model", ".", "annotations", "+=", "annotations", "# If this rule is already in the model, issue a warning and continue", "except", "ComponentDuplicateNameError", ":", "msg", "=", "\"Rule %s already in model! Skipping.\"", "%", "rule", ".", "name", "logger", ".", "debug", "(", "msg", ")" ]
Add a Rule to a PySB model and handle duplicate component errors.
[ "Add", "a", "Rule", "to", "a", "PySB", "model", "and", "handle", "duplicate", "component", "errors", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L93-L103
train
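A duplicate-handling sketch, assuming (as INDRA's assembler does) that pysb's SelfExporter is disabled so components can be added to the model explicitly; adding the same rule twice is silently skipped with a debug log.

from pysb import Model, Monomer, Parameter, Rule
from pysb.core import SelfExporter

SelfExporter.do_export = False  # manage components manually, as in this module
model = Model()
A = Monomer('A', ['b'])
model.add_component(A)
kf = Parameter('kf_dim', 1.0)
model.add_component(kf)
rule = Rule('A_dimerize', A(b=None) + A(b=None) >> A(b=1) % A(b=1), kf)
add_rule_to_model(model, rule)
add_rule_to_model(model, rule)  # ComponentDuplicateNameError -> logged, skipped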
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_create_parameter
def get_create_parameter(model, param): """Return parameter with given name, creating it if needed. If unique is false and the parameter exists, the value is not changed; if it does not exist, it will be created. If unique is true then upon conflict a number is added to the end of the parameter name. Parameters ---------- model : pysb.Model The model to add the parameter to param : Param An assembly parameter object """ norm_name = _n(param.name) parameter = model.parameters.get(norm_name) if not param.unique and parameter is not None: return parameter if param.unique: pnum = 1 while True: pname = norm_name + '_%d' % pnum if model.parameters.get(pname) is None: break pnum += 1 else: pname = norm_name parameter = Parameter(pname, param.value) model.add_component(parameter) return parameter
python
def get_create_parameter(model, param): """Return parameter with given name, creating it if needed. If unique is false and the parameter exists, the value is not changed; if it does not exist, it will be created. If unique is true then upon conflict a number is added to the end of the parameter name. Parameters ---------- model : pysb.Model The model to add the parameter to param : Param An assembly parameter object """ norm_name = _n(param.name) parameter = model.parameters.get(norm_name) if not param.unique and parameter is not None: return parameter if param.unique: pnum = 1 while True: pname = norm_name + '_%d' % pnum if model.parameters.get(pname) is None: break pnum += 1 else: pname = norm_name parameter = Parameter(pname, param.value) model.add_component(parameter) return parameter
[ "def", "get_create_parameter", "(", "model", ",", "param", ")", ":", "norm_name", "=", "_n", "(", "param", ".", "name", ")", "parameter", "=", "model", ".", "parameters", ".", "get", "(", "norm_name", ")", "if", "not", "param", ".", "unique", "and", "parameter", "is", "not", "None", ":", "return", "parameter", "if", "param", ".", "unique", ":", "pnum", "=", "1", "while", "True", ":", "pname", "=", "norm_name", "+", "'_%d'", "%", "pnum", "if", "model", ".", "parameters", ".", "get", "(", "pname", ")", "is", "None", ":", "break", "pnum", "+=", "1", "else", ":", "pname", "=", "norm_name", "parameter", "=", "Parameter", "(", "pname", ",", "param", ".", "value", ")", "model", ".", "add_component", "(", "parameter", ")", "return", "parameter" ]
Return parameter with given name, creating it if needed. If unique is false and the parameter exists, the value is not changed; if it does not exist, it will be created. If unique is true then upon conflict a number is added to the end of the parameter name. Parameters ---------- model : pysb.Model The model to add the parameter to param : Param An assembly parameter object
[ "Return", "parameter", "with", "given", "name", "creating", "it", "if", "needed", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L106-L138
train
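A sketch of the unique-naming behavior; the Param argument only needs name, value, and unique attributes, so a stand-in namedtuple is used here (the real Param class is defined elsewhere in this module), and SelfExporter is again assumed to be disabled.

from collections import namedtuple
from pysb import Model
from pysb.core import SelfExporter

SelfExporter.do_export = False
Param = namedtuple('Param', ['name', 'value', 'unique'])  # stand-in only
model = Model()
p1 = get_create_parameter(model, Param('kf_bind', 1e-06, False))
p2 = get_create_parameter(model, Param('kf_bind', 1e-06, True))
print(p1.name, p2.name)  # 'kf_bind', 'kf_bind_1'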
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_uncond_agent
def get_uncond_agent(agent): """Construct the unconditional state of an Agent. The unconditional Agent is a copy of the original agent but without any bound conditions and modification conditions. Mutation conditions, however, are preserved since they are static. """ agent_uncond = ist.Agent(_n(agent.name), mutations=agent.mutations) return agent_uncond
python
def get_uncond_agent(agent): """Construct the unconditional state of an Agent. The unconditional Agent is a copy of the original agent but without any bound conditions and modification conditions. Mutation conditions, however, are preserved since they are static. """ agent_uncond = ist.Agent(_n(agent.name), mutations=agent.mutations) return agent_uncond
[ "def", "get_uncond_agent", "(", "agent", ")", ":", "agent_uncond", "=", "ist", ".", "Agent", "(", "_n", "(", "agent", ".", "name", ")", ",", "mutations", "=", "agent", ".", "mutations", ")", "return", "agent_uncond" ]
Construct the unconditional state of an Agent. The unconditional Agent is a copy of the original agent but without any bound conditions and modification conditions. Mutation conditions, however, are preserved since they are static.
[ "Construct", "the", "unconditional", "state", "of", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L141-L149
train
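A short check of the stripping behavior described above: bound and modification conditions are dropped while mutations survive. Assumes the standard INDRA statements API.

from indra.statements import Agent, ModCondition, MutCondition

braf = Agent('BRAF', mods=[ModCondition('phosphorylation')],
             mutations=[MutCondition('600', 'V', 'E')])
uncond = get_uncond_agent(braf)
print(uncond.mods, uncond.mutations)  # [] and the V600E mutation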