Please provide a description of the function:
def make_stmt(stmt_cls, tf_agent, target_agent, pmid):
    ev = Evidence(source_api='trrust', pmid=pmid)
    return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent),
                    evidence=[ev])
[ "Return a Statement based on its type, agents, and PMID." ]
Please provide a description of the function:
def get_grounded_agent(gene_name):
    db_refs = {'TEXT': gene_name}
    if gene_name in hgnc_map:
        gene_name = hgnc_map[gene_name]
    hgnc_id = hgnc_client.get_hgnc_id(gene_name)
    if hgnc_id:
        db_refs['HGNC'] = hgnc_id
        up_id = hgnc_client.get_uniprot_id(hgnc_id)
        if up_id:
            db_refs['UP'] = up_id
    agent = Agent(gene_name, db_refs=db_refs)
    return agent
[ "Return a grounded Agent based on an HGNC symbol." ]
Please provide a description of the function:
def extract_statements(self):
    for _, (tf, target, effect, refs) in self.df.iterrows():
        tf_agent = get_grounded_agent(tf)
        target_agent = get_grounded_agent(target)
        if effect == 'Activation':
            stmt_cls = IncreaseAmount
        elif effect == 'Repression':
            stmt_cls = DecreaseAmount
        else:
            continue
        pmids = refs.split(';')
        for pmid in pmids:
            stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid)
            self.statements.append(stmt)
[ "Process the table to extract Statements." ]
Please provide a description of the function:
def process_paper(model_name, pmid):
    json_directory = os.path.join(model_name, 'jsons')
    json_path = os.path.join(json_directory, 'PMID%s.json' % pmid)
    if pmid.startswith('api') or pmid.startswith('PMID'):
        logger.warning('Invalid PMID: %s' % pmid)
    # If the paper has been read, use the json output file
    if os.path.exists(json_path):
        rp = reach.process_json_file(json_path, citation=pmid)
        txt_format = 'existing_json'
    # If the paper has not been read, download the text and read
    else:
        try:
            txt, txt_format = get_full_text(pmid, 'pmid')
        except Exception:
            return None, None
        if txt_format == 'pmc_oa_xml':
            rp = reach.process_nxml_str(txt, citation=pmid, offline=True,
                                        output_fname=json_path)
        elif txt_format == 'elsevier_xml':
            # Extract the raw text from the Elsevier XML
            txt = elsevier_client.extract_text(txt)
            rp = reach.process_text(txt, citation=pmid, offline=True,
                                    output_fname=json_path)
        elif txt_format == 'abstract':
            rp = reach.process_text(txt, citation=pmid, offline=True,
                                    output_fname=json_path)
        else:
            rp = None
    if rp is not None:
        check_pmids(rp.statements)
    return rp, txt_format
[ "Process a paper with the given pubmed identifier\n\n Parameters\n ----------\n model_name : str\n The directory for the INDRA machine\n pmid : str\n The PMID to process.\n\n Returns\n -------\n rp : ReachProcessor\n A ReachProcessor containing the extracted INDRA Statements\n in rp.statements.\n txt_format : str\n A string representing the format of the text\n " ]
Please provide a description of the function:
def process_paper_helper(model_name, pmid, start_time_local):
    try:
        if not aws_available:
            rp, txt_format = process_paper(model_name, pmid)
        else:
            rp, txt_format = process_paper_aws(pmid, start_time_local)
    except:
        logger.exception('uncaught exception while processing %s', pmid)
        return None, None
    return rp, txt_format
[ "Wraps processing a paper by either a local or remote service\n and caches any uncaught exceptions" ]
Please provide a description of the function:def run_with_search_helper(model_path, config, num_days=None): logger.info('-------------------------') logger.info(time.strftime('%c')) if not os.path.isdir(model_path): logger.error('%s is not a directory', model_path) sys.exit() default_config_fname = os.path.join(model_path, 'config.yaml') if config: config = get_machine_config(config) elif os.path.exists(default_config_fname): logger.info('Loading default configuration from %s', default_config_fname) config = get_machine_config(default_config_fname) else: logger.error('Configuration file argument missing.') sys.exit() # Probability cutoff for filtering statements default_belief_threshold = 0.95 belief_threshold = config.get('belief_threshold') if belief_threshold is None: belief_threshold = default_belief_threshold msg = 'Belief threshold argument (belief_threshold) not specified.' + \ ' Using default belief threshold %.2f' % default_belief_threshold logger.info(msg) else: logger.info('Using belief threshold: %.2f' % belief_threshold) twitter_cred = get_twitter_cred(config) if twitter_cred: logger.info('Using Twitter with given credentials.') else: logger.info('Not using Twitter due to missing credentials.') gmail_cred = get_gmail_cred(config) if gmail_cred: logger.info('Using Gmail with given credentials.') else: logger.info('Not using Gmail due to missing credentials.') ndex_cred = get_ndex_cred(config) if ndex_cred: logger.info('Using NDEx with given credentials.') else: logger.info('Not using NDEx due to missing information.') pmids = {} # Get email PMIDs if gmail_cred: logger.info('Getting PMIDs from emails.') try: email_pmids = get_email_pmids(gmail_cred) # Put the email_pmids into the pmids dictionary pmids['Gmail'] = email_pmids logger.info('Collected %d PMIDs from Gmail', len(email_pmids)) except Exception: logger.exception('Could not get PMIDs from Gmail, continuing.') # Get PMIDs for general search_terms and genes search_genes = config.get('search_genes') search_terms = config.get('search_terms') if not search_terms: logger.info('No search terms argument (search_terms) specified.') else: if search_genes is not None: search_terms += search_genes logger.info('Using search terms: %s' % ', '.join(search_terms)) if num_days is None: num_days = int(config.get('search_terms_num_days', 5)) logger.info('Searching the last %d days', num_days) pmids_term = get_searchterm_pmids(search_terms, num_days=num_days) num_pmids = len(set(itt.chain.from_iterable(pmids_term.values()))) logger.info('Collected %d PMIDs from PubMed search_terms.', num_pmids) pmids = _extend_dict(pmids, pmids_term) # Get optional grounding map gm_path = config.get('grounding_map_path') if gm_path: try: from indra.preassembler.grounding_mapper import load_grounding_map grounding_map = load_grounding_map(gm_path) except Exception as e: logger.error('Could not load grounding map from %s' % gm_path) logger.error(e) grounding_map = None else: grounding_map = None ''' # Get PMIDs for search_genes # Temporarily removed because Entrez-based article searches # are lagging behind and cannot be time-limited if not search_genes: logger.info('No search genes argument (search_genes) specified.') else: logger.info('Using search genes: %s' % ', '.join(search_genes)) pmids_gene = get_searchgenes_pmids(search_genes, num_days=5) num_pmids = sum([len(pm) for pm in pmids_gene.values()]) logger.info('Collected %d PMIDs from PubMed search_genes.' 
% num_pmids) pmids = _extend_dict(pmids, pmids_gene) ''' run_machine( model_path, pmids, belief_threshold, search_genes=search_genes, ndex_cred=ndex_cred, twitter_cred=twitter_cred, grounding_map=grounding_map )
[]
Please provide a description of the function:
def _load_data():
    # Get the csv reader object.
    csv_path = path.join(HERE, path.pardir, path.pardir, 'resources',
                         DATAFILE_NAME)
    data_iter = list(read_unicode_csv(csv_path))

    # Get the headers.
    headers = data_iter[0]

    # For some reason this heading is oddly formatted and inconsistent with
    # the rest, or with the usual key-style for dicts.
    headers[headers.index('Approved.Symbol')] = 'approved_symbol'
    return [{header: val for header, val in zip(headers, line)}
            for line in data_iter[1:]]
[ "Load the data from the csv in data.\n\n The \"gene_id\" is the Entrez gene id, and the \"approved_symbol\" is the\n standard gene symbol. The \"hms_id\" is the LINCS ID for the drug.\n\n Returns\n -------\n data : list[dict]\n A list of dicts of row values keyed by the column headers extracted from\n the csv file, described above.\n " ]
Please provide a description of the function:
def run_eidos(endpoint, *args):
    # Make the full path to the class that should be used
    call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
    cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
    logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
    subprocess.call(cmd)
[ "Run a given enpoint of Eidos through the command line.\n\n Parameters\n ----------\n endpoint : str\n The class within the Eidos package to run, for instance\n 'apps.ExtractFromDirectory' will run\n 'org.clulab.wm.eidos.apps.ExtractFromDirectory'\n *args\n Any further arguments to be passed as inputs to the class\n being run.\n " ]
Please provide a description of the function:
def extract_from_directory(path_in, path_out):
    path_in = os.path.realpath(os.path.expanduser(path_in))
    path_out = os.path.realpath(os.path.expanduser(path_out))
    logger.info('Running Eidos on input folder %s' % path_in)
    run_eidos('apps.ExtractFromDirectory', path_in, path_out)
[ "Run Eidos on a set of text files in a folder.\n\n The output is produced in the specified output folder but\n the output files aren't processed by this function.\n\n Parameters\n ----------\n path_in : str\n Path to an input folder with some text files\n path_out : str\n Path to an output folder in which Eidos places the output\n JSON-LD files\n " ]
Please provide a description of the function:
def extract_and_process(path_in, path_out):
    path_in = os.path.realpath(os.path.expanduser(path_in))
    path_out = os.path.realpath(os.path.expanduser(path_out))
    extract_from_directory(path_in, path_out)
    jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
    logger.info('Found %d JSON-LD files to process in %s' %
                (len(jsons), path_out))
    stmts = []
    for json in jsons:
        ep = process_json_file(json)
        if ep:
            stmts += ep.statements
    return stmts
[ "Run Eidos on a set of text files and process output with INDRA.\n\n The output is produced in the specified output folder but\n the output files aren't processed by this function.\n\n Parameters\n ----------\n path_in : str\n Path to an input folder with some text files\n path_out : str\n Path to an output folder in which Eidos places the output\n JSON-LD files\n\n Returns\n -------\n stmts : list[indra.statements.Statements]\n A list of INDRA Statements\n " ]
Please provide a description of the function:def get_statements(subject=None, object=None, agents=None, stmt_type=None, use_exact_type=False, persist=True, timeout=None, simple_response=False, ev_limit=10, best_first=True, tries=2, max_stmts=None): processor = IndraDBRestProcessor(subject, object, agents, stmt_type, use_exact_type, persist, timeout, ev_limit, best_first, tries, max_stmts) # Format the result appropriately. if simple_response: ret = processor.statements else: ret = processor return ret
[ "Get a processor for the INDRA DB web API matching given agents and type.\n\n There are two types of responses available. You can just get a list of\n INDRA Statements, or you can get an IndraDBRestProcessor object, which allow\n Statements to be loaded in a background thread, providing a sample of the\n best* content available promptly in the sample_statements attribute, and\n populates the statements attribute when the paged load is complete.\n\n The latter should be used in all new code, and where convenient the prior\n should be converted to use the processor, as this option may be removed in\n the future.\n\n * In the sense of having the most supporting evidence.\n\n Parameters\n ----------\n subject/object : str\n Optionally specify the subject and/or object of the statements in\n you wish to get from the database. By default, the namespace is assumed\n to be HGNC gene names, however you may specify another namespace by\n including `@<namespace>` at the end of the name string. For example, if\n you want to specify an agent by chebi, you could use `CHEBI:6801@CHEBI`,\n or if you wanted to use the HGNC id, you could use `6871@HGNC`.\n agents : list[str]\n A list of agents, specified in the same manner as subject and object,\n but without specifying their grammatical position.\n stmt_type : str\n Specify the types of interactions you are interested in, as indicated\n by the sub-classes of INDRA's Statements. This argument is *not* case\n sensitive. If the statement class given has sub-classes\n (e.g. RegulateAmount has IncreaseAmount and DecreaseAmount), then both\n the class itself, and its subclasses, will be queried, by default. If\n you do not want this behavior, set use_exact_type=True. Note that if\n max_stmts is set, it is possible only the exact statement type will\n be returned, as this is the first searched. The processor then cycles\n through the types, getting a page of results for each type and adding it\n to the quota, until the max number of statements is reached.\n use_exact_type : bool\n If stmt_type is given, and you only want to search for that specific\n statement type, set this to True. Default is False.\n persist : bool\n Default is True. When False, if a query comes back limited (not all\n results returned), just give up and pass along what was returned.\n Otherwise, make further queries to get the rest of the data (which may\n take some time).\n timeout : positive int or None\n If an int, block until the work is done and statements are retrieved, or\n until the timeout has expired, in which case the results so far will be\n returned in the response object, and further results will be added in\n a separate thread as they become available. If simple_response is True,\n all statements available will be returned. Otherwise (if None), block\n indefinitely until all statements are retrieved. Default is None.\n simple_response : bool\n If True, a simple list of statements is returned (thus block should also\n be True). If block is False, only the original sample will be returned\n (as though persist was False), until the statements are done loading, in\n which case the rest should appear in the list. This behavior is not\n encouraged. Default is False (which breaks backwards compatibility with\n usage of INDRA versions from before 1/22/2019). WE ENCOURAGE ALL NEW\n USE-CASES TO USE THE PROCESSOR, AS THIS FEATURE MAY BE REMOVED AT A\n LATER DATE.\n ev_limit : int or None\n Limit the amount of evidence returned per Statement. 
Default is 10.\n best_first : bool\n If True, the preassembled statements will be sorted by the amount of\n evidence they have, and those with the most evidence will be\n prioritized. When using `max_stmts`, this means you will get the \"best\"\n statements. If False, statements will be queried in arbitrary order.\n tries : int > 0\n Set the number of times to try the query. The database often caches\n results, so if a query times out the first time, trying again after a\n timeout will often succeed fast enough to avoid a timeout. This can also\n help gracefully handle an unreliable connection, if you're willing to\n wait. Default is 2.\n max_stmts : int or None\n Select the maximum number of statements to return. When set less than\n 1000 the effect is much the same as setting persist to false, and will\n guarantee a faster response. Default is None.\n\n Returns\n -------\n processor : :py:class:`IndraDBRestProcessor`\n An instance of the IndraDBRestProcessor, which has an attribute\n `statements` which will be populated when the query/queries are done.\n This is the default behavior, and is encouraged in all future cases,\n however a simple list of statements may be returned using the\n `simple_response` option described above.\n " ]
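A hedged usage sketch based on the agent syntax documented above; it assumes the function is importable from the INDRA DB REST client module and that the web service is reachable:

# MEK and ERK are FamPlex families, specified with the @FPLX suffix.
p = get_statements(subject='MEK@FPLX', object='ERK@FPLX',
                   stmt_type='Phosphorylation')
print('%d statements retrieved' % len(p.statements))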
Please provide a description of the function:
def get_statements_by_hash(hash_list, ev_limit=100, best_first=True, tries=2):
    if not isinstance(hash_list, list):
        raise ValueError("The `hash_list` input must be a list, not %s."
                         % type(hash_list))
    if not hash_list:
        return []
    if isinstance(hash_list[0], str):
        hash_list = [int(h) for h in hash_list]
    if not all([isinstance(h, int) for h in hash_list]):
        raise ValueError("Hashes must be ints or strings that can be "
                         "converted into ints.")
    resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit,
                                    data={'hashes': hash_list},
                                    best_first=best_first, tries=tries)
    return stmts_from_json(resp.json()['statements'].values())
[ "Get fully formed statements from a list of hashes.\n\n Parameters\n ----------\n hash_list : list[int or str]\n A list of statement hashes.\n ev_limit : int or None\n Limit the amount of evidence returned per Statement. Default is 100.\n best_first : bool\n If True, the preassembled statements will be sorted by the amount of\n evidence they have, and those with the most evidence will be\n prioritized. When using `max_stmts`, this means you will get the \"best\"\n statements. If False, statements will be queried in arbitrary order.\n tries : int > 0\n Set the number of times to try the query. The database often caches\n results, so if a query times out the first time, trying again after a\n timeout will often succeed fast enough to avoid a timeout. This can\n also help gracefully handle an unreliable connection, if you're\n willing to wait. Default is 2.\n " ]
Please provide a description of the function:def get_statements_for_paper(ids, ev_limit=10, best_first=True, tries=2, max_stmts=None): id_l = [{'id': id_val, 'type': id_type} for id_type, id_val in ids] resp = submit_statement_request('post', 'from_papers', data={'ids': id_l}, ev_limit=ev_limit, best_first=best_first, tries=tries, max_stmts=max_stmts) stmts_json = resp.json()['statements'] return stmts_from_json(stmts_json.values())
[ "Get the set of raw Statements extracted from a paper given by the id.\n\n Parameters\n ----------\n ids : list[(<id type>, <id value>)]\n A list of tuples with ids and their type. The type can be any one of\n 'pmid', 'pmcid', 'doi', 'pii', 'manuscript id', or 'trid', which is the\n primary key id of the text references in the database.\n ev_limit : int or None\n Limit the amount of evidence returned per Statement. Default is 10.\n best_first : bool\n If True, the preassembled statements will be sorted by the amount of\n evidence they have, and those with the most evidence will be\n prioritized. When using `max_stmts`, this means you will get the \"best\"\n statements. If False, statements will be queried in arbitrary order.\n tries : int > 0\n Set the number of times to try the query. The database often caches\n results, so if a query times out the first time, trying again after a\n timeout will often succeed fast enough to avoid a timeout. This can also\n help gracefully handle an unreliable connection, if you're willing to\n wait. Default is 2.\n max_stmts : int or None\n Select a maximum number of statements to be returned. Default is None.\n\n Returns\n -------\n stmts : list[:py:class:`indra.statements.Statement`]\n A list of INDRA Statement instances.\n " ]
Please provide a description of the function:def submit_curation(hash_val, tag, curator, text=None, source='indra_rest_client', ev_hash=None, is_test=False): data = {'tag': tag, 'text': text, 'curator': curator, 'source': source, 'ev_hash': ev_hash} url = 'curation/submit/%s' % hash_val if is_test: qstr = '?test' else: qstr = '' return make_db_rest_request('post', url, qstr, data=data)
[ "Submit a curation for the given statement at the relevant level.\n\n Parameters\n ----------\n hash_val : int\n The hash corresponding to the statement.\n tag : str\n A very short phrase categorizing the error or type of curation,\n e.g. \"grounding\" for a grounding error, or \"correct\" if you are\n marking a statement as correct.\n curator : str\n The name or identifier for the curator.\n text : str\n A brief description of the problem.\n source : str\n The name of the access point through which the curation was performed.\n The default is 'direct_client', meaning this function was used\n directly. Any higher-level application should identify itself here.\n ev_hash : int\n A hash of the sentence and other evidence information. Elsewhere\n referred to as `source_hash`.\n is_test : bool\n Used in testing. If True, no curation will actually be added to the\n database.\n " ]
Please provide a description of the function:def get_statement_queries(stmts, **params): def pick_ns(ag): for ns in ['HGNC', 'FPLX', 'CHEMBL', 'CHEBI', 'GO', 'MESH']: if ns in ag.db_refs.keys(): dbid = ag.db_refs[ns] break else: ns = 'TEXT' dbid = ag.name return '%s@%s' % (dbid, ns) queries = [] url_base = get_url_base('statements/from_agents') non_binary_statements = [Complex, SelfModification, ActiveForm] for stmt in stmts: kwargs = {} if type(stmt) not in non_binary_statements: for pos, ag in zip(['subject', 'object'], stmt.agent_list()): if ag is not None: kwargs[pos] = pick_ns(ag) else: for i, ag in enumerate(stmt.agent_list()): if ag is not None: kwargs['agent%d' % i] = pick_ns(ag) kwargs['type'] = stmt.__class__.__name__ kwargs.update(params) query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items() if v is not None]) queries.append(url_base + query_str) return queries
[ "Get queries used to search based on a statement.\n\n In addition to the stmts, you can enter any parameters standard to the\n query. See https://github.com/indralab/indra_db/rest_api for a full list.\n\n Parameters\n ----------\n stmts : list[Statement]\n A list of INDRA statements.\n " ]
Please provide a description of the function:
def save(self, model_fname='model.pkl'):
    with open(model_fname, 'wb') as fh:
        pickle.dump(self.stmts, fh, protocol=4)
[ "Save the state of the IncrementalModel in a pickle file.\n\n Parameters\n ----------\n model_fname : Optional[str]\n The name of the pickle file to save the state of the\n IncrementalModel in. Default: model.pkl\n " ]
Please provide a description of the function:
def add_statements(self, pmid, stmts):
    if pmid not in self.stmts:
        self.stmts[pmid] = stmts
    else:
        self.stmts[pmid] += stmts
[ "Add INDRA Statements to the incremental model indexed by PMID.\n\n Parameters\n ----------\n pmid : str\n The PMID of the paper from which statements were extracted.\n stmts : list[indra.statements.Statement]\n A list of INDRA Statements to be added to the model.\n " ]
Please provide a description of the function:def preassemble(self, filters=None, grounding_map=None): stmts = self.get_statements() # Filter out hypotheses stmts = ac.filter_no_hypothesis(stmts) # Fix grounding if grounding_map is not None: stmts = ac.map_grounding(stmts, grounding_map=grounding_map) else: stmts = ac.map_grounding(stmts) if filters and ('grounding' in filters): stmts = ac.filter_grounded_only(stmts) # Fix sites stmts = ac.map_sequence(stmts) if filters and 'human_only' in filters: stmts = ac.filter_human_only(stmts) # Run preassembly stmts = ac.run_preassembly(stmts, return_toplevel=False) # Run relevance filter stmts = self._relevance_filter(stmts, filters) # Save Statements self.assembled_stmts = stmts
[ "Preassemble the Statements collected in the model.\n\n Use INDRA's GroundingMapper, Preassembler and BeliefEngine\n on the IncrementalModel and save the unique statements and\n the top level statements in class attributes.\n\n Currently the following filter options are implemented:\n - grounding: require that all Agents in statements are grounded\n - human_only: require that all proteins are human proteins\n - prior_one: require that at least one Agent is in the prior model\n - prior_all: require that all Agents are in the prior model\n\n Parameters\n ----------\n filters : Optional[list[str]]\n A list of filter options to apply when choosing the statements.\n See description above for more details. Default: None\n grounding_map : Optional[dict]\n A user supplied grounding map which maps a string to a\n dictionary of database IDs (in the format used by Agents'\n db_refs).\n " ]
Please provide a description of the function:def get_model_agents(self): model_stmts = self.get_statements() agents = [] for stmt in model_stmts: for a in stmt.agent_list(): if a is not None: agents.append(a) return agents
[ "Return a list of all Agents from all Statements.\n\n Returns\n -------\n agents : list[indra.statements.Agent]\n A list of Agents that are in the model.\n " ]
Please provide a description of the function:
def get_statements(self):
    stmt_lists = [v for k, v in self.stmts.items()]
    stmts = []
    for s in stmt_lists:
        stmts += s
    return stmts
[ "Return a list of all Statements in a single list.\n\n Returns\n -------\n stmts : list[indra.statements.Statement]\n A list of all the INDRA Statements in the model.\n " ]
Please provide a description of the function:
def get_statements_noprior(self):
    stmt_lists = [v for k, v in self.stmts.items() if k != 'prior']
    stmts = []
    for s in stmt_lists:
        stmts += s
    return stmts
[ "Return a list of all non-prior Statements in a single list.\n\n Returns\n -------\n stmts : list[indra.statements.Statement]\n A list of all the INDRA Statements in the model (excluding\n the prior).\n " ]
Please provide a description of the function:def process_ndex_neighborhood(gene_names, network_id=None, rdf_out='bel_output.rdf', print_output=True): logger.warning('This method is deprecated and the results are not ' 'guaranteed to be correct. Please use ' 'process_pybel_neighborhood instead.') if network_id is None: network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb' url = ndex_bel2rdf + '/network/%s/asBELRDF/query' % network_id params = {'searchString': ' '.join(gene_names)} # The ndex_client returns the rdf as the content of a json dict res_json = ndex_client.send_request(url, params, is_json=True) if not res_json: logger.error('No response for NDEx neighborhood query.') return None if res_json.get('error'): error_msg = res_json.get('message') logger.error('BEL/RDF response contains error: %s' % error_msg) return None rdf = res_json.get('content') if not rdf: logger.error('BEL/RDF response is empty.') return None with open(rdf_out, 'wb') as fh: fh.write(rdf.encode('utf-8')) bp = process_belrdf(rdf, print_output=print_output) return bp
[ "Return a BelRdfProcessor for an NDEx network neighborhood.\n\n Parameters\n ----------\n gene_names : list\n A list of HGNC gene symbols to search the neighborhood of.\n Example: ['BRAF', 'MAP2K1']\n network_id : Optional[str]\n The UUID of the network in NDEx. By default, the BEL Large Corpus\n network is used.\n rdf_out : Optional[str]\n Name of the output file to save the RDF returned by the web service.\n This is useful for debugging purposes or to repeat the same query\n on an offline RDF file later. Default: bel_output.rdf\n\n Returns\n -------\n bp : BelRdfProcessor\n A BelRdfProcessor object which contains INDRA Statements in bp.statements.\n\n Notes\n -----\n This function calls process_belrdf to the returned RDF string from the\n webservice.\n " ]
Please provide a description of the function:def process_pybel_neighborhood(gene_names, network_file=None, network_type='belscript', **kwargs): if network_file is None: # Use large corpus as base network network_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir, os.path.pardir, 'data', 'large_corpus.bel') if network_type == 'belscript': bp = process_belscript(network_file, **kwargs) elif network_type == 'json': bp = process_json_file(network_file) filtered_stmts = [] for stmt in bp.statements: found = False for agent in stmt.agent_list(): if agent is not None: if agent.name in gene_names: found = True if found: filtered_stmts.append(stmt) bp.statements = filtered_stmts return bp
[ "Return PybelProcessor around neighborhood of given genes in a network.\n\n This function processes the given network file and filters the returned\n Statements to ones that contain genes in the given list.\n\n Parameters\n ----------\n network_file : Optional[str]\n Path to the network file to process. If not given, by default, the\n BEL Large Corpus is used.\n network_type : Optional[str]\n This function allows processing both BEL Script files and JSON files.\n This argument controls which type is assumed to be processed, and the\n value can be either 'belscript' or 'json'. Default: bel_script\n\n Returns\n -------\n bp : PybelProcessor\n A PybelProcessor object which contains INDRA Statements in\n bp.statements.\n " ]
Please provide a description of the function:def process_belrdf(rdf_str, print_output=True): g = rdflib.Graph() try: g.parse(data=rdf_str, format='nt') except ParseError as e: logger.error('Could not parse rdf: %s' % e) return None # Build INDRA statements from RDF bp = BelRdfProcessor(g) bp.get_complexes() bp.get_activating_subs() bp.get_modifications() bp.get_activating_mods() bp.get_transcription() bp.get_activation() bp.get_conversions() # Print some output about the process if print_output: bp.print_statement_coverage() bp.print_statements() return bp
[ "Return a BelRdfProcessor for a BEL/RDF string.\n\n Parameters\n ----------\n rdf_str : str\n A BEL/RDF string to be processed. This will usually come from reading\n a .rdf file.\n\n Returns\n -------\n bp : BelRdfProcessor\n A BelRdfProcessor object which contains INDRA Statements in\n bp.statements.\n\n Notes\n -----\n This function calls all the specific get_type_of_mechanism()\n functions of the newly constructed BelRdfProcessor to extract\n INDRA Statements.\n " ]
Please provide a description of the function:def process_pybel_graph(graph): bp = PybelProcessor(graph) bp.get_statements() if bp.annot_manager.failures: logger.warning('missing %d annotation pairs', sum(len(v) for v in bp.annot_manager.failures.values())) return bp
[ "Return a PybelProcessor by processing a PyBEL graph.\n\n Parameters\n ----------\n graph : pybel.struct.BELGraph\n A PyBEL graph to process\n\n Returns\n -------\n bp : PybelProcessor\n A PybelProcessor object which contains INDRA Statements in\n bp.statements.\n " ]
Please provide a description of the function:
def process_belscript(file_name, **kwargs):
    if 'citation_clearing' not in kwargs:
        kwargs['citation_clearing'] = False
    if 'no_identifier_validation' not in kwargs:
        kwargs['no_identifier_validation'] = True
    pybel_graph = pybel.from_path(file_name, **kwargs)
    return process_pybel_graph(pybel_graph)
[ "Return a PybelProcessor by processing a BEL script file.\n\n Key word arguments are passed directly to pybel.from_path,\n for further information, see\n pybel.readthedocs.io/en/latest/io.html#pybel.from_path\n Some keyword arguments we use here differ from the defaults\n of PyBEL, namely we set `citation_clearing` to False\n and `no_identifier_validation` to True.\n\n Parameters\n ----------\n file_name : str\n The path to a BEL script file.\n\n Returns\n -------\n bp : PybelProcessor\n A PybelProcessor object which contains INDRA Statements in\n bp.statements.\n " ]
Please provide a description of the function:
def process_json_file(file_name):
    with open(file_name, 'rt') as fh:
        pybel_graph = pybel.from_json_file(fh, False)
    return process_pybel_graph(pybel_graph)
[ "Return a PybelProcessor by processing a Node-Link JSON file.\n\n For more information on this format, see:\n http://pybel.readthedocs.io/en/latest/io.html#node-link-json\n\n Parameters\n ----------\n file_name : str\n The path to a Node-Link JSON file.\n\n Returns\n -------\n bp : PybelProcessor\n A PybelProcessor object which contains INDRA Statements in\n bp.statements.\n " ]
Please provide a description of the function:
def process_cbn_jgif_file(file_name):
    with open(file_name, 'r') as jgf:
        return process_pybel_graph(pybel.from_cbn_jgif(json.load(jgf)))
[ "Return a PybelProcessor by processing a CBN JGIF JSON file.\n\n Parameters\n ----------\n file_name : str\n The path to a CBN JGIF JSON file.\n\n Returns\n -------\n bp : PybelProcessor\n A PybelProcessor object which contains INDRA Statements in\n bp.statements.\n " ]
Please provide a description of the function:
def update_famplex():
    famplex_url_pattern = \
        'https://raw.githubusercontent.com/sorgerlab/famplex/master/%s.csv'
    csv_names = ['entities', 'equivalences', 'gene_prefixes',
                 'grounding_map', 'relations']
    for csv_name in csv_names:
        url = famplex_url_pattern % csv_name
        save_from_http(url, os.path.join(path, 'famplex/%s.csv' % csv_name))
[ "Update all the CSV files that form the FamPlex resource." ]
Please provide a description of the function:def update_lincs_small_molecules(): url = 'http://lincs.hms.harvard.edu/db/sm/' sm_data = load_lincs_csv(url) sm_dict = {d['HMS LINCS ID']: d.copy() for d in sm_data} assert len(sm_dict) == len(sm_data), "We lost data." fname = os.path.join(path, 'lincs_small_molecules.json') with open(fname, 'w') as fh: json.dump(sm_dict, fh, indent=1)
[ "Load the csv of LINCS small molecule metadata into a dict.\n\n Produces a dict keyed by HMS LINCS small molecule ids, with the metadata\n contained in a dict of row values keyed by the column headers extracted\n from the csv.\n " ]
Please provide a description of the function:def update_lincs_proteins(): url = 'http://lincs.hms.harvard.edu/db/proteins/' prot_data = load_lincs_csv(url) prot_dict = {d['HMS LINCS ID']: d.copy() for d in prot_data} assert len(prot_dict) == len(prot_data), "We lost data." fname = os.path.join(path, 'lincs_proteins.json') with open(fname, 'w') as fh: json.dump(prot_dict, fh, indent=1)
[ "Load the csv of LINCS protein metadata into a dict.\n\n Produces a dict keyed by HMS LINCS protein ids, with the metadata\n contained in a dict of row values keyed by the column headers extracted\n from the csv.\n " ]
Please provide a description of the function:
def _get_is_direct(stmt):
    '''Return True if there is evidence that the statement is a direct
    interaction.

    If any of the evidences associated with the statement indicates a
    direct interaction then we assume the interaction is direct. If there
    is no evidence for the interaction being indirect then we default to
    direct.
    '''
    any_indirect = False
    for ev in stmt.evidence:
        if ev.epistemics.get('direct') is True:
            return True
        elif ev.epistemics.get('direct') is False:
            # This guarantees that we have seen at least
            # some evidence that the statement is indirect
            any_indirect = True
    if any_indirect:
        return False
    return True
[]
Please provide a description of the function:def make_model(self): for stmt in self.statements: if isinstance(stmt, Modification): card = assemble_modification(stmt) elif isinstance(stmt, SelfModification): card = assemble_selfmodification(stmt) elif isinstance(stmt, Complex): card = assemble_complex(stmt) elif isinstance(stmt, Translocation): card = assemble_translocation(stmt) elif isinstance(stmt, RegulateActivity): card = assemble_regulate_activity(stmt) elif isinstance(stmt, RegulateAmount): card = assemble_regulate_amount(stmt) else: continue if card is not None: card.card['meta'] = {'id': stmt.uuid, 'belief': stmt.belief} if self.pmc_override is not None: card.card['pmc_id'] = self.pmc_override else: card.card['pmc_id'] = get_pmc_id(stmt) self.cards.append(card)
[ "Assemble statements into index cards." ]
Please provide a description of the function:def print_model(self): cards = [c.card for c in self.cards] # If there is only one card, print it as a single # card not as a list if len(cards) == 1: cards = cards[0] cards_json = json.dumps(cards, indent=1) return cards_json
[ "Return the assembled cards as a JSON string.\n\n Returns\n -------\n cards_json : str\n The JSON string representing the assembled cards.\n " ]
Please provide a description of the function:def geneways_action_to_indra_statement_type(actiontype, plo): actiontype = actiontype.lower() statement_generator = None is_direct = (plo == 'P') if actiontype == 'bind': statement_generator = lambda substance1, substance2, evidence: \ Complex([substance1, substance2], evidence=evidence) is_direct = True elif actiontype == 'phosphorylate': statement_generator = lambda substance1, substance2, evidence: \ Phosphorylation(substance1, substance2, evidence=evidence) is_direct = True return (statement_generator, is_direct)
[ "Return INDRA Statement corresponding to Geneways action type.\n\n Parameters\n ----------\n actiontype : str\n The verb extracted by the Geneways processor\n plo : str\n A one character string designating whether Geneways classifies\n this verb as a physical, logical, or other interaction\n\n Returns\n -------\n statement_generator :\n If there is no mapping to INDRA statements from this action type\n the return value is None.\n If there is such a mapping, statement_generator is an anonymous\n function that takes in the subject agent, object agent, and evidence,\n in that order, and returns an INDRA statement object.\n " ]
Please provide a description of the function:def make_statement(self, action, mention): (statement_generator, is_direct) = \ geneways_action_to_indra_statement_type(mention.actiontype, action.plo) if statement_generator is None: # Geneways statement does not map onto an indra statement return None # Try to find the full-text sentence # Unfortunately, the sentence numbers in the Geneways dataset # don't correspond to an obvious sentence segmentation. # This code looks for sentences with the subject, object, and verb # listed by the Geneways action mention table and only includes # it in the evidence if there is exactly one such sentence text = None if self.get_ft_mention: try: content, content_type = get_full_text(mention.pmid, 'pmid') if content is not None: ftm = FullTextMention(mention, content) sentences = ftm.find_matching_sentences() if len(sentences) == 1: text = sentences[0] except Exception: logger.warning('Could not fetch full text for PMID ' + mention.pmid) # Make an evidence object epistemics = dict() epistemics['direct'] = is_direct annotations = mention.make_annotation() annotations['plo'] = action.plo # plo only in action table evidence = Evidence(source_api='geneways', source_id=mention.actionmentionid, pmid=mention.pmid, text=text, epistemics=epistemics, annotations=annotations) # Construct the grounded and name standardized agents # Note that this involves grounding the agent by # converting the Entrez ID listed in the Geneways data with # HGNC and UniProt upstream_agent = get_agent(mention.upstream, action.up) downstream_agent = get_agent(mention.downstream, action.dn) # Make the statement return statement_generator(upstream_agent, downstream_agent, evidence)
[ "Makes an INDRA statement from a Geneways action and action mention.\n\n Parameters\n ----------\n action : GenewaysAction\n The mechanism that the Geneways mention maps to. Note that\n several text mentions can correspond to the same action if they are\n referring to the same relationship - there may be multiple\n Geneways action mentions corresponding to each action.\n mention : GenewaysActionMention\n The Geneways action mention object corresponding to a single\n mention of a mechanism in a specific text. We make a new INDRA\n statement corresponding to each action mention.\n\n Returns\n -------\n statement : indra.statements.Statement\n An INDRA statement corresponding to the provided Geneways action\n mention, or None if the action mention's type does not map onto\n any INDRA statement type in geneways_action_type_mapper.\n " ]
Please provide a description of the function:
def load_from_rdf_file(self, rdf_file):
    self.graph = rdflib.Graph()
    self.graph.parse(os.path.abspath(rdf_file), format='nt')
    self.initialize()
[ "Initialize given an RDF input file representing the hierarchy.\"\n\n Parameters\n ----------\n rdf_file : str\n Path to an RDF file.\n " ]
Please provide a description of the function:
def load_from_rdf_string(self, rdf_str):
    self.graph = rdflib.Graph()
    self.graph.parse(data=rdf_str, format='nt')
    self.initialize()
[ "Initialize given an RDF string representing the hierarchy.\"\n\n Parameters\n ----------\n rdf_str : str\n An RDF string.\n " ]
Please provide a description of the function:
def extend_with(self, rdf_file):
    self.graph.parse(os.path.abspath(rdf_file), format='nt')
    self.initialize()
[ "Extend the RDF graph of this HierarchyManager with another RDF file.\n\n Parameters\n ----------\n rdf_file : str\n An RDF file which is parsed such that the current graph and the\n graph described by the file are merged.\n " ]
Please provide a description of the function:
def build_transitive_closures(self):
    self.component_counter = 0
    for rel, tc_dict in ((self.isa_objects, self.isa_closure),
                         (self.partof_objects, self.partof_closure),
                         (self.isa_or_partof_objects,
                          self.isa_or_partof_closure)):
        self.build_transitive_closure(rel, tc_dict)
[ "Build the transitive closures of the hierarchy.\n\n This method constructs dictionaries which contain terms in the\n hierarchy as keys and either all the \"isa+\" or \"partof+\" related terms\n as values.\n " ]
Please provide a description of the function:
def build_transitive_closure(self, rel, tc_dict):
    # Make a function with the right argument structure
    rel_fun = lambda node, graph: rel(node)
    for x in self.graph.all_nodes():
        rel_closure = self.graph.transitiveClosure(rel_fun, x)
        xs = x.toPython()
        for y in rel_closure:
            ys = y.toPython()
            if xs == ys:
                continue
            try:
                tc_dict[xs].append(ys)
            except KeyError:
                tc_dict[xs] = [ys]
            if rel == self.isa_or_partof_objects:
                self._add_component(xs, ys)
[ "Build a transitive closure for a given relation in a given dict." ]
Please provide a description of the function:
def find_entity(self, x):
    qstr = self.prefixes + """
        SELECT ?x WHERE {{
            ?x rn:hasName "{0}" .
            }}
        """.format(x)
    res = self.graph.query(qstr)
    if list(res):
        en = list(res)[0][0].toPython()
        return en
    else:
        return None
[ "\n Get the entity that has the specified name (or synonym).\n\n Parameters\n ----------\n x : string\n Name or synonym for the target entity.\n ", "\n SELECT ?x WHERE {{\n ?x rn:hasName \"{0}\" .\n }}\n " ]
Please provide a description of the function:def directly_or_indirectly_related(self, ns1, id1, ns2, id2, closure_dict, relation_func): # if id2 is None, or both are None, then it's by definition isa: if id2 is None or (id2 is None and id1 is None): return True # If only id1 is None, then it cannot be isa elif id1 is None: return False if closure_dict: term1 = self.get_uri(ns1, id1) term2 = self.get_uri(ns2, id2) ec = closure_dict.get(term1) if ec is not None and term2 in ec: return True else: return False else: if not self.uri_as_name: e1 = self.find_entity(id1) e2 = self.find_entity(id2) if e1 is None or e2 is None: return False t1 = rdflib.term.URIRef(e1) t2 = rdflib.term.URIRef(e2) else: u1 = self.get_uri(ns1, id1) u2 = self.get_uri(ns2, id2) t1 = rdflib.term.URIRef(u1) t2 = rdflib.term.URIRef(u2) to = self.graph.transitiveClosure(relation_func, t1) if t2 in to: return True else: return False
[ "Return True if two entities have the speicified relationship.\n\n This relation is constructed possibly through multiple links connecting\n the two entities directly or indirectly.\n\n Parameters\n ----------\n ns1 : str\n Namespace code for an entity.\n id1 : str\n URI for an entity.\n ns2 : str\n Namespace code for an entity.\n id2 : str\n URI for an entity.\n closure_dict: dict\n A dictionary mapping node names to nodes that have the\n specified relationship, directly or indirectly. Empty if this\n has not been precomputed.\n relation_func: function\n Function with arguments (node, graph) that generates objects\n with some relationship with node on the given graph.\n\n Returns\n -------\n bool\n True if t1 has the specified relationship with t2, either\n directly or through a series of intermediates; False otherwise.\n " ]
Please provide a description of the function:
def isa(self, ns1, id1, ns2, id2):
    rel_fun = lambda node, graph: self.isa_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.isa_closure, rel_fun)
[ "Return True if one entity has an \"isa\" relationship to another.\n\n Parameters\n ----------\n ns1 : str\n Namespace code for an entity.\n id1 : string\n URI for an entity.\n ns2 : str\n Namespace code for an entity.\n id2 : str\n URI for an entity.\n\n Returns\n -------\n bool\n True if t1 has an \"isa\" relationship with t2, either directly or\n through a series of intermediates; False otherwise.\n " ]
Please provide a description of the function:
def partof(self, ns1, id1, ns2, id2):
    rel_fun = lambda node, graph: self.partof_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.partof_closure, rel_fun)
[ "Return True if one entity is \"partof\" another.\n\n Parameters\n ----------\n ns1 : str\n Namespace code for an entity.\n id1 : str\n URI for an entity.\n ns2 : str\n Namespace code for an entity.\n id2 : str\n URI for an entity.\n\n Returns\n -------\n bool\n True if t1 has a \"partof\" relationship with t2, either directly or\n through a series of intermediates; False otherwise.\n " ]
Please provide a description of the function:
def isa_or_partof(self, ns1, id1, ns2, id2):
    rel_fun = lambda node, graph: self.isa_or_partof_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.isa_or_partof_closure,
                                               rel_fun)
[ "Return True if two entities are in an \"isa\" or \"partof\" relationship\n\n Parameters\n ----------\n ns1 : str\n Namespace code for an entity.\n id1 : str\n URI for an entity.\n ns2 : str\n Namespace code for an entity.\n id2 : str\n URI for an entity.\n\n Returns\n -------\n bool\n True if t1 has a \"isa\" or \"partof\" relationship with t2, either\n directly or through a series of intermediates; False otherwise.\n " ]
Please provide a description of the function:
def is_opposite(self, ns1, id1, ns2, id2):
    u1 = self.get_uri(ns1, id1)
    u2 = self.get_uri(ns2, id2)
    t1 = rdflib.term.URIRef(u1)
    t2 = rdflib.term.URIRef(u2)
    rel = rdflib.term.URIRef(self.relations_prefix + 'is_opposite')
    to = self.graph.objects(t1, rel)
    if t2 in to:
        return True
    return False
[ "Return True if two entities are in an \"is_opposite\" relationship\n\n Parameters\n ----------\n ns1 : str\n Namespace code for an entity.\n id1 : str\n URI for an entity.\n ns2 : str\n Namespace code for an entity.\n id2 : str\n URI for an entity.\n\n Returns\n -------\n bool\n True if t1 has an \"is_opposite\" relationship with t2.\n " ]
Please provide a description of the function:
def get_parents(self, uri, type='all'):
    # First do a quick dict lookup to see if there are any parents
    all_parents = set(self.isa_or_partof_closure.get(uri, []))
    # If there are no parents or we are looking for all, we can return here
    if not all_parents or type == 'all':
        return all_parents
    # If we need immediate parents, we search again, this time knowing that
    # the uri is definitely in the graph since it has some parents
    if type == 'immediate':
        node = rdflib.term.URIRef(uri)
        immediate_parents = list(set(self.isa_or_partof_objects(node)))
        return [p.toPython() for p in immediate_parents]
    elif type == 'top':
        top_parents = [p for p in all_parents
                       if not self.isa_or_partof_closure.get(p)]
        return top_parents
[ "Return parents of a given entry.\n\n Parameters\n ----------\n uri : str\n The URI of the entry whose parents are to be returned. See the\n get_uri method to construct this URI from a name space and id.\n type : str\n 'all': return all parents irrespective of level;\n 'immediate': return only the immediate parents;\n 'top': return only the highest level parents\n " ]
Please provide a description of the function:
def _get_perf(text, msg_id):
    msg = KQMLPerformative('REQUEST')
    msg.set('receiver', 'READER')
    content = KQMLList('run-text')
    content.sets('text', text)
    msg.set('content', content)
    msg.set('reply-with', msg_id)
    return msg
[ "Return a request message for a given text." ]
Please provide a description of the function:def read_pmc(self, pmcid): msg = KQMLPerformative('REQUEST') msg.set('receiver', 'READER') content = KQMLList('run-pmcid') content.sets('pmcid', pmcid) content.set('reply-when-done', 'true') msg.set('content', content) msg.set('reply-with', 'P-%s' % pmcid) self.reply_counter += 1 self.send(msg)
[ "Read a given PMC article.\n\n Parameters\n ----------\n pmcid : str\n The PMC ID of the article to read. Note that only\n articles in the open-access subset of PMC will work.\n " ]
Please provide a description of the function:def read_text(self, text): logger.info('Reading: "%s"' % text) msg_id = 'RT000%s' % self.msg_counter kqml_perf = _get_perf(text, msg_id) self.reply_counter += 1 self.msg_counter += 1 self.send(kqml_perf)
[ "Read a given text phrase.\n\n Parameters\n ----------\n text : str\n The text to read. Typically a sentence or a paragraph.\n " ]
Please provide a description of the function:def receive_reply(self, msg, content): reply_head = content.head() if reply_head == 'error': comment = content.gets('comment') logger.error('Got error reply: "%s"' % comment) else: extractions = content.gets('ekb') self.extractions.append(extractions) self.reply_counter -= 1 if self.reply_counter == 0: self.exit(0)
[ "Handle replies with reading results." ]
Please provide a description of the function:
def split_long_sentence(sentence, words_per_line):
    words = sentence.split(' ')
    split_sentence = ''
    for i in range(len(words)):
        split_sentence = split_sentence + words[i]
        if (i+1) % words_per_line == 0:
            split_sentence = split_sentence + '\n'
        elif i != len(words) - 1:
            split_sentence = split_sentence + " "
    return split_sentence
[ "Takes a sentence and adds a newline every \"words_per_line\" words.\n\n Parameters\n ----------\n sentence: str\n Sentene to split\n words_per_line: double\n Add a newline every this many words\n " ]
Please provide a description of the function:
def shorter_name(key):
    key_short = key
    for sep in ['#', '/']:
        ind = key_short.rfind(sep)
        # rfind returns -1 (not None) when the separator is absent
        if ind != -1:
            key_short = key_short[ind+1:]
    return key_short.replace('-', '_').replace('.', '_')
[ "Return a shorter name for an id.\n\n Does this by only taking the last part of the URI,\n after the last / and the last #. Also replaces - and . with _.\n\n Parameters\n ----------\n key: str\n Some URI\n\n Returns\n -------\n key_short: str\n A shortened, but more ambiguous, identifier\n " ]
Please provide a description of the function:
def add_event_property_edges(event_entity, entries):
    do_not_log = ['@type', '@id',
                  'http://worldmodelers.com/DataProvenance#sourced_from']
    for prop in event_entity:
        if prop not in do_not_log:
            value = event_entity[prop]
            value_entry = None
            value_str = None
            if '@id' in value[0]:
                value = value[0]['@id']
                if value in entries:
                    value_str = \
                        get_entry_compact_text_repr(entries[value], entries)
            if value_str is not None:
                # edges and node_labels are module-level collections that
                # accumulate the graph being built
                edges.append([shorter_name(event_entity['@id']),
                              shorter_name(value),
                              shorter_name(prop)])
                node_labels[shorter_name(value)] = value_str
[ "Adds edges to the graph for event properties." ]
Please provide a description of the function:def get_sourced_from(entry): sourced_from = 'http://worldmodelers.com/DataProvenance#sourced_from' if sourced_from in entry: values = entry[sourced_from] values = [i['@id'] for i in values] return values
[ "Get a list of values from the source_from attribute" ]
Please provide a description of the function:def get_entry_compact_text_repr(entry, entries): text = get_shortest_text_value(entry) if text is not None: return text else: sources = get_sourced_from(entry) # There are a lot of references to this entity, each of which refer # to it by a different text label. For the sake of visualization, # let's pick one of these labels (in this case, the shortest one) if sources is not None: texts = [] for source in sources: source_entry = entries[source] texts.append(get_shortest_text_value(source_entry)) return get_shortest_string(texts)
[ "If the entry has a text value, return that.\n If the entry has a source_from value, return the text value of the source.\n Otherwise, return None." ]
Please provide a description of the function:
def get_entity_type(entry):
    entry_type = entry['@type']
    entry_type = [shorter_name(t) for t in entry_type]
    entry_type = repr(entry_type)
    return entry_type
[ "Given a JSON-LD entry, returns the abbreviated @type and the\n text attribute that has the shortest length.\n\n Parameters\n ----------\n entry: dict\n A JSON-LD entry parsed into a nested python dictionary via the json\n module\n\n Returns\n -------\n short_type: str\n The shortest type\n short_text: str\n Of the text values, the shortest one\n " ]
Please provide a description of the function:
def process_text(text, output_fmt='json', outbuf=None, cleanup=True, key='',
                 **kwargs):
    nxml_str = make_nxml_from_text(text)
    return process_nxml_str(nxml_str, output_fmt, outbuf, cleanup, key,
                            **kwargs)
[ "Return processor with Statements extracted by reading text with Sparser.\n\n Parameters\n ----------\n text : str\n The text to be processed\n output_fmt: Optional[str]\n The output format to obtain from Sparser, with the two options being\n 'json' and 'xml'. Default: 'json'\n outbuf : Optional[file]\n A file like object that the Sparser output is written to.\n cleanup : Optional[bool]\n If True, the temporary file created, which is used as an input\n file for Sparser, as well as the output file created by Sparser\n are removed. Default: True\n key : Optional[str]\n A key which is embedded into the name of the temporary file\n passed to Sparser for reading. Default is empty string.\n\n Returns\n -------\n SparserXMLProcessor or SparserJSONProcessor depending on what output\n format was chosen.\n " ]
Please provide a description of the function:def process_nxml_str(nxml_str, output_fmt='json', outbuf=None, cleanup=True, key='', **kwargs): tmp_fname = 'PMC%s_%d.nxml' % (key, mp.current_process().pid) with open(tmp_fname, 'wb') as fh: fh.write(nxml_str.encode('utf-8')) try: sp = process_nxml_file(tmp_fname, output_fmt, outbuf, cleanup, **kwargs) finally: if cleanup and os.path.exists(tmp_fname): os.remove(tmp_fname) return sp
[ "Return processor with Statements extracted by reading an NXML string.\n\n Parameters\n ----------\n nxml_str : str\n The string value of the NXML-formatted paper to be read.\n output_fmt: Optional[str]\n The output format to obtain from Sparser, with the two options being\n 'json' and 'xml'. Default: 'json'\n outbuf : Optional[file]\n A file like object that the Sparser output is written to.\n cleanup : Optional[bool]\n If True, the temporary file created in this function,\n which is used as an input file for Sparser, as well as the\n output file created by Sparser are removed. Default: True\n key : Optional[str]\n A key which is embedded into the name of the temporary file\n passed to Sparser for reading. Default is empty string.\n\n Returns\n -------\n SparserXMLProcessor or SparserJSONProcessor depending on what output\n format was chosen.\n " ]
Please provide a description of the function:def process_nxml_file(fname, output_fmt='json', outbuf=None, cleanup=True, **kwargs): sp = None out_fname = None try: out_fname = run_sparser(fname, output_fmt, outbuf, **kwargs) sp = process_sparser_output(out_fname, output_fmt) except Exception as e: logger.error("Sparser failed to run on %s." % fname) logger.exception(e) finally: if out_fname is not None and os.path.exists(out_fname) and cleanup: os.remove(out_fname) return sp
[ "Return processor with Statements extracted by reading an NXML file.\n\n Parameters\n ----------\n fname : str\n The path to the NXML file to be read.\n output_fmt: Optional[str]\n The output format to obtain from Sparser, with the two options being\n 'json' and 'xml'. Default: 'json'\n outbuf : Optional[file]\n A file like object that the Sparser output is written to.\n cleanup : Optional[bool]\n If True, the output file created by Sparser is removed.\n Default: True\n\n Returns\n -------\n sp : SparserXMLProcessor or SparserJSONProcessor depending on what output\n format was chosen.\n " ]
Please provide a description of the function:def process_sparser_output(output_fname, output_fmt='json'): if output_fmt not in ['json', 'xml']: logger.error("Unrecognized output format '%s'." % output_fmt) return None sp = None with open(output_fname, 'rt') as fh: if output_fmt == 'json': json_dict = json.load(fh) sp = process_json_dict(json_dict) else: xml_str = fh.read() sp = process_xml(xml_str) return sp
[ "Return a processor with Statements extracted from Sparser XML or JSON\n\n Parameters\n ----------\n output_fname : str\n The path to the Sparser output file to be processed. The file can\n either be JSON or XML output from Sparser, with the output_fmt\n parameter defining what format is assumed to be processed.\n output_fmt : Optional[str]\n The format of the Sparser output to be processed, can either be\n 'json' or 'xml'. Default: 'json'\n\n Returns\n -------\n sp : SparserXMLProcessor or SparserJSONProcessor depending on what output\n format was chosen.\n " ]
Please provide a description of the function:def process_xml(xml_str): try: tree = ET.XML(xml_str, parser=UTB()) except ET.ParseError as e: logger.error('Could not parse XML string') logger.error(e) return None sp = _process_elementtree(tree) return sp
[ "Return processor with Statements extracted from a Sparser XML.\n\n Parameters\n ----------\n xml_str : str\n The XML string obtained by reading content with Sparser, using the\n 'xml' output mode.\n\n Returns\n -------\n sp : SparserXMLProcessor\n A SparserXMLProcessor which has extracted Statements as its\n statements attribute.\n " ]
Please provide a description of the function:def run_sparser(fname, output_fmt, outbuf=None, timeout=600): if not sparser_path or not os.path.exists(sparser_path): logger.error('Sparser executable not set in %s' % sparser_path_var) return None if output_fmt == 'xml': format_flag = '-x' suffix = '.xml' elif output_fmt == 'json': format_flag = '-j' suffix = '.json' else: logger.error('Unknown output format: %s' % output_fmt) return None sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh') output_path = fname.split('.')[0] + '-semantics' + suffix for fpath in [sparser_exec_path, fname]: if not os.path.exists(fpath): raise Exception("'%s' is not a valid path." % fpath) cmd_list = [sparser_exec_path, format_flag, fname] # This is mostly a copy of the code found in subprocess.run, with the # key change that proc.kill is replaced with os.killpg. This allows the # process to be killed even if it has children. Solution developed from: # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure with sp.Popen(cmd_list, stdout=sp.PIPE) as proc: try: stdout, stderr = proc.communicate(timeout=timeout) except sp.TimeoutExpired: # Yes, this is about as bad as it looks. But it is the only way to # be sure the script actually dies. sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname]) stdout, stderr = proc.communicate() raise sp.TimeoutExpired(proc.args, timeout, output=stdout, stderr=stderr) except BaseException: # See comment on above instance. sp.check_call(['pkill', '-f', fname]) proc.wait() raise retcode = proc.poll() if retcode: raise sp.CalledProcessError(retcode, proc.args, output=stdout, stderr=stderr) if outbuf is not None: outbuf.write(stdout) outbuf.flush() assert os.path.exists(output_path),\ 'No output file \"%s\" created by sparser.' % output_path return output_path
[ "Return the path to reading output after running Sparser reading.\n\n Parameters\n ----------\n fname : str\n The path to an input file to be processed. Due to the Spaser\n executable's assumptions, the file name needs to start with PMC\n and should be an NXML formatted file.\n output_fmt : Optional[str]\n The format in which Sparser should produce its output, can either be\n 'json' or 'xml'.\n outbuf : Optional[file]\n A file like object that the Sparser output is written to.\n timeout : int\n The number of seconds to wait until giving up on this one reading. The\n default is 600 seconds (i.e. 10 minutes). Sparcer is a fast reader and\n the typical type to read a single full text is a matter of seconds.\n\n Returns\n -------\n output_path : str\n The path to the output file created by Sparser.\n " ]
Please provide a description of the function:def get_version(): assert sparser_path is not None, "Sparser path is not defined." with open(os.path.join(sparser_path, 'version.txt'), 'r') as f: version = f.read().strip() return version
[ "Return the version of the Sparser executable on the path.\n\n Returns\n -------\n version : str\n The version of Sparser that is found on the Sparser path.\n " ]
Please provide a description of the function:def make_nxml_from_text(text): text = _escape_xml(text) header = '<?xml version="1.0" encoding="UTF-8" ?>' + \ '<OAI-PMH><article><body><sec id="s1"><p>' footer = '</p></sec></body></article></OAI-PMH>' nxml_str = header + text + footer return nxml_str
[ "Return raw text wrapped in NXML structure.\n\n Parameters\n ----------\n text : str\n The raw text content to be wrapped in an NXML structure.\n\n Returns\n -------\n nxml_str : str\n The NXML string wrapping the raw text input.\n " ]
Please provide a description of the function:def get_hgnc_name(hgnc_id): try: hgnc_name = hgnc_names[hgnc_id] except KeyError: xml_tree = get_hgnc_entry(hgnc_id) if xml_tree is None: return None hgnc_name_tag =\ xml_tree.find("result/doc/str[@name='symbol']") if hgnc_name_tag is None: return None hgnc_name = hgnc_name_tag.text.strip() return hgnc_name
[ "Return the HGNC symbol corresponding to the given HGNC ID.\n\n Parameters\n ----------\n hgnc_id : str\n The HGNC ID to be converted.\n\n Returns\n -------\n hgnc_name : str\n The HGNC symbol corresponding to the given HGNC ID.\n " ]
Please provide a description of the function:def get_current_hgnc_id(hgnc_name): hgnc_id = get_hgnc_id(hgnc_name) if hgnc_id: return hgnc_id hgnc_id = prev_sym_map.get(hgnc_name) return hgnc_id
[ "Return the HGNC ID(s) corresponding to a current or outdate HGNC symbol.\n\n Parameters\n ----------\n hgnc_name : str\n The HGNC symbol to be converted, possibly an outdated symbol.\n\n Returns\n -------\n str or list of str or None\n If there is a single HGNC ID corresponding to the given current or\n outdated HGNC symbol, that ID is returned as a string. If the symbol\n is outdated and maps to multiple current IDs, a list of these\n IDs is returned. If the given name doesn't correspond to either\n a current or an outdated HGNC symbol, None is returned.\n " ]
Please provide a description of the function:def get_hgnc_entry(hgnc_id): url = hgnc_url + 'hgnc_id/%s' % hgnc_id headers = {'Accept': '*/*'} res = requests.get(url, headers=headers) if not res.status_code == 200: return None xml_tree = ET.XML(res.content, parser=UTB()) return xml_tree
[ "Return the HGNC entry for the given HGNC ID from the web service.\n\n Parameters\n ----------\n hgnc_id : str\n The HGNC ID to be converted.\n\n Returns\n -------\n xml_tree : ElementTree\n The XML ElementTree corresponding to the entry for the\n given HGNC ID.\n " ]
Please provide a description of the function:def analyze_reach_log(log_fname=None, log_str=None): assert bool(log_fname) ^ bool(log_str), 'Must specify log_fname OR log_str' started_patt = re.compile('Starting ([\d]+)') # TODO: it might be interesting to get the time it took to read # each paper here finished_patt = re.compile('Finished ([\d]+)') def get_content_nums(txt): pat = 'Retrieved content for ([\d]+) / ([\d]+) papers to be read' res = re.match(pat, txt) has_content, total = res.groups() if res else None, None return has_content, total if log_fname: with open(log_fname, 'r') as fh: log_str = fh.read() # has_content, total = get_content_nums(log_str) # unused pmids = {} pmids['started'] = started_patt.findall(log_str) pmids['finished'] = finished_patt.findall(log_str) pmids['not_done'] = set(pmids['started']) - set(pmids['finished']) return pmids
[ "Return unifinished PMIDs given a log file name." ]
Please provide a description of the function:def get_logs_from_db_reading(job_prefix, reading_queue='run_db_reading_queue'): s3 = boto3.client('s3') gen_prefix = 'reading_results/%s/logs/%s' % (job_prefix, reading_queue) job_log_data = s3.list_objects_v2(Bucket='bigmech', Prefix=join(gen_prefix, job_prefix)) # TODO: Track success/failure log_strs = [] for fdict in job_log_data['Contents']: resp = s3.get_object(Bucket='bigmech', Key=fdict['Key']) log_strs.append(resp['Body'].read().decode('utf-8')) return log_strs
[ "Get the logs stashed on s3 for a particular reading." ]
Please provide a description of the function:def separate_reach_logs(log_str): log_lines = log_str.splitlines() reach_logs = [] reach_lines = [] adding_reach_lines = False for l in log_lines[:]: if not adding_reach_lines and 'Beginning reach' in l: adding_reach_lines = True elif adding_reach_lines and 'Reach finished' in l: adding_reach_lines = False reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines))) reach_lines = [] elif adding_reach_lines: reach_lines.append(l.split('readers - ')[1]) log_lines.remove(l) if adding_reach_lines: reach_logs.append(('FAILURE', '\n'.join(reach_lines))) return '\n'.join(log_lines), reach_logs
[ "Get the list of reach logs from the overall logs." ]
Please provide a description of the function:def get_unyielding_tcids(log_str): tcid_strs = re.findall('INFO: \[.*?\].*? - Got no statements for (\d+).*', log_str) return {int(tcid_str) for tcid_str in tcid_strs}
[ "Extract the set of tcids for which no statements were created." ]
Please provide a description of the function:def analyze_db_reading(job_prefix, reading_queue='run_db_reading_queue'): # Analyze reach failures log_strs = get_logs_from_db_reading(job_prefix, reading_queue) indra_log_strs = [] all_reach_logs = [] log_stats = [] for log_str in log_strs: log_str, reach_logs = separate_reach_logs(log_str) all_reach_logs.extend(reach_logs) indra_log_strs.append(log_str) log_stats.append(get_reading_stats(log_str)) # Analyze the reach failures. failed_reach_logs = [reach_log_str for result, reach_log_str in all_reach_logs if result == 'FAILURE'] failed_id_dicts = [analyze_reach_log(log_str=reach_log) for reach_log in failed_reach_logs if bool(reach_log)] tcids_unfinished = set().union(*[id_dict['not_done'] for id_dict in failed_id_dicts]) print("Found %d unfinished tcids." % len(tcids_unfinished)) # Summarize the global stats if log_stats: sum_dict = dict.fromkeys(log_stats[0].keys()) for log_stat in log_stats: for k in log_stat.keys(): if isinstance(log_stat[k], list): if k not in sum_dict.keys(): sum_dict[k] = [0]*len(log_stat[k]) sum_dict[k] = [sum_dict[k][i] + log_stat[k][i] for i in range(len(log_stat[k]))] else: if k not in sum_dict.keys(): sum_dict[k] = 0 sum_dict[k] += log_stat[k] else: sum_dict = {} return tcids_unfinished, sum_dict, log_stats
[ "Run various analysis on a particular reading job." ]
Please provide a description of the function:def process_pc_neighborhood(gene_names, neighbor_limit=1, database_filter=None): model = pcc.graph_query('neighborhood', gene_names, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model)
[ "Returns a BiopaxProcessor for a PathwayCommons neighborhood query.\n\n The neighborhood query finds the neighborhood around a set of source genes.\n\n http://www.pathwaycommons.org/pc2/#graph\n\n http://www.pathwaycommons.org/pc2/#graph_kind\n\n Parameters\n ----------\n gene_names : list\n A list of HGNC gene symbols to search the neighborhood of.\n Examples: ['BRAF'], ['BRAF', 'MAP2K1']\n neighbor_limit : Optional[int]\n The number of steps to limit the size of the neighborhood around\n the gene names being queried. Default: 1\n database_filter : Optional[list]\n A list of database identifiers to which the query is restricted.\n Examples: ['reactome'], ['biogrid', 'pid', 'psp']\n If not given, all databases are used in the query. For a full\n list of databases see http://www.pathwaycommons.org/pc2/datasources\n\n Returns\n -------\n bp : BiopaxProcessor\n A BiopaxProcessor containing the obtained BioPAX model in bp.model.\n " ]
Please provide a description of the function:def process_pc_pathsbetween(gene_names, neighbor_limit=1, database_filter=None, block_size=None): if not block_size: model = pcc.graph_query('pathsbetween', gene_names, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model) else: gene_blocks = [gene_names[i:i + block_size] for i in range(0, len(gene_names), block_size)] stmts = [] # Run pathsfromto between pairs of blocks and pathsbetween # within each block. This breaks up a single call with N genes into # (N/block_size)*(N/blocksize) calls with block_size genes for genes1, genes2 in itertools.product(gene_blocks, repeat=2): if genes1 == genes2: bp = process_pc_pathsbetween(genes1, database_filter=database_filter, block_size=None) else: bp = process_pc_pathsfromto(genes1, genes2, database_filter=database_filter) stmts += bp.statements
[ "Returns a BiopaxProcessor for a PathwayCommons paths-between query.\n\n The paths-between query finds the paths between a set of genes. Here\n source gene names are given in a single list and all directions of paths\n between these genes are considered.\n\n http://www.pathwaycommons.org/pc2/#graph\n\n http://www.pathwaycommons.org/pc2/#graph_kind\n\n Parameters\n ----------\n gene_names : list\n A list of HGNC gene symbols to search for paths between.\n Examples: ['BRAF', 'MAP2K1']\n neighbor_limit : Optional[int]\n The number of steps to limit the length of the paths between\n the gene names being queried. Default: 1\n database_filter : Optional[list]\n A list of database identifiers to which the query is restricted.\n Examples: ['reactome'], ['biogrid', 'pid', 'psp']\n If not given, all databases are used in the query. For a full\n list of databases see http://www.pathwaycommons.org/pc2/datasources\n block_size : Optional[int]\n Large paths-between queries (above ~60 genes) can error on the server\n side. In this case, the query can be replaced by a series of\n smaller paths-between and paths-from-to queries each of which contains\n block_size genes.\n\n Returns\n -------\n bp : BiopaxProcessor\n A BiopaxProcessor containing the obtained BioPAX model in bp.model.\n " ]
Please provide a description of the function:def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1, database_filter=None): model = pcc.graph_query('pathsfromto', source_genes, target_genes, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model)
[ "Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.\n\n The paths-from-to query finds the paths from a set of source genes to\n a set of target genes.\n\n http://www.pathwaycommons.org/pc2/#graph\n\n http://www.pathwaycommons.org/pc2/#graph_kind\n\n Parameters\n ----------\n source_genes : list\n A list of HGNC gene symbols that are the sources of paths being\n searched for.\n Examples: ['BRAF', 'RAF1', 'ARAF']\n target_genes : list\n A list of HGNC gene symbols that are the targets of paths being\n searched for.\n Examples: ['MAP2K1', 'MAP2K2']\n neighbor_limit : Optional[int]\n The number of steps to limit the length of the paths\n between the source genes and target genes being queried. Default: 1\n database_filter : Optional[list]\n A list of database identifiers to which the query is restricted.\n Examples: ['reactome'], ['biogrid', 'pid', 'psp']\n If not given, all databases are used in the query. For a full\n list of databases see http://www.pathwaycommons.org/pc2/datasources\n\n Returns\n -------\n bp : BiopaxProcessor\n A BiopaxProcessor containing the obtained BioPAX model in bp.model.\n " ]
Please provide a description of the function:def process_model(model): bp = BiopaxProcessor(model) bp.get_modifications() bp.get_regulate_activities() bp.get_regulate_amounts() bp.get_activity_modification() bp.get_gef() bp.get_gap() bp.get_conversions() # bp.get_complexes() bp.eliminate_exact_duplicates() return bp
[ "Returns a BiopaxProcessor for a BioPAX model object.\n\n Parameters\n ----------\n model : org.biopax.paxtools.model.Model\n A BioPAX model object.\n\n Returns\n -------\n bp : BiopaxProcessor\n A BiopaxProcessor containing the obtained BioPAX model in bp.model.\n " ]
Please provide a description of the function:def is_protein_or_chemical(agent): '''Return True if the agent is a protein/protein family or chemical.''' # Default is True if agent is None if agent is None: return True dbs = set(['UP', 'HGNC', 'CHEBI', 'PFAM-DEF', 'IP', 'INDRA', 'PUBCHEM', 'CHEMBL']) agent_refs = set(agent.db_refs.keys()) if agent_refs.intersection(dbs): return True return False
[]
Please provide a description of the function:def is_background_knowledge(stmt): '''Return True if Statement is only supported by background knowledge.''' any_background = False # Iterate over all evidence for the statement for ev in stmt.evidence: epi = ev.epistemics if epi is not None: sec = epi.get('section_type') # If there is at least one evidence not from a # background section then we consider this to be # a non-background knowledge finding. if sec is not None and sec not in background_secs: return False # If there is at least one evidence that is explicitly # from a background section then we keep track of that. elif sec in background_secs: any_background = True # If there is any explicit evidence for this statement being # background info (and no evidence otherwise) then we return # True, otherwise (for instance if there is no section info at all) # we return False. return any_background
[]
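The check above only consults the section_type entry of each evidence's epistemics dictionary. A small sketch constructing Evidence objects that would exercise it (assuming background_secs contains section names such as 'introduction'; the Evidence class comes from indra.statements):

    from indra.statements import Evidence

    ev_bg = Evidence(source_api='reach', pmid='12345',
                     epistemics={'section_type': 'introduction'})
    ev_res = Evidence(source_api='reach', pmid='12345',
                      epistemics={'section_type': 'results'})
    # A statement supported only by ev_bg would be treated as background
    # knowledge; adding ev_res would make it count as a new finding.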
Please provide a description of the function:def multiple_sources(stmt): '''Return True if statement is supported by multiple sources. Note: this is currently not used and replaced by BeliefEngine score cutoff ''' sources = list(set([e.source_api for e in stmt.evidence])) if len(sources) > 1: return True return False
[]
Please provide a description of the function:def run_assembly(stmts, folder, pmcid, background_assertions=None): '''Run assembly on a list of statements, for a given PMCID.''' # Folder for index card output (scored submission) indexcard_prefix = folder + '/index_cards/' + pmcid # Folder for other outputs (for analysis, debugging) otherout_prefix = folder + '/other_outputs/' + pmcid # Do grounding mapping here # Load the TRIPS-specific grounding map and add to the default # (REACH-oriented) grounding map: trips_gm = load_grounding_map('trips_grounding_map.csv') default_grounding_map.update(trips_gm) gm = GroundingMapper(default_grounding_map) mapped_agent_stmts = gm.map_agents(stmts) renamed_agent_stmts = gm.rename_agents(mapped_agent_stmts) # Filter for grounding grounded_stmts = [] for st in renamed_agent_stmts: if all([is_protein_or_chemical(a) for a in st.agent_list()]): grounded_stmts.append(st) # Instantiate the Preassembler pa = Preassembler(hierarchies) pa.add_statements(grounded_stmts) print('== %s ====================' % pmcid) print('%d statements collected in total.' % len(pa.stmts)) # Combine duplicates unique_stmts = pa.combine_duplicates() print('%d statements after combining duplicates.' % len(unique_stmts)) # Run BeliefEngine on unique statements epe = BeliefEngine() epe.set_prior_probs(pa.unique_stmts) # Build statement hierarchy related_stmts = pa.combine_related() # Run BeliefEngine on hierarchy epe.set_hierarchy_probs(related_stmts) print('%d statements after combining related.' % len(related_stmts)) # Instantiate the mechanism linker # Link statements linked_stmts = MechLinker.infer_active_forms(related_stmts) linked_stmts += MechLinker.infer_modifications(related_stmts) linked_stmts += MechLinker.infer_activations(related_stmts) # Run BeliefEngine on linked statements epe.set_linked_probs(linked_stmts) # Print linked statements for debugging purposes print('Linked\n=====') for ls in linked_stmts: print(ls.inferred_stmt.belief, ls.inferred_stmt) print('=============') # Combine all statements including linked ones all_statements = related_stmts + [ls.inferred_stmt for ls in linked_stmts] # Instantiate a new preassembler pa = Preassembler(hierarchies, all_statements) # Build hierarchy again pa.combine_duplicates() # Choose the top-level statements related_stmts = pa.combine_related() # Remove top-level statements that came only from the prior if background_assertions is not None: nonbg_stmts = [stmt for stmt in related_stmts if stmt not in background_assertions] else: nonbg_stmts = related_stmts # Dump top-level statements in a pickle with open(otherout_prefix + '.pkl', 'wb') as fh: pickle.dump(nonbg_stmts, fh) # Flatten evidence for statements flattened_evidence_stmts = flatten_evidence(nonbg_stmts) # Start a card counter card_counter = 1 # We don't limit the number of cards reported in this round card_lim = float('inf') top_stmts = [] ############################################### # The belief cutoff for statements belief_cutoff = 0.3 ############################################### # Sort by amount of evidence for st in sorted(flattened_evidence_stmts, key=lambda x: x.belief, reverse=True): if st.belief >= belief_cutoff: print(st.belief, st) if st.belief < belief_cutoff: print('SKIP', st.belief, st) # If it's background knowledge, we skip the statement if is_background_knowledge(st): print('This statement is background knowledge - skipping.') continue # Assemble IndexCards ia = IndexCardAssembler([st], pmc_override=pmcid) ia.make_model() # If the index card was 
actually made # (not all statements can be assembled into index cards so # this is often not the case) if ia.cards: # Save the index card json ia.save_model(indexcard_prefix + '-%d.json' % card_counter) card_counter += 1 top_stmts.append(st) if card_counter > card_lim: break # Print the English-assembled model for debugging purposes ea = EnglishAssembler(top_stmts) print('=======================') print(ea.make_model().encode('utf-8')) print('=======================') # Print the statement graph graph = render_stmt_graph(nonbg_stmts) graph.draw(otherout_prefix + '_graph.pdf', prog='dot') # Print statement diagnostics print_stmts(pa.stmts, otherout_prefix + '_statements.tsv') print_stmts(related_stmts, otherout_prefix + '_related_statements.tsv')
[]
Please provide a description of the function:def symbol_to_id(self, symbol): if symbol not in self.symbols_to_ids: m = 'Could not look up Entrez ID for Geneways symbol ' + symbol raise Exception(m) return self.symbols_to_ids[symbol]
[ "Returns the list of Entrez IDs for a given Geneways symbol\n (there may be more than one)" ]
Please provide a description of the function:def id_to_symbol(self, entrez_id): entrez_id = str(entrez_id) if entrez_id not in self.ids_to_symbols: m = 'Could not look up symbol for Entrez ID ' + entrez_id raise Exception(m) return self.ids_to_symbols[entrez_id]
[ "Gives the symbol for a given entrez id)" ]
Please provide a description of the function:def _format_id(ns, id): label = '%s:%s' % (ns, id) label = label.replace(' ', '_') url = get_identifiers_url(ns, id) return (label, url)
[ "Format a namespace/ID pair for display and curation." ]
Please provide a description of the function:def make_model(self, output_file, add_curation_cols=False, up_only=False): stmt_header = ['INDEX', 'UUID', 'TYPE', 'STR', 'AG_A_TEXT', 'AG_A_LINKS', 'AG_A_STR', 'AG_B_TEXT', 'AG_B_LINKS', 'AG_B_STR', 'PMID', 'TEXT', 'IS_HYP', 'IS_DIRECT'] if add_curation_cols: stmt_header = stmt_header + \ ['AG_A_IDS_CORRECT', 'AG_A_STATE_CORRECT', 'AG_B_IDS_CORRECT', 'AG_B_STATE_CORRECT', 'EVENT_CORRECT', 'RES_CORRECT', 'POS_CORRECT', 'SUBJ_ACT_CORRECT', 'OBJ_ACT_CORRECT', 'HYP_CORRECT', 'DIRECT_CORRECT'] rows = [stmt_header] for ix, stmt in enumerate(self.statements): # Complexes if len(stmt.agent_list()) > 2: logger.info("Skipping statement with more than two members: %s" % stmt) continue # Self-modifications, ActiveForms elif len(stmt.agent_list()) == 1: ag_a = stmt.agent_list()[0] ag_b = None # All others else: (ag_a, ag_b) = stmt.agent_list() # Put together the data row row = [ix+1, stmt.uuid, stmt.__class__.__name__, str(stmt)] + \ _format_agent_entries(ag_a, up_only) + \ _format_agent_entries(ag_b, up_only) + \ [stmt.evidence[0].pmid, stmt.evidence[0].text, stmt.evidence[0].epistemics.get('hypothesis', ''), stmt.evidence[0].epistemics.get('direct', '')] if add_curation_cols: row = row + ([''] * 11) rows.append(row) # Write to file write_unicode_csv(output_file, rows, delimiter='\t')
[ "Export the statements into a tab-separated text file.\n\n Parameters\n ----------\n output_file : str\n Name of the output file.\n add_curation_cols : bool\n Whether to add columns to facilitate statement curation. Default\n is False (no additional columns).\n up_only : bool\n Whether to include identifiers.org links *only* for the Uniprot\n grounding of an agent when one is available. Because most\n spreadsheets allow only a single hyperlink per cell, this can makes\n it easier to link to Uniprot information pages for curation\n purposes. Default is False.\n " ]
Please provide a description of the function:def get_create_base_agent(self, agent): try: base_agent = self.agents[_n(agent.name)] except KeyError: base_agent = BaseAgent(_n(agent.name)) self.agents[_n(agent.name)] = base_agent # If it's a molecular agent if isinstance(agent, Agent): # Handle bound conditions for bc in agent.bound_conditions: bound_base_agent = self.get_create_base_agent(bc.agent) bound_base_agent.create_site(get_binding_site_name(agent)) base_agent.create_site(get_binding_site_name(bc.agent)) # Handle modification conditions for mc in agent.mods: base_agent.create_mod_site(mc) # Handle mutation conditions for mc in agent.mutations: res_from = mc.residue_from if mc.residue_from else 'mut' res_to = mc.residue_to if mc.residue_to else 'X' if mc.position is None: mut_site_name = res_from else: mut_site_name = res_from + mc.position base_agent.create_site(mut_site_name, states=['WT', res_to]) # Handle location condition if agent.location is not None: base_agent.create_site('loc', [_n(agent.location)]) # Handle activity if agent.activity is not None: site_name = agent.activity.activity_type base_agent.create_site(site_name, ['inactive', 'active']) # There might be overwrites here for db_name, db_ref in agent.db_refs.items(): base_agent.db_refs[db_name] = db_ref return base_agent
[ "Return base agent with given name, creating it if needed." ]
Please provide a description of the function:def create_site(self, site, states=None): if site not in self.sites: self.sites.append(site) if states is not None: self.site_states.setdefault(site, []) try: states = list(states) except TypeError: return self.add_site_states(site, states)
[ "Create a new site on an agent if it doesn't already exist." ]
Please provide a description of the function:def create_mod_site(self, mc): site_name = get_mod_site_name(mc) (unmod_site_state, mod_site_state) = states[mc.mod_type] self.create_site(site_name, (unmod_site_state, mod_site_state)) site_anns = [Annotation((site_name, mod_site_state), mc.mod_type, 'is_modification')] if mc.residue: site_anns.append(Annotation(site_name, mc.residue, 'is_residue')) if mc.position: site_anns.append(Annotation(site_name, mc.position, 'is_position')) self.site_annotations += site_anns
[ "Create modification site for the BaseAgent from a ModCondition." ]
Please provide a description of the function:def add_site_states(self, site, states): for state in states: if state not in self.site_states[site]: self.site_states[site].append(state)
[ "Create new states on an agent site if the state doesn't exist." ]
Please provide a description of the function:def add_activity_form(self, activity_pattern, is_active): if is_active: if activity_pattern not in self.active_forms: self.active_forms.append(activity_pattern) else: if activity_pattern not in self.inactive_forms: self.inactive_forms.append(activity_pattern)
[ "Adds the pattern as an active or inactive form to an Agent.\n\n Parameters\n ----------\n activity_pattern : dict\n A dictionary of site names and their states.\n is_active : bool\n Is True if the given pattern corresponds to an active state.\n " ]
Please provide a description of the function:def add_activity_type(self, activity_type): if activity_type not in self.activity_types: self.activity_types.append(activity_type)
[ "Adds an activity type to an Agent.\n\n Parameters\n ----------\n activity_type : str\n The type of activity to add such as 'activity', 'kinase',\n 'gtpbound'\n " ]
Please provide a description of the function:def make_annotation(self): annotation = dict() # Put all properties of the action object into the annotation for item in dir(self): if len(item) > 0 and item[0] != '_' and \ not inspect.ismethod(getattr(self, item)): annotation[item] = getattr(self, item) # Add properties of each action mention annotation['action_mentions'] = list() for action_mention in self.action_mentions: annotation_mention = action_mention.make_annotation() annotation['action_mentions'].append(annotation_mention) return annotation
[ "Returns a dictionary with all properties of the action\n and each of its action mentions." ]
Please provide a description of the function:def _search_path(self, directory_name, filename): full_path = path.join(directory_name, filename) if path.exists(full_path): return full_path # Could not find the requested file in the given directory return None
[ "Searches for a given file in the specified directory." ]
Please provide a description of the function:def _init_action_list(self, action_filename): self.actions = list() self.hiid_to_action_index = dict() f = codecs.open(action_filename, 'r', encoding='latin-1') first_line = True for line in f: line = line.rstrip() if first_line: # Ignore the first line first_line = False else: self.actions.append(GenewaysAction(line)) latestInd = len(self.actions)-1 hiid = self.actions[latestInd].hiid if hiid in self.hiid_to_action_index: raise Exception('action hiid not unique: %d' % hiid) self.hiid_to_action_index[hiid] = latestInd
[ "Parses the file and populates the data." ]
Please provide a description of the function:def _link_to_action_mentions(self, actionmention_filename): parser = GenewaysActionMentionParser(actionmention_filename) self.action_mentions = parser.action_mentions for action_mention in self.action_mentions: hiid = action_mention.hiid if hiid not in self.hiid_to_action_index: m1 = 'Parsed action mention has hiid %d, which does not exist' m2 = ' in table of action hiids' raise Exception((m1 + m2) % hiid) else: idx = self.hiid_to_action_index[hiid] self.actions[idx].action_mentions.append(action_mention)
[ "Add action mentions" ]
Please provide a description of the function:def _lookup_symbols(self, symbols_filename): symbol_lookup = GenewaysSymbols(symbols_filename) for action in self.actions: action.up_symbol = symbol_lookup.id_to_symbol(action.up) action.dn_symbol = symbol_lookup.id_to_symbol(action.dn)
[ "Look up symbols for actions and action mentions" ]
Please provide a description of the function:def get_top_n_action_types(self, top_n): # Count action types action_type_to_counts = dict() for action in self.actions: actiontype = action.actiontype if actiontype not in action_type_to_counts: action_type_to_counts[actiontype] = 1 else: action_type_to_counts[actiontype] = \ action_type_to_counts[actiontype] + 1 # Convert the dictionary representation into a pair of lists action_types = list() counts = list() for actiontype in action_type_to_counts.keys(): action_types.append(actiontype) counts.append(action_type_to_counts[actiontype]) # How many actions in total? num_actions = len(self.actions) num_actions2 = 0 for count in counts: num_actions2 = num_actions2 + count if num_actions != num_actions2: raise(Exception('Problem counting everything up!')) # Sort action types by count (lowest to highest) sorted_inds = np.argsort(counts) last_ind = len(sorted_inds)-1 # Return the top N actions top_actions = list() if top_n > len(sorted_inds): raise Exception('Asked for top %d action types, but there are only %d action types' % (top_n, len(sorted_inds))) for i in range(top_n): top_actions.append(action_types[sorted_inds[last_ind-i]]) return top_actions
[ "Returns the top N actions by count." ]
Please provide a description of the function:def make_model(self): # Assemble in two stages. # First, create the nodes of the graph for stmt in self.statements: # Skip SelfModification (self loops) -- has one node if isinstance(stmt, SelfModification) or \ isinstance(stmt, Translocation) or \ isinstance(stmt, ActiveForm): continue # Special handling for Associations -- more than 1 node and members # are Events elif isinstance(stmt, Association): for m in stmt.members: self._add_node(m.concept) # Special handling for Complexes -- more than 1 node elif isinstance(stmt, Complex): for m in stmt.members: self._add_node(m) # All else should have exactly 2 nodes elif all([ag is not None for ag in stmt.agent_list()]): if not len(stmt.agent_list()) == 2: logger.warning( '%s has less/more than the expected 2 agents.' % stmt) continue for ag in stmt.agent_list(): self._add_node(ag) # Second, create the edges of the graph for stmt in self.statements: # Skip SelfModification (self loops) -- has one node if isinstance(stmt, SelfModification) or \ isinstance(stmt, Translocation) or \ isinstance(stmt, ActiveForm): continue elif isinstance(stmt, Association): self._add_complex(stmt.members, is_association=True) elif isinstance(stmt, Complex): self._add_complex(stmt.members) elif all([ag is not None for ag in stmt.agent_list()]): self._add_stmt_edge(stmt)
[ "Assemble the graph from the assembler's list of INDRA Statements." ]
Please provide a description of the function:def get_string(self): graph_string = self.graph.to_string() graph_string = graph_string.replace('\\N', '\\n') return graph_string
[ "Return the assembled graph as a string.\n\n Returns\n -------\n graph_string : str\n The assembled graph as a string.\n " ]