Code | Summary
---|---
Please provide a description of the function:def join_json_files(prefix):
try:
with open(prefix + '.uaz.entities.json', 'rt') as f:
entities = json.load(f)
with open(prefix + '.uaz.events.json', 'rt') as f:
events = json.load(f)
with open(prefix + '.uaz.sentences.json', 'rt') as f:
sentences = json.load(f)
except IOError as e:
logger.error(
'Failed to open JSON files for %s; REACH error?' % prefix
)
logger.exception(e)
return None
return {'events': events, 'entities': entities, 'sentences': sentences} | [
"Join different REACH output JSON files into a single JSON object.\n\n The output of REACH is broken into three files that need to be joined\n before processing. Specifically, there will be three files of the form:\n `<prefix>.uaz.<subcategory>.json`.\n\n Parameters\n ----------\n prefix : str\n The absolute path up to the extensions that reach will add.\n\n Returns\n -------\n json_obj : dict\n The result of joining the files, keyed by the three subcategories.\n "
]
|
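A brief usage sketch (the output prefix below is hypothetical; it is whatever absolute path REACH wrote its three `.uaz.*.json` files under):

# Hypothetical REACH output prefix; the files <prefix>.uaz.entities.json,
# <prefix>.uaz.events.json and <prefix>.uaz.sentences.json are expected
# to sit next to each other.
joined = join_json_files('/tmp/reach_output/PMID12345')
if joined is not None:
    print(sorted(joined.keys()))  # ['entities', 'events', 'sentences']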
Please provide a description of the function:def read_pmid(pmid, source, cont_path, sparser_version, outbuf=None,
cleanup=True):
"Run sparser on a single pmid."
signal.signal(signal.SIGALRM, _timeout_handler)
signal.alarm(60)
try:
if (source == 'content_not_found'
or source.startswith('unhandled_content_type')
or source.endswith('failure')):
logger.info('No content read for %s.' % pmid)
return # No real content here.
if cont_path.endswith('.nxml') and source.startswith('pmc'):
new_fname = 'PMC%s%d.nxml' % (pmid, mp.current_process().pid)
os.rename(cont_path, new_fname)
try:
sp = sparser.process_nxml_file(
new_fname,
outbuf=outbuf,
cleanup=cleanup
)
finally:
if cleanup and os.path.exists(new_fname):
os.remove(new_fname)
elif cont_path.endswith('.txt'):
content_str = ''
with open(cont_path, 'r') as f:
content_str = f.read()
sp = sparser.process_text(
content_str,
outbuf=outbuf,
cleanup=cleanup
)
signal.alarm(0)
except Exception as e:
logger.error('Failed to process data for %s.' % pmid)
logger.exception(e)
signal.alarm(0)
return
if sp is None:
logger.error('Failed to run sparser on pmid: %s.' % pmid)
return
# At this point, we rewrite the PMID in the Evidence of Sparser
# Statements according to the actual PMID that was read.
sp.set_statements_pmid(pmid)
s3_client.put_reader_output('sparser', sp.json_stmts, pmid,
sparser_version, source)
return sp.statements | []
|
Please provide a description of the function:def get_stmts(pmids_unread, cleanup=True, sparser_version=None):
"Run sparser on the pmids in pmids_unread."
if sparser_version is None:
sparser_version = sparser.get_version()
stmts = {}
now = datetime.now()
outbuf_fname = 'sparser_%s_%s.log' % (
now.strftime('%Y%m%d-%H%M%S'),
mp.current_process().pid,
)
outbuf = open(outbuf_fname, 'wb')
try:
for pmid, result in pmids_unread.items():
logger.info('Reading %s' % pmid)
source = result['content_source']
cont_path = result['content_path']
outbuf.write(('\nReading pmid %s from %s located at %s.\n' % (
pmid,
source,
cont_path
)).encode('utf-8'))
outbuf.flush()
some_stmts = read_pmid(pmid, source, cont_path, sparser_version,
outbuf, cleanup)
if some_stmts is not None:
stmts[pmid] = some_stmts
else:
continue # We didn't get any new statements.
except KeyboardInterrupt as e:
logger.exception(e)
logger.info('Caught keyboard interrupt...stopping. \n'
'Results so far will be pickled unless '
'keyboard interrupt is hit again.')
finally:
outbuf.close()
print("Sparser logs may be found in %s" % outbuf_fname)
return stmts | []
|
Please provide a description of the function:def run_sparser(pmid_list, tmp_dir, num_cores, start_index, end_index,
force_read, force_fulltext, cleanup=True, verbose=True):
'Run the sparser reader on the pmids in pmid_list.'
reader_version = sparser.get_version()
_, _, _, pmids_read, pmids_unread, _ =\
get_content_to_read(
pmid_list, start_index, end_index, tmp_dir, num_cores,
force_fulltext, force_read, 'sparser', reader_version
)
logger.info('Adjusting num cores to length of pmid_list.')
num_cores = min(len(pmid_list), num_cores)
logger.info('Adjusted...')
if num_cores == 1:
stmts = get_stmts(pmids_unread, cleanup=cleanup)
stmts.update({pmid: get_stmts_from_cache(pmid)[pmid]
for pmid in pmids_read.keys()})
elif num_cores > 1:
logger.info("Starting a pool with %d cores." % num_cores)
pool = mp.Pool(num_cores)
pmids_to_read = list(pmids_unread.keys())
N = len(pmids_unread)
dn = (N + num_cores - 1) // num_cores  # ceiling division so no pmids are dropped
logger.info("Breaking pmids into batches.")
batches = []
for i in range(num_cores):
batches.append({
k: pmids_unread[k]
for k in pmids_to_read[i*dn:min((i+1)*dn, N)]
})
get_stmts_func = functools.partial(
get_stmts,
cleanup=cleanup,
sparser_version=reader_version
)
logger.info("Mapping get_stmts onto pool.")
unread_res = pool.map(get_stmts_func, batches)
logger.info('len(unread_res)=%d' % len(unread_res))
read_res = pool.map(get_stmts_from_cache, pmids_read.keys())
logger.info('len(read_res)=%d' % len(read_res))
pool.close()
logger.info('Multiprocessing pool closed.')
pool.join()
logger.info('Multiprocessing pool joined.')
stmts = {
pmid: stmt_list for res_dict in unread_res + read_res
for pmid, stmt_list in res_dict.items()
}
logger.info('len(stmts)=%d' % len(stmts))
return (stmts, pmids_unread) | []
|
Please provide a description of the function:def upload_process_reach_files(output_dir, pmid_info_dict, reader_version,
num_cores):
# At this point, we have a directory full of JSON files
# Collect all the prefixes into a set, then iterate over the prefixes
# Collect prefixes
json_files = glob.glob(os.path.join(output_dir, '*.json'))
json_prefixes = set([])
for json_file in json_files:
filename = os.path.basename(json_file)
prefix = filename.split('.')[0]
json_prefixes.add(prefix)
# Make a list with PMID and source_text info
logger.info("Uploading reading results for reach.")
pmid_json_tuples = []
for json_prefix in json_prefixes:
try:
full_json = upload_reach_readings(
json_prefix,
pmid_info_dict[json_prefix].get('content_source'),
reader_version,
output_dir
)
pmid_json_tuples.append((json_prefix, full_json))
except Exception as e:
logger.error("Caught an exception while trying to upload reach "
"reading results onto s3 for %s." % json_prefix)
logger.exception(e)
# Create a multiprocessing pool
logger.info('Creating a multiprocessing pool with %d cores' % num_cores)
# Get a multiprocessing pool.
pool = mp.Pool(num_cores)
logger.info('Processing local REACH JSON files')
res = pool.map(upload_process_pmid, pmid_json_tuples)
stmts_by_pmid = {
pmid: stmts for res_dict in res for pmid, stmts in res_dict.items()
}
pool.close()
logger.info('Multiprocessing pool closed.')
pool.join()
logger.info('Multiprocessing pool joined.')
return stmts_by_pmid | [
"\n logger.info('Uploaded REACH JSON for %d files to S3 (%d failures)' %\n (num_uploaded, num_failures))\n failures_file = os.path.join(output_dir, 'failures.txt')\n with open(failures_file, 'wt') as f:\n for fail in failures:\n f.write('%s\\n' % fail)\n "
]
|
Please provide a description of the function:def run_reach(pmid_list, base_dir, num_cores, start_index, end_index,
force_read, force_fulltext, cleanup=False, verbose=True):
logger.info('Running REACH with force_read=%s' % force_read)
logger.info('Running REACH with force_fulltext=%s' % force_fulltext)
# Get the path to the REACH JAR
path_to_reach = get_config('REACHPATH')
if path_to_reach is None or not os.path.exists(path_to_reach):
logger.warning(
'Reach path not set or invalid. Check REACHPATH environment var.'
)
return {}, {}
logger.info('Using REACH jar at: %s' % path_to_reach)
# Get the REACH version
reach_version = get_config('REACH_VERSION')
if reach_version is None:
logger.info('REACH version not set in REACH_VERSION')
m = re.match(r'reach-(.*?)\.jar', os.path.basename(path_to_reach))
reach_version = re.sub('-SNAP.*?$', '', m.groups()[0])
logger.info('Using REACH version: %s' % reach_version)
tmp_dir, _, output_dir, pmids_read, pmids_unread, num_found =\
get_content_to_read(
pmid_list, start_index, end_index, base_dir, num_cores,
force_fulltext, force_read, 'reach', reach_version
)
stmts = {}
mem_tot = get_mem_total()
if mem_tot is not None and mem_tot <= REACH_MEM + MEM_BUFFER:
logger.error(
"Too little memory to run reach. At least %s required." %
(REACH_MEM + MEM_BUFFER)
)
logger.info("REACH not run.")
elif len(pmids_unread) > 0 and num_found > 0:
# Create the REACH configuration file
with open(REACH_CONF_FMT_FNAME, 'r') as fmt_file:
conf_file_path = os.path.join(tmp_dir, 'indra.conf')
with open(conf_file_path, 'w') as conf_file:
conf_file.write(
fmt_file.read().format(tmp_dir=os.path.abspath(tmp_dir),
num_cores=num_cores,
loglevel='INFO')
)
# Run REACH!
logger.info("Beginning reach.")
args = ['java', '-Xmx24000m', '-Dconfig.file=%s' % conf_file_path,
'-jar', path_to_reach]
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if verbose:
for line in iter(p.stdout.readline, b''):
logger.info(line)
p_out, p_err = p.communicate()
if p.returncode:
logger.error('Problem running REACH:')
logger.error('Stdout: %s' % p_out.decode('utf-8'))
logger.error('Stderr: %s' % p_err.decode('utf-8'))
raise Exception('REACH crashed')
# Process JSON files from local file system, process to INDRA
# Statements and upload to S3
some_stmts = upload_process_reach_files(
output_dir,
pmids_unread,
reach_version,
num_cores
)
stmts.update(some_stmts)
# Delete the tmp directory if desired
if cleanup:
shutil.rmtree(tmp_dir)
# Create a new multiprocessing pool for processing the REACH JSON
# files previously cached on S3
logger.info('Creating multiprocessing pool with %d cpus' % num_cores)
pool = mp.Pool(num_cores)
# Download and process the JSON files on S3
logger.info('Processing REACH JSON from S3 in parallel')
res = pool.map(process_reach_from_s3, pmids_read.keys())
pool.close()
logger.info('Multiprocessing pool closed.')
pool.join()
logger.info('Multiprocessing pool joined.')
s3_stmts = {
pmid: stmt_list for res_dict in res
for pmid, stmt_list in res_dict.items()
}
stmts.update(s3_stmts)
# Save the list of PMIDs with no content found on S3/literature client
'''
content_not_found_file = os.path.join(tmp_dir, 'content_not_found.txt')
with open(content_not_found_file, 'wt') as f:
for c in content_not_found:
f.write('%s\n' % c)
'''
return stmts, pmids_unread | [
"Run reach on a list of pmids."
]
|
Please provide a description of the function:def get_all_descendants(parent):
children = parent.__subclasses__()
descendants = children[:]
for child in children:
descendants += get_all_descendants(child)
return descendants | [
"Get all the descendants of a parent class, recursively."
]
|
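A minimal, self-contained sketch of the recursion using toy classes (not the actual INDRA Statement hierarchy):

class Base: pass
class Child(Base): pass
class GrandChild(Child): pass

# Direct and indirect subclasses are both collected:
# [Child, GrandChild]
print(get_all_descendants(Base))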
Please provide a description of the function:def get_type_hierarchy(s):
tp = type(s) if not isinstance(s, type) else s
p_list = [tp]
for p in tp.__bases__:
if p is not Statement:
p_list.extend(get_type_hierarchy(p))
else:
p_list.append(p)
return p_list | [
"Get the sequence of parents from `s` to Statement.\n\n Parameters\n ----------\n s : a class or instance of a child of Statement\n For example the statement `Phosphorylation(MEK(), ERK())` or just the\n class `Phosphorylation`.\n\n Returns\n -------\n parent_list : list[types]\n A list of the types leading up to Statement.\n\n Examples\n --------\n >> s = Phosphorylation(MAPK1(), Elk1())\n >> get_type_hierarchy(s)\n [Phosphorylation, AddModification, Modification, Statement]\n >> get_type_hierarchy(AddModification)\n [AddModification, Modification, Statement]\n "
]
|
Please provide a description of the function:def get_statement_by_name(stmt_name):
stmt_classes = get_all_descendants(Statement)
for stmt_class in stmt_classes:
if stmt_class.__name__.lower() == stmt_name.lower():
return stmt_class
raise NotAStatementName('\"%s\" is not recognized as a statement type!'
% stmt_name) | [
"Get a statement class given the name of the statement class."
]
|
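A short usage sketch (assuming, as in INDRA, that Phosphorylation is among the Statement subclasses):

stmt_cls = get_statement_by_name('phosphorylation')  # lookup is case-insensitive
print(stmt_cls.__name__)  # Phosphorylation
# An unrecognized name raises NotAStatementName.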
Please provide a description of the function:def get_unresolved_support_uuids(stmts):
return {s.uuid for stmt in stmts for s in stmt.supports + stmt.supported_by
if isinstance(s, Unresolved)} | [
"Get uuids unresolved in support from stmts from stmts_from_json."
]
|
Please provide a description of the function:def stmt_type(obj, mk=True):
if isinstance(obj, Statement) and mk:
return type(obj)
else:
return type(obj).__name__ | [
"Return standardized, backwards compatible object type String.\n\n This is a temporary solution to make sure type comparisons and\n matches keys of Statements and related classes are backwards\n compatible.\n "
]
|
Please provide a description of the function:def get_hash(self, shallow=True, refresh=False):
if shallow:
if not hasattr(self, '_shallow_hash') or self._shallow_hash is None\
or refresh:
self._shallow_hash = make_hash(self.matches_key(), 14)
ret = self._shallow_hash
else:
if not hasattr(self, '_full_hash') or self._full_hash is None \
or refresh:
ev_mk_list = sorted([ev.matches_key() for ev in self.evidence])
self._full_hash = \
make_hash(self.matches_key() + str(ev_mk_list), 16)
ret = self._full_hash
return ret | [
"Get a hash for this Statement.\n\n There are two types of hash, \"shallow\" and \"full\". A shallow hash is\n as unique as the information carried by the statement, i.e. it is a hash\n of the `matches_key`. This means that differences in source, evidence,\n and so on are not included. As such, it is a shorter hash (14 nibbles).\n The odds of a collision among all the statements we expect to encounter\n (well under 10^8) is ~10^-9 (1 in a billion). Checks for collisions can\n be done by using the matches keys.\n\n A full hash includes, in addition to the matches key, information from\n the evidence of the statement. These hashes will be equal if the two\n Statements came from the same sentences, extracted by the same reader,\n from the same source. These hashes are correspondingly longer (16\n nibbles). The odds of a collision for an expected less than 10^10\n extractions is ~10^-9 (1 in a billion).\n\n Note that a hash of the Python object will also include the `uuid`, so\n it will always be unique for every object.\n\n Parameters\n ----------\n shallow : bool\n Choose between the shallow and full hashes described above. Default\n is true (e.g. a shallow hash).\n refresh : bool\n Used to get a new copy of the hash. Default is false, so the hash,\n if it has been already created, will be read from the attribute.\n This is primarily used for speed testing.\n\n Returns\n -------\n hash : int\n A long integer hash.\n "
]
|
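A usage sketch contrasting the two hash types; `stmt` stands for any Statement instance with evidence attached:

shallow = stmt.get_hash()             # 14-nibble hash of matches_key() only
full = stmt.get_hash(shallow=False)   # 16-nibble hash that also folds in evidence
# Both values are cached on the object; pass refresh=True to force recomputation.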
Please provide a description of the function:def _tag_evidence(self):
h = self.get_hash(shallow=False)
for ev in self.evidence:
ev.stmt_tag = h
return | [
"Set all the Evidence stmt_tag to my deep matches-key hash."
]
|
Please provide a description of the function:def agent_list(self, deep_sorted=False):
ag_list = []
for ag_name in self._agent_order:
ag_attr = getattr(self, ag_name)
if isinstance(ag_attr, Concept) or ag_attr is None:
ag_list.append(ag_attr)
elif isinstance(ag_attr, list):
if not all([isinstance(ag, Concept) for ag in ag_attr]):
raise TypeError("Expected all elements of list to be Agent "
"and/or Concept, but got: %s"
% {type(ag) for ag in ag_attr})
if deep_sorted:
ag_attr = sorted_agents(ag_attr)
ag_list.extend(ag_attr)
else:
raise TypeError("Expected type Agent, Concept, or list, got "
"type %s." % type(ag_attr))
return ag_list | [
"Get the canonicallized agent list."
]
|
Please provide a description of the function:def to_json(self, use_sbo=False):
stmt_type = type(self).__name__
# Original comment: For backwards compatibility, could be removed later
all_stmts = [self] + self.supports + self.supported_by
for st in all_stmts:
if not hasattr(st, 'uuid'):
st.uuid = '%s' % uuid.uuid4()
##################
json_dict = _o(type=stmt_type)
json_dict['belief'] = self.belief
if self.evidence:
evidence = [ev.to_json() for ev in self.evidence]
json_dict['evidence'] = evidence
json_dict['id'] = '%s' % self.uuid
if self.supports:
json_dict['supports'] = \
['%s' % st.uuid for st in self.supports]
if self.supported_by:
json_dict['supported_by'] = \
['%s' % st.uuid for st in self.supported_by]
def get_sbo_term(cls):
sbo_term = stmt_sbo_map.get(cls.__name__.lower())
while not sbo_term:
cls = cls.__bases__[0]
sbo_term = stmt_sbo_map.get(cls.__name__.lower())
return sbo_term
if use_sbo:
sbo_term = get_sbo_term(self.__class__)
json_dict['sbo'] = \
'http://identifiers.org/sbo/SBO:%s' % sbo_term
return json_dict | [
"Return serialized Statement as a JSON dict.\n\n Parameters\n ----------\n use_sbo : Optional[bool]\n If True, SBO annotations are added to each applicable element of\n the JSON. Default: False\n\n Returns\n -------\n json_dict : dict\n The JSON-serialized INDRA Statement.\n "
]
|
Please provide a description of the function:def to_graph(self):
def json_node(graph, element, prefix):
if not element:
return None
node_id = '|'.join(prefix)
if isinstance(element, list):
graph.add_node(node_id, label='')
# Enumerate children and add nodes and connect to anchor node
for i, sub_element in enumerate(element):
sub_id = json_node(graph, sub_element, prefix + ['%s' % i])
if sub_id:
graph.add_edge(node_id, sub_id, label='')
elif isinstance(element, dict):
graph.add_node(node_id, label='')
# Add node recursively for each element
# Connect to this node with edge label according to key
for k, v in element.items():
if k == 'id':
continue
elif k == 'name':
graph.node[node_id]['label'] = v
continue
elif k == 'type':
graph.node[node_id]['label'] = v
continue
sub_id = json_node(graph, v, prefix + ['%s' % k])
if sub_id:
graph.add_edge(node_id, sub_id, label=('%s' % k))
else:
if isinstance(element, basestring) and \
element.startswith('http'):
element = element.split('/')[-1]
graph.add_node(node_id, label=('%s' % str(element)))
return node_id
jd = self.to_json()
graph = networkx.DiGraph()
json_node(graph, jd, ['%s' % self.uuid])
return graph | [
"Return Statement as a networkx graph."
]
|
Please provide a description of the function:def make_generic_copy(self, deeply=False):
if deeply:
kwargs = deepcopy(self.__dict__)
else:
kwargs = self.__dict__.copy()
for attr in ['evidence', 'belief', 'uuid', 'supports', 'supported_by',
'is_activation']:
kwargs.pop(attr, None)
my_hash = kwargs.pop('_full_hash', None)
my_shallow_hash = kwargs.pop('_shallow_hash', None)
for attr in self._agent_order:
attr_value = kwargs.get(attr)
if isinstance(attr_value, list):
kwargs[attr] = sorted_agents(attr_value)
new_instance = self.__class__(**kwargs)
new_instance._full_hash = my_hash
new_instance._shallow_hash = my_shallow_hash
return new_instance | [
"Make a new matching Statement with no provenance.\n\n All agents and other attributes besides evidence, belief, supports, and\n supported_by will be copied over, and a new uuid will be assigned.\n Thus, the new Statement will satisfy `new_stmt.matches(old_stmt)`.\n\n If `deeply` is set to True, all the attributes will be deep-copied,\n which is comparatively slow. Otherwise, attributes of this statement\n may be altered by changes to the new matching statement.\n "
]
|
Please provide a description of the function:def load_lincs_csv(url):
resp = requests.get(url, params={'output_type': '.csv'}, timeout=120)
resp.raise_for_status()
if sys.version_info[0] < 3:
csv_io = BytesIO(resp.content)
else:
csv_io = StringIO(resp.text)
data_rows = list(read_unicode_csv_fileobj(csv_io, delimiter=','))
headers = data_rows[0]
return [{header: val for header, val in zip(headers, line_elements)}
for line_elements in data_rows[1:]] | [
"Helper function to turn csv rows into dicts."
]
|
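A hedged usage sketch; the URL below is a placeholder for whichever LINCS endpoint serves CSV when output_type='.csv' is requested:

rows = load_lincs_csv('http://lincs.example.org/db/sm/')  # hypothetical URL
# Each row is a dict keyed by the CSV header fields.
print(rows[0].keys())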
Please provide a description of the function:def get_small_molecule_name(self, hms_lincs_id):
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
if not entry:
return None
name = entry['Name']
return name | [
"Get the name of a small molecule from the LINCS sm metadata.\n\n Parameters\n ----------\n hms_lincs_id : str\n The HMS LINCS ID of the small molecule.\n\n Returns\n -------\n str\n The name of the small molecule.\n "
]
|
Please provide a description of the function:def get_small_molecule_refs(self, hms_lincs_id):
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
# If there is an entry then fill up the refs with existing values
mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID',
pubchem='PubChem CID', lincs='LINCS ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs | [
"Get the id refs of a small molecule from the LINCS sm metadata.\n\n Parameters\n ----------\n hms_lincs_id : str\n The HMS LINCS ID of the small molecule.\n\n Returns\n -------\n dict\n A dictionary of references.\n "
]
|
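A sketch of the returned shape; the client instance and the HMS LINCS ID below are placeholders:

refs = lincs_client.get_small_molecule_refs('10001-101')  # hypothetical ID
# Always contains 'HMS-LINCS'; 'CHEMBL', 'CHEBI', 'PUBCHEM' and 'LINCS'
# are added only if present in the metadata entry.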
Please provide a description of the function:def get_protein_refs(self, hms_lincs_id):
# TODO: We could get phosphorylation states from the protein data.
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._prot_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
mappings = dict(egid='Gene ID', up='UniProt ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs | [
"Get the refs for a protein from the LINCs protein metadata.\n\n Parameters\n ----------\n hms_lincs_id : str\n The HMS LINCS ID for the protein\n\n Returns\n -------\n dict\n A dictionary of protein references.\n "
]
|
Please provide a description of the function:def get_bel_stmts(self, filter=False):
if self.basename is not None:
bel_stmt_path = '%s_bel_stmts.pkl' % self.basename
# Check for cached BEL stmt file
if self.basename is not None and os.path.isfile(bel_stmt_path):
logger.info("Loading BEL statements from %s" % bel_stmt_path)
with open(bel_stmt_path, 'rb') as f:
bel_statements = pickle.load(f)
# No cache, so perform the queries
else:
bel_proc = bel.process_pybel_neighborhood(self.gene_list,
network_file=self.bel_corpus)
bel_statements = bel_proc.statements
# Save to pickle file if we're caching
if self.basename is not None:
with open(bel_stmt_path, 'wb') as f:
pickle.dump(bel_statements, f)
# Optionally filter out statements not involving only our gene set
if filter:
if len(self.gene_list) > 1:
bel_statements = ac.filter_gene_list(bel_statements,
self.gene_list, 'all')
return bel_statements | [
"Get relevant statements from the BEL large corpus.\n\n Performs a series of neighborhood queries and then takes the union of\n all the statements. Because the query process can take a long time for\n large gene lists, the resulting list of statements are cached in a\n pickle file with the filename `<basename>_bel_stmts.pkl`. If the\n pickle file is present, it is used by default; if not present, the\n queries are performed and the results are cached.\n\n Parameters\n ----------\n filter : bool\n If True, includes only those statements that exclusively mention\n genes in :py:attr:`gene_list`. Default is False. Note that the\n full (unfiltered) set of statements are cached.\n\n Returns\n -------\n list of :py:class:`indra.statements.Statement`\n List of INDRA statements extracted from the BEL large corpus.\n "
]
|
Please provide a description of the function:def get_biopax_stmts(self, filter=False, query='pathsbetween',
database_filter=None):
# If we're using a cache, initialize the appropriate filenames
if self.basename is not None:
biopax_stmt_path = '%s_biopax_stmts.pkl' % self.basename
biopax_ras_owl_path = '%s_pc_pathsbetween.owl' % self.basename
# Check for cached Biopax stmt file at the given path
# if it's there, return the statements from the cache
if self.basename is not None and os.path.isfile(biopax_stmt_path):
logger.info("Loading Biopax statements from %s" % biopax_stmt_path)
with open(biopax_stmt_path, 'rb') as f:
bp_statements = pickle.load(f)
return bp_statements
# Check for cached file before querying Pathway Commons Web API
if self.basename is not None and os.path.isfile(biopax_ras_owl_path):
logger.info("Loading Biopax from OWL file %s" % biopax_ras_owl_path)
bp = biopax.process_owl(biopax_ras_owl_path)
# OWL file not found; do query and save to file
else:
if (len(self.gene_list) < 2) and (query == 'pathsbetween'):
logger.warning('Using neighborhood query for one gene.')
query = 'neighborhood'
if query == 'pathsbetween':
if len(self.gene_list) > 60:
block_size = 60
else:
block_size = None
bp = biopax.process_pc_pathsbetween(self.gene_list,
database_filter=database_filter,
block_size=block_size)
elif query == 'neighborhood':
bp = biopax.process_pc_neighborhood(self.gene_list,
database_filter=database_filter)
else:
logger.error('Invalid query type: %s' % query)
return []
# Save the file if we're caching
if self.basename is not None:
bp.save_model(biopax_ras_owl_path)
# Save statements to pickle file if we're caching
if self.basename is not None:
with open(biopax_stmt_path, 'wb') as f:
pickle.dump(bp.statements, f)
# Optionally filter out statements not involving only our gene set
if filter:
policy = 'one' if len(self.gene_list) > 1 else 'all'
stmts = ac.filter_gene_list(bp.statements, self.gene_list, policy)
else:
stmts = bp.statements
return stmts | [
"Get relevant statements from Pathway Commons.\n\n Performs a \"paths between\" query for the genes in :py:attr:`gene_list`\n and uses the results to build statements. This function caches two\n files: the list of statements built from the query, which is cached in\n `<basename>_biopax_stmts.pkl`, and the OWL file returned by the Pathway\n Commons Web API, which is cached in `<basename>_pc_pathsbetween.owl`.\n If these cached files are found, then the results are returned based\n on the cached file and Pathway Commons is not queried again.\n\n Parameters\n ----------\n filter : Optional[bool]\n If True, includes only those statements that exclusively mention\n genes in :py:attr:`gene_list`. Default is False.\n query : Optional[str]\n Defined what type of query is executed. The two options are\n 'pathsbetween' which finds paths between the given list of genes\n and only works if more than 1 gene is given, and 'neighborhood'\n which searches the immediate neighborhood of each given gene.\n Note that for pathsbetween queries with more thatn 60 genes, the\n query will be executed in multiple blocks for scalability.\n database_filter: Optional[list[str]]\n A list of PathwayCommons databases to include in the query.\n\n Returns\n -------\n list of :py:class:`indra.statements.Statement`\n List of INDRA statements extracted from Pathway Commons.\n "
]
|
Please provide a description of the function:def get_statements(self, filter=False):
bp_stmts = self.get_biopax_stmts(filter=filter)
bel_stmts = self.get_bel_stmts(filter=filter)
return bp_stmts + bel_stmts | [
"Return the combined list of statements from BEL and Pathway Commons.\n\n Internally calls :py:meth:`get_biopax_stmts` and\n :py:meth:`get_bel_stmts`.\n\n Parameters\n ----------\n filter : bool\n If True, includes only those statements that exclusively mention\n genes in :py:attr:`gene_list`. Default is False.\n\n Returns\n -------\n list of :py:class:`indra.statements.Statement`\n List of INDRA statements extracted the BEL large corpus and Pathway\n Commons.\n "
]
|
Please provide a description of the function:def run_preassembly(self, stmts, print_summary=True):
# First round of preassembly: remove duplicates before sitemapping
pa1 = Preassembler(hierarchies, stmts)
logger.info("Combining duplicates")
pa1.combine_duplicates()
# Map sites
logger.info("Mapping sites")
(valid, mapped) = sm.map_sites(pa1.unique_stmts)
# Combine valid and successfully mapped statements into single list
correctly_mapped_stmts = []
for ms in mapped:
if all(mm[1] is not None for mm in ms.mapped_mods):
correctly_mapped_stmts.append(ms.mapped_stmt)
mapped_stmts = valid + correctly_mapped_stmts
# Second round of preassembly: de-duplicate and combine related
pa2 = Preassembler(hierarchies, mapped_stmts)
logger.info("Combining duplicates again")
pa2.combine_duplicates()
pa2.combine_related()
# Fill out the results dict
self.results = {}
self.results['raw'] = stmts
self.results['duplicates1'] = pa1.unique_stmts
self.results['valid'] = valid
self.results['mapped'] = mapped
self.results['mapped_stmts'] = mapped_stmts
self.results['duplicates2'] = pa2.unique_stmts
self.results['related2'] = pa2.related_stmts
# Print summary
if print_summary:
logger.info("\nStarting number of statements: %d" % len(stmts))
logger.info("After duplicate removal: %d" % len(pa1.unique_stmts))
logger.info("Unique statements with valid sites: %d" % len(valid))
logger.info("Unique statements with invalid sites: %d" %
len(mapped))
logger.info("After post-mapping duplicate removal: %d" %
len(pa2.unique_stmts))
logger.info("After combining related statements: %d" %
len(pa2.related_stmts))
# Save the results if we're caching
if self.basename is not None:
results_filename = '%s_results.pkl' % self.basename
with open(results_filename, 'wb') as f:
pickle.dump(self.results, f)
return self.results | [
"Run complete preassembly procedure on the given statements.\n\n Results are returned as a dict and stored in the attribute\n :py:attr:`results`. They are also saved in the pickle file\n `<basename>_results.pkl`.\n\n Parameters\n ----------\n stmts : list of :py:class:`indra.statements.Statement`\n Statements to preassemble.\n print_summary : bool\n If True (default), prints a summary of the preassembly process to\n the console.\n\n Returns\n -------\n dict\n A dict containing the following entries:\n\n - `raw`: the starting set of statements before preassembly.\n - `duplicates1`: statements after initial de-duplication.\n - `valid`: statements found to have valid modification sites.\n - `mapped`: mapped statements (list of\n :py:class:`indra.preassembler.sitemapper.MappedStatement`).\n - `mapped_stmts`: combined list of valid statements and statements\n after mapping.\n - `duplicates2`: statements resulting from de-duplication of the\n statements in `mapped_stmts`.\n - `related2`: top-level statements after combining the statements\n in `duplicates2`.\n "
]
|
Please provide a description of the function:def _get_grounding(entity):
db_refs = {'TEXT': entity['text']}
groundings = entity.get('grounding')
if not groundings:
return db_refs
def get_ont_concept(concept):
# In the WM context, groundings have no URL prefix and start with /
# The following block does some special handling of these groundings.
if concept.startswith('/'):
concept = concept[1:]
concept = concept.replace(' ', '_')
# We eliminate any entries that aren't ontology categories
# these are typically "examples" corresponding to the category
while concept not in hume_onto_entries:
parts = concept.split('/')
if len(parts) == 1:
break
concept = '/'.join(parts[:-1])
# Otherwise we just return the concept as is
return concept
# Basic collection of grounding entries
raw_grounding_entries = [(get_ont_concept(g['ontologyConcept']),
g['value']) for g in groundings]
# Occasionally we get duplicate grounding entries, we want to
# eliminate those here
grounding_dict = {}
for cat, score in raw_grounding_entries:
if (cat not in grounding_dict) or (score > grounding_dict[cat]):
grounding_dict[cat] = score
# Then we sort the list in reverse order according to score
# Sometimes the exact same score appears multiple times, in this
# case we prioritize by the "depth" of the grounding which is
# obtained by looking at the number of /-s in the entry.
# However, there are still cases where the grounding depth and the score
# are the same. In these cases we just sort alphabetically.
grounding_entries = sorted(list(set(grounding_dict.items())),
key=lambda x: (x[1], x[0].count('/'), x[0]),
reverse=True)
# We could get an empty list here in which case we don't add the
# grounding
if grounding_entries:
db_refs['HUME'] = grounding_entries
return db_refs | [
"Return Hume grounding.",
"Strip slash, replace spaces and remove example leafs."
]
|
Please provide a description of the function:def _find_relations(self):
# Get all extractions
extractions = \
list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]"))
# Get relations from extractions
relations = []
for e in extractions:
label_set = set(e.get('labels', []))
# If this is a DirectedRelation
if 'DirectedRelation' in label_set:
self.relation_dict[e['@id']] = e
subtype = e.get('subtype')
if any(t in subtype for t in polarities.keys()):
relations.append((subtype, e))
# If this is an Event or an Entity
if {'Event', 'Entity'} & label_set:
self.concept_dict[e['@id']] = e
if not relations and not self.relation_dict:
logger.info("No relations found.")
else:
logger.info('%d relations of types %s found'
% (len(relations), ', '.join(polarities.keys())))
logger.info('%d relations in dict.' % len(self.relation_dict))
logger.info('%d concepts found.' % len(self.concept_dict))
return relations | [
"Find all relevant relation elements and return them in a list."
]
|
Please provide a description of the function:def _get_documents(self):
documents = self.tree.execute("$.documents")
for doc in documents:
sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}
self.document_dict[doc['@id']] = {'sentences': sentences,
'location': doc['location']} | [
"Populate sentences attribute with a dict keyed by document id."
]
|
Please provide a description of the function:def _make_context(self, entity):
loc_context = None
time_context = None
# Look for time and place contexts.
for argument in entity["arguments"]:
if argument["type"] == "place":
entity_id = argument["value"]["@id"]
loc_entity = self.concept_dict[entity_id]
place = loc_entity.get("canonicalName")
if not place:
place = loc_entity['text']
geo_id = loc_entity.get('geoname_id')
loc_context = RefContext(name=place, db_refs={"GEOID": geo_id})
if argument["type"] == "time":
entity_id = argument["value"]["@id"]
temporal_entity = self.concept_dict[entity_id]
text = temporal_entity['mentions'][0]['text']
if len(temporal_entity.get("timeInterval", [])) < 1:
time_context = TimeContext(text=text)
continue
time = temporal_entity["timeInterval"][0]
start = datetime.strptime(time['start'], '%Y-%m-%dT%H:%M')
end = datetime.strptime(time['end'], '%Y-%m-%dT%H:%M')
duration = int(time['duration'])
time_context = TimeContext(text=text, start=start, end=end,
duration=duration)
# Put context together
context = None
if loc_context or time_context:
context = WorldContext(time=time_context, geo_location=loc_context)
return context | [
"Get place and time info from the json for this entity."
]
|
Please provide a description of the function:def _make_concept(self, entity):
# Use the canonical name as the name of the Concept by default
name = self._sanitize(entity['canonicalName'])
# But if there is a trigger head text, we prefer that since
# it almost always results in a cleaner name
# This is removed for now since the head word seems to be too
# minimal for some concepts, e.g. it gives us only "security"
# for "food security".
# Save raw text and Hume scored groundings as db_refs
db_refs = _get_grounding(entity)
concept = Concept(name, db_refs=db_refs)
metadata = {arg['type']: arg['value']['@id']
for arg in entity['arguments']}
return concept, metadata | [
"Return Concept from a Hume entity.",
"\n trigger = entity.get('trigger')\n if trigger is not None:\n head_text = trigger.get('head text')\n if head_text is not None:\n name = head_text\n "
]
|
Please provide a description of the function:def _get_event_and_context(self, event, arg_type):
eid = _choose_id(event, arg_type)
ev = self.concept_dict[eid]
concept, metadata = self._make_concept(ev)
ev_delta = {'adjectives': [],
'states': get_states(ev),
'polarity': get_polarity(ev)}
context = self._make_context(ev)
event_obj = Event(concept, delta=ev_delta, context=context)
return event_obj | [
"Return an INDRA Event based on an event entry."
]
|
Please provide a description of the function:def _get_evidence(self, event, adjectives):
provenance = event.get('provenance')
# First try looking up the full sentence through provenance
doc_id = provenance[0]['document']['@id']
sent_id = provenance[0]['sentence']
text = self.document_dict[doc_id]['sentences'][sent_id]
text = self._sanitize(text)
bounds = [provenance[0]['documentCharPositions'][k]
for k in ['start', 'end']]
annotations = {
'found_by': event.get('rule'),
'provenance': provenance,
'event_type': os.path.basename(event.get('type')),
'adjectives': adjectives,
'bounds': bounds
}
location = self.document_dict[doc_id]['location']
ev = Evidence(source_api='hume', text=text, annotations=annotations,
pmid=location)
return [ev] | [
"Return the Evidence object for the INDRA Statement."
]
|
Please provide a description of the function:def _is_statement_in_list(new_stmt, old_stmt_list):
for old_stmt in old_stmt_list:
if old_stmt.equals(new_stmt):
return True
elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt):
# If we're comparing a complex, make sure the agents are sorted.
if isinstance(new_stmt, Complex):
agent_pairs = zip(old_stmt.sorted_members(),
new_stmt.sorted_members())
else:
agent_pairs = zip(old_stmt.agent_list(), new_stmt.agent_list())
# Compare agent-by-agent.
for ag_old, ag_new in agent_pairs:
s_old = set(ag_old.db_refs.items())
s_new = set(ag_new.db_refs.items())
# If they're equal this isn't the one we're interested in.
if s_old == s_new:
continue
# If the new statement has nothing new to offer, just ignore it
if s_old > s_new:
return True
# If the new statement does have something new, add it to the
# existing statement. And then ignore it.
if s_new > s_old:
ag_old.db_refs.update(ag_new.db_refs)
return True
# If this is a case where different CHEBI ids were mapped to
# the same entity, set the agent name to the CHEBI id.
if _fix_different_refs(ag_old, ag_new, 'CHEBI'):
# Check to make sure the newly described statement does
# not match anything.
return _is_statement_in_list(new_stmt, old_stmt_list)
# If this is a case, like above, but with UMLS IDs, do the same
# thing as above. This will likely never be improved.
if _fix_different_refs(ag_old, ag_new, 'UMLS'):
# Check to make sure the newly described statement does
# not match anything.
return _is_statement_in_list(new_stmt, old_stmt_list)
logger.warning("Found an unexpected kind of duplicate. "
"Ignoring it.")
return True
# This means all the agents matched, which can happen if the
# original issue was the ordering of agents in a Complex.
return True
elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True):
# Check to see if we can improve the annotation of the existing
# statement.
e_old = old_stmt.evidence[0]
e_new = new_stmt.evidence[0]
if e_old.annotations['last_verb'] is None:
e_old.annotations['last_verb'] = e_new.annotations['last_verb']
# If the evidence is "the same", modulo annotations, just ignore it
if e_old.get_source_hash(True) == e_new.get_source_hash(True):
return True
return False | [
"Return True of given statement is equivalent to on in a list\n\n Determines whether the statement is equivalent to any statement in the\n given list of statements, with equivalency determined by Statement's\n equals method.\n\n Parameters\n ----------\n new_stmt : indra.statements.Statement\n The statement to compare with\n old_stmt_list : list[indra.statements.Statement]\n The statement list whose entries we compare with statement\n\n Returns\n -------\n in_list : bool\n True if statement is equivalent to any statements in the list\n "
]
|
Please provide a description of the function:def normalize_medscan_name(name):
suffix = ' complex'
for i in range(2):
if name.endswith(suffix):
name = name[:-len(suffix)]
return name | [
"Removes the \"complex\" and \"complex complex\" suffixes from a medscan\n agent name so that it better corresponds with the grounding map.\n\n Parameters\n ----------\n name: str\n The Medscan agent name\n\n Returns\n -------\n norm_name: str\n The Medscan agent name with the \"complex\" and \"complex complex\"\n suffixes removed.\n "
]
|
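The suffix is stripped at most twice, so both forms normalize to the same name (the agent name below is just an example):

print(normalize_medscan_name('NF-kappaB complex complex'))  # 'NF-kappaB'
print(normalize_medscan_name('NF-kappaB complex'))          # 'NF-kappaB'
print(normalize_medscan_name('NF-kappaB'))                  # 'NF-kappaB'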
Please provide a description of the function:def _urn_to_db_refs(urn):
# Convert a urn to a db_refs dictionary
if urn is None:
return {}, None
m = URN_PATT.match(urn)
if m is None:
return None, None
urn_type, urn_id = m.groups()
db_refs = {}
db_name = None
# TODO: support more types of URNs
if urn_type == 'agi-cas':
# Identifier is CAS, convert to CHEBI
chebi_id = get_chebi_id_from_cas(urn_id)
if chebi_id:
db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id
db_name = get_chebi_name_from_id(chebi_id)
elif urn_type == 'agi-llid':
# This is an Entrez ID, convert to HGNC
hgnc_id = get_hgnc_from_entrez(urn_id)
if hgnc_id is not None:
db_refs['HGNC'] = hgnc_id
# Convert the HGNC ID to a Uniprot ID
uniprot_id = get_uniprot_id(hgnc_id)
if uniprot_id is not None:
db_refs['UP'] = uniprot_id
# Try to lookup HGNC name; if it's available, set it to the
# agent name
db_name = get_hgnc_name(hgnc_id)
elif urn_type in ['agi-meshdis', 'agi-ncimorgan', 'agi-ncimtissue',
'agi-ncimcelltype']:
if urn_id.startswith('C') and urn_id[1:].isdigit():
# Identifier is probably UMLS
db_refs['UMLS'] = urn_id
else:
# Identifier is MESH
urn_mesh_name = unquote(urn_id)
mesh_id, mesh_name = mesh_client.get_mesh_id_name(urn_mesh_name)
if mesh_id:
db_refs['MESH'] = mesh_id
db_name = mesh_name
else:
db_name = urn_mesh_name
elif urn_type == 'agi-gocomplex':
# Identifier is GO
db_refs['GO'] = 'GO:%s' % urn_id
elif urn_type == 'agi-go':
# Identifier is GO
db_refs['GO'] = 'GO:%s' % urn_id
# If we have a GO or MESH grounding, see if there is a corresponding
# Famplex grounding
db_sometimes_maps_to_famplex = ['GO', 'MESH']
for db in db_sometimes_maps_to_famplex:
if db in db_refs:
key = (db, db_refs[db])
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If the urn corresponds to an eccode, ground to famplex if that eccode
# is in the Famplex equivalences table
if urn.startswith('urn:agi-enz'):
tokens = urn.split(':')
eccode = tokens[2]
key = ('ECCODE', eccode)
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If the Medscan URN itself maps to a Famplex id, add a Famplex grounding
key = ('MEDSCAN', urn)
if key in famplex_map:
db_refs['FPLX'] = famplex_map[key]
# If there is a Famplex grounding, use Famplex for entity name
if 'FPLX' in db_refs:
db_name = db_refs['FPLX']
elif 'GO' in db_refs:
db_name = go_client.get_go_label(db_refs['GO'])
return db_refs, db_name | [
"Converts a Medscan URN to an INDRA db_refs dictionary with grounding\n information.\n\n Parameters\n ----------\n urn : str\n A Medscan URN\n\n Returns\n -------\n db_refs : dict\n A dictionary with grounding information, mapping databases to database\n identifiers. If the Medscan URN is not recognized, returns an empty\n dictionary.\n db_name : str\n The Famplex name, if available; otherwise the HGNC name if available;\n otherwise None\n "
]
|
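A hedged sketch of the two most common branches; the URNs are illustrative and the exact identifiers returned depend on INDRA's lookup tables:

db_refs, name = _urn_to_db_refs('urn:agi-llid:673')    # Entrez gene branch
# -> db_refs may gain 'HGNC' and 'UP' keys; name becomes the HGNC symbol
db_refs, name = _urn_to_db_refs('urn:agi-go:0005634')  # GO branch
# -> {'GO': 'GO:0005634'}, plus an 'FPLX' key if a FamPlex mapping exists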
Please provide a description of the function:def _untag_sentence(tagged_sentence):
untagged_sentence = TAG_PATT.sub('\\2', tagged_sentence)
clean_sentence = JUNK_PATT.sub('', untagged_sentence)
return clean_sentence.strip() | [
"Removes all tags in the sentence, returning the original sentence\n without Medscan annotations.\n\n Parameters\n ----------\n tagged_sentence : str\n The tagged sentence\n\n Returns\n -------\n untagged_sentence : str\n Sentence with tags and annotations stripped out\n "
]
|
Please provide a description of the function:def _extract_sentence_tags(tagged_sentence):
untagged_sentence = _untag_sentence(tagged_sentence)
decluttered_sentence = JUNK_PATT.sub('', tagged_sentence)
tags = {}
# Iteratively look for all matches of this pattern
endpos = 0
while True:
match = TAG_PATT.search(decluttered_sentence, pos=endpos)
if not match:
break
endpos = match.end()
text = match.group(2)
text = text.replace('CONTEXT', '')
text = text.replace('GLOSSARY', '')
text = text.strip()
start = untagged_sentence.index(text)
stop = start + len(text)
tag_key = match.group(1)
if ',' in tag_key:
for sub_key in tag_key.split(','):
if sub_key == '0':
continue
tags[sub_key] = {'text': text, 'bounds': (start, stop)}
else:
tags[tag_key] = {'text': text, 'bounds': (start, stop)}
return tags | [
"Given a tagged sentence, extracts a dictionary mapping tags to the words\n or phrases that they tag.\n\n Parameters\n ----------\n tagged_sentence : str\n The sentence with Medscan annotations and tags\n\n Returns\n -------\n tags : dict\n A dictionary mapping tags to the words or phrases that they tag.\n "
]
|
Please provide a description of the function:def get_sites(self):
st = self.site_text
suffixes = [' residue', ' residues', ',', '/']
for suffix in suffixes:
if st.endswith(suffix):
st = st[:-len(suffix)]
assert(not st.endswith(','))
# Strip parentheses
st = st.replace('(', '')
st = st.replace(')', '')
st = st.replace(' or ', ' and ')  # Treat 'and' and 'or' the same
sites = []
parts = st.split(' and ')
for part in parts:
if part.endswith(','):
part = part[:-1]
if len(part.strip()) > 0:
sites.extend(ReachProcessor._parse_site_text(part.strip()))
return sites | [
"Parse the site-text string and return a list of sites.\n\n Returns\n -------\n sites : list[Site]\n A list of position-residue pairs corresponding to the site-text\n "
]
|
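A usage sketch based on how the processor constructs these objects elsewhere in the code (the site and object texts are hypothetical):

info = ProteinSiteInfo(site_text='Ser473 and Thr308 residues',
                       object_text='AKT1')
# The ' residues' suffix is stripped, the string is split on ' and ',
# and each part is handed to ReachProcessor._parse_site_text.
sites = info.get_sites()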
Please provide a description of the function:def process_csxml_file(self, filename, interval=None, lazy=False):
if interval is None:
interval = (None, None)
tmp_fname = tempfile.mktemp(os.path.basename(filename))
fix_character_encoding(filename, tmp_fname)
self.__f = open(tmp_fname, 'rb')
self._gen = self._iter_through_csxml_file_from_handle(*interval)
if not lazy:
for stmt in self._gen:
self.statements.append(stmt)
return | [
"Processes a filehandle to MedScan csxml input into INDRA\n statements.\n\n The CSXML format consists of a top-level `<batch>` root element\n containing a series of `<doc>` (document) elements, in turn containing\n `<sec>` (section) elements, and in turn containing `<sent>` (sentence)\n elements.\n\n Within the `<sent>` element, a series of additional elements appear in\n the following order:\n\n * `<toks>`, which contains a tokenized form of the sentence in its text\n attribute\n * `<textmods>`, which describes any preprocessing/normalization done to\n the underlying text\n * `<match>` elements, each of which contains one of more `<entity>`\n elements, describing entities in the text with their identifiers.\n The local IDs of each entities are given in the `msid` attribute of\n this element; these IDs are then referenced in any subsequent SVO\n elements.\n * `<svo>` elements, representing subject-verb-object triples. SVO\n elements with a `type` attribute of `CONTROL` represent normalized\n regulation relationships; they often represent the normalized\n extraction of the immediately preceding (but unnormalized SVO\n element). However, in some cases there can be a \"CONTROL\" SVO\n element without its parent immediately preceding it.\n\n Parameters\n ----------\n filename : string\n The path to a Medscan csxml file.\n interval : (start, end) or None\n Select the interval of documents to read, starting with the\n `start`th document and ending before the `end`th document. If\n either is None, the value is considered undefined. If the value\n exceeds the bounds of available documents, it will simply be\n ignored.\n lazy : bool\n If True, only create a generator which can be used by the\n `get_statements` method. If True, populate the statements list now.\n "
]
|
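A hedged usage sketch; the processor class name and its constructor arguments are assumed here, since only the method bodies are shown above:

mp_proc = MedscanProcessor()  # assumed constructor
# Read only the first 100 documents and populate .statements eagerly.
mp_proc.process_csxml_file('/path/to/batch.csxml', interval=(0, 100),
                           lazy=False)
print(len(mp_proc.statements))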
Please provide a description of the function:def process_relation(self, relation, last_relation):
subj_res = self.agent_from_entity(relation, relation.subj)
obj_res = self.agent_from_entity(relation, relation.obj)
if subj_res is None or obj_res is None:
# Don't extract a statement if the subject or object cannot
# be resolved
return
subj, subj_bounds = subj_res
obj, obj_bounds = obj_res
# Make evidence object
untagged_sentence = _untag_sentence(relation.tagged_sentence)
if last_relation:
last_verb = last_relation.verb
else:
last_verb = None
# Get the entity information with the character coordinates
annotations = {'verb': relation.verb, 'last_verb': last_verb,
'agents': {'coords': [subj_bounds, obj_bounds]}}
epistemics = dict()
epistemics['direct'] = False # Overridden later if needed
ev = [Evidence(source_api='medscan', source_id=relation.uri,
pmid=relation.pmid, text=untagged_sentence,
annotations=annotations, epistemics=epistemics)]
if relation.verb in INCREASE_AMOUNT_VERBS:
# If the normalized verb corresponds to an IncreaseAmount statement
# then make one
self._add_statement(IncreaseAmount(subj, obj, evidence=ev))
elif relation.verb in DECREASE_AMOUNT_VERBS:
# If the normalized verb corresponds to a DecreaseAmount statement
# then make one
self._add_statement(DecreaseAmount(subj, obj, evidence=ev))
elif relation.verb in ALL_ACTIVATION_VERBS:
# If the normalized verb corresponds to an Activation statement,
# then make one
if relation.verb in D_ACTIVATION_VERBS:
ev[0].epistemics['direct'] = True
self._add_statement(Activation(subj, obj, evidence=ev))
elif relation.verb in ALL_INHIBITION_VERBS:
# If the normalized verb corresponds to an Inhibition statement,
# then make one
if relation.verb in D_INHIBITION_VERBS:
ev[0].epistemics['direct'] = True
self._add_statement(Inhibition(subj, obj, evidence=ev))
elif relation.verb == 'ProtModification':
# The normalized verb 'ProtModification' is too vague to make
# an INDRA statement. We look at the unnormalized verb in the
# previous svo element, if available, to decide what type of
# INDRA statement to construct.
if last_relation is None:
# We cannot make a statement unless we have more fine-grained
# information on the relation type from a preceding
# unnormalized SVO
return
# Map the unnormalized verb to an INDRA statement type
if last_relation.verb == 'TK{phosphorylate}':
statement_type = Phosphorylation
elif last_relation.verb == 'TK{dephosphorylate}':
statement_type = Dephosphorylation
elif last_relation.verb == 'TK{ubiquitinate}':
statement_type = Ubiquitination
elif last_relation.verb == 'TK{acetylate}':
statement_type = Acetylation
elif last_relation.verb == 'TK{methylate}':
statement_type = Methylation
elif last_relation.verb == 'TK{deacetylate}':
statement_type = Deacetylation
elif last_relation.verb == 'TK{demethylate}':
statement_type = Demethylation
elif last_relation.verb == 'TK{hyperphosphorylate}':
statement_type = Phosphorylation
elif last_relation.verb == 'TK{hydroxylate}':
statement_type = Hydroxylation
elif last_relation.verb == 'TK{sumoylate}':
statement_type = Sumoylation
elif last_relation.verb == 'TK{palmitoylate}':
statement_type = Palmitoylation
elif last_relation.verb == 'TK{glycosylate}':
statement_type = Glycosylation
elif last_relation.verb == 'TK{ribosylate}':
statement_type = Ribosylation
elif last_relation.verb == 'TK{deglycosylate}':
statement_type = Deglycosylation
elif last_relation.verb == 'TK{myristylate}':
statement_type = Myristoylation
elif last_relation.verb == 'TK{farnesylate}':
statement_type = Farnesylation
elif last_relation.verb == 'TK{desumoylate}':
statement_type = Desumoylation
elif last_relation.verb == 'TK{geranylgeranylate}':
statement_type = Geranylgeranylation
elif last_relation.verb == 'TK{deacylate}':
statement_type = Deacetylation
else:
# This unnormalized verb is not handled, do not extract an
# INDRA statement
return
obj_text = obj.db_refs['TEXT']
last_info = self.last_site_info_in_sentence
if last_info is not None and obj_text == last_info.object_text:
for site in self.last_site_info_in_sentence.get_sites():
r = site.residue
p = site.position
s = statement_type(subj, obj, residue=r, position=p,
evidence=ev)
self._add_statement(s)
else:
self._add_statement(statement_type(subj, obj, evidence=ev))
elif relation.verb == 'Binding':
# The Binding normalized verb corresponds to the INDRA Complex
# statement.
self._add_statement(
Complex([subj, obj], evidence=ev)
)
elif relation.verb == 'ProtModification-negative':
pass # TODO? These occur so infrequently so maybe not worth it
elif relation.verb == 'Regulation-unknown':
pass # TODO? These occur so infrequently so maybe not worth it
elif relation.verb == 'StateEffect-positive':
pass
# self._add_statement(
# ActiveForm(subj, obj, evidence=ev)
# )
# TODO: disabling for now, since not sure whether we should set
# the is_active flag
elif relation.verb == 'StateEffect':
self.last_site_info_in_sentence = \
ProteinSiteInfo(site_text=subj.name,
object_text=obj.db_refs['TEXT'])
return | [
"Process a relation into an INDRA statement.\n\n Parameters\n ----------\n relation : MedscanRelation\n The relation to process (a CONTROL svo with normalized verb)\n last_relation : MedscanRelation\n The relation immediately proceding the relation to process within\n the same sentence, or None if there are no preceding relations\n within the same sentence. This proceeding relation, if available,\n will refer to the same interaction but with an unnormalized\n (potentially more specific) verb, and is used when processing\n protein modification events.\n "
]
|
Please provide a description of the function:def agent_from_entity(self, relation, entity_id):
# Extract sentence tags mapping ids to the text. We refer to this
# mapping only if the entity doesn't appear in the grounded entity
# list
tags = _extract_sentence_tags(relation.tagged_sentence)
if entity_id is None:
return None
self.num_entities += 1
entity_id = _extract_id(entity_id)
if entity_id not in relation.entities and \
entity_id not in tags:
# Could not find the entity in either the list of grounded
# entities of the items tagged in the sentence. Happens for
# a very small percentage of the dataset.
self.num_entities_not_found += 1
return None
if entity_id not in relation.entities:
# The entity is not in the grounded entity list
# Instead, make an ungrounded entity, with TEXT corresponding to
# the words with the given entity id tagged in the sentence.
entity_data = tags[entity_id]
db_refs = {'TEXT': entity_data['text']}
ag = Agent(normalize_medscan_name(db_refs['TEXT']),
db_refs=db_refs)
return ag, entity_data['bounds']
else:
entity = relation.entities[entity_id]
bounds = (entity.ch_start, entity.ch_end)
prop = entity.properties
if len(prop.keys()) == 2 and 'Protein' in prop \
and 'Mutation' in prop:
# Handle the special case where the entity is a protein
# with a mutation or modification, with those details
# described in the entity properties
protein = prop['Protein']
assert(len(protein) == 1)
protein = protein[0]
mutation = prop['Mutation']
assert(len(mutation) == 1)
mutation = mutation[0]
db_refs, db_name = _urn_to_db_refs(protein.urn)
if db_refs is None:
return None
db_refs['TEXT'] = protein.name
if db_name is None:
agent_name = db_refs['TEXT']
else:
agent_name = db_name
# Check mutation.type. Only some types correspond to situations
# that can be represented in INDRA; return None if we cannot
# map to an INDRA statement (which will block processing of
# the statement in process_relation).
if mutation.type == 'AASite':
# Do not handle this
# Example:
# MedscanEntity(name='D1', urn='urn:agi-aa:D1',
# type='AASite', properties=None)
return None
elif mutation.type == 'Mutation':
# Convert mutation properties to an INDRA MutCondition
r_old, pos, r_new = _parse_mut_string(mutation.name)
if r_old is None:
logger.warning('Could not parse mutation string: ' +
mutation.name)
# Don't create an agent
return None
else:
try:
cond = MutCondition(pos, r_old, r_new)
ag = Agent(normalize_medscan_name(agent_name),
db_refs=db_refs, mutations=[cond])
return ag, bounds
except BaseException:
logger.warning('Could not parse mutation ' +
'string: ' + mutation.name)
return None
elif mutation.type == 'MethSite':
# Convert methylation site information to an INDRA
# ModCondition
res, pos = _parse_mod_string(mutation.name)
if res is None:
return None
cond = ModCondition('methylation', res, pos)
ag = Agent(normalize_medscan_name(agent_name),
db_refs=db_refs, mods=[cond])
return ag, bounds
# Example:
# MedscanEntity(name='R457',
# urn='urn:agi-s-llid:R457-2185', type='MethSite',
# properties=None)
elif mutation.type == 'PhosphoSite':
# Convert phosphorylation site information to an INDRA
# ModCondition
res, pos = _parse_mod_string(mutation.name)
if res is None:
return None
cond = ModCondition('phosphorylation', res, pos)
ag = Agent(normalize_medscan_name(agent_name),
db_refs=db_refs, mods=[cond])
return ag, bounds
# Example:
# MedscanEntity(name='S455',
# urn='urn:agi-s-llid:S455-47', type='PhosphoSite',
# properties=None)
pass
elif mutation.type == 'Lysine':
# Ambiguous whether this is a methylation or
# demethylation; skip
# Example:
# MedscanEntity(name='K150',
# urn='urn:agi-s-llid:K150-5624', type='Lysine',
# properties=None)
return None
else:
logger.warning('Processor currently cannot process ' +
'mutations of type ' + mutation.type)
else:
# Handle the more common case where we just ground the entity
# without mutation or modification information
db_refs, db_name = _urn_to_db_refs(entity.urn)
if db_refs is None:
return None
db_refs['TEXT'] = entity.name
if db_name is None:
agent_name = db_refs['TEXT']
else:
agent_name = db_name
ag = Agent(normalize_medscan_name(agent_name),
db_refs=db_refs)
return ag, bounds | [
"Create a (potentially grounded) INDRA Agent object from a given\n Medscan entity describing the subject or object.\n\n Uses helper functions to convert a Medscan URN to an INDRA db_refs\n grounding dictionary.\n\n If the entity has properties indicating that it is a protein with\n a mutation or modification, then constructs the needed ModCondition\n or MutCondition.\n\n Parameters\n ----------\n relation : MedscanRelation\n The current relation being processed\n entity_id : str\n The ID of the entity to process\n\n Returns\n -------\n agent : indra.statements.Agent\n A potentially grounded INDRA agent representing this entity\n "
]
|
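A minimal sketch of the mutation branch above, assuming INDRA's statements module is importable; the residue values and the 'BRAF' text grounding are hypothetical stand-ins for what _parse_mut_string and _urn_to_db_refs would return.

from indra.statements import Agent, MutCondition

# Hypothetical parse result standing in for _parse_mut_string('V600E')
r_old, pos, r_new = 'V', '600', 'E'
cond = MutCondition(pos, r_old, r_new)
ag = Agent('BRAF', db_refs={'TEXT': 'BRAF'}, mutations=[cond])
print(ag, ag.mutations)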
Please provide a description of the function:def get_parser(description, input_desc):
parser = ArgumentParser(description=description)
parser.add_argument(
dest='input_file',
help=input_desc
)
parser.add_argument(
'-r', '--readers',
choices=['reach', 'sparser', 'trips'],
help='List of readers to be used.',
nargs='+'
)
parser.add_argument(
'-n', '--num_procs',
dest='n_proc',
help='Select the number of processes to use.',
type=int,
default=1
)
parser.add_argument(
'-s', '--sample',
dest='n_samp',
help='Read a random sample of size N_SAMP of the inputs.',
type=int
)
parser.add_argument(
'-I', '--in_range',
dest='range_str',
help='Only read input lines in the range given as <start>:<end>.'
)
parser.add_argument(
'-v', '--verbose',
help='Include output from the readers.',
action='store_true'
)
parser.add_argument(
'-q', '--quiet',
help='Suppress most output. Overrides -v and -d options.',
action='store_true'
)
parser.add_argument(
'-d', '--debug',
help='Set the logging to debug level.',
action='store_true'
)
# parser.add_argument(
# '-m', '--messy',
# help='Do not clean up directories created while reading.',
# action='store_true'
# )
return parser | [
"Get a parser that is generic to reading scripts.\n\n Parameters\n ----------\n description : str\n A description of the tool, usually about one line long.\n input_desc: str\n A string describing the nature of the input file used by the reading\n tool.\n\n Returns\n -------\n parser : argparse.ArgumentParser instance\n An argument parser object, to which further arguments can be added.\n "
]
|
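A usage sketch for the parser factory above; the description strings, the extra option and the argument values are placeholders.

parser = get_parser('Read a set of documents with the chosen readers.',
                    'A file with one input file path per line.')
# Tool-specific options can still be appended before parsing.
parser.add_argument('--out_dir', help='Directory for output files.')
args = parser.parse_args(['inputs.txt', '-r', 'reach', 'sparser', '-n', '4'])
print(args.input_file, args.readers, args.n_proc)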
Please provide a description of the function:def send_request(endpoint, **kwargs):
if api_key is None:
logger.error('NewsAPI cannot be used without an API key')
return None
url = '%s/%s' % (newsapi_url, endpoint)
if 'apiKey' not in kwargs:
kwargs['apiKey'] = api_key
if 'pageSize' not in kwargs:
kwargs['pageSize'] = 100
res = requests.get(url, params=kwargs)
res.raise_for_status()
res_json = res.json()
return res_json | [
"Return the response to a query as JSON from the NewsAPI web service.\n\n The basic API is limited to 100 results which is chosen unless explicitly\n given as an argument. Beyond that, paging is supported through the \"page\"\n argument, if needed.\n\n Parameters\n ----------\n endpoint : str\n Endpoint to query, e.g. \"everything\" or \"top-headlines\"\n\n kwargs : dict\n A list of keyword arguments passed as parameters with the query.\n The basic ones are \"q\" which is the search query, \"from\" is a start\n date formatted as for instance 2018-06-10 and \"to\" is an end date\n with the same format.\n\n Returns\n -------\n res_json : dict\n The response from the web service as a JSON dict.\n "
]
|
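A usage sketch, assuming the module-level api_key has been configured; the query terms and dates are placeholders. Because "from" is a Python keyword, the date parameters are passed via dictionary unpacking.

res_json = send_request('everything', q='machine reading',
                        **{'from': '2018-06-10', 'to': '2018-06-17'})
if res_json is not None:
    print(res_json.get('totalResults'))
    for article in res_json.get('articles', [])[:3]:
        print(article.get('title'))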
Please provide a description of the function:def process_cx_file(file_name, require_grounding=True):
with open(file_name, 'rt') as fh:
json_list = json.load(fh)
return process_cx(json_list, require_grounding=require_grounding) | [
"Process a CX JSON file into Statements.\n\n Parameters\n ----------\n file_name : str\n Path to file containing CX JSON.\n require_grounding: bool\n Whether network nodes lacking grounding information should be included\n among the extracted Statements (default is True).\n\n Returns\n -------\n NdexCxProcessor\n Processor containing Statements.\n "
]
|
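A usage sketch with a hypothetical local CX export file:

ncp = process_cx_file('network.cx', require_grounding=False)  # placeholder path
print('%d statements extracted' % len(ncp.statements))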
Please provide a description of the function:def process_ndex_network(network_id, username=None, password=None,
require_grounding=True):
nd = ndex2.client.Ndex2(username=username, password=password)
res = nd.get_network_as_cx_stream(network_id)
if res.status_code != 200:
logger.error('Problem downloading network: status code %s' %
res.status_code)
logger.error('Response: %s' % res.text)
return None
json_list = res.json()
summary = nd.get_network_summary(network_id)
return process_cx(json_list, summary=summary,
require_grounding=require_grounding) | [
"Process an NDEx network into Statements.\n\n Parameters\n ----------\n network_id : str\n NDEx network ID.\n username : str\n NDEx username.\n password : str\n NDEx password.\n require_grounding: bool\n Whether network nodes lacking grounding information should be included\n among the extracted Statements (default is True).\n\n Returns\n -------\n NdexCxProcessor\n Processor containing Statements. Returns None if there if the HTTP\n status code indicates an unsuccessful request.\n "
]
|
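A usage sketch; the network ID below is a placeholder UUID, and the username/password are only needed for private networks.

network_id = 'abcd1234-0000-0000-0000-000000000000'  # hypothetical NDEx UUID
ncp = process_ndex_network(network_id)
if ncp is not None:
    print('%d statements extracted' % len(ncp.statements))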
Please provide a description of the function:def process_cx(cx_json, summary=None, require_grounding=True):
ncp = NdexCxProcessor(cx_json, summary=summary,
require_grounding=require_grounding)
ncp.get_statements()
return ncp | [
"Process a CX JSON object into Statements.\n\n Parameters\n ----------\n cx_json : list\n CX JSON object.\n summary : Optional[dict]\n The network summary object which can be obtained via\n get_network_summary through the web service. THis contains metadata\n such as the owner and the creation time of the network.\n require_grounding: bool\n Whether network nodes lacking grounding information should be included\n among the extracted Statements (default is True).\n\n Returns\n -------\n NdexCxProcessor\n Processor containing Statements.\n "
]
|
Please provide a description of the function:def read_files(files, readers, **kwargs):
reading_content = [Content.from_file(filepath) for filepath in files]
output_list = []
for reader in readers:
res_list = reader.read(reading_content, **kwargs)
if res_list is None:
logger.info("Nothing read by %s." % reader.name)
else:
logger.info("Successfully read %d content entries with %s."
% (len(res_list), reader.name))
output_list += res_list
logger.info("Read %s text content entries in all." % len(output_list))
return output_list | [
"Read the files in `files` with the reader objects in `readers`.\n\n Parameters\n ----------\n files : list [str]\n A list of file paths to be read by the readers. Supported files are\n limited to text and nxml files.\n readers : list [Reader instances]\n A list of Reader objects to be used reading the files.\n **kwargs :\n Other keyword arguments are passed to the `read` method of the readers.\n\n Returns\n -------\n output_list : list [ReadingData]\n A list of ReadingData objects with the contents of the readings.\n "
]
|
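A usage sketch; how the Reader instances are constructed depends on the local installation, so the readers list below is assumed to already exist, and the file paths are placeholders.

files = ['paper1.nxml', 'abstract2.txt']
outputs = read_files(files, readers)
print('Got %d reading outputs' % len(outputs))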
Please provide a description of the function:def expand_families(self, stmts):
new_stmts = []
for stmt in stmts:
# Put together the lists of families, with their members. E.g.,
# for a statement involving RAF and MEK, should return a list of
# tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)]
families_list = []
for ag in stmt.agent_list():
ag_children = self.get_children(ag)
# If the agent has no children, then we use the agent itself
if len(ag_children) == 0:
families_list.append([ag])
# Otherwise, we add the tuple of namespaces/IDs for the children
else:
families_list.append(ag_children)
# Now, put together new statements from the cross product of the
# expanded family members
for ag_combo in itertools.product(*families_list):
# Create new agents based on the namespaces/IDs, with
# appropriate name and db_refs entries
child_agents = []
for ag_entry in ag_combo:
# If we got an agent, or None, that means there were no
# children; so we use the original agent rather than
# construct a new agent
if ag_entry is None or isinstance(ag_entry, Agent):
new_agent = ag_entry
# Otherwise, create a new agent from the ns/ID
elif isinstance(ag_entry, tuple):
# FIXME FIXME FIXME
# This doesn't reproduce agent state from the original
# family-level statements!
ag_ns, ag_id = ag_entry
new_agent = _agent_from_ns_id(ag_ns, ag_id)
else:
raise Exception('Unrecognized agent entry type.')
# Add agent to our list of child agents
child_agents.append(new_agent)
# Create a copy of the statement
new_stmt = deepcopy(stmt)
# Replace the agents in the statement with the newly-created
# child agents
new_stmt.set_agent_list(child_agents)
# Add to list
new_stmts.append(new_stmt)
return new_stmts | [
"Generate statements by expanding members of families and complexes.\n "
]
|
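A sketch of the expansion on a family-level statement; `expander` is assumed to be an instance of the class that defines expand_families, and the FPLX groundings are illustrative.

from indra.statements import Agent, Phosphorylation

raf = Agent('RAF', db_refs={'FPLX': 'RAF'})
mek = Agent('MEK', db_refs={'FPLX': 'MEK'})
stmt = Phosphorylation(raf, mek)
expanded = expander.expand_families([stmt])
# With three RAF members and two MEK members this yields 3 x 2 = 6 statements.
print(len(expanded))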
Please provide a description of the function:def update_ontology(ont_url, rdf_path):
yaml_root = load_yaml_from_url(ont_url)
G = rdf_graph_from_yaml(yaml_root)
save_hierarchy(G, rdf_path) | [
"Load an ontology formatted like Eidos' from github."
]
|
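A usage sketch with placeholder locations; the URL below is illustrative rather than the canonical ontology path.

ont_url = ('https://raw.githubusercontent.com/example-org/ontologies/'
           'master/un_ontology.yml')  # hypothetical URL
update_ontology(ont_url, 'un_ontology.rdf')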
Please provide a description of the function:def rdf_graph_from_yaml(yaml_root):
G = Graph()
for top_entry in yaml_root:
assert len(top_entry) == 1
node = list(top_entry.keys())[0]
build_relations(G, node, top_entry[node], None)
return G | [
"Convert the YAML object into an RDF Graph object."
]
|
Please provide a description of the function:def load_yaml_from_url(ont_url):
res = requests.get(ont_url)
if res.status_code != 200:
raise Exception('Could not load ontology from %s' % ont_url)
root = yaml.load(res.content)
return root | [
"Return a YAML object loaded from a YAML file URL."
]
|
Please provide a description of the function:def register_preprocessed_file(self, infile, pmid, extra_annotations):
infile_base = os.path.basename(infile)
outfile = os.path.join(self.preprocessed_dir, infile_base)
shutil.copyfile(infile, outfile)
infile_key = os.path.splitext(infile_base)[0]
self.pmids[infile_key] = pmid
self.extra_annotations[infile_key] = extra_annotations | [
"Set up already preprocessed text file for reading with ISI reader.\n\n This is essentially a mock function to \"register\" already preprocessed\n files and get an IsiPreprocessor object that can be passed to\n the IsiProcessor.\n\n Parameters\n ----------\n infile : str\n Path to an already preprocessed text file (i.e. one ready to\n be sent for reading to ISI reader).\n pmid : str\n The PMID corresponding to the file\n extra_annotations : dict\n Extra annotations to be added to each statement, possibly including\n metadata about the source (annotations with the key \"interaction\"\n will be overridden)\n "
]
|
Please provide a description of the function:def preprocess_plain_text_string(self, text, pmid, extra_annotations):
output_file = '%s.txt' % self.next_file_id
output_file = os.path.join(self.preprocessed_dir, output_file)
# Tokenize sentence
sentences = nltk.sent_tokenize(text)
# Write sentences to text file
first_sentence = True
with codecs.open(output_file, 'w', encoding='utf-8') as f:
for sentence in sentences:
if not first_sentence:
f.write('\n')
f.write(sentence.rstrip())
first_sentence = False
# Store annotations
self.pmids[str(self.next_file_id)] = pmid
self.extra_annotations[str(self.next_file_id)] = extra_annotations
# Increment file id
self.next_file_id += 1 | [
"Preprocess plain text string for use by ISI reader.\n\n Preprocessing is done by tokenizing into sentences and writing\n each sentence on its own line in a plain text file. All other\n preprocessing functions ultimately call this one.\n\n Parameters\n ----------\n text : str\n The plain text of the article of abstract\n pmid : str\n The PMID from which it comes, or None if not specified\n extra_annotations : dict\n Extra annotations to be added to each statement, possibly including\n metadata about the source (annotations with the key \"interaction\"\n will be overridden)\n "
]
|
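A usage sketch, assuming `pp` is an already-constructed IsiPreprocessor with a working preprocessed directory (constructor arguments omitted); the text, PMID and annotations are placeholders.

text = 'BRAF phosphorylates MEK1. MEK1 then activates ERK.'
pp.preprocess_plain_text_string(text, pmid='12345',
                                extra_annotations={'source': 'example'})
# Writes <next_file_id>.txt with one sentence per line and records the
# PMID and annotations under that file id.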
Please provide a description of the function:def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
with codecs.open(filename, 'r', encoding='utf-8') as f:
content = f.read()
self.preprocess_plain_text_string(content, pmid,
extra_annotations) | [
"Preprocess a plain text file for use with ISI reder.\n\n Preprocessing results in a new text file with one sentence\n per line.\n\n Parameters\n ----------\n filename : str\n The name of the plain text file\n pmid : str\n The PMID from which it comes, or None if not specified\n extra_annotations : dict\n Extra annotations to be added to each statement, possibly including\n metadata about the source (annotations with the key \"interaction\"\n will be overridden)\n "
]
|
Please provide a description of the function:def preprocess_nxml_file(self, filename, pmid, extra_annotations):
# Create a temporary directory
tmp_dir = tempfile.mkdtemp('indra_isi_nxml2txt_output')
# Run nxml2txt
if nxml2txt_path is None:
logger.error('NXML2TXT_PATH not specified in config file or ' +
'environment variable')
return
if python2_path is None:
logger.error('PYTHON2_PATH not specified in config file or ' +
'environment variable')
return
else:
txt_out = os.path.join(tmp_dir, 'out.txt')
so_out = os.path.join(tmp_dir, 'out.so')
command = [python2_path,
os.path.join(nxml2txt_path, 'nxml2txt'),
filename,
txt_out,
so_out]
ret = subprocess.call(command)
if ret != 0:
logger.warning('nxml2txt returned non-zero error code')
with open(txt_out, 'r') as f:
txt_content = f.read()
# Remove the temporary directory
shutil.rmtree(tmp_dir)
# We need to remove some common LaTEX commands from the converted text
# or the reader will get confused
cmd1 = r'[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'
cmd2 = r'[^ \{\}]+\{[^\{\}]+\}'
txt_content = re.sub(cmd1, '', txt_content)
txt_content = re.sub(cmd2, '', txt_content)
with open('tmp.txt', 'w') as f:
f.write(txt_content)
# Preprocess text extracted from nxml
self.preprocess_plain_text_string(txt_content, pmid, extra_annotations) | [
"Preprocess an NXML file for use with the ISI reader.\n\n Preprocessing is done by extracting plain text from NXML and then\n creating a text file with one sentence per line.\n\n Parameters\n ----------\n filename : str\n Filename of an nxml file to process\n pmid : str\n The PMID from which it comes, or None if not specified\n extra_annotations : dict\n Extra annotations to be added to each statement, possibly including\n metadata about the source (annotations with the key \"interaction\"\n will be overridden)\n "
]
|
Please provide a description of the function:def preprocess_abstract_list(self, abstract_list):
for abstract_struct in abstract_list:
abs_format = abstract_struct['format']
content_type = abstract_struct['text_type']
content_zipped = abstract_struct['content']
tcid = abstract_struct['tcid']
trid = abstract_struct['trid']
assert(abs_format == 'text')
assert(content_type == 'abstract')
pmid = None # Don't worry about pmid for now
extra_annotations = {'tcid': tcid, 'trid': trid}
# Uncompress content
content = zlib.decompress(content_zipped,
zlib.MAX_WBITS+16).decode('utf-8')
self.preprocess_plain_text_string(content, pmid, extra_annotations) | [
"Preprocess abstracts in database pickle dump format for ISI reader.\n\n For each abstract, creates a plain text file with one sentence per\n line, and stores metadata to be included with each statement from\n that abstract.\n\n Parameters\n ----------\n abstract_list : list[dict]\n Compressed abstracts with corresopnding metadata in INDRA database\n pickle dump format.\n "
]
|
Please provide a description of the function:def process_geneways_files(input_folder=data_folder, get_evidence=True):
gp = GenewaysProcessor(input_folder, get_evidence)
return gp | [
"Reads in Geneways data and returns a list of statements.\n\n Parameters\n ----------\n input_folder : Optional[str]\n A folder in which to search for Geneways data. Looks for these\n Geneways extraction data files: human_action.txt,\n human_actionmention.txt, human_symbols.txt.\n Omit this parameter to use the default input folder which is\n indra/data.\n get_evidence : Optional[bool]\n Attempt to find the evidence text for an extraction by downloading\n the corresponding text content and searching for the given offset\n in the text to get the evidence sentence. Default: True\n\n Returns\n -------\n gp : GenewaysProcessor\n A GenewaysProcessor object which contains a list of INDRA statements\n generated from the Geneways action mentions.\n "
]
|
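A usage sketch; the folder is a placeholder for a directory containing human_action.txt, human_actionmention.txt and human_symbols.txt.

gp = process_geneways_files('/path/to/geneways_data', get_evidence=False)
print('Extracted %d statements' % len(gp.statements))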
Please provide a description of the function:def post_update(self, post_id, tag_string=None, rating=None, source=None,
parent_id=None, has_embedded_notes=None,
is_rating_locked=None, is_note_locked=None,
is_status_locked=None):
params = {
'post[tag_string]': tag_string,
'post[rating]': rating,
'post[source]': source,
'post[parent_id]': parent_id,
'post[has_embedded_notes]': has_embedded_notes,
'post[is_rating_locked]': is_rating_locked,
'post[is_note_locked]': is_note_locked,
'post[is_status_locked]': is_status_locked
}
return self._get('posts/{0}.json'.format(post_id), params, 'PUT',
auth=True) | [
"Update a specific post (Requires login).\n\n Parameters:\n post_id (int): The id number of the post to update.\n tag_string (str): A space delimited list of tags.\n rating (str): The rating for the post. Can be: safe, questionable,\n or explicit.\n source (str): If this is a URL, Danbooru will download the file.\n parent_id (int): The ID of the parent post.\n has_embedded_notes (int): Can be 1, 0.\n is_rating_locked (int): Can be: 0, 1 (Builder+ only).\n is_note_locked (int): Can be: 0, 1 (Builder+ only).\n is_status_locked (int): Can be: 0, 1 (Admin only).\n "
]
|
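A usage sketch for the Danbooru client methods that follow; the credentials, post id and tags are placeholders, and this `client` is reused in the later sketches.

from pybooru import Danbooru

client = Danbooru('danbooru', username='your_user', api_key='your_key')
client.post_update(1234567, tag_string='original highres', rating='s')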
Please provide a description of the function:def post_revert(self, post_id, version_id):
return self._get('posts/{0}/revert.json'.format(post_id),
{'version_id': version_id}, 'PUT', auth=True) | [
"Function to reverts a post to a previous version (Requires login).\n\n Parameters:\n post_id (int):\n version_id (int): The post version id to revert to.\n "
]
|
Please provide a description of the function:def post_copy_notes(self, post_id, other_post_id):
return self._get('posts/{0}/copy_notes.json'.format(post_id),
{'other_post_id': other_post_id}, 'PUT', auth=True) | [
"Function to copy notes (requires login).\n\n Parameters:\n post_id (int):\n other_post_id (int): The id of the post to copy notes to.\n "
]
|
Please provide a description of the function:def post_mark_translated(self, post_id, check_translation,
partially_translated):
param = {
'post[check_translation]': check_translation,
'post[partially_translated]': partially_translated
}
return self._get('posts/{0}/mark_as_translated.json'.format(post_id),
param, method='PUT', auth=True) | [
"Mark post as translated (Requires login) (UNTESTED).\n\n If you set check_translation and partially_translated to 1 post will\n be tagged as 'translated_request'\n\n Parameters:\n post_id (int):\n check_translation (int): Can be 0, 1.\n partially_translated (int): Can be 0, 1\n "
]
|
Please provide a description of the function:def post_vote(self, post_id, score):
return self._get('posts/{0}/votes.json'.format(post_id),
{'score': score}, 'POST', auth=True) | [
"Action lets you vote for a post (Requires login).\n Danbooru: Post votes/create.\n\n Parameters:\n post_id (int):\n score (str): Can be: up, down.\n "
]
|
Please provide a description of the function:def post_unvote(self, post_id):
return self._get('posts/{0}/unvote.json'.format(post_id),
method='PUT', auth=True) | [
"Action lets you unvote for a post (Requires login).\n\n Parameters:\n post_id (int):\n "
]
|
Please provide a description of the function:def post_flag_list(self, creator_id=None, creator_name=None, post_id=None,
reason_matches=None, is_resolved=None, category=None):
params = {
'search[creator_id]': creator_id,
'search[creator_name]': creator_name,
'search[post_id]': post_id,
}
return self._get('post_flags.json', params, auth=True) | [
"Function to flag a post (Requires login).\n\n Parameters:\n creator_id (int): The user id of the flag's creator.\n creator_name (str): The name of the flag's creator.\n post_id (int): The post id if the flag.\n "
]
|
Please provide a description of the function:def post_flag_create(self, post_id, reason):
params = {'post_flag[post_id]': post_id, 'post_flag[reason]': reason}
return self._get('post_flags.json', params, 'POST', auth=True) | [
"Function to flag a post.\n\n Parameters:\n post_id (int): The id of the flagged post.\n reason (str): The reason of the flagging.\n "
]
|
Please provide a description of the function:def post_appeals_list(self, creator_id=None, creator_name=None,
post_id=None):
params = {
'creator_id': creator_id,
'creator_name': creator_name,
'post_id': post_id
}
return self._get('post_appeals.json', params, auth=True) | [
"Function to return list of appeals (Requires login).\n\n Parameters:\n creator_id (int): The user id of the appeal's creator.\n creator_name (str): The name of the appeal's creator.\n post_id (int): The post id if the appeal.\n "
]
|
Please provide a description of the function:def post_appeals_create(self, post_id, reason):
params = {'post_appeal[post_id]': post_id,
'post_appeal[reason]': reason}
return self._get('post_appeals.json', params, 'POST', auth=True) | [
"Function to create appeals (Requires login).\n\n Parameters:\n post_id (int): The id of the appealed post.\n reason (str) The reason of the appeal.\n "
]
|
Please provide a description of the function:def post_versions_list(self, updater_name=None, updater_id=None,
post_id=None, start_id=None):
params = {
'search[updater_name]': updater_name,
'search[updater_id]': updater_id,
'search[post_id]': post_id,
'search[start_id]': start_id
}
return self._get('post_versions.json', params) | [
"Get list of post versions.\n\n Parameters:\n updater_name (str):\n updater_id (int):\n post_id (int):\n start_id (int):\n "
]
|
Please provide a description of the function:def post_versions_undo(self, version_id):
return self._get('post_versions/{0}/undo.json'.format(version_id),
method='PUT', auth=True) | [
"Undo post version (Requires login) (UNTESTED).\n\n Parameters:\n version_id (int):\n "
]
|
Please provide a description of the function:def upload_list(self, uploader_id=None, uploader_name=None, source=None):
params = {
'search[uploader_id]': uploader_id,
'search[uploader_name]': uploader_name,
'search[source]': source
}
return self._get('uploads.json', params, auth=True) | [
"Search and return an uploads list (Requires login).\n\n Parameters:\n uploader_id (int): The id of the uploader.\n uploader_name (str): The name of the uploader.\n source (str): The source of the upload (exact string match).\n "
]
|
Please provide a description of the function:def upload_create(self, tags, rating, file_=None, source=None,
parent_id=None):
if file_ or source is not None:
params = {
'upload[source]': source,
'upload[rating]': rating,
'upload[parent_id]': parent_id,
'upload[tag_string]': tags
}
file_ = {'upload[file]': open(file_, 'rb')}
return self._get('uploads.json', params, 'POST', auth=True,
file_=file_)
else:
raise PybooruAPIError("'file_' or 'source' is required.") | [
"Function to create a new upload (Requires login).\n\n Parameters:\n tags (str):\n rating (str): Can be: `s`, `q`, or `e`. Alternatively, you can\n specify `rating:safe`, `rating:questionable`, or\n `rating:explicit` in the tag string.\n file_ (file_path): The file data encoded as a multipart form.\n source (str): The source URL.\n parent_id (int): The parent post id.\n\n Raises:\n PybooruAPIError: When file_ or source are empty.\n "
]
|
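A usage sketch using the `client` from the earlier sketch; the path and tags are placeholders. Note that, as written, the method always calls open() on file_, so passing only a source URL would fail with this implementation.

client.upload_create(tags='original touhou', rating='s',
                     file_='/path/to/image.png')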
Please provide a description of the function:def comment_list(self, group_by, limit=None, page=None, body_matches=None,
post_id=None, post_tags_match=None, creator_name=None,
creator_id=None, is_deleted=None):
params = {
'group_by': group_by,
'limit': limit,
'page': page,
'search[body_matches]': body_matches,
'search[post_id]': post_id,
'search[post_tags_match]': post_tags_match,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_deleted]': is_deleted
}
return self._get('comments.json', params) | [
"Return a list of comments.\n\n Parameters:\n limit (int): How many posts you want to retrieve.\n page (int): The page number.\n group_by: Can be 'comment', 'post'. Comment will return recent\n comments. Post will return posts that have been recently\n commented on.\n body_matches (str): Body contains the given terms.\n post_id (int):\n post_tags_match (str): The comment's post's tags match the\n given terms. Meta-tags not supported.\n creator_name (str): The name of the creator (exact match).\n creator_id (int): The user id of the creator.\n is_deleted (bool): Can be: True, False.\n\n Raises:\n PybooruAPIError: When 'group_by' is invalid.\n "
]
|
Please provide a description of the function:def comment_create(self, post_id, body, do_not_bump_post=None):
params = {
'comment[post_id]': post_id,
'comment[body]': body,
'comment[do_not_bump_post]': do_not_bump_post
}
return self._get('comments.json', params, 'POST', auth=True) | [
"Action to lets you create a comment (Requires login).\n\n Parameters:\n post_id (int):\n body (str):\n do_not_bump_post (bool): Set to 1 if you do not want the post to be\n bumped to the top of the comment listing.\n "
]
|
Please provide a description of the function:def comment_update(self, comment_id, body):
params = {'comment[body]': body}
return self._get('comments/{0}.json'.format(comment_id), params, 'PUT',
auth=True) | [
"Function to update a comment (Requires login).\n\n Parameters:\n comment_id (int):\n body (str):\n "
]
|
Please provide a description of the function:def comment_delete(self, comment_id):
return self._get('comments/{0}.json'.format(comment_id),
method='DELETE', auth=True) | [
"Remove a specific comment (Requires login).\n\n Parameters:\n comment_id (int): The id number of the comment to remove.\n "
]
|
Please provide a description of the function:def comment_undelete(self, comment_id):
return self._get('comments/{0}/undelete.json'.format(comment_id),
method='POST', auth=True) | [
"Undelete a specific comment (Requires login) (UNTESTED).\n\n Parameters:\n comment_id (int):\n "
]
|
Please provide a description of the function:def comment_vote(self, comment_id, score):
params = {'score': score}
return self._get('comments/{0}/votes.json'.format(comment_id), params,
method='POST', auth=True) | [
"Lets you vote for a comment (Requires login).\n\n Parameters:\n comment_id (int):\n score (str): Can be: up, down.\n "
]
|
Please provide a description of the function:def comment_unvote(self, comment_id):
return self._get('posts/{0}/unvote.json'.format(comment_id),
method='POST', auth=True) | [
"Lets you unvote a specific comment (Requires login).\n\n Parameters:\n comment_id (int):\n "
]
|
Please provide a description of the function:def favorite_remove(self, post_id):
return self._get('favorites/{0}.json'.format(post_id), method='DELETE',
auth=True) | [
"Remove a post from favorites (Requires login).\n\n Parameters:\n post_id (int): Where post_id is the post id.\n "
]
|
Please provide a description of the function:def dmail_list(self, message_matches=None, to_name=None, to_id=None,
from_name=None, from_id=None, read=None):
params = {
'search[message_matches]': message_matches,
'search[to_name]': to_name,
'search[to_id]': to_id,
'search[from_name]': from_name,
'search[from_id]': from_id,
'search[read]': read
}
return self._get('dmails.json', params, auth=True) | [
"Return list of Dmails. You can only view dmails you own\n (Requires login).\n\n Parameters:\n message_matches (str): The message body contains the given terms.\n to_name (str): The recipient's name.\n to_id (int): The recipient's user id.\n from_name (str): The sender's name.\n from_id (int): The sender's user id.\n read (bool): Can be: true, false.\n "
]
|
Please provide a description of the function:def dmail_create(self, to_name, title, body):
params = {
'dmail[to_name]': to_name,
'dmail[title]': title,
'dmail[body]': body
}
return self._get('dmails.json', params, 'POST', auth=True) | [
"Create a dmail (Requires login)\n\n Parameters:\n to_name (str): The recipient's name.\n title (str): The title of the message.\n body (str): The body of the message.\n "
]
|
Please provide a description of the function:def dmail_delete(self, dmail_id):
return self._get('dmails/{0}.json'.format(dmail_id), method='DELETE',
auth=True) | [
"Delete a dmail. You can only delete dmails you own (Requires login).\n\n Parameters:\n dmail_id (int): where dmail_id is the dmail id.\n "
]
|
Please provide a description of the function:def artist_list(self, query=None, artist_id=None, creator_name=None,
creator_id=None, is_active=None, is_banned=None,
empty_only=None, order=None):
params = {
'search[name]': query,
'search[id]': artist_id,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active,
'search[is_banned]': is_banned,
'search[empty_only]': empty_only,
'search[order]': order
}
return self._get('artists.json', params) | [
"Get an artist of a list of artists.\n\n Parameters:\n query (str):\n This field has multiple uses depending on what the query starts\n with:\n 'http:desired_url':\n Search for artist with this URL.\n 'name:desired_url':\n Search for artists with the given name as their base name.\n 'other:other_name':\n Search for artists with the given name in their other\n names.\n 'group:group_name':\n Search for artists belonging to the group with the given\n name.\n 'status:banned':\n Search for artists that are banned. else Search for the\n given name in the base name and the other names.\n artist_id (id): The artist id.\n creator_name (str): Exact creator name.\n creator_id (id): Artist creator id.\n is_active (bool): Can be: true, false\n is_banned (bool): Can be: true, false\n empty_only (True): Search for artists that have 0 posts. Can be:\n true\n order (str): Can be: name, updated_at.\n "
]
|
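A usage sketch of the query prefixes described above, again with the placeholder `client`; the artist names and URL are made up.

client.artist_list(query='name:some_artist')             # exact base name
client.artist_list(query='http:https://example.com/g')   # by associated URL
client.artist_list(query='status:banned')                # banned artists only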
Please provide a description of the function:def artist_create(self, name, other_names_comma=None, group_name=None,
url_string=None, body=None):
params = {
'artist[name]': name,
'artist[other_names_comma]': other_names_comma,
'artist[group_name]': group_name,
'artist[url_string]': url_string,
'artist[body]': body,
}
return self._get('artists.json', params, method='POST', auth=True) | [
"Function to create an artist (Requires login) (UNTESTED).\n\n Parameters:\n name (str):\n other_names_comma (str): List of alternative names for this\n artist, comma delimited.\n group_name (str): The name of the group this artist belongs to.\n url_string (str): List of URLs associated with this artist,\n whitespace or newline delimited.\n body (str): DText that will be used to create a wiki entry at the\n same time.\n "
]
|
Please provide a description of the function:def artist_update(self, artist_id, name=None, other_names_comma=None,
group_name=None, url_string=None, body=None):
params = {
'artist[name]': name,
'artist[other_names_comma]': other_names_comma,
'artist[group_name]': group_name,
'artist[url_string]': url_string,
'artist[body]': body
}
return self._get('artists/{0}.json'.format(artist_id), params,
method='PUT', auth=True) | [
"Function to update artists (Requires login) (UNTESTED).\n\n Parameters:\n artist_id (str):\n name (str): Artist name.\n other_names_comma (str): List of alternative names for this\n artist, comma delimited.\n group_name (str): The name of the group this artist belongs to.\n url_string (str): List of URLs associated with this artist,\n whitespace or newline delimited.\n body (str): DText that will be used to create/update a wiki entry\n at the same time.\n "
]
|
Please provide a description of the function:def artist_delete(self, artist_id):
return self._get('artists/{0}.json'.format(artist_id), method='DELETE',
auth=True) | [
"Action to lets you delete an artist (Requires login) (UNTESTED)\n (Only Builder+).\n\n Parameters:\n artist_id (int): Where artist_id is the artist id.\n "
]
|
Please provide a description of the function:def artist_undelete(self, artist_id):
return self._get('artists/{0}/undelete.json'.format(artist_id),
method='POST', auth=True) | [
"Lets you undelete artist (Requires login) (UNTESTED) (Only Builder+).\n\n Parameters:\n artist_id (int):\n "
]
|
Please provide a description of the function:def artist_revert(self, artist_id, version_id):
params = {'version_id': version_id}
return self._get('artists/{0}/revert.json'.format(artist_id), params,
method='PUT', auth=True) | [
"Revert an artist (Requires login) (UNTESTED).\n\n Parameters:\n artist_id (int): The artist id.\n version_id (int): The artist version id to revert to.\n "
]
|
Please provide a description of the function:def artist_versions(self, name=None, updater_name=None, updater_id=None,
artist_id=None, is_active=None, is_banned=None,
order=None):
params = {
'search[name]': name,
'search[updater_name]': updater_name,
'search[updater_id]': updater_id,
'search[artist_id]': artist_id,
'search[is_active]': is_active,
'search[is_banned]': is_banned,
'search[order]': order
}
return self._get('artist_versions.json', params, auth=True) | [
"Get list of artist versions (Requires login).\n\n Parameters:\n name (str):\n updater_name (str):\n updater_id (int):\n artist_id (int):\n is_active (bool): Can be: True, False.\n is_banned (bool): Can be: True, False.\n order (str): Can be: name (Defaults to ID)\n "
]
|
Please provide a description of the function:def artist_commentary_list(self, text_matches=None, post_id=None,
post_tags_match=None, original_present=None,
translated_present=None):
params = {
'search[text_matches]': text_matches,
'search[post_id]': post_id,
'search[post_tags_match]': post_tags_match,
'search[original_present]': original_present,
'search[translated_present]': translated_present
}
return self._get('artist_commentaries.json', params) | [
"list artist commentary.\n\n Parameters:\n text_matches (str):\n post_id (int):\n post_tags_match (str): The commentary's post's tags match the\n giventerms. Meta-tags not supported.\n original_present (str): Can be: yes, no.\n translated_present (str): Can be: yes, no.\n "
]
|
Please provide a description of the function:def artist_commentary_create_update(self, post_id, original_title,
original_description, translated_title,
translated_description):
params = {
'artist_commentary[post_id]': post_id,
'artist_commentary[original_title]': original_title,
'artist_commentary[original_description]': original_description,
'artist_commentary[translated_title]': translated_title,
'artist_commentary[translated_description]': translated_description
}
return self._get('artist_commentaries/create_or_update.json', params,
method='POST', auth=True) | [
"Create or update artist commentary (Requires login) (UNTESTED).\n\n Parameters:\n post_id (int): Post id.\n original_title (str): Original title.\n original_description (str): Original description.\n translated_title (str): Translated title.\n translated_description (str): Translated description.\n "
]
|
Please provide a description of the function:def artist_commentary_revert(self, id_, version_id):
params = {'version_id': version_id}
return self._get('artist_commentaries/{0}/revert.json'.format(id_),
params, method='PUT', auth=True) | [
"Revert artist commentary (Requires login) (UNTESTED).\n\n Parameters:\n id_ (int): The artist commentary id.\n version_id (int): The artist commentary version id to\n revert to.\n "
]
|
Please provide a description of the function:def artist_commentary_versions(self, post_id, updater_id):
params = {'search[updater_id]': updater_id, 'search[post_id]': post_id}
return self._get('artist_commentary_versions.json', params) | [
"Return list of artist commentary versions.\n\n Parameters:\n updater_id (int):\n post_id (int):\n "
]
|
Please provide a description of the function:def note_list(self, body_matches=None, post_id=None, post_tags_match=None,
creator_name=None, creator_id=None, is_active=None):
params = {
'search[body_matches]': body_matches,
'search[post_id]': post_id,
'search[post_tags_match]': post_tags_match,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active
}
return self._get('notes.json', params) | [
"Return list of notes.\n\n Parameters:\n body_matches (str): The note's body matches the given terms.\n post_id (int): A specific post.\n post_tags_match (str): The note's post's tags match the given terms.\n creator_name (str): The creator's name. Exact match.\n creator_id (int): The creator's user id.\n is_active (bool): Can be: True, False.\n "
]
|
Please provide a description of the function:def note_create(self, post_id, coor_x, coor_y, width, height, body):
params = {
'note[post_id]': post_id,
'note[x]': coor_x,
'note[y]': coor_y,
'note[width]': width,
'note[height]': height,
'note[body]': body
}
return self._get('notes.json', params, method='POST', auth=True) | [
"Function to create a note (Requires login) (UNTESTED).\n\n Parameters:\n post_id (int):\n coor_x (int): The x coordinates of the note in pixels,\n with respect to the top-left corner of the image.\n coor_y (int): The y coordinates of the note in pixels,\n with respect to the top-left corner of the image.\n width (int): The width of the note in pixels.\n height (int): The height of the note in pixels.\n body (str): The body of the note.\n "
]
|
Please provide a description of the function:def note_update(self, note_id, coor_x=None, coor_y=None, width=None,
height=None, body=None):
params = {
'note[x]': coor_x,
'note[y]': coor_y,
'note[width]': width,
'note[height]': height,
'note[body]': body
}
return self._get('notes/{0}.json'.format(note_id), params, method='PUT',
auth=True) | [
"Function to update a note (Requires login) (UNTESTED).\n\n Parameters:\n note_id (int): Where note_id is the note id.\n coor_x (int): The x coordinates of the note in pixels,\n with respect to the top-left corner of the image.\n coor_y (int): The y coordinates of the note in pixels,\n with respect to the top-left corner of the image.\n width (int): The width of the note in pixels.\n height (int): The height of the note in pixels.\n body (str): The body of the note.\n "
]
|
Please provide a description of the function:def note_delete(self, note_id):
return self._get('notes/{0}.json'.format(note_id), method='DELETE',
auth=True) | [
"delete a specific note (Requires login) (UNTESTED).\n\n Parameters:\n note_id (int): Where note_id is the note id.\n "
]
|
Please provide a description of the function:def note_revert(self, note_id, version_id):
return self._get('notes/{0}/revert.json'.format(note_id),
{'version_id': version_id}, method='PUT', auth=True) | [
"Function to revert a specific note (Requires login) (UNTESTED).\n\n Parameters:\n note_id (int): Where note_id is the note id.\n version_id (int): The note version id to revert to.\n "
]
|
Please provide a description of the function:def note_versions(self, updater_id=None, post_id=None, note_id=None):
params = {
'search[updater_id]': updater_id,
'search[post_id]': post_id,
'search[note_id]': note_id
}
return self._get('note_versions.json', params) | [
"Get list of note versions.\n\n Parameters:\n updater_id (int):\n post_id (int):\n note_id (int):\n "
]
|
Please provide a description of the function:def user_list(self, name=None, name_matches=None, min_level=None,
max_level=None, level=None, user_id=None, order=None):
params = {
'search[name]': name,
'search[name_matches]': name_matches,
'search[min_level]': min_level,
'search[max_level]': max_level,
'search[level]': level,
'search[id]': user_id,
'search[order]': order
}
return self._get('users.json', params) | [
"Function to get a list of users or a specific user.\n\n Levels:\n Users have a number attribute called level representing their role.\n The current levels are:\n\n Member 20, Gold 30, Platinum 31, Builder 32, Contributor 33,\n Janitor 35, Moderator 40 and Admin 50.\n\n Parameters:\n name (str): Supports patterns.\n name_matches (str): Same functionality as name.\n min_level (int): Minimum level (see section on levels).\n max_level (int): Maximum level (see section on levels).\n level (int): Current level (see section on levels).\n user_id (int): The user id.\n order (str): Can be: 'name', 'post_upload_count', 'note_count',\n 'post_update_count', 'date'.\n "
]
|
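A usage sketch with the placeholder `client`, assuming the standard Danbooru JSON list of user dicts is returned; level 30 corresponds to Gold in the level table above.

gold_users = client.user_list(name_matches='alb*', level=30, order='name')
for user in gold_users:
    print(user['id'], user['name'])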