Code | Summary
---|---|
Please provide a description of the function:def is_async_call(func):
'''inspect.iscoroutinefunction that looks through partials.'''
while isinstance(func, partial):
func = func.func
return inspect.iscoroutinefunction(func) | []
|
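A quick usage sketch of is_async_call, assuming the definition above (which relies on inspect and functools.partial being imported): partial layers are unwrapped before the stdlib coroutine check is applied. The coroutine and its arguments are made up for illustration.
from functools import partial

async def fetch(url, timeout):  # illustrative coroutine
    return url, timeout

wrapped = partial(partial(fetch, 'http://example.org'), timeout=5)
assert is_async_call(wrapped)    # partial layers are looked through
assert not is_async_call(print)  # ordinary callables are rejected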
Please provide a description of the function:def from_string(cls, string, *, default_func=None):
'''Construct a NetAddress from a string and return a (host, port) pair.
If either (or both) is missing and default_func is provided, it is called with
ServicePart.HOST or ServicePart.PORT to get a default.
'''
if not isinstance(string, str):
raise TypeError(f'address must be a string: {string}')
host, port = _split_address(string)
if default_func:
host = host or default_func(ServicePart.HOST)
port = port or default_func(ServicePart.PORT)
if not host or not port:
raise ValueError(f'invalid address string: {string}')
return cls(host, port) | []
|
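A hedged usage sketch for NetAddress.from_string above: when the port is absent from the string, default_func is consulted with ServicePart.PORT. The import path and the fallback port are assumptions for illustration; NetAddress and ServicePart come from the surrounding library.
from aiorpcx import NetAddress, ServicePart  # assumed import path

def default_port(part):
    # supply a fallback only when the port is the missing piece (illustrative value)
    return 50001 if part == ServicePart.PORT else None

addr = NetAddress.from_string('example.org', default_func=default_port)
# host taken from the string, port from the default -> host 'example.org', port 50001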
Please provide a description of the function:def from_string(cls, string, *, default_func=None):
'''Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
'''
if not isinstance(string, str):
raise TypeError(f'service must be a string: {string}')
parts = string.split('://', 1)
if len(parts) == 2:
protocol, address = parts
else:
item, = parts
protocol = None
if default_func:
if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):
protocol, address = item, ''
else:
protocol, address = default_func(None, ServicePart.PROTOCOL), item
if not protocol:
raise ValueError(f'invalid service string: {string}')
if default_func:
default_func = partial(default_func, protocol.lower())
address = NetAddress.from_string(address, default_func=default_func)
return cls(protocol, address) | []
|
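Similarly, a sketch for Service.from_string: when the protocol is omitted, default_func(None, ServicePart.PROTOCOL) supplies one, and the same callable, curried with that protocol, then fills in any missing host or port. The 'ssl'/50002 defaults and the import path are illustrative assumptions.
from aiorpcx import Service, ServicePart  # assumed import path

def defaults(protocol, part):
    if part == ServicePart.PROTOCOL:
        return 'ssl'
    if part == ServicePart.PORT:
        return 50002
    return None

svc = Service.from_string('example.org', default_func=defaults)
# -> protocol 'ssl', host 'example.org', port 50002
svc = Service.from_string('tcp://example.org:50001')
# -> protocol 'tcp', host 'example.org', port 50001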
Please provide a description of the function:def _process_phenotype_hpoa(self, raw, limit):
src_key = 'hpoa'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
filedate = datetime.utcfromtimestamp(
os.stat(raw)[ST_CTIME]).strftime("%Y-%m-%d")
# this will cause two dates to be attached to the dataset
# (one from the filedate, and the other from here)
# TODO when #112 is implemented,
# this will result in only the whole dataset being versioned
col = self.files[src_key]['columns']
with open(raw, 'r', encoding="utf8") as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t', quotechar='\"')
row = next(reader) # drop Description
row = str(next(reader))[9:19]
LOG.info("Ingest from %s", row)
date = datetime.strptime(
row.strip(), '%Y-%m-%d').strftime("%Y-%m-%d-%H-%M")
self.dataset.setVersion(filedate, date)
row = next(reader) # drop tracker url
row = next(reader) # drop release url
row = next(reader) # headers
row[0] = row[0][1:] # uncomment
if col != row:
LOG.info(
'%s\nExpected Headers:\t%s\nReceived Headers:\t%s\n',
src_key, col, row)
LOG.info(set(col) - set(row))
for row in reader:
if row[0][0] == '#' or row[0] == 'DatabaseID': # headers
continue
row = [str(col).strip() for col in row]
disease_id = row[col.index('DatabaseID')]
# 98246 OMIM
# 68646 ORPHA
# 297 DECIPHER
if self.test_mode:
try:
id_list = self.test_ids
if id_list is None or disease_id not in id_list:
continue
except AttributeError:
continue
# row[col.index('DiseaseName')] unused
if row[col.index('Qualifier')] == 'NOT':
continue
pheno_id = row[col.index('HPO_ID')]
publist = row[col.index('Reference')]
eco_id = self.resolve(row[col.index('Evidence')])
onset = row[col.index('Onset')]
freq = row[col.index('Frequency')]
sex = row[col.index('Sex')].lower()
# row[col.index('Modifier')] unused
asp = row[col.index('Aspect')]
# row[col.index('Biocuration')] unused
# LOG.info(
# 'adding <%s>-to-<%s> because <%s>', disease_id, pheno_id, eco_id)
model.addClassToGraph(disease_id)
model.addClassToGraph(pheno_id)
model.addClassToGraph(eco_id)
if onset is not None and onset != '':
model.addClassToGraph(onset)
if asp in ('P', 'M'): # phenotype? abnormality or mortality
assoc = D2PAssoc( # default rel=self.globaltt['has phenotype']
graph, self.name, disease_id, pheno_id,
onset, freq)
elif asp in ('I', 'C'): # inheritance pattern or clinical course/onset
assoc = D2PAssoc(
graph, self.name, disease_id, pheno_id,
rel=self.globaltt['has disposition'])
else:
LOG.error("Unknown aspect : %s at line %i", asp, reader.line_num)
assoc.add_evidence(eco_id)
if sex is not None and sex != '':
self.graph.addTriple(
assoc.get_association_id(),
self.globaltt['has_sex_specificty'],
self.globaltt[sex])
# Publication
# cut -f 5 phenotype.hpoa | grep ";" | tr ';' '\n' | cut -f1 -d ':' |\
# sort | uniq -c | sort -nr
# 629 PMID
# 63 OMIM
# 42 ISBN-13
# 36 http
for pub in publist.split(';'):
pub = pub.strip()
# there have been several malformed PMIDs
if pub[:4] != 'http' and \
graph.curie_regexp.fullmatch(pub) is None:
LOG.warning(
'Record %s has a malformed Reference %s', disease_id, pub)
continue
pubtype = None
if pub[:5] == 'PMID:':
pubtype = self.globaltt['journal article']
elif pub[:4] == 'ISBN':
pubtype = self.globaltt['publication']
elif pub[:5] == 'OMIM:':
pub = 'http://omim.org/entry/' + pub[5:]
pubtype = self.globaltt['web page']
elif pub[:9] == 'DECIPHER:':
pubtype = self.globaltt['web page']
elif pub[:6] == 'ORPHA:':
pubtype = self.globaltt['web page']
elif pub[:4] == 'http':
pubtype = self.globaltt['web page']
else:
LOG.error(
'Unknown pub type for disease %s from "%s"',
disease_id, pub)
continue
if pub is not None:
assoc.add_source(pub)
if pubtype is not None:
ref = Reference(graph, pub, pubtype)
# ref.setTitle(''); ref.setYear()
ref.addRefToGraph()
# TODO add curator
# pprint.pprint(assoc)
assoc.add_association_to_graph()
if not self.test_mode and limit is not None and reader.line_num > limit:
break
return | [
"\n see info on format here:\n http://www.human-phenotype-ontology.org/contao/index.php/annotation-guide.html\n\n :param raw:\n :param limit:\n :return:\n\n "
]
|
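The chain of prefix tests above that assigns pubtype could also be expressed as a small lookup table; this sketch shows that alternative with plain strings standing in for the self.globaltt terms. It is illustrative rather than a drop-in replacement (the OMIM branch above additionally rewrites the curie to an entry URL).
PUBTYPE_BY_PREFIX = (
    ('PMID:', 'journal article'),
    ('ISBN', 'publication'),
    ('OMIM:', 'web page'),
    ('DECIPHER:', 'web page'),
    ('ORPHA:', 'web page'),
    ('http', 'web page'),
)

def pubtype_for(pub):
    for prefix, pubtype in PUBTYPE_BY_PREFIX:
        if pub.startswith(prefix):
            return pubtype
    return None  # unknown reference style; the caller logs it and skips

assert pubtype_for('PMID:17012160') == 'journal article'
assert pubtype_for('ORPHA:93473') == 'web page'
assert pubtype_for('bogus:1') is None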
Please provide a description of the function:def get_common_files(self):
# curl -sLu "username:personal-access-token" \
# GITAPI + "/hpo-annotation-data/tarball/master" > hpoa.tgz
repo_dir = self.rawdir + '/git'
username = CONF['user']['hpoa']
response = requests.get(
GITAPI + '/hpo-annotation-data/tarball/master',
auth=(username, CONF['keys'][username]))
with open(self.rawdir + '/hpoa.tgz', 'wb') as tgz:
tgz.write(response.content)
if os.path.isdir(repo_dir): # scorched-earth approach
shutil.rmtree(repo_dir)
os.mkdir(repo_dir)
with tarfile.open('raw/hpoa/hpoa.tgz', 'r') as tarball:
tarball.extractall(repo_dir)
# TO-DO add this to the dataset object
# hmm ...kay... have git commit-hash in tarball repo name
repo_hash = glob.glob(
str(
'/'.join(
(self.rawdir, 'git', 'monarch-initiative-hpo-annotation-data-*')
)))[-42:]
print(repo_hash)
repo_hash = str(repo_hash)
# (note this makes little sense as it is a private repo)
self.dataset.setFileAccessUrl(
'/'.join((
'https://github.com/monarch-initiative/hpo-annotation-data/tree',
repo_hash)))
return | [
"\n Fetch the hpo-annotation-data\n [repository](https://github.com/monarch-initiative/hpo-annotation-data.git)\n as a tarball\n\n :return:\n\n "
]
|
Please provide a description of the function:def add_common_files_to_file_list(self):
'''
The (several thousands) common-disease files from the repo tarball
are added to the files object.
try adding the 'common-disease-mondo' files as well?
'''
repo_dir = '/'.join((self.rawdir, 'git'))
common_disease_dir = '/'.join((
repo_dir,
'monarch-initiative-hpo-annotation-*', 'common-diseases-mondo/*.tab'))
# add the files to the self.files object
filelist = glob.glob(common_disease_dir)
fcount = 0
for small_file in filelist:
if small_file[-4:] == '.tab':
fcount += 1
self.files[
'common' + str(fcount).zfill(7)] = {
'file': '/'.join((common_disease_dir, small_file)),
}
LOG.info("Found %d common disease files", fcount)
return | []
|
Please provide a description of the function:def process_all_common_disease_files(self, limit=None):
LOG.info("Iterating over all common disease files")
common_file_count = 0
total_processed = 0 # stopgap till we fix common-disease files
unpadded_doids = "" # stopgap till we fix common-disease files
for ingest in self.files:
if ingest[:6] == 'common':
common_file_count += 1
raw = self.files[ingest]['file']
total_processed += self.process_common_disease_file(
raw, unpadded_doids, limit)
if not self.test_mode and limit is not None and total_processed > limit:
break
LOG.info("Finished iterating over all common disease files.")
return | [
"\n Loop through all of the files that we previously fetched from git,\n creating the disease-phenotype association.\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def process_common_disease_file(self, raw, unpadded_doids, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
assoc_count = 0
replace_id_flag = False
col = self.small_files['columns']
with open(raw, 'r', encoding="utf8") as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t', quotechar='\"')
header = next(reader)
if header != col:
LOG.error("HEADER: has changed in %s.", raw)
raise ValueError('expected columns {}, got {}'.format(col, header))
disease_id = None
for row in reader:
row = [str(x).strip() for x in row]
did = row[col.index('Disease ID')]
# genotype = row[col.index('Genotype')]
phenotype_id = row[col.index('Phenotype ID')]
age_of_onset_id = row[col.index('Age of Onset ID')]
eid = row[col.index('Evidence ID')]
frequency = row[col.index('Frequency')]
negation_id = row[col.index('Negation ID')]
description = row[col.index('Description')]
pub_ids = row[col.index('Pub')]
disease_id = re.sub(r'DO(ID)?[-\:](DOID:)?', 'DOID:', did)
disease_id = re.sub(r'MESH-', 'MESH:', disease_id)
if not re.search(r'(DOID\:|MESH\:\w)\d+', disease_id):
LOG.warning("Invalid id format: %s", disease_id)
# figure out if the doid should be unpadded,
# then use the unpadded version instead
if re.match(r'DOID', disease_id):
unpadded_num = re.sub(r'DOID:', '', disease_id)
unpadded_num = unpadded_num.lstrip('0')
if unpadded_num in unpadded_doids:
fixed_id = 'DOID:' + unpadded_num
replace_id_flag = True
disease_id = fixed_id.strip()
if self.test_mode and disease_id not in self.test_ids:
# since these are broken up into disease-by-disease,
# just skip the whole file
return 0
if negation_id != '':
continue # TODO add negative associations
if disease_id != '' and phenotype_id != '':
assoc = D2PAssoc(
graph, self.name, disease_id, phenotype_id.strip())
if age_of_onset_id != '':
assoc.onset = age_of_onset_id
if frequency != '':
assoc.frequency = frequency
eco_id = self.localtt[eid]
if eco_id is None:
eco_id = self.localtt['ITM']
assoc.add_evidence(eco_id)
# TODO add sex? - not in dataset yet
if description != '':
assoc.set_description(description)
if pub_ids != '':
for pub in pub_ids.split(';'):
pub = re.sub(r' *', '', pub) # fixed now but just in case
# there have been several malformed PMIDs curies
if pub[:4] != 'http' and \
graph.curie_regexp.fullmatch(pub) is None:
LOG.warning(
'Record %s has a malformed Pub %s', did, pub)
continue
if re.search(
r'(DOID|MESH)', pub) or re.search(
r'Disease name contained', description):
# skip "pubs" that are derived from
# the classes themselves
continue
assoc.add_source(pub.strip())
# TODO assigned by?
assoc.add_association_to_graph()
assoc_count += 1
if not self.test_mode and limit is not None\
and reader.line_num > limit:
break
if replace_id_flag:
LOG.info("replaced DOID with unpadded version")
self.replaced_id_count += 1
LOG.info(
"Added %d associations for %s.", assoc_count, disease_id)
return assoc_count | [
"\n Make disaese-phenotype associations.\n Some identifiers need clean up:\n * DOIDs are listed as DOID-DOID: --> DOID:\n * DOIDs may be unnecessarily zero-padded.\n these are remapped to their non-padded equivalent.\n\n :param raw:\n :param unpadded_doids:\n :param limit:\n :return:\n\n "
]
|
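A self-contained sketch of the identifier clean-up performed above: DOID-/MESH- prefixes are normalized, and a zero-padded DOID is collapsed to its unpadded form when the unpadded number is known. The sample identifiers and the unpadded set are made up for illustration.
import re

def normalize_disease_id(did, unpadded_doids):
    '''Mirror the clean-up in process_common_disease_file (illustrative).'''
    disease_id = re.sub(r'DO(ID)?[-\:](DOID:)?', 'DOID:', did)
    disease_id = re.sub(r'MESH-', 'MESH:', disease_id)
    if disease_id.startswith('DOID'):
        unpadded_num = disease_id.replace('DOID:', '').lstrip('0')
        if unpadded_num in unpadded_doids:
            disease_id = 'DOID:' + unpadded_num
    return disease_id

assert normalize_disease_id('DOID-DOID:0001816', {'1816'}) == 'DOID:1816'
assert normalize_disease_id('DO-0050117', set()) == 'DOID:0050117'
assert normalize_disease_id('MESH-D003924', set()) == 'MESH:D003924'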
Please provide a description of the function:def replace(oldstr, newstr, infile, dryrun=False):
linelist = []
with open(infile) as reader:
for item in reader:
newitem = re.sub(oldstr, newstr, item)
linelist.append(newitem)
if dryrun is False:
with open(infile, "w") as writer:
writer.truncate()
for line in linelist:
writer.writelines(line)
elif dryrun is True:
for line in linelist:
print(line, end='')
else:
exit() | [
"\n Sed-like Replace function..\n Usage: pysed.replace(<Old string>, <Replacement String>, <Text File>)\n Example: pysed.replace('xyz', 'XYZ', '/path/to/file.txt')\n\n This will dump the output to STDOUT instead of changing the input file.\n Example 'DRYRUN':\n pysed.replace('xyz', 'XYZ', '/path/to/file.txt', dryrun=True)\n\n ",
"Unknown option specified to 'dryrun' argument,\n Usage: dryrun=<True|False>."
]
|
Please provide a description of the function:def rmlinematch(oldstr, infile, dryrun=False):
linelist = []
with open(infile) as reader:
for item in reader:
rmitem = re.match(r'.*{}'.format(oldstr), item)
# if isinstance(rmitem) == isinstance(None): Not quite sure the intent here
if rmitem is None:
linelist.append(item)
if dryrun is False:
with open(infile, "w") as writer:
writer.truncate()
for line in linelist:
writer.writelines(line)
elif dryrun is True:
for line in linelist:
print(line, end='')
else:
exit() | [
"\n Sed-like line deletion function based on given string..\n Usage: pysed.rmlinematch(<Unwanted string>, <Text File>)\n Example: pysed.rmlinematch('xyz', '/path/to/file.txt')\n Example:\n 'DRYRUN': pysed.rmlinematch('xyz', '/path/to/file.txt', dryrun=True)\n This will dump the output to STDOUT instead of changing the input file.\n\n ",
"Unknown option specified to 'dryrun' argument,\n Usage: dryrun=<True|False>."
]
|
Please provide a description of the function:def rmlinenumber(linenumber, infile, dryrun=False):
linelist = []
linecounter = 0
if not isinstance(linenumber, int):
exit()
with open(infile) as reader:
for item in reader:
linecounter = linecounter + 1
if linecounter != linenumber:
linelist.append(item)
if dryrun is False:
with open(infile, "w") as writer:
writer.truncate()
for line in linelist:
writer.writelines(line)
elif dryrun is True:
for line in linelist:
print(line, end='')
else:
exit() | [
"\n Sed-like line deletion function based on given line number..\n Usage: pysed.rmlinenumber(<Unwanted Line Number>, <Text File>)\n Example: pysed.rmlinenumber(10, '/path/to/file.txt')\n Example 'DRYRUN': pysed.rmlinenumber(10, '/path/to/file.txt', dryrun=True)\n #This will dump the output to STDOUT instead of changing the input file.\n\n ",
"'linenumber' argument must be an integer.",
"Unknown option specified to 'dryrun' argument,\n Usage: dryrun=<True|False>."
]
|
Please provide a description of the function:def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
ncbi = NCBIGene(self.graph_type, self.are_bnodes_skized)
# ncbi.fetch()
gene_group = ncbi.files['gene_group']
self.fetch_from_url(
gene_group['url'], '/'.join((ncbi.rawdir, gene_group['file'])), False)
# load and tag a list of OMIM IDs with types
# side effect of populating omim replaced
self.omim_type = self.find_omim_type()
return | [
"\n :param is_dl_forced:\n :return:\n "
]
|
Please provide a description of the function:def scrub(self):
LOG.info("Scrubbing out the nasty characters that break our parser.")
myfile = '/'.join((self.rawdir, self.files['data']['file']))
tmpfile = '/'.join((self.rawdir, self.files['data']['file']+'.tmp.gz'))
tmp = gzip.open(tmpfile, 'wb')
du = DipperUtil()
with gzip.open(myfile, 'rb') as fh:
filereader = io.TextIOWrapper(fh, newline="")
for line in filereader:
line = du.remove_control_characters(line) + '\n'
tmp.write(line.encode('utf-8'))
tmp.close()
# TEC I do not like this at all. original data must be preserved as is.
# also may be heavy handed as chars which do not break the parser
# are stripped as well (i.e. tabs and newlines)
# move the temp file
LOG.info("Replacing the original data with the scrubbed file.")
shutil.move(tmpfile, myfile)
return | [
"\n The XML file seems to have mixed-encoding;\n we scrub out the control characters\n from the file for processing.\n\n i.e.?i\n omia.xml:1555328.28: PCDATA invalid Char value 2\n <field name=\"journal\">Bulletin et M\u0002emoires de la Soci\u0002et\u0002e Centrale de M\u0002edic\n\n :return:\n\n "
]
|
Please provide a description of the function:def find_omim_type(self):
'''
This f(x) needs to be rehomed and shared.
Use OMIM's description of their identifiers
to heuristically partition them into genes | phenotypes-diseases
type could be
- `obsolete` Check `omim_replaced` populated as side effect
- 'Suspected' (phenotype) Ignoring thus far
- 'gene'
- 'Phenotype'
- 'heritable_phenotypic_marker' Probable phenotype
- 'has_affected_feature' Use as both a gene and a phenotype
:return hash of omim_number to ontology_curie
'''
src_key = 'mimtitles'
myfile = '/'.join((self.rawdir, self.files[src_key]['file']))
# col = self.files[src_key]['columns']
omim_type = {}
with open(myfile, 'r') as filereader:
reader = csv.reader(filereader, delimiter='\t')
# todo header check
for row in reader:
if row[0][0] == '#': # skip comments
continue
elif row[0] == 'Caret': # moved|removed|split -> moved twice
# populating a dict from an omim to a set of omims
# here as a side effect which is less than ideal
(prefix, omim_id, destination, empty, empty) = row
omim_type[omim_id] = self.globaltt['obsolete']
if row[2][:9] == 'MOVED TO ':
token = row[2].split(' ')
rep = token[2]
if not re.match(r'^[0-9]{6}$', rep):
LOG.error('Report malformed omim replacement %s', rep)
# clean up one I know about
if rep[0] == '{' and rep[7] == '}':
rep = rep[1:6]
LOG.info('cleaned up %s', rep)
if len(rep) == 7 and rep[6] == ',':
rep = rep[:5]
LOG.info('cleaned up %s', rep)
# assuming splits are typically to both gene & phenotype
if len(token) > 3:
self.omim_replaced[omim_id] = {rep, token[4]}
else:
self.omim_replaced[omim_id] = {rep}
elif row[0] == 'Asterisk': # declared as gene
(prefix, omim_id, pref_label, alt_label, inc_label) = row
omim_type[omim_id] = self.globaltt['gene']
elif row[0] == 'NULL':
# potential model of disease?
(prefix, omim_id, pref_label, alt_label, inc_label) = row
#
omim_type[omim_id] = self.globaltt['Suspected'] # NCIT:C71458
elif row[0] == 'Number Sign':
(prefix, omim_id, pref_label, alt_label, inc_label) = row
omim_type[omim_id] = self.globaltt['Phenotype']
elif row[0] == 'Percent':
(prefix, omim_id, pref_label, alt_label, inc_label) = row
omim_type[omim_id] = self.globaltt['heritable_phenotypic_marker']
elif row[0] == 'Plus':
(prefix, omim_id, pref_label, alt_label, inc_label) = row
# to be interperted as a gene and/or a phenotype
omim_type[omim_id] = self.globaltt['has_affected_feature']
else:
LOG.error('Unknown OMIM type at line %s', reader.line_num)
return omim_type | []
|
Please provide a description of the function:def process_species(self, limit):
myfile = '/'.join((self.rawdir, self.files['data']['file']))
fh = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(fh, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
# Species ids are == NCBITaxon ids
self.process_xml_table(
elem, 'Species_gb', self._process_species_table_row, limit)
fh.close()
return | [
"\n Loop through the xml file and process the species.\n We add elements to the graph, and store the\n id-to-label in the label_hash dict.\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def process_classes(self, limit):
myfile = '/'.join((self.rawdir, self.files['data']['file']))
fh = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(fh, newline="")
filereader.readline() # remove the xml declaration line
# iterparse is not deprecated
for event, elem in ET.iterparse(filereader):
self.process_xml_table(elem, 'Articles', self._process_article_row, limit)
self.process_xml_table(elem, 'Breed', self._process_breed_row, limit)
self.process_xml_table(elem, 'Genes_gb', self._process_gene_row, limit)
self.process_xml_table(
elem, 'OMIA_Group', self._process_omia_group_row, limit)
self.process_xml_table(elem, 'Phene', self._process_phene_row, limit)
self.process_xml_table(
elem, 'Omim_Xref', self._process_omia_omim_map, limit)
fh.close()
# post-process the omia-omim associations to filter out the genes
# (keep only phenotypes/diseases)
self.clean_up_omim_genes()
return | [
"\n After all species have been processed .\n Loop through the xml file and process the articles,\n breed, genes, phenes, and phenotype-grouping classes.\n We add elements to the graph,\n and store the id-to-label in the label_hash dict,\n along with the internal key-to-external id in the id_hash dict.\n The latter are referenced in the association processing functions.\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def process_associations(self, limit):
myfile = '/'.join((self.rawdir, self.files['data']['file']))
f = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(f, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
self.process_xml_table(
elem, 'Article_Breed', self._process_article_breed_row, limit)
self.process_xml_table(
elem, 'Article_Phene', self._process_article_phene_row, limit)
self.process_xml_table(
elem, 'Breed_Phene', self._process_breed_phene_row, limit)
self.process_xml_table(
elem, 'Lida_Links', self._process_lida_links_row, limit)
self.process_xml_table(
elem, 'Phene_Gene', self._process_phene_gene_row, limit)
self.process_xml_table(
elem, 'Group_MPO', self._process_group_mpo_row, limit)
f.close()
return | [
"\n Loop through the xml file and process the article-breed, article-phene,\n breed-phene, phene-gene associations, and the external links to LIDA.\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_article_phene_row(self, row):
# article_id, phene_id, added_by
# look up the article in the hashmap
phenotype_id = self.id_hash['phene'].get(row['phene_id'])
article_id = self.id_hash['article'].get(row['article_id'])
omia_id = self._get_omia_id_from_phene_id(phenotype_id)
if (self.test_mode and omia_id not in self.test_ids['disease']) \
or phenotype_id is None or article_id is None:
return
# make a triple, where the article is about the phenotype
self.graph.addTriple(
article_id,
self.globaltt['is_about'], phenotype_id)
return | [
"\n Linking articles to species-specific phenes.\n\n :param row:\n :return:\n "
]
|
Please provide a description of the function:def _process_omia_omim_map(self, row):
# omia_id, omim_id, added_by
model = Model(self.graph)
omia_id = 'OMIA:' + row['omia_id']
omim_id = 'OMIM:' + row['omim_id']
# also store this for use when we say that a given animal is
# a model of a disease
if omia_id not in self.omia_omim_map:
self.omia_omim_map[omia_id] = set()
self.omia_omim_map[omia_id].add(omim_id)
if self.test_mode and omia_id not in self.test_ids['disease']:
return
model.addXref(omia_id, omim_id)
return | [
"\n Links OMIA groups to OMIM equivalents.\n :param row:\n :return:\n "
]
|
Please provide a description of the function:def _process_group_mpo_row(self, row):
omia_id = 'OMIA:' + row['omia_id']
mpo_num = int(row['MPO_no'])
mpo_id = 'MP:' + str(mpo_num).zfill(7)
assoc = D2PAssoc(self.graph, self.name, omia_id, mpo_id)
assoc.add_association_to_graph()
return | [
"\n Make OMIA to MP associations\n :param row:\n :return:\n "
]
|
Please provide a description of the function:def filter_keep_phenotype_entry_ids(self, entry):
'''
doubt this should be kept
'''
omim_id = str(entry['mimNumber'])
otype = self.globaltt['obsolete']
if omim_id in self.omim_type:
otype = self.omim_type[omim_id]
if otype == self.globaltt['obsolete'] and omim_id in self.omim_replaced:
omim_id = self.omim_replaced[omim_id]
otype = self.omim_type[omim_id]
# else: # removed or multiple
if otype not in (
self.globaltt['Phenotype'], self.globaltt['has_affected_feature']):
omim_id = None
return omim_id | []
|
Please provide a description of the function:def clean_up_omim_genes(self):
'''
Attempt to limit omim links to diseases and not genes/locus
'''
# get all the omim ids
allomim_curie = set()
for omia in self.omia_omim_map:
allomim_curie.update(self.omia_omim_map[omia])
# strip the curie prefix
allomimids = set([o.split(':')[-1] for o in allomim_curie])
LOG.info("Have %i omim_ids before filtering", len(allomimids))
LOG.info("Exists %i omim_ids replaceable", len(self.omim_replaced))
if len(self.omim_replaced) > 0:
LOG.info(
"Sample of each (all & replace) look like: %s , %s",
list(allomimids)[0],
list(self.omim_replaced.keys())[0])
# deal with replaced identifiers
replaced = allomimids & self.omim_replaced.keys()
if replaced is not None and len(replaced) > 0:
LOG.warning("These OMIM ID's are past their pull date: %s", str(replaced))
for oid in replaced:
allomimids.remove(oid)
replacements = self.omim_replaced[oid]
for rep in replacements:
allomimids.add(rep)
# guard against omim identifiers which have been removed
obsolete = [
o for o in self.omim_type
if self.omim_type[o] == self.globaltt['obsolete']]
removed = allomimids & set(obsolete)
if removed is not None and len(removed) > 0:
LOG.warning("These OMIM ID's are gone: %s", str(removed))
for oid in removed:
allomimids.remove(oid)
# get a list of omim ids which we consider to be for disease / phenotype
omim_phenotypes = set([
omim for omim in self.omim_type if self.omim_type[omim] in (
self.globaltt['Phenotype'],
self.globaltt['has_affected_feature'],
self.globaltt['heritable_phenotypic_marker'])])
LOG.info(
"Have %i omim_ids globally typed as phenotypes from OMIM",
len(omim_phenotypes))
entries_that_are_phenotypes = allomimids & omim_phenotypes
LOG.info(
"Filtered out %d/%d entries that are genes or features",
len(allomimids - entries_that_are_phenotypes), len(allomimids))
# now iterate again and remove those non-phenotype ids
# this could be redone with set operations
removed_count = 0
for omia in self.omia_omim_map:
cleanids = set()
for dirty_curie in self.omia_omim_map[omia]:
dirty_num = dirty_curie.split(':')[-1]
if dirty_num in entries_that_are_phenotypes:
cleanids.add(dirty_curie)
else:
removed_count += 1 # keep track of how many we've removed
self.omia_omim_map[omia] = cleanids
LOG.info("Removed %d omim ids from the omia-to-omim map", removed_count)
return | []
|
Please provide a description of the function:def make_spo(sub, prd, obj):
'''
Decorates the three given strings as a line of ntriples
'''
# To establish string as a curie and expand,
# we use a global curie_map(.yaml)
# sub is always a uri (unless a bnode)
# prd is always a uri (unless prd is 'a')
# should fail loudly if curie does not exist
if prd == 'a':
prd = 'rdf:type'
try:
(subcuri, subid) = re.split(r':', sub)
except Exception:
LOG.error("not a Subject Curie '%s'", sub)
raise ValueError
try:
(prdcuri, prdid) = re.split(r':', prd)
except Exception:
LOG.error("not a Predicate Curie '%s'", prd)
raise ValueError
objt = ''
# object is a curie or bnode or literal [string|number]
objcuri = None
match = re.match(CURIERE, obj)
if match is not None:
try:
(objcuri, objid) = re.split(r':', obj)
except ValueError:
match = None
if match is not None and objcuri in CURIEMAP:
objt = CURIEMAP[objcuri] + objid.strip()
# allow unexpanded bnodes in object
if objcuri != '_' or CURIEMAP[objcuri] != '_:b':
objt = '<' + objt + '>'
elif obj.isnumeric():
objt = '"' + obj + '"'
else:
# Literals may not contain the characters ", LF, CR '\'
# except in their escaped forms. internal quotes as well.
obj = obj.strip('"').replace('\\', '\\\\').replace('"', '\'')
obj = obj.replace('\n', '\\n').replace('\r', '\\r')
objt = '"' + obj + '"'
# allow unexpanded bnodes in subject
if subcuri is not None and subcuri in CURIEMAP and \
prdcuri is not None and prdcuri in CURIEMAP:
subjt = CURIEMAP[subcuri] + subid.strip()
if subcuri != '_' or CURIEMAP[subcuri] != '_:b':
subjt = '<' + subjt + '>'
return subjt + ' <' + CURIEMAP[prdcuri] + prdid.strip() + '> ' + objt + ' .'
else:
LOG.error(
'Cant work with: <%s> %s , <%s> %s, %s',
subcuri, subid, prdcuri, prdid, objt)
return None | []
|
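A minimal, self-contained sketch of the curie expansion that make_spo performs, using a made-up three-entry map in place of the real CURIEMAP loaded from curie_map.yaml; literal objects are quoted and escaped rather than angle-bracketed, as in the function above.
CURIE_DEMO = {  # stand-in for the real CURIEMAP
    'ClinVarVariant': 'http://www.ncbi.nlm.nih.gov/clinvar/variation/',
    'GENO': 'http://purl.obolibrary.org/obo/GENO_',
    'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
}

def expand(curie):
    prefix, local = curie.split(':', 1)
    return '<' + CURIE_DEMO[prefix] + local + '>'

# make_spo('ClinVarVariant:123', 'a', 'GENO:0000840') produces a line of this shape:
triple = ' '.join(
    (expand('ClinVarVariant:123'), expand('rdf:type'), expand('GENO:0000840'))) + ' .'
# a literal object would instead be rendered as an escaped, double-quoted string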
Please provide a description of the function:def write_spo(sub, prd, obj):
'''
write triples to a buffer incase we decide to drop them
'''
rcvtriples.append(make_spo(sub, prd, obj)) | []
|
Please provide a description of the function:def scv_link(scv_sig, rcv_trip):
'''
Creates links between SCVs based on their pathogenicity/significance calls
# GENO:0000840 - GENO:0000840 --> is_equivalent_to SEPIO:0000098
# GENO:0000841 - GENO:0000841 --> is_equivalent_to SEPIO:0000098
# GENO:0000843 - GENO:0000843 --> is_equivalent_to SEPIO:0000098
# GENO:0000844 - GENO:0000844 --> is_equivalent_to SEPIO:0000098
# GENO:0000840 - GENO:0000844 --> contradicts SEPIO:0000101
# GENO:0000841 - GENO:0000844 --> contradicts SEPIO:0000101
# GENO:0000841 - GENO:0000843 --> contradicts SEPIO:0000101
# GENO:0000840 - GENO:0000841 --> is_consistent_with SEPIO:0000099
# GENO:0000843 - GENO:0000844 --> is_consistent_with SEPIO:0000099
# GENO:0000840 - GENO:0000843 --> strongly_contradicts SEPIO:0000100
'''
sig = { # 'arbitrary scoring scheme increments as powers of two'
'GENO:0000840': 1, # pathogenic
'GENO:0000841': 2, # likely pathogenic
'GENO:0000844': 4, # likely benign
'GENO:0000843': 8, # benign
'GENO:0000845': 16, # uncertain significance
}
lnk = { # specific result from diff in 'arbitrary scoring scheme'
0: 'SEPIO:0000098', # is_equivalent_to
1: 'SEPIO:0000099', # is_consistent_with
2: 'SEPIO:0000101', # contradicts
3: 'SEPIO:0000101', # contradicts
4: 'SEPIO:0000099', # is_consistent_with
6: 'SEPIO:0000101', # contradicts
7: 'SEPIO:0000100', # strongly_contradicts
8: 'SEPIO:0000126', # is_inconsistent_with
12: 'SEPIO:0000126',
14: 'SEPIO:0000126',
15: 'SEPIO:0000126',
}
keys = sorted(scv_sig.keys())
for scv_a in keys:
scv_av = scv_sig.pop(scv_a)
for scv_b in scv_sig.keys():
link = lnk[abs(sig[scv_av] - sig[scv_sig[scv_b]])]
rcv_trip.append(make_spo(scv_a, link, scv_b))
rcv_trip.append(make_spo(scv_b, link, scv_a))
return | []
|
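A short worked example of the scoring arithmetic above: each significance call maps to a power of two, and the absolute difference between two calls selects the linking predicate. The pairs below are illustrative and use the same sig/lnk tables as the function.
sig = {'GENO:0000840': 1, 'GENO:0000841': 2, 'GENO:0000844': 4,
       'GENO:0000843': 8, 'GENO:0000845': 16}
lnk = {0: 'SEPIO:0000098', 1: 'SEPIO:0000099', 2: 'SEPIO:0000101',
       3: 'SEPIO:0000101', 4: 'SEPIO:0000099', 6: 'SEPIO:0000101',
       7: 'SEPIO:0000100', 8: 'SEPIO:0000126', 12: 'SEPIO:0000126',
       14: 'SEPIO:0000126', 15: 'SEPIO:0000126'}

# pathogenic (1) vs likely pathogenic (2): |1 - 2| = 1 -> is_consistent_with
assert lnk[abs(sig['GENO:0000840'] - sig['GENO:0000841'])] == 'SEPIO:0000099'
# pathogenic (1) vs benign (8): |1 - 8| = 7 -> strongly_contradicts
assert lnk[abs(sig['GENO:0000840'] - sig['GENO:0000843'])] == 'SEPIO:0000100'
# likely benign (4) vs uncertain significance (16): |4 - 16| = 12 -> is_inconsistent_with
assert lnk[abs(sig['GENO:0000844'] - sig['GENO:0000845'])] == 'SEPIO:0000126'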
Please provide a description of the function:def resolve(label):
'''
composite mapping
given f(x) and g(x) here: LOCALTT & GLOBALTT respectively
in order of preference
return g(f(x))|f(x)|g(x) | x
TODO consider returning x on fall through
: return label's mapping
'''
term_id = label
if label is not None and label in LOCALTT:
term_id = LOCALTT[label]
if term_id in GLOBALTT:
term_id = GLOBALTT[term_id]
else:
LOG.warning(
'Local translation but do not have a global term_id for %s', label)
elif label is not None and label in GLOBALTT:
term_id = GLOBALTT[label]
else:
LOG.error('Do not have any mapping for label: ' + label)
return term_id | []
|
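A minimal sketch of the composite lookup that resolve performs, with toy LOCALTT and GLOBALTT tables standing in for the real translation tables; the labels and curies are only illustrative.
LOCALTT = {'IEA': 'evidence used in automatic assertion'}   # f(x): source-local label -> global label
GLOBALTT = {'evidence used in automatic assertion': 'ECO:0000501',
            'journal article': 'IAO:0000013'}               # g(x): global label -> term curie

def resolve_demo(label):
    term_id = label
    if label in LOCALTT:
        term_id = LOCALTT[label]
        if term_id in GLOBALTT:
            term_id = GLOBALTT[term_id]          # g(f(x))
    elif label in GLOBALTT:
        term_id = GLOBALTT[label]                # g(x)
    return term_id                               # x on fall-through

assert resolve_demo('IEA') == 'ECO:0000501'              # local label routed through both tables
assert resolve_demo('journal article') == 'IAO:0000013'  # global label resolved directly
assert resolve_demo('no such label') == 'no such label'  # falls through unchanged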
Please provide a description of the function:def _process_ddg2p_annotations(self, limit):
line_counter = 0
graph = self.graph
# in order for this to work, we need to map the HGNC id-symbol;
hgnc = HGNC(self.graph_type, self.are_bnodes_skolemized)
hgnc_symbol_id_map = hgnc.get_symbol_id_map()
myzip = ZipFile(
'/'.join((self.rawdir, self.files['annot']['file'])), 'r')
# use the ddg2p.txt file
fname = 'ddg2p.txt'
unmapped_omim_counter = 0
unmapped_gene_count = 0
with myzip.open(fname, 'r') as f:
f = io.TextIOWrapper(f)
reader = csv.reader(f, delimiter='\t', quotechar='\"')
# score_means_by_measure = {}
# strain_scores_by_measure = {} # TODO these are unused
for row in reader:
line_counter += 1
if re.match(r'#', row[0]): # skip comments
continue
(gencode_gene_name, mode, category, consequence, disease, omim,
ddg2p_id, pubmed_ids, hpo_codes) = row
hgnc_id = hgnc_symbol_id_map.get(gencode_gene_name.strip())
if hgnc_id is None:
LOG.error(
"Couldn't map the gene symbol %s to HGNC.",
gencode_gene_name)
unmapped_gene_count += 1
continue
# add the gene
self.model.addClassToGraph(hgnc_id, gencode_gene_name)
# TODO make VSLC with the variation
# to associate with the disorder
# TODO use the Inheritance and Mutation consequence
# to classify the VSLCs
allele_id = self.make_allele_by_consequence(
consequence, hgnc_id, gencode_gene_name)
if omim.strip() != '':
omim_id = 'OMIM:'+str(omim.strip())
# assume this is declared elsewhere in ontology
self.model.addClassToGraph(omim_id, None)
# ??? rel is never used
# if category.strip() == 'Confirmed DD gene':
# rel = self.self.globaltt['has phenotype']
# elif category.strip() == 'Probable DD gene':
# rel = self.self.globaltt['has phenotype']
# elif category.strip() == 'Possible DD gene':
# rel = self.self.globaltt['contributes to']
# elif category.strip() == 'Not DD gene':
# # TODO negative annotation
# continue
assoc = G2PAssoc(graph, self.name, allele_id, omim_id)
# TODO 'rel' is assigned to but never used
for p in re.split(r';', pubmed_ids):
p = p.strip()
if p != '':
pmid = 'PMID:' + str(p)
r = Reference(
graph, pmid, self.globaltt['journal article'])
r.addRefToGraph()
assoc.add_source(pmid)
assoc.add_association_to_graph()
else:
# these are unmapped to a disease id.
# note that some match OMIM disease labels
# but the identifiers are just not included.
# TODO consider mapping to OMIM or DOIDs in other ways
LOG.warning(
"No omim id on line %d\n%s", line_counter, str(row))
unmapped_omim_counter += 1
# TODO hpo phenotypes
# since the DDG2P file is not documented,
# I don't know what the HPO annotations are actually about
# are they about the gene? the omim disease? something else?
# So, we wont create associations until this is clarified
if not self.test_mode and limit is not None and line_counter > limit:
break
myzip.close()
LOG.warning(
"gene-disorder associations with no omim id: %d",
unmapped_omim_counter)
LOG.warning("unmapped gene count: %d", unmapped_gene_count)
return | [
"\n The ddg2p annotations associate a gene symbol to an omim disease,\n along with some HPO ids and pubs. The gene symbols come from gencode,\n which in turn come from HGNC official gene symbols. Therefore,\n we use the HGNC source class to get the id/symbol mapping for\n use in our annotations here.\n\n According to http://www.gencodegenes.org/faq.html,\n \"Gene names are usually HGNC or MGI-approved gene symbols mapped\n to the GENCODE genes by the Ensembl xref pipeline. Sometimes,\n when there is no official gene symbol, the Havana clone-based\n name is used.\"\n\n The kind of variation that is linked to a disease is indicated\n (LOF, GOF, CNV, etc) in the source data.\n Here, we create an anonymous variant of the specified gene of\n the indicated type (mapped to the sequence ontology (SO)).\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def make_allele_by_consequence(self, consequence, gene_id, gene_symbol):
allele_id = None
# Loss of function : Nonsense, frame-shifting indel,
# essential splice site mutation, whole gene deletion or any other
# mutation where functional analysis demonstrates clear reduction
# or loss of function
# All missense/in frame : Where all the mutations described in the data
# source are either missense or in frame deletions and there is no
# evidence favoring either loss-of-function, activating or
# dominant negative effect
# Dominant negative : Mutation within one allele of a gene that creates
# a significantly greater deleterious effect on gene product
# function than a monoallelic loss of function mutation
# Activating : Mutation, usually missense that results in
# a constitutive functional activation of the gene product
# Increased gene dosage : Copy number variation that increases
# the functional dosage of the gene
# Cis-regulatory or promotor mutation : Mutation in cis-regulatory
# elements that lies outwith the known transcription unit and
# promotor of the controlled gene
# Uncertain : Where the exact nature of the mutation is unclear or
# not recorded
type_id = self.resolve(consequence, mandatory=False)
if type_id == consequence:
LOG.warning("Consequence type unmapped: %s", str(consequence))
type_id = self.globaltt['sequence_variant']
# make the allele
allele_id = ''.join((gene_id, type_id))
allele_id = re.sub(r':', '', allele_id)
allele_id = '_:'+allele_id # make this a BNode
allele_label = ' '.join((consequence, 'allele in', gene_symbol))
self.model.addIndividualToGraph(allele_id, allele_label, type_id)
self.geno.addAlleleOfGene(allele_id, gene_id)
return allele_id | [
"\n Given a \"consequence\" label that describes a variation type,\n create an anonymous variant of the specified gene as an instance of\n that consequence type.\n\n :param consequence:\n :param gene_id:\n :param gene_symbol:\n :return: allele_id\n "
]
|
Please provide a description of the function:def parse(self, limit: Optional[int]=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
file_path = '/'.join((
self.rawdir, self.files['developmental_disorders']['file']))
with gzip.open(file_path, 'rt') as csvfile:
reader = csv.reader(csvfile)
next(reader) # header
for row in reader:
if limit is None or reader.line_num <= (limit + 1):
self._add_gene_disease(row)
else:
break
LOG.info("Done parsing.") | [
"\n Here we parse each row of the gene to phenotype file\n\n We create anonymous variants along with their attributes\n (allelic requirement, functional consequence)\n and connect these to genes and diseases\n\n genes are connected to variants via\n global_terms['has_affected_locus']\n\n variants are connected to attributes via:\n global_terms['has_allelic_requirement']\n global_terms['has_functional_consequence']\n\n variants are connected to disease based on\n mappings to the DDD category column,\n see the translationtable specific to this source\n for mappings\n\n For cases where there are no disease OMIM id,\n we either use a disease cache file with mappings\n to MONDO that has been manually curated\n\n :param limit: {int} number of rows to parse\n :return: None\n "
]
|
Please provide a description of the function:def _add_gene_disease(self, row): # ::List getting syntax error here
col = self.files['developmental_disorders']['columns']
if len(row) != len(col):
raise ValueError("Unexpected number of fields for row {}".format(row))
variant_label = "variant of {}".format(row[col.index('gene_symbol')])
disease_omim_id = row[col.index('disease_omim_id')]
if disease_omim_id == 'No disease mim':
# check if we've manually curated
disease_label = row[col.index('disease_label')]
if disease_label in self.mondo_map:
disease_id = self.mondo_map[disease_label]
else:
return # sorry for this
else:
disease_id = 'OMIM:' + disease_omim_id
hgnc_curie = 'HGNC:' + row[col.index('hgnc_id')]
relation_curie = self.resolve(row[col.index('g2p_relation_label')])
mutation_consequence = row[col.index('mutation_consequence')]
if mutation_consequence not in ('uncertain', ''):
consequence_relation = self.resolve(
self._get_consequence_predicate(mutation_consequence))
consequence_curie = self.resolve(mutation_consequence)
variant_label = "{} {}".format(mutation_consequence, variant_label)
else:
consequence_relation = None
consequence_curie = None
allelic_requirement = row[col.index('allelic_requirement')]
if allelic_requirement != '':
requirement_curie = self.resolve(allelic_requirement)
else:
requirement_curie = None
pmids = row[col.index('pmids')]
if pmids != '':
pmid_list = ['PMID:' + pmid for pmid in pmids.split(';')]
else:
pmid_list = []
# build the model
# Should we build a reusable object and/or tuple that
# could be passed to a more general model builder for
# this and orphanet (and maybe clinvar)
self._build_gene_disease_model(
hgnc_curie,
relation_curie,
disease_id,
variant_label,
consequence_relation,
consequence_curie,
requirement_curie,
pmid_list
) | [
"\n Parse and add gene variant disease model\n Model building happens in _build_gene_disease_model\n\n :param row {List}: single row from DDG2P.csv\n :return: None\n "
]
|
Please provide a description of the function:def _build_gene_disease_model(
self,
gene_id,
relation_id,
disease_id,
variant_label,
consequence_predicate=None,
consequence_id=None,
allelic_requirement=None,
pmids=None):
model = Model(self.graph)
geno = Genotype(self.graph)
pmids = [] if pmids is None else pmids
is_variant = False
variant_or_gene = gene_id
variant_id_string = variant_label
variant_bnode = self.make_id(variant_id_string, "_")
if consequence_predicate is not None \
and consequence_id is not None:
is_variant = True
model.addTriple(variant_bnode,
consequence_predicate,
consequence_id)
# Hack to add labels to terms that
# don't exist in an ontology
if consequence_id.startswith(':'):
model.addLabel(consequence_id,
consequence_id.strip(':').replace('_', ' '))
if is_variant:
variant_or_gene = variant_bnode
# Typically we would type the variant using the
# molecular consequence, but these are not specific
# enough for us to make mappings (see translation table)
model.addIndividualToGraph(variant_bnode,
variant_label,
self.globaltt['variant_locus'])
geno.addAffectedLocus(variant_bnode, gene_id)
model.addBlankNodeAnnotation(variant_bnode)
assoc = G2PAssoc(
self.graph, self.name, variant_or_gene, disease_id, relation_id)
assoc.source = pmids
assoc.add_association_to_graph()
if allelic_requirement is not None and is_variant is False:
model.addTriple(
assoc.assoc_id, self.globaltt['has_allelic_requirement'],
allelic_requirement)
if allelic_requirement.startswith(':'):
model.addLabel(
allelic_requirement,
allelic_requirement.strip(':').replace('_', ' ')) | [
"\n Builds gene variant disease model\n\n :return: None\n "
]
|
Please provide a description of the function:def _process_qtls_genetic_location(
self, raw, txid, common_name, limit=None):
aql_curie = self.files[common_name + '_cm']['curie']
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
geno = Genotype(graph)
model = Model(graph)
eco_id = self.globaltt['quantitative trait analysis evidence']
taxon_curie = 'NCBITaxon:' + txid
LOG.info("Processing genetic location for %s from %s", taxon_curie, raw)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(qtl_id,
qtl_symbol,
trait_name,
assotype,
empty,
chromosome,
position_cm,
range_cm,
flankmark_a2,
flankmark_a1,
peak_mark,
flankmark_b1,
flankmark_b2,
exp_id,
model_id,
test_base,
sig_level,
lod_score,
ls_mean,
p_values,
f_statistics,
variance,
bayes_value,
likelihood_ratio,
trait_id, dom_effect,
add_effect,
pubmed_id,
gene_id,
gene_id_src,
gene_id_type,
empty2) = row
if self.test_mode and int(qtl_id) not in self.test_ids:
continue
qtl_id = common_name + 'QTL:' + qtl_id.strip()
trait_id = ':'.join((aql_curie, trait_id.strip()))
# Add QTL to graph
feature = Feature(graph, qtl_id, qtl_symbol, self.globaltt['QTL'])
feature.addTaxonToFeature(taxon_curie)
# deal with the chromosome
chrom_id = makeChromID(chromosome, taxon_curie, 'CHR')
# add a version of the chromosome which is defined as
# the genetic map
build_id = 'MONARCH:'+common_name.strip()+'-linkage'
build_label = common_name+' genetic map'
geno.addReferenceGenome(build_id, build_label, taxon_curie)
chrom_in_build_id = makeChromID(chromosome, build_id, 'MONARCH')
geno.addChromosomeInstance(
chromosome, build_id, build_label, chrom_id)
start = stop = None
# range_cm sometimes ends in "(Mb)" (i.e pig 2016 Nov)
range_mb = re.split(r'\(', range_cm)
if range_mb is not None:
range_cm = range_mb[0]
if re.search(r'[0-9].*-.*[0-9]', range_cm):
range_parts = re.split(r'-', range_cm)
# check for poorly formed ranges
if len(range_parts) == 2 and\
range_parts[0] != '' and range_parts[1] != '':
(start, stop) = [
int(float(x.strip())) for x in re.split(r'-', range_cm)]
else:
LOG.info(
"A cM range we can't handle for QTL %s: %s",
qtl_id, range_cm)
elif position_cm != '':
match = re.match(r'([0-9]*\.[0-9]*)', position_cm)
if match is not None:
position_cm = match.group()
start = stop = int(float(position_cm))
# FIXME remove conversion to int for start/stop
# when the schema can handle floats
# add in the genetic location based on the range
feature.addFeatureStartLocation(
start, chrom_in_build_id, None,
[self.globaltt['FuzzyPosition']])
feature.addFeatureEndLocation(
stop, chrom_in_build_id, None,
[self.globaltt['FuzzyPosition']])
feature.addFeatureToGraph()
# sometimes there's a peak marker, like a rsid.
# we want to add that as a variant of the gene,
# and xref it to the qtl.
dbsnp_id = None
if peak_mark != '' and peak_mark != '.' and \
re.match(r'rs', peak_mark.strip()):
dbsnp_id = 'dbSNP:'+peak_mark.strip()
model.addIndividualToGraph(
dbsnp_id, None,
self.globaltt['sequence_alteration'])
model.addXref(qtl_id, dbsnp_id)
gene_id = gene_id.replace('uncharacterized ', '').strip()
if gene_id is not None and gene_id != '' and gene_id != '.'\
and re.fullmatch(r'[^ ]*', gene_id) is not None:
# we assume if no src is provided and gene_id is an integer,
# then it is an NCBI gene ... (okay, lets crank that back a notch)
if gene_id_src == '' and gene_id.isdigit() and \
gene_id in self.gene_info:
# LOG.info(
# 'Warm & Fuzzy saying %s is a NCBI gene for %s',
# gene_id, common_name)
gene_id_src = 'NCBIgene'
elif gene_id_src == '' and gene_id.isdigit():
LOG.warning(
'Cold & Prickly saying %s is an NCBI gene for %s',
gene_id, common_name)
gene_id_src = 'NCBIgene'
elif gene_id_src == '':
LOG.error(
' "%s" is a NOT NCBI gene for %s', gene_id, common_name)
gene_id_src = None
if gene_id_src == 'NCBIgene':
gene_id = 'NCBIGene:' + gene_id
# we will expect that these will get labels elsewhere
geno.addGene(gene_id, None)
# FIXME what is the right relationship here?
geno.addAffectedLocus(qtl_id, gene_id)
if dbsnp_id is not None:
# add the rsid as a seq alt of the gene_id
vl_id = '_:' + re.sub(
r':', '', gene_id) + '-' + peak_mark.strip()
geno.addSequenceAlterationToVariantLocus(
dbsnp_id, vl_id)
geno.addAffectedLocus(vl_id, gene_id)
# add the trait
model.addClassToGraph(trait_id, trait_name)
# Add publication
reference = None
if re.match(r'ISU.*', pubmed_id):
pub_id = 'AQTLPub:'+pubmed_id.strip()
reference = Reference(graph, pub_id)
elif pubmed_id != '':
pub_id = 'PMID:' + pubmed_id.strip()
reference = Reference(
graph, pub_id, self.globaltt['journal article'])
if reference is not None:
reference.addRefToGraph()
# make the association to the QTL
assoc = G2PAssoc(
graph, self.name, qtl_id, trait_id, self.globaltt['is marker for'])
assoc.add_evidence(eco_id)
assoc.add_source(pub_id)
# create a description from the contents of the file
# desc = ''
# assoc.addDescription(g, assoc_id, desc)
# TODO add exp_id as evidence
# if exp_id != '':
# exp_id = 'AQTLExp:'+exp_id
# gu.addIndividualToGraph(g, exp_id, None, eco_id)
if p_values != '':
scr = re.sub(r'<', '', p_values)
scr = re.sub(r',', '.', scr) # international notation
if scr.isnumeric():
score = float(scr)
assoc.set_score(score) # todo add score type
# TODO add LOD score?
assoc.add_association_to_graph()
# make the association to the dbsnp_id, if found
if dbsnp_id is not None:
# make the association to the dbsnp_id
assoc = G2PAssoc(
graph, self.name, dbsnp_id, trait_id,
self.globaltt['is marker for'])
assoc.add_evidence(eco_id)
assoc.add_source(pub_id)
# create a description from the contents of the file
# desc = ''
# assoc.addDescription(g, assoc_id, desc)
# TODO add exp_id
# if exp_id != '':
# exp_id = 'AQTLExp:'+exp_id
# gu.addIndividualToGraph(g, exp_id, None, eco_id)
if p_values != '':
scr = re.sub(r'<', '', p_values)
scr = re.sub(r',', '.', scr)
if scr.isnumeric():
score = float(scr)
assoc.set_score(score) # todo add score type
# TODO add LOD score?
assoc.add_association_to_graph()
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with QTL genetic info")
return | [
"\n This function processes\n\n Triples created:\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_qtls_genomic_location(
self, raw, txid, build_id, build_label, common_name, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno = Genotype(graph)
# assume that chrs get added to the genome elsewhere
taxon_curie = 'NCBITaxon:' + txid
eco_id = self.globaltt['quantitative trait analysis evidence']
LOG.info("Processing QTL locations for %s from %s", taxon_curie, raw)
with gzip.open(raw, 'rt', encoding='ISO-8859-1') as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
for row in reader:
line_counter += 1
if re.match(r'^#', ' '.join(row)):
continue
(chromosome, qtl_source, qtl_type, start_bp, stop_bp, frame, strand,
score, attr) = row
example = '''
Chr.Z Animal QTLdb Production_QTL 33954873 34023581...
QTL_ID=2242;Name="Spleen percentage";Abbrev="SPLP";PUBMED_ID=17012160;trait_ID=2234;
trait="Spleen percentage";breed="leghorn";"FlankMarkers=ADL0022";VTO_name="spleen mass";
MO_name="spleen weight to body weight ratio";Map_Type="Linkage";Model="Mendelian";
Test_Base="Chromosome-wise";Significance="Significant";P-value="<0.05";F-Stat="5.52";
Variance="2.94";Dominance_Effect="-0.002";Additive_Effect="0.01
'''
str(example)
# make dictionary of attributes
# keys are:
# QTL_ID,Name,Abbrev,PUBMED_ID,trait_ID,trait,FlankMarkers,
# VTO_name,Map_Type,Significance,P-value,Model,
# Test_Base,Variance, Bayes-value,PTO_name,gene_IDsrc,peak_cM,
# CMO_name,gene_ID,F-Stat,LOD-score,Additive_Effect,
# Dominance_Effect,Likelihood_Ratio,LS-means,Breed,
# trait (duplicate with Name),Variance,Bayes-value,
# F-Stat,LOD-score,Additive_Effect,Dominance_Effect,
# Likelihood_Ratio,LS-means
# deal with poorly formed attributes
if re.search(r'"FlankMarkers";', attr):
attr = re.sub(r'FlankMarkers;', '', attr)
attr_items = re.sub(r'"', '', attr).split(";")
bad_attrs = set()
for attributes in attr_items:
if not re.search(r'=', attributes):
# remove this attribute from the list
bad_attrs.add(attributes)
attr_set = set(attr_items) - bad_attrs
attribute_dict = dict(item.split("=") for item in attr_set)
qtl_num = attribute_dict.get('QTL_ID')
if self.test_mode and int(qtl_num) not in self.test_ids:
continue
# make association between QTL and trait based on taxon
qtl_id = common_name + 'QTL:' + str(qtl_num)
model.addIndividualToGraph(qtl_id, None, self.globaltt['QTL'])
geno.addTaxon(taxon_curie, qtl_id)
#
trait_id = 'AQTLTrait:' + attribute_dict.get('trait_ID')
# if pub is in attributes, add it to the association
pub_id = None
if 'PUBMED_ID' in attribute_dict.keys():
pub_id = attribute_dict.get('PUBMED_ID')
if re.match(r'ISU.*', pub_id):
pub_id = 'AQTLPub:' + pub_id.strip()
reference = Reference(graph, pub_id)
else:
pub_id = 'PMID:' + pub_id.strip()
reference = Reference(
graph, pub_id, self.globaltt['journal article'])
reference.addRefToGraph()
# Add QTL to graph
assoc = G2PAssoc(
graph, self.name, qtl_id, trait_id,
self.globaltt['is marker for'])
assoc.add_evidence(eco_id)
assoc.add_source(pub_id)
if 'P-value' in attribute_dict.keys():
scr = re.sub(r'<', '', attribute_dict.get('P-value'))
if ',' in scr:
scr = re.sub(r',', '.', scr)
if scr.isnumeric():
score = float(scr)
assoc.set_score(score)
assoc.add_association_to_graph()
# TODO make association to breed
# (which means making QTL feature in Breed background)
# get location of QTL
chromosome = re.sub(r'Chr\.', '', chromosome)
chrom_id = makeChromID(chromosome, taxon_curie, 'CHR')
chrom_in_build_id = makeChromID(chromosome, build_id, 'MONARCH')
geno.addChromosomeInstance(
chromosome, build_id, build_label, chrom_id)
qtl_feature = Feature(graph, qtl_id, None, self.globaltt['QTL'])
if start_bp == '':
start_bp = None
qtl_feature.addFeatureStartLocation(
start_bp, chrom_in_build_id, strand,
[self.globaltt['FuzzyPosition']])
if stop_bp == '':
stop_bp = None
qtl_feature.addFeatureEndLocation(
stop_bp, chrom_in_build_id, strand,
[self.globaltt['FuzzyPosition']])
qtl_feature.addTaxonToFeature(taxon_curie)
qtl_feature.addFeatureToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
# LOG.warning("Bad attribute flags in this file") # what does this even mean??
LOG.info("Done with QTL genomic mappings for %s", taxon_curie)
return | [
"\n This method\n\n Triples created:\n\n :param limit:\n :return:\n "
]
|
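A self-contained sketch of the attribute handling above: quotes are stripped, malformed entries without an '=' are dropped, and the remainder becomes a dict. The attribute string is a shortened version of the example embedded in the function.
import re

attr = ('QTL_ID=2242;Name="Spleen percentage";Abbrev="SPLP";PUBMED_ID=17012160;'
        'trait_ID=2234;"FlankMarkers=ADL0022";Map_Type="Linkage";P-value="<0.05"')

attr_items = re.sub(r'"', '', attr).split(';')
bad_attrs = {item for item in attr_items if '=' not in item}
attribute_dict = dict(item.split('=') for item in set(attr_items) - bad_attrs)

assert attribute_dict['QTL_ID'] == '2242'
assert attribute_dict['PUBMED_ID'] == '17012160'
assert attribute_dict['P-value'] == '<0.05'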
Please provide a description of the function:def _process_trait_mappings(self, raw, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
with open(raw, 'r') as csvfile:
filereader = csv.reader(csvfile, delimiter=',', quotechar='\"')
next(filereader, None) # skip header line
for row in filereader:
line_counter += 1
# need to skip the last line
if len(row) < 8:
LOG.info("skipping line %d: %s", line_counter, '\t'.join(row))
continue
(vto_id, pto_id, cmo_id, ato_column, species, trait_class,
trait_type, qtl_count) = row
ato_id = re.sub(
r'ATO #', 'AQTLTrait:', re.sub(
r'\].*', '', re.sub(r'\[', '', ato_column)))
ato_id = ato_id.strip()
ato_label = re.sub(r'.*\]\s*', '', ato_column)
model.addClassToGraph(ato_id, ato_label.strip())
if re.match(r'VT:.*', vto_id):
model.addClassToGraph(vto_id, None)
model.addEquivalentClass(ato_id, vto_id)
if re.match(r'LPT:.*', pto_id):
model.addClassToGraph(pto_id, None)
model.addXref(ato_id, pto_id)
if re.match(r'CMO:.*', cmo_id):
model.addClassToGraph(cmo_id, None)
model.addXref(ato_id, cmo_id)
LOG.info("Done with trait mappings")
return | [
"\n This method mapps traits from/to ...\n\n Triples created:\n\n :param limit:\n :return:\n "
]
|
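A small worked example of the ATO column parsing above, assuming the column looks like '[ATO #000123] some trait label'; the exact input format is inferred from the regexes, and the sample value is made up.
import re

ato_column = '[ATO #000123] body weight gain'

ato_id = re.sub(
    r'ATO #', 'AQTLTrait:', re.sub(
        r'\].*', '', re.sub(r'\[', '', ato_column))).strip()
ato_label = re.sub(r'.*\]\s*', '', ato_column)

assert ato_id == 'AQTLTrait:000123'
assert ato_label == 'body weight gain'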
Please provide a description of the function:def _get_identifiers(self, limit):
LOG.info("getting identifier mapping")
line_counter = 0
f = '/'.join((self.rawdir, self.files['identifiers']['file']))
myzip = ZipFile(f, 'r')
# assume that the first entry is the item
fname = myzip.namelist()[0]
foundheader = False
# TODO align this species filter with the one above
# speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,
# Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')
speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
with myzip.open(fname, 'r') as csvfile:
for line in csvfile:
# skip header lines
if not foundheader:
if re.match(r'BIOGRID_ID', line.decode()):
foundheader = True
continue
line = line.decode().strip()
# BIOGRID_ID
# IDENTIFIER_VALUE
# IDENTIFIER_TYPE
# ORGANISM_OFFICIAL_NAME
# 1 814566 ENTREZ_GENE Arabidopsis thaliana
(biogrid_num, id_num, id_type,
organism_label) = line.split('\t')
if self.test_mode:
graph = self.testgraph
# skip any genes that don't match our test set
if int(biogrid_num) not in self.biogrid_ids:
continue
else:
graph = self.graph
model = Model(graph)
# for each one of these,
# create the node and add equivalent classes
biogrid_id = 'BIOGRID:'+biogrid_num
prefix = self.localtt[id_type]
# TODO make these filters available as commandline options
# geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,
# WormBase,XenBase,ENSEMBL,miRBase'.split(',')
geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')
# proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'
if (speciesfilters is not None) \
and (organism_label.strip() in speciesfilters):
line_counter += 1
if (geneidtypefilters is not None) \
and (prefix in geneidtypefilters):
mapped_id = ':'.join((prefix, id_num))
model.addEquivalentClass(biogrid_id, mapped_id)
# this symbol will only get attached to the biogrid class
elif id_type == 'OFFICIAL_SYMBOL':
model.addClassToGraph(biogrid_id, id_num)
# elif (id_type == 'SYNONYM'):
# FIXME - i am not sure these are synonyms, altids?
# gu.addSynonym(g,biogrid_id,id_num)
if not self.test_mode and limit is not None and line_counter > limit:
break
myzip.close()
return | [
"\n This will process the id mapping file provided by Biogrid.\n The file has a very large header, which we scan past,\n then pull the identifiers, and make equivalence axioms\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def makeChromID(chrom, reference=None, prefix=None):
# blank nodes
if reference is None:
LOG.warning('No reference for this chr. You may have conflicting ids')
# replace any chr-like prefixes with blank to standardize
chrid = re.sub(r'ch(r?)[omse]*', '', str(chrom))
# remove the build/taxon prefixes to look cleaner
ref = reference
if re.match(r'.+:.+', reference):
ref = re.match(r'.*:(.*)', reference).group(1)
chrid = ''.join((ref, 'chr', chrid))
if prefix is None:
chrid = ''.join(('_', chrid))
else:
chrid = ':'.join((prefix, chrid))
return chrid | [
"\n This will take a chromosome number and a NCBI taxon number,\n and create a unique identifier for the chromosome. These identifiers\n are made in the @base space like:\n Homo sapiens (9606) chr1 ==> :9606chr1\n Mus musculus (10090) chrX ==> :10090chrX\n\n :param chrom: the chromosome (preferably without any chr prefix)\n :param reference: the numeric portion of the taxon id\n\n :return:\n\n "
]
|
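A usage sketch of makeChromID, assuming the definition above; the expected values follow from tracing the regex substitution and joins in the code.
# blank-node style id when no prefix is given
assert makeChromID('chr1', 'NCBITaxon:9606') == '_9606chr1'
# prefixed id, e.g. for a chromosome in a named build
assert makeChromID('X', 'NCBITaxon:10090', 'CHR') == 'CHR:10090chrX'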
Please provide a description of the function:def addFeatureStartLocation(
self, coordinate, reference_id, strand=None, position_types=None):
# make an object for the start, which has:
# {coordinate : integer, reference : reference_id, types = []}
self.start = self._getLocation(coordinate, reference_id, strand, position_types)
return | [
"\n Adds coordinate details for the start of this feature.\n :param coordinate:\n :param reference_id:\n :param strand:\n :param position_types:\n\n :return:\n\n "
]
|
Please provide a description of the function:def addFeatureEndLocation(
self, coordinate, reference_id, strand=None, position_types=None):
self.stop = self._getLocation(coordinate, reference_id, strand, position_types)
return | [
"\n Adds the coordinate details for the end of this feature\n :param coordinate:\n :param reference_id:\n :param strand:\n\n :return:\n\n "
]
|
Please provide a description of the function:def _getLocation(self, coordinate, reference_id, strand, position_types):
loc = {}
loc['coordinate'] = coordinate
loc['reference'] = reference_id
loc['type'] = []
strand_id = self._getStrandType(strand)
if strand_id is not None:
loc['type'].append(strand_id)
if position_types is not None:
loc['type'] += position_types
if position_types == []:
loc['type'].append(self.globaltt['Position'])
return loc | [
"\n Make an object for the location, which has:\n {coordinate : integer, reference : reference_id, types = []}\n where the strand is indicated in the type array\n :param coordinate:\n :param reference_id:\n :param strand:\n :param position_types:\n\n :return:\n\n "
]
|
Please provide a description of the function:def _getStrandType(self, strand):
# TODO make this a dictionary/enum: PLUS, MINUS, BOTH, UNKNOWN
strand_id = None
if strand == '+':
strand_id = self.globaltt['plus_strand']
elif strand == '-':
strand_id = self.globaltt['minus_strand']
elif strand == '.':
strand_id = self.globaltt['both_strand']
elif strand is None: # assume this is Unknown
pass
else:
LOG.warning("strand type could not be mapped: %s", str(strand))
return strand_id | [
"\n :param strand:\n :return:\n "
]
|
Please provide a description of the function:def addFeatureToGraph(
self, add_region=True, region_id=None, feature_as_class=False):
if feature_as_class:
self.model.addClassToGraph(
self.fid, self.label, self.ftype, self.description)
else:
self.model.addIndividualToGraph(
self.fid, self.label, self.ftype, self.description)
if self.start is None and self.stop is None:
add_region = False
if add_region:
# create a region that has the begin/end positions
regionchr = re.sub(r'\w+\:_?', '', self.start['reference'])
if region_id is None:
# in case the values are undefined
# if we know only one of the coordinates,
# then we'll add an "unknown" other.
st = sp = 'UN'
strand = None
if self.start is not None and self.start['coordinate'] is not None:
st = str(self.start['coordinate'])
strand = self._getStrandStringFromPositionTypes(self.start['type'])
if self.stop is not None and self.stop['coordinate'] is not None:
sp = str(self.stop['coordinate'])
if strand is not None:
strand = self._getStrandStringFromPositionTypes(
self.stop['type'])
# assume that the strand is the same for both start and stop.
# this will need to be fixed in the future
region_items = [regionchr, st, sp]
if strand is not None:
region_items += [strand]
region_id = '-'.join(region_items)
rid = region_id
rid = re.sub(r'\w+\:', '', rid, 1) # replace the id prefix
rid = '_:'+rid+"-Region"
region_id = rid
self.graph.addTriple(self.fid, self.globaltt['location'], region_id)
self.model.addIndividualToGraph(region_id, None, self.globaltt['Region'])
else:
region_id = self.fid
self.model.addType(region_id, self.globaltt['region'])
# add the start/end positions to the region
beginp = endp = None
if self.start is not None:
beginp = self._makePositionId(
self.start['reference'], self.start['coordinate'], self.start['type'])
self.addPositionToGraph(
self.start['reference'], self.start['coordinate'], self.start['type'])
if self.stop is not None:
endp = self._makePositionId(
self.stop['reference'], self.stop['coordinate'], self.stop['type'])
self.addPositionToGraph(
self.stop['reference'], self.stop['coordinate'], self.stop['type'])
self.addRegionPositionToGraph(region_id, beginp, endp)
# {coordinate : integer, reference : reference_id, types = []}
return | [
"\n We make the assumption here that all features are instances.\n The features are located on a region,\n which begins and ends with faldo:Position\n The feature locations leverage the Faldo model,\n which has a general structure like:\n Triples:\n feature_id a feature_type (individual)\n faldo:location region_id\n region_id a faldo:region\n faldo:begin start_position\n faldo:end end_position\n start_position a\n (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)\n faldo:position Integer(numeric position)\n faldo:reference reference_id\n end_position a\n (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)\n faldo:position Integer(numeric position)\n faldo:reference reference_id\n\n :param graph:\n\n :return:\n\n "
]
|
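A hedged usage sketch of the Feature/faldo workflow described above; the identifiers are hypothetical and the constructor signature (graph, id, label, type) is inferred from calls elsewhere in this module:
feat = Feature(graph, 'ENSEMBL:ENSG00000012048', 'BRCA1', globaltt['gene'])
feat.addFeatureStartLocation(43044295, 'CHR:GRCh38chr17', '-')
feat.addFeatureEndLocation(43125483, 'CHR:GRCh38chr17', '-')
feat.addFeatureToGraph()  # feature -> faldo:location -> region with begin/end positions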
Please provide a description of the function:def _makePositionId(self, reference, coordinate, types=None):
if reference is None:
LOG.error("Trying to make position with no reference.")
return None
curie = '_:'
reference = re.sub(r'\w+\:', '', reference, 1)
if re.match(r'^_', reference):
# this is in the case if the reference is a bnode
reference = re.sub(r'^_', '', reference)
curie += reference
if coordinate is not None:
# just in case it isn't a string already
curie = '-'.join((curie, str(coordinate)))
if types is not None:
tstring = self._getStrandStringFromPositionTypes(types)
if tstring is not None:
curie = '-'.join((curie, tstring))
return curie | [
"\n Note that positions should have a reference (we will enforce).\n Only exact positions need a coordinate.\n :param reference:\n :param coordinate:\n :param types:\n :return:\n "
]
|
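Illustrative outputs for _makePositionId, assuming a reference curie like 'CHR:GRCh38chr17' (the optional strand suffix comes from _getStrandStringFromPositionTypes and is omitted here):
>>> self._makePositionId('CHR:GRCh38chr17', 43044295)
'_:GRCh38chr17-43044295'
>>> self._makePositionId('CHR:GRCh38chr17', None)
'_:GRCh38chr17'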
Please provide a description of the function:def addPositionToGraph(
self, reference_id, position, position_types=None, strand=None):
pos_id = self._makePositionId(reference_id, position, position_types)
if position is not None:
self.graph.addTriple(
pos_id, self.globaltt['position'], position, object_is_literal=True,
literal_type="xsd:integer")
self.graph.addTriple(pos_id, self.globaltt['reference'], reference_id)
if position_types is not None:
for pos_type in position_types:
self.model.addType(pos_id, pos_type)
strnd = None
if strand is not None:
strnd = strand
if not re.match(r'faldo', strand):
# not already mapped to faldo, so expect we need to map it
strnd = self._getStrandType(strand)
# else:
# strnd = self.globaltt['both_strand']
if strnd is None and (position_types is None or position_types == []):
strnd = self.globaltt['Position']
if strnd is not None:
self.model.addType(pos_id, strnd)
return pos_id | [
"\n Add the positional information to the graph, following the faldo model.\n We assume that if the strand is None,\n we give it a generic \"Position\" only.\n Triples:\n my_position a (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)\n faldo:position Integer(numeric position)\n faldo:reference reference_id\n\n :param graph:\n :param reference_id:\n :param position:\n :param position_types:\n :param strand:\n\n :return: Identifier of the position created\n\n "
]
|
Please provide a description of the function:def addSubsequenceOfFeature(self, parentid):
self.graph.addTriple(self.fid, self.globaltt['is subsequence of'], parentid)
# this should be expected to be done in reasoning not ETL
self.graph.addTriple(parentid, self.globaltt['has subsequence'], self.fid)
return | [
"\n This will add reciprocal triples like:\n feature <is subsequence of> parent\n parent has_subsequence feature\n :param graph:\n :param parentid:\n\n :return:\n\n "
]
|
Please provide a description of the function:def addTaxonToFeature(self, taxonid):
self.taxon = taxonid
self.graph.addTriple(self.fid, self.globaltt['in taxon'], self.taxon)
return | [
"\n Given the taxon id, this will add the following triple:\n feature in_taxon taxonid\n :param graph:\n :param taxonid:\n :return:\n "
]
|
Please provide a description of the function:def add_supporting_evidence(self, evidence_line, evidence_type=None, label=None):
self.graph.addTriple(
self.association, self.globaltt['has_supporting_evidence_line'],
evidence_line)
if evidence_type is not None:
self.model.addIndividualToGraph(evidence_line, label, evidence_type)
return | [
"\n Add supporting line of evidence node to association id\n\n :param evidence_line: curie or iri, evidence line\n :param evidence_type: curie or iri, evidence type if available\n :return: None\n "
]
|
Please provide a description of the function:def add_data_individual(self, data_curie, label=None, ind_type=None):
part_length = len(data_curie.split(':'))
if part_length == 1:
# no prefix, so convert the long string to a bnode (per the docstring)
curie = "_:{}".format(data_curie)
elif part_length > 2:
raise ValueError("Misformatted curie {}".format(data_curie))
else:
curie = data_curie
self.model.addIndividualToGraph(curie, label, ind_type)
return | [
"\n Add data individual\n :param data_curie: str either curie formatted or long string,\n long strings will be converted to bnodes\n :param type: str curie\n :param label: str\n :return: None\n "
]
|
Please provide a description of the function:def add_supporting_data(self, evidence_line, measurement_dict):
for measurement in measurement_dict:
self.graph.addTriple(
evidence_line, self.globaltt['has_evidence_item'], measurement)
self.graph.addTriple(
measurement, self.globaltt['has_value'], # 'has measurement value' ??
measurement_dict[measurement], True)
return | [
"\n Add supporting data\n :param evidence_line:\n :param data_object: dict, where keys are curies or iris\n and values are measurement values for example:\n {\n \"_:1234\" : \"1.53E07\"\n \"_:4567\": \"20.25\"\n }\n Note: assumes measurements are RDF:Type 'ed elsewhere\n :return: None\n "
]
|
Please provide a description of the function:def add_supporting_publication(
self, evidence_line, publication, label=None, pub_type=None):
self.graph.addTriple(
evidence_line, self.globaltt['evidence_has_supporting_reference'], publication)
self.model.addIndividualToGraph(publication, label, pub_type)
return | [
"\n <evidence> <evidence_has_supporting_reference> <source>\n <source> <rdf:type> <type>\n <source> <rdfs:label> \"label\"\n :param evidence_line: str curie\n :param publication: str curie\n :param label: optional, str type as curie\n :param type: optional, str type as curie\n :return:\n "
]
|
Please provide a description of the function:def add_source(self, evidence_line, source, label=None, src_type=None):
self.graph.addTriple(evidence_line, self.globaltt['source'], source)
self.model.addIndividualToGraph(source, label, src_type)
return | [
"\n Applies the triples:\n <evidence> <dc:source> <source>\n <source> <rdf:type> <type>\n <source> <rdfs:label> \"label\"\n\n TODO this should belong in a higher level class\n :param evidence_line: str curie\n :param source: str source as curie\n :param label: optional, str type as curie\n :param type: optional, str type as curie\n :return: None\n "
]
|
Please provide a description of the function:def _process_phenotype_data(self, limit):
src_key = 'catalog'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
fname = '/'.join((self.rawdir, self.files[src_key]['file']))
self.strain_hash = {}
self.id_label_hash = {}
genes_with_no_ids = set()
stem_cell_class = self.globaltt['stem cell']
mouse_taxon = self.globaltt['Mus musculus']
geno = Genotype(graph)
with open(fname, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='\"')
# This MMRRC catalog data file was generated on YYYY-MM-DD
# insert or check date w/dataset
line = next(reader)
# gen_date = line[-10:]
line = next(reader)
col = self.files['catalog']['columns']
if col != line:
LOG.error(
'%s\nExpected Headers:\t%s\nReceived Headers:\t%s\n',
src_key, col, line)
LOG.info(set(col) - set(line))
line = next(reader)
if line != []:
LOG.warning('Expected third line to be blank. got "%s" instead', line)
for row in reader:
strain_id = row[col.index('STRAIN/STOCK_ID')].strip()
strain_label = row[col.index('STRAIN/STOCK_DESIGNATION')]
# strain_type_symbol = row[col.index('STRAIN_TYPE')]
strain_state = row[col.index('STATE')]
mgi_allele_id = row[col.index('MGI_ALLELE_ACCESSION_ID')].strip()
mgi_allele_symbol = row[col.index('ALLELE_SYMBOL')]
# mgi_allele_name = row[col.index('ALLELE_NAME')]
# mutation_type = row[col.index('MUTATION_TYPE')]
# chrom = row[col.index('CHROMOSOME')]
mgi_gene_id = row[col.index('MGI_GENE_ACCESSION_ID')].strip()
mgi_gene_symbol = row[col.index('GENE_SYMBOL')].strip()
mgi_gene_name = row[col.index('GENE_NAME')]
# sds_url = row[col.index('SDS_URL')]
# accepted_date = row[col.index('ACCEPTED_DATE')]
mpt_ids = row[col.index('MPT_IDS')].strip()
pubmed_nums = row[col.index('PUBMED_IDS')].strip()
research_areas = row[col.index('RESEARCH_AREAS')].strip()
if self.test_mode and (strain_id not in self.test_ids) \
or mgi_gene_name == 'withdrawn':
continue
# strip off stuff after the dash -
# is the holding center important?
# MMRRC:00001-UNC --> MMRRC:00001
strain_id = re.sub(r'-\w+$', '', strain_id)
self.id_label_hash[strain_id] = strain_label
# get the variant or gene to save for later building of
# the genotype
if strain_id not in self.strain_hash:
self.strain_hash[strain_id] = {
'variants': set(), 'genes': set()}
# flag bad ones
if mgi_allele_id[:4] != 'MGI:' and mgi_allele_id != '':
LOG.error("Erroneous MGI allele id: %s", mgi_allele_id)
if mgi_allele_id[:3] == 'MG:':
mgi_allele_id = 'MGI:' + mgi_allele_id[3:]
else:
mgi_allele_id = ''
if mgi_allele_id != '':
self.strain_hash[strain_id]['variants'].add(mgi_allele_id)
self.id_label_hash[mgi_allele_id] = mgi_allele_symbol
# use the following if needing to add the sequence alteration types
# var_type = self.localtt[mutation_type]
# make a sequence alteration for this variant locus,
# and link the variation type to it
# sa_id = '_'+re.sub(r':','',mgi_allele_id)+'SA'
# if self.nobnodes:
# sa_id = ':'+sa_id
# gu.addIndividualToGraph(g, sa_id, None, var_type)
# geno.addSequenceAlterationToVariantLocus(sa_id, mgi_allele_id)
# scrub out any spaces, fix known issues
mgi_gene_id = re.sub(r'\s+', '', mgi_gene_id)
if mgi_gene_id == 'NULL':
mgi_gene_id = ''
elif mgi_gene_id[:7] == 'GeneID:':
mgi_gene_id = 'NCBIGene:' + mgi_gene_id[7:]
if mgi_gene_id != '':
[curie, localid] = mgi_gene_id.split(':')
if curie not in ['MGI', 'NCBIGene']:
LOG.info("MGI Gene id not recognized: %s", mgi_gene_id)
self.strain_hash[strain_id]['genes'].add(mgi_gene_id)
self.id_label_hash[mgi_gene_id] = mgi_gene_symbol
# catch some errors - too many. report summary at the end
# some things have gene labels, but no identifiers - report
if mgi_gene_symbol != '' and mgi_gene_id == '':
# LOG.error(
# "Gene label with no MGI identifier for strain %s: %s",
# strain_id, mgi_gene_symbol)
genes_with_no_ids.add(mgi_gene_symbol)
# make a temp id for genes that aren't identified ... err wow.
# tmp_gene_id = '_' + mgi_gene_symbol
# self.id_label_hash[tmp_gene_id.strip()] = mgi_gene_symbol
# self.strain_hash[strain_id]['genes'].add(tmp_gene_id)
# split apart the mp ids
# ataxia [MP:0001393] ,hypoactivity [MP:0001402] ...
# mpt_ids are a comma delimited list
# labels with MP terms following in brackets
phenotype_ids = []
if mpt_ids != '':
for lb_mp in mpt_ids.split(r','):
lb_mp = lb_mp.strip()
if lb_mp[-1:] == ']' and lb_mp[-12:-8] == '[MP:':
phenotype_ids.append(lb_mp[-11:-1])
# pubmed ids are space delimited
pubmed_ids = []
if pubmed_nums != '':
for pm_num in re.split(r'\s+', pubmed_nums):
pmid = 'PMID:' + pm_num.strip()
pubmed_ids.append(pmid)
ref = Reference(graph, pmid, self.globaltt['journal article'])
ref.addRefToGraph()
# https://www.mmrrc.org/catalog/sds.php?mmrrc_id=00001
# is a good example of 4 genotype parts
model.addClassToGraph(mouse_taxon, None)
if research_areas == '':
research_areas = None
else:
research_areas = 'Research Areas: ' + research_areas
strain_type = mouse_taxon
if strain_state == 'ES':
strain_type = stem_cell_class
model.addIndividualToGraph( # an inst of mouse??
strain_id, strain_label, strain_type, research_areas)
model.makeLeader(strain_id)
# phenotypes are associated with the alleles
for pid in phenotype_ids:
# assume the phenotype label is in some ontology
model.addClassToGraph(pid, None)
if mgi_allele_id is not None and mgi_allele_id != '':
assoc = G2PAssoc(
graph, self.name, mgi_allele_id, pid,
self.globaltt['has phenotype'])
for p in pubmed_ids:
assoc.add_source(p)
assoc.add_association_to_graph()
else:
LOG.info("Phenotypes and no allele for %s", strain_id)
if not self.test_mode and (
limit is not None and reader.line_num > limit):
break
# now that we've collected all of the variant information, build it
# we don't know their zygosities
for s in self.strain_hash:
h = self.strain_hash.get(s)
variants = h['variants']
genes = h['genes']
vl_set = set()
# make variant loci for each gene
if len(variants) > 0:
for var in variants:
vl_id = var.strip()
vl_symbol = self.id_label_hash[vl_id]
geno.addAllele(
vl_id, vl_symbol, self.globaltt['variant_locus'])
vl_set.add(vl_id)
if len(variants) == 1 and len(genes) == 1:
for gene in genes:
geno.addAlleleOfGene(vl_id, gene)
else:
geno.addAllele(vl_id, vl_symbol)
else: # len(vars) == 0
# it's just anonymous variants in some gene
for gene in genes:
vl_id = '_:' + re.sub(r':', '', gene) + '-VL'
vl_symbol = self.id_label_hash[gene]+'<?>'
self.id_label_hash[vl_id] = vl_symbol
geno.addAllele(
vl_id, vl_symbol, self.globaltt['variant_locus'])
geno.addGene(gene, self.id_label_hash[gene])
geno.addAlleleOfGene(vl_id, gene)
vl_set.add(vl_id)
# make the vslcs
vl_list = sorted(vl_set)
vslc_list = []
for vl in vl_list:
# for unknown zygosity
vslc_id = re.sub(r'^_', '', vl)+'U'
vslc_id = re.sub(r':', '', vslc_id)
vslc_id = '_:' + vslc_id
vslc_label = self.id_label_hash[vl] + '/?'
self.id_label_hash[vslc_id] = vslc_label
vslc_list.append(vslc_id)
geno.addPartsToVSLC(
vslc_id, vl, None, self.globaltt['indeterminate'],
self.globaltt['has_variant_part'], None)
model.addIndividualToGraph(
vslc_id, vslc_label,
self.globaltt['variant single locus complement'])
if len(vslc_list) > 0:
if len(vslc_list) > 1:
gvc_id = '-'.join(vslc_list)
gvc_id = re.sub(r'_|:', '', gvc_id)
gvc_id = '_:'+gvc_id
gvc_label = '; '.join(self.id_label_hash[v] for v in vslc_list)
model.addIndividualToGraph(
gvc_id, gvc_label,
self.globaltt['genomic_variation_complement'])
for vslc_id in vslc_list:
geno.addVSLCtoParent(vslc_id, gvc_id)
else:
# the GVC == VSLC, so don't have to make an extra piece
gvc_id = vslc_list.pop()
gvc_label = self.id_label_hash[gvc_id]
genotype_label = gvc_label + ' [n.s.]'
bkgd_id = re.sub(
r':', '', '-'.join((
self.globaltt['unspecified_genomic_background'], s)))
genotype_id = '-'.join((gvc_id, bkgd_id))
bkgd_id = '_:' + bkgd_id
geno.addTaxon(mouse_taxon, bkgd_id)
geno.addGenomicBackground(
bkgd_id, 'unspecified (' + s + ')',
self.globaltt['unspecified_genomic_background'],
"A placeholder for the unspecified genetic background for " + s)
geno.addGenomicBackgroundToGenotype(
bkgd_id, genotype_id,
self.globaltt['unspecified_genomic_background'])
geno.addParts(
gvc_id, genotype_id, self.globaltt['has_variant_part'])
geno.addGenotype(genotype_id, genotype_label)
graph.addTriple(
s, self.globaltt['has_genotype'], genotype_id)
else:
# LOG.debug(
# "Strain %s is not making a proper genotype.", s)
pass
LOG.warning(
"The following gene symbols did not list identifiers: %s",
str(sorted(list(genes_with_no_ids))))
LOG.error(
'%i symbols given are missing their gene identifiers',
len(genes_with_no_ids))
return | [
"\n NOTE: If a Strain carries more than one mutation,\n then each Mutation description,\n i.e., the set: (\n Mutation Type - Chromosome - Gene Symbol -\n Gene Name - Allele Symbol - Allele Name)\n will require a separate line.\n\n Note that MMRRC curates phenotypes to alleles,\n even though they distribute only one file with the\n phenotypes appearing to be associated with a strain.\n\n So, here we process the allele-to-phenotype relationships separately\n from the strain-to-allele relationships.\n\n :param limit:\n :return:\n\n "
]
|
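A worked example of the MPT_IDS parsing step above, on a hypothetical catalog value (assumes MP ids are zero-padded to seven digits, as in the comment in the code):
>>> mpt_ids = 'ataxia [MP:0001393] ,hypoactivity [MP:0001402]'
>>> [lb.strip()[-11:-1] for lb in mpt_ids.split(',')
...  if lb.strip()[-1:] == ']' and lb.strip()[-12:-8] == '[MP:']
['MP:0001393', 'MP:0001402']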
Please provide a description of the function:def write(graph, fileformat=None, filename=None):
filewriter = None
if fileformat is None:
fileformat = 'turtle'
if filename is not None:
with open(filename, 'wb') as filewriter:
LOG.info("Writing triples in %s to %s", fileformat, filename)
# rdflib serialize
graph.serialize(filewriter, format=fileformat)
else:
print(graph.serialize(format=fileformat).decode())
return | [
"\n A basic graph writer (to stdout) for any of the sources.\n this will write raw triples in rdfxml, unless specified.\n to write turtle, specify format='turtle'\n an optional file can be supplied instead of stdout\n :return: None\n\n "
]
|
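Two hedged usage examples for write (the path is hypothetical):
write(graph, fileformat='turtle', filename='/tmp/monarch.ttl')  # serialize to a file
write(graph)  # defaults to turtle and prints to stdout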
Please provide a description of the function:def get_properties_from_graph(graph):
# collapse to single list
property_set = set()
for row in graph.predicates():
property_set.add(row)
return property_set | [
"\n Wrapper for RDFLib.graph.predicates() that returns a unique set\n :param graph: RDFLib.graph\n :return: set, set of properties\n "
]
|
Please provide a description of the function:def _get_chrbands(self, limit, taxon):
if limit is None:
limit = sys.maxsize # practical limit anyway
model = Model(self.graph)
line_counter = 0
myfile = '/'.join((self.rawdir, self.files[taxon]['file']))
LOG.info("Processing Chr bands from FILE: %s", myfile)
geno = Genotype(self.graph)
monochrom = Monochrom(self.graph_type, self.are_bnodes_skized)
# used to hold band definitions for a chr
# in order to compute extent of encompassing bands
mybands = {}
# build the organism's genome from the taxon
genome_label = self.files[taxon]['genome_label']
taxon_id = 'NCBITaxon:' + taxon
# add the taxon as a class. adding the class label elsewhere
model.addClassToGraph(taxon_id, None)
model.addSynonym(taxon_id, genome_label)
geno.addGenome(taxon_id, genome_label)
# add the build and the taxon it's in
build_num = self.files[taxon]['build_num']
build_id = 'UCSC:' + build_num
geno.addReferenceGenome(build_id, build_num, taxon_id)
# process the bands
col = ['scaffold', 'start', 'stop', 'band_num', 'rtype']
with gzip.open(myfile, 'rb') as f:
for line in f:
line_counter += 1
# skip comments
line = line.decode().strip()
if line[0] == '#' or line_counter > limit:
continue
# chr13 4500000 10000000 p12 stalk
row = line.split('\t')
scaffold = row[col.index('scaffold')]
start = row[col.index('start')]
stop = row[col.index('stop')]
band_num = row[col.index('band_num')].strip()
rtype = row[col.index('rtype')]
# NOTE some less-finished genomes have
# placed and unplaced scaffolds
# * Placed scaffolds:
# the scaffolds have been placed within a chromosome.
# * Unlocalized scaffolds:
# although the chromosome within which the scaffold occurs
# is known, the scaffold's position or orientation
# is not known.
# * Unplaced scaffolds:
# it is not known which chromosome the scaffold belongs to
#
# find out if the thing is a full on chromosome, or a scaffold:
# ex: unlocalized scaffold: chr10_KL568008v1_random
# ex: unplaced scaffold: chrUn_AABR07022428v1
placed_scaffold_pattern = r'(chr(?:\d+|X|Y|Z|W|M))'
unlocalized_scaffold_pattern = placed_scaffold_pattern+r'_(\w+)_random'
unplaced_scaffold_pattern = r'chr(Un(?:_\w+)?)'
mch = re.match(placed_scaffold_pattern + r'$', scaffold)
if mch is not None and len(mch.groups()) == 1:
# the chromosome is the first match of the pattern
chrom_num = mch.group(1)
else:
# skip over anything that isn't a placed_scaffold
# at the class level
LOG.info("Found non-placed chromosome %s", scaffold)
chrom_num = None
m_chr_unloc = re.match(unlocalized_scaffold_pattern, scaffold)
m_chr_unplaced = re.match(unplaced_scaffold_pattern, scaffold)
scaffold_num = None
if mch:
pass
elif m_chr_unloc is not None and len(m_chr_unloc.groups()) == 2:
chrom_num = m_chr_unloc.group(1)
scaffold_num = chrom_num + '_' + m_chr_unloc.group(2)
elif m_chr_unplaced is not None and len(m_chr_unplaced.groups()) == 1:
scaffold_num = m_chr_unplaced.group(1)
else:
LOG.error(
"There's a chr pattern that we aren't matching: %s", scaffold)
if chrom_num is not None:
# the chrom class (generic) id
chrom_class_id = makeChromID(chrom_num, taxon, 'CHR')
# first, add the chromosome class (in the taxon)
geno.addChromosomeClass(
chrom_num, taxon_id, self.files[taxon]['genome_label'])
# then, add the chromosome instance (from the given build)
geno.addChromosomeInstance(
chrom_num, build_id, build_num, chrom_class_id)
# add the chr to the hashmap of coordinates for this build
# the chromosome coordinate space is itself
if chrom_num not in mybands.keys():
mybands[chrom_num] = {
'min': 0,
'max': int(stop),
'chr': chrom_num,
'ref': build_id,
'parent': None,
'stain': None,
'type': self.globaltt['chromosome']}
if scaffold_num is not None:
# this will put the coordinates of the scaffold
# in the scaffold-space and make sure that the scaffold
# is part of the correct parent.
# if chrom_num is None,
# then it will attach it to the genome,
# just like a reg chrom
mybands[scaffold_num] = {
'min': start,
'max': stop,
'chr': scaffold_num,
'ref': build_id,
'parent': chrom_num,
'stain': None,
'type': self.globaltt['assembly_component'],
'synonym': scaffold}
parents = list()
if band_num is not None and band_num != '':
# add the specific band
mybands[chrom_num+band_num] = {
'min': start,
'max': stop,
'chr': chrom_num,
'ref': build_id,
'parent': None,
'stain': None,
'type': None}
# add the staining intensity of the band
if re.match(r'g(neg|pos|var)', rtype):
mybands[chrom_num+band_num]['stain'] = self.resolve(rtype)
# get the parent bands, and make them unique
parents = list(
monochrom.make_parent_bands(band_num, set()))
# alphabetical sort will put them in smallest to biggest,
# so we reverse
parents.sort(reverse=True)
# print('parents of',chrom,band,':',parents)
if len(parents) > 0:
mybands[chrom_num + band_num]['parent'] = chrom_num + parents[0]
# loop through the parents and add them to the hash
# add the parents to the graph, in hierarchical order
# TODO PYLINT Consider using enumerate
# instead of iterating with range and len
for i in range(len(parents)):
rti = getChrPartTypeByNotation(parents[i])
pnum = chrom_num+parents[i]
sta = int(start)
sto = int(stop)
if pnum not in mybands.keys():
# add the parental band to the hash
bnd = {
'min': min(sta, sto),
'max': max(sta, sto),
'chr': chrom_num,
'ref': build_id,
'parent': None,
'stain': None,
'type': rti}
mybands[pnum] = bnd
else:
# band already in the hash means it's a grouping band
# need to update the min/max coords
bnd = mybands.get(pnum)
bnd['min'] = min(sta, sto, bnd['min'])
bnd['max'] = max(sta, sto, bnd['max'])
mybands[pnum] = bnd
# also, set the max for the chrom
chrom = mybands.get(chrom_num)
chrom['max'] = max(sta, sto, chrom['max'])
mybands[chrom_num] = chrom
# add the parent relationships to each
if i < len(parents) - 1:
mybands[pnum]['parent'] = chrom_num+parents[i+1]
else:
# add the last one (p or q usually)
# as attached to the chromosome
mybands[pnum]['parent'] = chrom_num
f.close() # end looping through file
# loop through the hash and add the bands to the graph
for bnd in mybands.keys():
myband = mybands.get(bnd)
band_class_id = makeChromID(bnd, taxon, 'CHR')
band_class_label = makeChromLabel(bnd, genome_label)
band_build_id = makeChromID(bnd, build_num, 'MONARCH')
band_build_label = makeChromLabel(bnd, build_num)
# the build-specific chrom
chrom_in_build_id = makeChromID(myband['chr'], build_num, 'MONARCH')
# if it's != part, then add the class
if myband['type'] != self.globaltt['assembly_component']:
model.addClassToGraph(
band_class_id, band_class_label, myband['type'])
bfeature = Feature(
self.graph, band_build_id, band_build_label, band_class_id)
else:
bfeature = Feature(
self.graph, band_build_id, band_build_label, myband['type'])
if 'synonym' in myband:
model.addSynonym(band_build_id, myband['synonym'])
if myband['parent'] is None:
if myband['type'] == self.globaltt['assembly_component']:
# since we likely don't know the chr,
# add it as a part of the build
geno.addParts(band_build_id, build_id)
elif myband['type'] == self.globaltt['assembly_component']:
# geno.addParts(band_build_id, chrom_in_build_id)
parent_chrom_in_build = makeChromID(
myband['parent'], build_num, 'MONARCH')
bfeature.addSubsequenceOfFeature(parent_chrom_in_build)
# add the band as a feature
# (which also instantiates the owl:Individual)
bfeature.addFeatureStartLocation(myband['min'], chrom_in_build_id)
bfeature.addFeatureEndLocation(myband['max'], chrom_in_build_id)
if 'stain' in myband and myband['stain'] is not None:
bfeature.addFeatureProperty(
self.globaltt['has_sequence_attribute'], myband['stain'])
# type the band as a faldo:Region directly (add_region=False)
# bfeature.setNoBNodes(self.nobnodes)
# to come when we merge in ZFIN.py
bfeature.addFeatureToGraph(False)
return | [
"\n :param limit:\n :return:\n\n "
]
|
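How the three scaffold patterns in _get_chrbands classify typical UCSC cytoBand scaffold names (examples taken from the comments in the code):
# placed:      'chr13'                   -> chrom_num='chr13', scaffold_num=None
# unlocalized: 'chr10_KL568008v1_random' -> chrom_num='chr10', scaffold_num='chr10_KL568008v1'
# unplaced:    'chrUn_AABR07022428v1'    -> chrom_num=None,    scaffold_num='Un_AABR07022428v1'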
Please provide a description of the function:def _create_genome_builds(self):
# TODO add more species
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
LOG.info("Adding equivalent assembly identifiers")
for sp in self.species:
tax_id = self.globaltt[sp]
txid_num = tax_id.split(':')[1]
for key in self.files[txid_num]['assembly']:
ucsc_id = key
try:
ucsc_label = ucsc_id.split(':')[1]
except IndexError:
LOG.error('%s Assembly id: "%s" is problematic', sp, key)
continue
if key in self.localtt:
mapped_id = self.localtt[key]
else:
LOG.error(
'%s Assembly id: "%s" is not in local translation table',
sp, key)
mapped_label = mapped_id.split(':')[1]
mapped_label = 'NCBI build ' + str(mapped_label)
geno.addReferenceGenome(ucsc_id, ucsc_label, tax_id)
geno.addReferenceGenome(mapped_id, mapped_label, tax_id)
model.addSameIndividual(ucsc_id, mapped_id)
return | [
"\n Various resources will map variations to either UCSC (hg*)\n or to NCBI assemblies. Here we create the equivalences between them.\n Data taken from:\n https://genome.ucsc.edu/FAQ/FAQreleases.html#release1\n\n :return:\n\n "
]
|
Please provide a description of the function:def add_association_to_graph(self):
Assoc.add_association_to_graph(self)
# make a blank stage
if self.start_stage_id is not None or self.end_stage_id is not None:
stage_process_id = '-'.join((str(self.start_stage_id),
str(self.end_stage_id)))
stage_process_id = '_:'+re.sub(r':', '', stage_process_id)
self.model.addIndividualToGraph(
stage_process_id, None, self.globaltt['developmental_process'])
self.graph.addTriple(
stage_process_id, self.globaltt['starts during'], self.start_stage_id)
self.graph.addTriple(
stage_process_id, self.globaltt['ends during'], self.end_stage_id)
self.stage_process_id = stage_process_id
self.graph.addTriple(
self.assoc_id, self.globaltt['has_qualifier'], self.stage_process_id)
if self.environment_id is not None:
self.graph.addTriple(
self.assoc_id, self.globaltt['has_qualifier'], self.environment_id)
return | [
"\n Overrides Association by including bnode support\n\n The reified relationship between a genotype (or any genotype part)\n and a phenotype is decorated with some provenance information.\n This makes the assumption that\n both the genotype and phenotype are classes.\n\n currently hardcoded to map the annotation to the monarch namespace\n :param g:\n :return:\n "
]
|
Please provide a description of the function:def make_g2p_id(self):
attributes = [self.environment_id, self.start_stage_id, self.end_stage_id]
assoc_id = self.make_association_id(
self.definedby, self.entity_id, self.rel, self.phenotype_id, attributes)
return assoc_id | [
"\n Make an association id for phenotypic associations that is defined by:\n source of association +\n (Annot subject) +\n relationship +\n phenotype/disease +\n environment +\n start stage +\n end stage\n\n :return:\n\n "
]
|
Please provide a description of the function:def _process_diseasegene(self, limit):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
myfile = '/'.join((self.rawdir, self.files['disease-gene']['file']))
for event, elem in ET.iterparse(myfile):
if elem.tag == 'Disorder':
# get the element name and id, ignore element name
# id = elem.get('id') # some internal identifier
disorder_num = elem.find('OrphaNumber').text
disorder_id = 'ORPHA:' + str(disorder_num)
if self.test_mode and disorder_id not in self.all_test_ids['disease']:
continue
disorder_label = elem.find('Name').text
# assuming that these are in the ontology (...any particular one?)
model.addClassToGraph(disorder_id, disorder_label)
assoc_list = elem.find('DisorderGeneAssociationList')
expected_genes = assoc_list.get('count')
LOG.info(
'Expecting %s genes associated with disorder %s.',
expected_genes, disorder_id)
processed_genes = 0
for assoc in assoc_list.findall('DisorderGeneAssociation'):
processed_genes += 1
gene = assoc.find('Gene')
# get gene's curie HGNC or Ensembl ...
lclid = gene.find('OrphaNumber').text
gene_curie = 'ORPHA:' + lclid
gene_set = {'ORPHA': lclid}
for gene_ref in gene.findall(
'./ExternalReferenceList/ExternalReference'):
gene_set[gene_ref.find('Source').text] = \
gene_ref.find('Reference').text
# set priority (clique leader if available) but default to ORPHA
for pfx in ('HGNC', 'Ensembl', 'SwissProt'):
if pfx in gene_set:
if pfx in self.localtt:
pfx = self.localtt[pfx]
gene_curie = pfx + ':' + gene_set[pfx]
gene_set.pop(pfx)
model.addClassToGraph(gene_curie, None)
break
# TEC have reservations w.r.t aggerator links being gene classes
for prefix in gene_set:
lclid = gene_set[prefix]
if prefix in self.localtt:
prefix = self.localtt[prefix]
dbxref = prefix + ':' + lclid
if gene_curie != dbxref:
model.addClassToGraph(dbxref, None)
model.addEquivalentClass(gene_curie, dbxref)
# TEC. would prefer this not happen here. let HGNC handle it
# except there are some w/o explicit external links ...
gene_symbol = gene.find('Symbol').text
syn_list = gene.find('./SynonymList')
if int(syn_list.get('count')) > 0:
for syn in syn_list.findall('./Synonym'):
model.addSynonym(gene_curie, syn.text)
dg_label = assoc.find('./DisorderGeneAssociationType/Name').text
# use dg association status to issue an evidence code
# FIXME I think that these codes are sub-optimal
eco_id = self.resolve(
assoc.find('DisorderGeneAssociationStatus/Name').text)
rel_id = self.resolve(dg_label)
g2p_assoc = G2PAssoc(self.graph, self.name, gene_curie, disorder_id, rel_id)
g2p_assoc.add_evidence(eco_id)
g2p_assoc.add_association_to_graph()
elem.clear() # empty the element
if int(expected_genes) != processed_genes:
LOG.warning(
'%s expected %s associated genes but we processed %i',
disorder_id, expected_genes, processed_genes)
if self.test_mode and limit is not None and line_counter > limit:
return
return | [
"\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
rgd_file = '/'.join(
(self.rawdir, self.files['rat_gene2mammalian_phenotype']['file']))
# ontobio gafparser implemented here
p = GafParser()
assocs = p.parse(open(rgd_file, "r"))
for i, assoc in enumerate(assocs):
if 'relation' in assoc.keys():
self.make_association(assoc)
if limit is not None and i > limit:
break
return | [
"\n Override Source.parse()\n Args:\n :param limit (int, optional) limit the number of rows processed\n Returns:\n :return None\n "
]
|
Please provide a description of the function:def make_association(self, record):
model = Model(self.graph)
record['relation']['id'] = self.resolve("has phenotype")
# define the triple
gene = record['subject']['id']
relation = record['relation']['id']
phenotype = record['object']['id']
# instantiate the association
g2p_assoc = Assoc(self.graph, self.name, sub=gene, obj=phenotype, pred=relation)
# add the references
references = record['evidence']['has_supporting_reference']
# created RGDRef prefix in curie map to route to proper reference URL in RGD
references = [
x.replace('RGD', 'RGDRef') if 'PMID' not in x else x for x in references]
if len(references) > 0:
# make first ref in list the source
g2p_assoc.add_source(identifier=references[0])
ref_model = Reference(
self.graph, references[0],
self.globaltt['publication']
)
ref_model.addRefToGraph()
if len(references) > 1:
# create equivalent source for any other refs in list
# This seems to be specific to this source and
# there could be non-equivalent references in this list
for ref in references[1:]:
model.addSameIndividual(sub=references[0], obj=ref)
# add the date created on
g2p_assoc.add_date(date=record['date'])
g2p_assoc.add_evidence(self.resolve(record['evidence']['type'])) # ?set where?
g2p_assoc.add_association_to_graph()
return | [
"\n contstruct the association\n :param record:\n :return: modeled association of genotype to mammalian phenotype\n "
]
|
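The RGD-to-RGDRef rewrite above, shown on a hypothetical reference list:
>>> refs = ['RGD:1600115', 'PMID:11919284']
>>> [x.replace('RGD', 'RGDRef') if 'PMID' not in x else x for x in refs]
['RGDRef:1600115', 'PMID:11919284']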
Please provide a description of the function:def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
self._process_straininfo(limit)
# the following will provide us the hash-lookups
# These must be processed in a specific order
# mapping between assays and ontology terms
self._process_ontology_mappings_file(limit)
# this is the metadata about the measurements
self._process_measurements_file(limit)
# get all the measurements per strain
self._process_strainmeans_file(limit)
# The following will use the hash populated above
# to lookup the ids when filling in the graph
self._fill_provenance_graph(limit)
LOG.info("Finished parsing.")
return | [
"\n MPD data is delivered in four separate csv files and one xml file,\n which we process iteratively and write out as\n one large graph.\n\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def _process_strainmeans_file(self, limit):
LOG.info("Processing strain means ...")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['strainmeans']['file']))
with gzip.open(raw, 'rb') as f:
f = io.TextIOWrapper(f)
reader = csv.reader(f)
self.check_header(self.files['strainmeans']['file'], f.readline())
score_means_by_measure = {}
strain_scores_by_measure = {}
for row in reader:
try:
# (measnum, varname, strain, strainid, sex, mean, nmice, sd, sem,
# cv, minval, maxval, zscore)
(measnum, varname, strain, strainid, sex, mean, nmice, sd, sem,
cv, minval, maxval, zscore
) = row
except ValueError:
continue
line_counter += 1
strain_num = int(strainid)
assay_num = int(measnum)
# assuming the zscore is across all the items
# in the same measure+var+strain+sex
# note: it seems that there is only ever 1 varname per measnum.
# note: some assays only tested one sex!
# we split this here by sex
if assay_num not in score_means_by_measure:
score_means_by_measure[assay_num] = {}
if sex not in score_means_by_measure[assay_num]:
score_means_by_measure[assay_num][sex] = list()
score_means_by_measure[assay_num][sex].append(float(mean))
if strain_num not in strain_scores_by_measure:
strain_scores_by_measure[strain_num] = {}
if sex not in strain_scores_by_measure[strain_num]:
strain_scores_by_measure[strain_num][sex] = {}
strain_scores_by_measure[strain_num][sex][assay_num] = \
{'mean': float(mean), 'zscore': float(zscore)}
# end loop over strainmeans
self.score_means_by_measure = score_means_by_measure
self.strain_scores_by_measure = strain_scores_by_measure
return | [
"\n This will store the entire set of strain means in a hash.\n Not the most efficient representation,\n but easy access.\n We will loop through this later to then apply cutoffs\n and add associations\n :param limit:\n :return:\n\n "
]
|
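The two hashes built by _process_strainmeans_file end up shaped roughly like this (keys and values invented for illustration):
score_means_by_measure = {10303: {'f': [12.1, 14.9], 'm': [13.4]}}
strain_scores_by_measure = {7: {'f': {10303: {'mean': 12.1, 'zscore': -0.53}}}}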
Please provide a description of the function:def _add_g2p_assoc(self, graph, strain_id, sex, assay_id, phenotypes, comment):
geno = Genotype(graph)
model = Model(graph)
eco_id = self.globaltt['experimental phenotypic evidence']
strain_label = self.idlabel_hash.get(strain_id)
# strain genotype
genotype_id = '_:'+'-'.join((re.sub(r':', '', strain_id), 'genotype'))
genotype_label = '[' + strain_label + ']'
sex_specific_genotype_id = '_:'+'-'.join((
re.sub(r':', '', strain_id), sex, 'genotype'))
if strain_label is not None:
sex_specific_genotype_label = strain_label + ' (' + sex + ')'
else:
sex_specific_genotype_label = strain_id + '(' + sex + ')'
genotype_type = self.globaltt['sex_qualified_genotype']
if sex == 'm':
genotype_type = self.globaltt['male_genotype']
elif sex == 'f':
genotype_type = self.globaltt['female_genotype']
# add the genotype to strain connection
geno.addGenotype(
genotype_id, genotype_label,
self.globaltt['genomic_background'])
graph.addTriple(
strain_id, self.globaltt['has_genotype'], genotype_id)
geno.addGenotype(
sex_specific_genotype_id, sex_specific_genotype_label,
genotype_type)
# add the strain as the background for the genotype
graph.addTriple(
sex_specific_genotype_id,
self.globaltt['has_sex_agnostic_part'],
genotype_id)
# ############# BUILD THE G2P ASSOC #############
# TODO add more provenance info when that model is completed
if phenotypes is not None:
for phenotype_id in phenotypes:
assoc = G2PAssoc(
graph, self.name, sex_specific_genotype_id, phenotype_id)
assoc.add_evidence(assay_id)
assoc.add_evidence(eco_id)
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
model.addComment(assoc_id, comment)
model._addSexSpecificity(assoc_id, self.resolve(sex))
return | [
"\n Create an association between a sex-specific strain id\n and each of the phenotypes.\n Here, we create a genotype from the strain,\n and a sex-specific genotype.\n Each of those genotypes are created as anonymous nodes.\n\n The evidence code is hardcoded to be:\n ECO:experimental_phenotypic_evidence.\n\n :param g:\n :param strain_id:\n :param sex:\n :param assay_id:\n :param phenotypes: a list of phenotypes to association with the strain\n :param comment:\n :return:\n\n "
]
|
Please provide a description of the function:def build_measurement_description(row, localtt):
(measnum,
mpdsector,
projsym,
varname,
descrip,
units,
method,
intervention,
paneldesc,
datatype,
sextested,
nstrainstested,
ageweeks,) = row
if sextested in localtt:
sextested = localtt[sextested]
else:
LOG.warning("Unknown sex tested key: %s", sextested)
description = "This is an assay of [" + descrip + "] shown as a [" + \
datatype + "] measured in [" + units + "]"
if intervention is not None and intervention != "":
description += " in response to [" + intervention + "]"
description += ". The overall experiment is entitled [" + projsym + "]. "
description += "It was conducted in [" + sextested + "] mice at [" + \
ageweeks + "] of age in" + " [" + nstrainstested + \
"] different mouse strains. "
return description | [
"\n As of 9/28/2017 intparm is no longer in the measurements.tsv\n if intparm is not None and intervention != \"\":\n description += \\\n \". This represents the [\" + intparm + \\\n \"] arm, using materials and methods that included [\" + \\\n method + \"]\"\n ",
"\n As of 9/28/2017 cat1-3 are no longer in the measurements.tsv\n description += \"Keywords: \" + cat1 + \\\n ((\", \" + cat2) if cat2.strip() is not \"\" else \"\") + \\\n ((\", \" + cat3) if cat3.strip() is not \"\" else \"\") + \".\"\n "
]
|
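Given a hypothetical measurements row and a localtt that maps 'f' to 'female', build_measurement_description assembles a string like:
"This is an assay of [body weight] shown as a [numeric] measured in [g] in response to [high-fat diet]. The overall experiment is entitled [Jaxpheno1]. It was conducted in [female] mice at [8] of age in [12] different mouse strains. "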
Please provide a description of the function:def _add_assertion_provenance(
self,
assoc_id,
evidence_line_bnode
):
provenance_model = Provenance(self.graph)
model = Model(self.graph)
assertion_bnode = self.make_id(
"assertion{0}{1}".format(assoc_id, self.localtt['IMPC']), '_')
model.addIndividualToGraph(assertion_bnode, None, self.globaltt['assertion'])
provenance_model.add_assertion(
assertion_bnode, self.localtt['IMPC'],
'International Mouse Phenotyping Consortium')
self.graph.addTriple(
assoc_id, self.globaltt['proposition_asserted_in'], assertion_bnode)
self.graph.addTriple(
assertion_bnode,
self.resolve('is_assertion_supported_by_evidence'), # "SEPIO:0000111"
evidence_line_bnode)
return | [
"\n Add assertion level provenance, currently always IMPC\n :param assoc_id:\n :param evidence_line_bnode:\n :return:\n "
]
|
Please provide a description of the function:def _add_study_provenance(
self,
phenotyping_center,
colony,
project_fullname,
pipeline_name,
pipeline_stable_id,
procedure_stable_id,
procedure_name,
parameter_stable_id,
parameter_name,
statistical_method,
resource_name,
row_num
):
provenance_model = Provenance(self.graph)
model = Model(self.graph)
# Add provenance
# A study is a blank node equal to its parts
study_bnode = self.make_id("{0}{1}{2}{3}{4}{5}{6}{7}".format(
phenotyping_center, colony, project_fullname, pipeline_stable_id,
procedure_stable_id, parameter_stable_id, statistical_method,
resource_name), '_')
model.addIndividualToGraph(
study_bnode, None, self.globaltt['study'])
# List of nodes linked to study with has_part property
study_parts = []
# Add study parts
model.addIndividualToGraph(self.resolve(procedure_stable_id), procedure_name)
study_parts.append(self.resolve(procedure_stable_id))
study_parts.append(self.resolve(statistical_method))
provenance_model.add_study_parts(study_bnode, study_parts)
# Add parameter/measure statement: study measures parameter
parameter_label = "{0} ({1})".format(parameter_name, procedure_name)
logging.info("Adding Provenance")
model.addIndividualToGraph(
self.resolve(parameter_stable_id), parameter_label)
provenance_model.add_study_measure(
study_bnode, self.resolve(parameter_stable_id))
# Add Colony
colony_bnode = self.make_id("{0}".format(colony), '_')
model.addIndividualToGraph(colony_bnode, colony)
# Add study agent
model.addIndividualToGraph(
self.resolve(phenotyping_center), phenotyping_center,
self.globaltt['organization'])
# self.graph
model.addTriple(
study_bnode, self.globaltt['has_agent'], self.resolve(phenotyping_center))
# add pipeline and project
model.addIndividualToGraph(
self.resolve(pipeline_stable_id), pipeline_name)
# self.graph
model.addTriple(
study_bnode, self.globaltt['part_of'], self.resolve(pipeline_stable_id))
model.addIndividualToGraph(
self.resolve(project_fullname), project_fullname, self.globaltt['project'])
# self.graph
model.addTriple(
study_bnode, self.globaltt['part_of'], self.resolve(project_fullname))
return study_bnode | [
"\n :param phenotyping_center: str, from self.files['all']\n :param colony: str, from self.files['all']\n :param project_fullname: str, from self.files['all']\n :param pipeline_name: str, from self.files['all']\n :param pipeline_stable_id: str, from self.files['all']\n :param procedure_stable_id: str, from self.files['all']\n :param procedure_name: str, from self.files['all']\n :param parameter_stable_id: str, from self.files['all']\n :param parameter_name: str, from self.files['all']\n :param statistical_method: str, from self.files['all']\n :param resource_name: str, from self.files['all']\n :return: study bnode\n "
]
|
Please provide a description of the function:def _add_evidence(
self,
assoc_id,
eco_id,
p_value,
percentage_change,
effect_size,
study_bnode
):
evidence_model = Evidence(self.graph, assoc_id)
provenance_model = Provenance(self.graph)
model = Model(self.graph)
# Add line of evidence
evidence_line_bnode = self.make_id(
"{0}{1}".format(assoc_id, study_bnode), '_')
evidence_model.add_supporting_evidence(evidence_line_bnode)
model.addIndividualToGraph(evidence_line_bnode, None, eco_id)
# Add supporting measurements to line of evidence
measurements = {}
if p_value is not None and p_value != "":
p_value_bnode = self.make_id(
"{0}{1}{2}".format(evidence_line_bnode, 'p_value', p_value), '_')
model.addIndividualToGraph(p_value_bnode, None, self.globaltt['p-value'])
try:
measurements[p_value_bnode] = float(p_value)
except ValueError:
measurements[p_value_bnode] = p_value
if percentage_change is not None and percentage_change != '':
fold_change_bnode = self.make_id(
"{0}{1}{2}".format(
evidence_line_bnode, 'percentage_change', percentage_change), '_')
model.addIndividualToGraph(
fold_change_bnode, None, self.resolve('percentage_change'))
measurements[fold_change_bnode] = percentage_change
if effect_size is not None and effect_size != "":
fold_change_bnode = self.make_id(
"{0}{1}{2}".format(
evidence_line_bnode, 'effect_size', effect_size), '_')
model.addIndividualToGraph(
fold_change_bnode, None, self.globaltt['effect size estimate'])
measurements[fold_change_bnode] = effect_size
evidence_model.add_supporting_data(evidence_line_bnode, measurements)
# Link evidence to provenance by connecting to study node
provenance_model.add_study_to_measurements(study_bnode, measurements.keys())
self.graph.addTriple(
evidence_line_bnode, self.globaltt['has_evidence_item_output_from'], study_bnode)
return evidence_line_bnode | [
"\n :param assoc_id: assoc curie used to reify a\n genotype to phenotype association, generated in _process_data()\n :param eco_id: eco_id as curie, hardcoded in _process_data()\n :param p_value: str, from self.files['all']\n :param percentage_change: str, from self.files['all']\n :param effect_size: str, from self.files['all']\n :param study_bnode: str, from self.files['all']\n :param phenotyping_center: str, from self.files['all']\n :return: str, evidence_line_bnode as curie\n "
]
|
Please provide a description of the function:def parse_checksum_file(self, file):
checksums = dict()
file_path = '/'.join((self.rawdir, file))
with open(file_path, 'rt') as tsvfile:
reader = csv.reader(tsvfile, delimiter=' ')
for row in reader:
(checksum, whitespace, file_name) = row
checksums[checksum] = file_name
return checksums | [
"\n :param file\n :return dict\n\n "
]
|
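parse_checksum_file assumes md5sum-style lines, i.e. two spaces between the hash and the file name; with delimiter=' ' the reader therefore yields a three-field row whose middle field is the empty string:
# d41d8cd98f00b204e9800998ecf8427e  phenotype_annotation.tab
# -> {'d41d8cd98f00b204e9800998ecf8427e': 'phenotype_annotation.tab'}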
Please provide a description of the function:def compare_checksums(self):
is_match = True
reference_checksums = self.parse_checksum_file(
self.files['checksum']['file'])
for md5, file in reference_checksums.items():
if os.path.isfile('/'.join((self.rawdir, file))):
if self.get_file_md5(self.rawdir, file) != md5:
is_match = False
LOG.warning('%s was not downloaded completely', file)
return is_match
return is_match | [
"\n test to see if fetched file matches checksum from ebi\n :return: True or False\n\n "
]
|
Please provide a description of the function:def addClassToGraph(
self, class_id, label=None, class_type=None, description=None
):
assert class_id is not None
self.graph.addTriple(
class_id, self.globaltt['type'], self.globaltt['class'])
if label is not None:
self.graph.addTriple(
class_id, self.globaltt['label'], label, object_is_literal=True)
if class_type is not None:
self.graph.addTriple(class_id, self.globaltt['subclass_of'], class_type)
if description is not None:
self.graph.addTriple(
class_id, self.globaltt['description'], description,
object_is_literal=True) | [
"\n Any node added to the graph will get at least 3 triples:\n *(node, type, owl:Class) and\n *(node, label, literal(label))\n *if a type is added,\n then the node will be an OWL:subclassOf that the type\n *if a description is provided,\n it will also get added as a dc:description\n :param class_id:\n :param label:\n :param class_type:\n :param description:\n :return:\n\n "
]
|
Please provide a description of the function:def addDeprecatedClass(self, old_id, new_ids=None):
self.graph.addTriple(
old_id, self.globaltt['type'], self.globaltt['class'])
self._addReplacementIds(old_id, new_ids) | [
"\n Will mark the oldid as a deprecated class.\n if one newid is supplied, it will mark it as replaced by.\n if >1 newid is supplied, it will mark it with consider properties\n :param old_id: str - the class id to deprecate\n :param new_ids: list - the class list that is\n the replacement(s) of the old class. Not required.\n :return: None\n\n "
]
|
Please provide a description of the function:def addDeprecatedIndividual(self, old_id, new_ids=None):
self.graph.addTriple(
old_id, self.globaltt['type'], self.globaltt['named_individual'])
self._addReplacementIds(old_id, new_ids) | [
"\n Will mark the oldid as a deprecated individual.\n if one newid is supplied, it will mark it as replaced by.\n if >1 newid is supplied, it will mark it with consider properties\n :param g:\n :param oldid: the individual id to deprecate\n :param newids: the individual idlist that is the replacement(s) of\n the old individual. Not required.\n :return:\n\n "
]
|
Please provide a description of the function:def addSynonym(
self, class_id, synonym, synonym_type=None):
if synonym_type is None:
synonym_type = self.globaltt['has_exact_synonym']
if synonym is not None:
self.graph.addTriple(
class_id, synonym_type, synonym, object_is_literal=True) | [
"\n Add the synonym as a property of the class cid.\n Assume it is an exact synonym, unless otherwise specified\n :param g:\n :param cid: class id\n :param synonym: the literal synonym label\n :param synonym_type: the CURIE of the synonym type (not the URI)\n :return:\n\n "
]
|
Please provide a description of the function:def makeLeader(self, node_id):
self.graph.addTriple(
node_id, self.globaltt['clique_leader'], True, object_is_literal=True,
literal_type='xsd:boolean') | [
"\n Add an annotation property to the given ```node_id```\n to be the clique_leader.\n This is a monarchism.\n :param node_id:\n :return:\n "
]
|
Please provide a description of the function:def _addSexSpecificity(self, subject_id, sex):
self.graph.addTriple(subject_id, self.globaltt['has_sex_specificty'], sex) | [
"\n Add sex specificity to a subject (eg association node)\n\n In our modeling we use this to add a qualifier to a triple\n for example, this genotype to phenotype association\n is specific to this sex (see MGI, IMPC)\n\n This expects the client to define the ontology term\n for sex (eg PATO)\n\n Note this class is probably not the right place for this\n method, but putting here until a better home is found\n :param subject_id:\n :param sex:\n :return:\n "
]
|
Please provide a description of the function:def main():
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument('--config', '-c', required=True, help='JSON configuration file')
parser.add_argument('--out', '-o', required=False, help='output directory', default="./")
parser.add_argument('--use_cache', '-cached', action="store_true",
required=False, help='use cached files', default=False)
args = parser.parse_args()
# Hardcoded dir for raw files
out_path = Path(args.out)
raw_dir = out_path / "out"
raw_dir.mkdir(parents=True, exist_ok=True)
# Hardcoded unmapped file
VERSION = 'v10.5'
STRING_BASE = "http://string-db.org/download/" \
"protein.links.detailed.{}".format(VERSION)
config_file = open(args.config, 'r')
config = yaml.safe_load(config_file)
config_file.close()
out_unmapped_file = out_path / "unmapped_ids.tsv"
unmapped_file = out_unmapped_file.open("w")
# Connect to ensembl
connection = connect_to_database(host=config['database']['host'],
username=config['database']['username'],
port=config['database']['port'])
cursor = connection.cursor()
# Process MGI eqs #
####################
taxon = config['taxa_specific']['mouse']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
mouse_map_file = out_path / config['taxa_specific']['mouse']['output_file']
mouse_file = mouse_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep='\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
mouse_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
ens_gene = get_deprecated_protein_gene_rel(
cursor, prot, config['taxa_specific']['mouse']['ensembl'],
config)
intermine_resp = query_mousemine(
config['taxa_specific']['mouse']['intermine'], ens_gene)
if intermine_resp.is_successful:
mouse_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
else:
unmapped_file.write("{}\t{}\t{}\n".format(prot, ens_gene, taxon))
mouse_file.close()
# Process Fly eqs #
####################
taxon = config['taxa_specific']['fly']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
fly_map_file = out_path / config['taxa_specific']['fly']['output_file']
fly_file = fly_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep='\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
fly_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
ens_gene = get_xref_protein_gene_rel(
cursor, prot, config['taxa_specific']['fly']['ensembl'],
config, taxon)
if ens_gene is not None:
fly_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
else:
unmapped_file.write("{}\t{}\t{}\n".format(prot, '', taxon))
fly_file.close()
# Process Worm eqs #
####################
taxon = config['taxa_specific']['worm']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
uniprot_file = raw_dir / config['taxa_specific']['worm']['uniprot_file']
worm_map_file = out_path / config['taxa_specific']['worm']['output_file']
worm_file = worm_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
download_file(config['taxa_specific']['worm']['uniprot_mappings'],
uniprot_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
uni2gene_map = ensembl.fetch_uniprot_gene_map(taxon)
fh = gzip.open(str(uniprot_file), 'rb')
df = pd.read_csv(fh, sep=r'\s+')
fh.close()
string_uniprot_map = {}
for index, row in df.iterrows():
uniprot_ac = row['uniprot_ac|uniprot_id'].split('|')[0]
string_uniprot_map[row['string_id']] = uniprot_ac
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep=r'\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
worm_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
try:
uniprot_ac = string_uniprot_map[prot]
ens_gene = uni2gene_map[uniprot_ac]
ens_curie = "ENSEMBL:{}".format(ens_gene)
worm_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
unmapped_file.write("{}\t{}\t{}\n".format(prot, '', taxon))
worm_file.close()
# Process ZFIN eqs #
####################
taxon = config['taxa_specific']['zebrafish']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
zfin_map_file = out_path / config['taxa_specific']['zebrafish']['output_file']
zfin_file = zfin_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
# in 3.6 gzip accepts Paths
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep=r'\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
zfin_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
intermine_resp = query_fishmine(
config['taxa_specific']['zebrafish']['intermine'], prot)
if intermine_resp.is_successful:
zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
continue
ens_gene = get_deprecated_protein_gene_rel(
cursor, prot, config['taxa_specific']['zebrafish']['ensembl'],
config)
intermine_resp = query_fishmine(
config['taxa_specific']['zebrafish']['intermine'], ens_gene)
if intermine_resp.is_successful:
zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
continue
intermine_resp = query_fishmine(
config['taxa_specific']['zebrafish']['intermine'],
ens_gene, "Pseudogene")
if intermine_resp.is_successful:
zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
else:
unmapped_file.write("{}\t{}\t{}\n".format(prot, ens_gene, taxon))
zfin_file.close()
unmapped_file.close()
connection.close()
logger.info("ID Map Finished") | [
"\n Zebrafish:\n 1. Map ENSP to ZFIN Ids using Intermine\n 2. Map deprecated ENSP IDs to ensembl genes\n by querying the ensembl database then use\n intermine to resolve to gene IDs\n Mouse: Map deprecated ENSP IDs to ensembl genes\n by querying the ensembl database then use\n intermine to resolve to MGI IDs\n Fly: ENSP IDs appear as xrefs on translation IDs\n Worm: Use UniProt Mapping file provided by String\n "
]
|
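Each species block above follows the same lookup pattern: strip the NCBI taxon prefix from the STRING protein identifier, try the Ensembl biomart map first, then fall back to slower resolvers (ensembl database, intermine, uniprot). A minimal sketch of that pattern with hypothetical data:

def resolve_protein(protein, taxon, *maps):
    # '10090.ENSMUSP00000000001' -> 'ENSMUSP00000000001'
    prot = protein.replace('{}.'.format(taxon), '')
    for mapping in maps:
        if prot in mapping:
            return prot, 'ENSEMBL:{}'.format(mapping[prot])
    return prot, None  # caller writes unresolved ids to unmapped_ids.tsv

# hypothetical map as fetched via Ensembl biomart
p2gene_map = {'ENSMUSP00000000001': 'ENSMUSG00000000001'}
print(resolve_protein('10090.ENSMUSP00000000001', 10090, p2gene_map))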
Please provide a description of the function:def query_mousemine(intermine_url: str, gene_id: str) -> IntermineResult:
service = Service(intermine_url)
query = service.new_query("SequenceFeature")
query.add_view("primaryIdentifier")
query.add_constraint("SequenceFeature", "LOOKUP", "{}".format(gene_id), code="A")
query.add_constraint("organism.shortName", "=", "M. musculus", code="B")
result_list = ["{}".format(val['primaryIdentifier']) for val in query.rows()]
return intermine_response_factory(result_list, gene_id) | [
"\n :param intermine_url: intermine server, eg\n http://www.mousemine.org/mousemine/service\n :param gene_id: gene ID, eg ENSMUSG00000063180\n :return: Intermine_Result object\n "
]
|
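An illustrative call, using the server URL and gene ID mentioned in the docstring; it assumes the intermine client is installed and that the IntermineResult helpers used above are importable:

resp = query_mousemine(
    'http://www.mousemine.org/mousemine/service', 'ENSMUSG00000063180')
if resp.is_successful:
    print(resp.gene_id)  # e.g. an MGI curie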
Please provide a description of the function:def fetch_protein_list(self, taxon_id):
protein_list = list()
# col = self.columns['ensembl_biomart']
col = ['ensembl_peptide_id', ]
params = urllib.parse.urlencode(
{'query': self._build_biomart_gene_query(taxon_id, col)})
conn = http.client.HTTPConnection(ENS_URL)
conn.request("GET", '/biomart/martservice?' + params)
response = conn.getresponse()
for line in response:
line = line.decode('utf-8').rstrip()
row = line.split('\t')
if len(row) != len(col):
LOG.warning("Data error for p-list query on %d", taxon_id)
continue
protein_list.append(row[col.index('ensembl_peptide_id')])
conn.close()
return protein_list | [
"\n Fetch a list of proteins for a species in biomart\n :param taxid:\n :return: list\n "
]
|
Please provide a description of the function:def fetch_protein_gene_map(self, taxon_id):
protein_dict = dict()
# col = self.columns['ensembl_biomart']
col = ['ensembl_peptide_id', 'ensembl_gene_id']
raw_query = self._build_biomart_gene_query(taxon_id, col)
params = urllib.parse.urlencode({'query': raw_query})
conn = http.client.HTTPConnection(ENS_URL)
conn.request("GET", '/biomart/martservice?' + params)
response = conn.getresponse()
for line in response:
line = line.decode('utf-8').rstrip()
row = line.split('\t')
if len(row) != len(col):
LOG.warning("Data error for p2g query on %s", taxon_id)
LOG.warning("Expected columns for \n%s", col)
LOG.warning("Got data \n%s", row)
continue
protein_dict[
row[col.index('ensembl_peptide_id')]] = [
row[col.index('ensembl_gene_id')]
]
conn.close()
LOG.info(
"length protien list for taxon: %s is %i", taxon_id, len(protein_dict))
return protein_dict | [
"\n Fetch a list of proteins for a species in biomart\n :param taxid:\n :return: dict\n "
]
|
Please provide a description of the function:def _build_biomart_gene_query(self, taxid, cols_to_fetch):
taxid = str(taxid)
# basic stuff for ensembl ids.
if taxid != '9606': # drop hgnc column
cols_to_fetch = [x for x in cols_to_fetch if x != 'hgnc_id']
# LOG.info('Build BMQ with taxon %s and mapping %s', taxid, self.localtt)
query_attributes = {
"virtualSchemaName": "default", "formatter": "TSV", "header": "0",
"uniqueRows": "1", "count": "0", "datasetConfigVersion": "0.6"}
qry = etree.Element("Query", query_attributes)
if taxid in self.localtt:
object_attributes = {"name": self.localtt[taxid], "interface": "default"}
dataset = etree.SubElement(qry, "Dataset", object_attributes)
for col in cols_to_fetch:
etree.SubElement(dataset, "Attribute", {"name": col})
# is indent right?
query = '<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE Query>' \
+ etree.tostring(qry, encoding="unicode")
else:
LOG.warning("not finding taxon %s in the local translation table", taxid)
query = None
return query | [
"\n Building url to fetch equivalent identifiers via Biomart Restful API.\n Documentation at\n http://uswest.ensembl.org/info/data/biomart/biomart_restful.html\n :param taxid:\n :param array of ensembl biomart attributes to include\n :return:\n\n "
]
|
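For reference, the query string assembled above has roughly the following shape; the dataset name shown ('mmusculus_gene_ensembl') is illustrative, since the real value comes from the local translation table keyed on taxon:

ens = Ensembl("rdf_graph", True)
qry = ens._build_biomart_gene_query('10090', ['ensembl_peptide_id', 'ensembl_gene_id'])
# qry, reformatted for readability:
# <?xml version="1.0" encoding="UTF-8"?><!DOCTYPE Query>
# <Query virtualSchemaName="default" formatter="TSV" header="0"
#        uniqueRows="1" count="0" datasetConfigVersion="0.6">
#   <Dataset name="mmusculus_gene_ensembl" interface="default">
#     <Attribute name="ensembl_peptide_id" />
#     <Attribute name="ensembl_gene_id" />
#   </Dataset>
# </Query>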
Please provide a description of the function:def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
# load and tag a list of OMIM IDs with types
self.omim_type = self.find_omim_type()
return | [
"\n We fetch GeneReviews id-label map and id-omim mapping files from NCBI.\n :return: None\n "
]
|
Please provide a description of the function:def parse(self, limit=None):
if self.test_only:
self.test_mode = True
self._get_titles(limit)
self._get_equivids(limit)
self.create_books()
self.process_nbk_html(limit)
# no test subset for now; test == full graph
self.testgraph = self.graph
return | [
"\n :return: None\n "
]
|
Please provide a description of the function:def _get_equivids(self, limit):
raw = '/'.join((self.rawdir, self.files['idmap']['file']))
model = Model(self.graph)
LOG.info('Looping over %s', raw)
# we look some stuff up in OMIM, so initialize here
# omim = OMIM(self.graph_type, self.are_bnodes_skized)
id_map = {}
allomimids = set()
col = ['NBK_id', 'GR_shortname', 'OMIM']
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
header = next(filereader)
header[0] = header[0][1:]
if header != col:
LOG.error(
'\nExpected header: %s\nReceived header: %s', col, header)
exit(-1)
for row in filereader:
nbk_num = row[col.index('NBK_id')]
shortname = row[col.index('GR_shortname')]
omim_num = row[col.index('OMIM')]
gr_id = 'GeneReviews:' + nbk_num
omim_id = 'OMIM:' + omim_num
if not (
(self.test_mode and
len(self.test_ids) > 0 and
omim_id in self.test_ids) or not
self.test_mode):
continue
# sometimes there's bad omim nums
omim_num = omim_num.strip()
if len(omim_num) > 6:
LOG.warning(
"OMIM number incorrectly formatted in row %d; skipping:\n%s",
filereader.line_num, '\t'.join(row))
continue
# build up a hashmap of the mappings; then process later
if nbk_num not in id_map:
id_map[nbk_num] = set()
id_map[nbk_num].add(omim_num)
# add the class along with the shortname
model.addClassToGraph(gr_id, None)
model.addSynonym(gr_id, shortname)
allomimids.add(omim_num)
if not self.test_mode and limit is not None \
and filereader.line_num > limit:
break
# end looping through file
# get the omim ids that are not genes
# entries_that_are_phenotypes = omim.process_entries(
# list(allomimids), filter_keep_phenotype_entry_ids, None, None,
# limit=limit, globaltt=self.globaltt)
#
# LOG.info(
# "Filtered out %d/%d entries that are genes or features",
# len(allomimids)-len(entries_that_are_phenotypes), len(allomimids))
##########################################################################
# given all_omim_ids from GR,
# we want to update any which are changed or removed
# before deciding which are disease / phenotypes
replaced = allomimids & self.omim_replaced.keys()
if replaced is not None and len(replaced) > 0:
LOG.warning("These OMIM ID's are past their pull date: %s", str(replaced))
for oid in replaced:
allomimids.remove(oid)
replacements = self.omim_replaced[oid]
for rep in replacements:
allomimids.add(rep)
# guard against omim identifiers which have been removed
obsolete = [
o for o in self.omim_type
if self.omim_type[o] == self.globaltt['obsolete']]
removed = allomimids & set(obsolete)
if removed is not None and len(removed) > 0:
LOG.warning("These OMIM ID's are gone: %s", str(removed))
for oid in removed:
allomimids.remove(oid)
# filter for disease /phenotype types (we can argue about what is included)
omim_phenotypes = set([
omim for omim in self.omim_type if self.omim_type[omim] in (
self.globaltt['Phenotype'],
self.globaltt['has_affected_feature'], # both a gene and a phenotype
self.globaltt['heritable_phenotypic_marker'])]) # probable phenotype
LOG.info(
"Have %i omim_ids globally typed as phenotypes from OMIM",
len(omim_phenotypes))
entries_that_are_phenotypes = allomimids & omim_phenotypes
LOG.info(
"Filtered out %d/%d entries that are genes or features",
len(allomimids - entries_that_are_phenotypes), len(allomimids))
for nbk_num in self.book_ids:
gr_id = 'GeneReviews:'+nbk_num
if nbk_num in id_map:
omim_ids = id_map.get(nbk_num)
for omim_num in omim_ids:
omim_id = 'OMIM:'+omim_num
# add the gene reviews as a superclass to the omim id,
# but only if the omim id is not a gene
if omim_id in entries_that_are_phenotypes:
model.addClassToGraph(omim_id, None)
model.addSubClass(omim_id, gr_id)
# add this as a generic subclass -- TEC: this is the job of inference
model.addSubClass(gr_id, self.globaltt['disease'])
return | [
"\n The file processed here is of the format:\n #NBK_id GR_shortname OMIM\n NBK1103 trimethylaminuria 136132\n NBK1103 trimethylaminuria 602079\n NBK1104 cdls 122470\n Where each of the rows represents a mapping between\n a gr id and an omim id. These are a 1:many relationship,\n and some of the omim ids are genes(not diseases).\n Therefore, we need to create a loose coupling here.\n We make the assumption that these NBKs are generally higher-level\n grouping classes; therefore the OMIM ids are treated as subclasses.\n\n (This assumption is poor for those omims that are actually genes,\n but we have no way of knowing what those are here...\n we will just have to deal with that for now.) -- fixed\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _get_titles(self, limit):
raw = '/'.join((self.rawdir, self.files['titles']['file']))
model = Model(self.graph)
col = ['GR_shortname', 'GR_Title', 'NBK_id', 'PMID']
with open(raw, 'r', encoding='latin-1') as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
header = next(filereader)
header[0] = header[0][1:]
colcount = len(col)
if header != col:
LOG.error(
'\nExpected header: %s\nReceived header: %s', col, header)
exit(-1)
for row in filereader:
if len(row) != colcount:
LOG.error("Unexpected row. got: %s", row)
LOG.error("Expected data for: %s", col)
exit(-1)
nbk_num = row[col.index('NBK_id')]
gr_id = 'GeneReviews:' + nbk_num
self.book_ids.add(nbk_num) # a global set of the book nums
if limit is None or filereader.line_num < limit:
model.addClassToGraph(gr_id, row[col.index('GR_Title')])
model.addSynonym(gr_id, row[col.index('GR_shortname')])
# TODO include the new PMID?
return | [
"\n The file processed here is of the format:\n #NBK_id GR_shortname OMIM\n NBK1103 trimethylaminuria 136132\n NBK1103 trimethylaminuria 602079\n NBK1104 cdls 122470\n Where each of the rows represents a mapping between\n a gr id and an omim id. These are a 1:many relationship,\n and some of the omim ids are genes (not diseases).\n Therefore, we need to create a loose coupling here.\n We make the assumption that these NBKs are generally higher-level\n grouping classes; therefore the OMIM ids are treated as subclasses.\n (This assumption is poor for those omims that are actually genes,\n but we have no way of knowing what those are here...\n we will just have to deal with that for now.)\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def process_nbk_html(self, limit):
model = Model(self.graph)
cnt = 0
books_not_found = set()
clin_des_regx = re.compile(r".*Summary.sec0")
lit_cite_regex = re.compile(r".*Literature_Cited")
pubmed_regex = re.compile(r"pubmed") # ??? for a static string?
for nbk in self.book_ids:
cnt += 1
nbk_id = 'GeneReviews:'+nbk
book_item = self.all_books.get(nbk)
url = '/'.join((self.rawdir, book_item['file']))
# figure out if the book is there; if so, process, otherwise skip
book_dir = '/'.join((self.rawdir, 'books'))
book_files = os.listdir(book_dir)
if ''.join((nbk, '.html')) not in book_files:
# LOG.warning("No book found locally for %s; skipping", nbk)
books_not_found.add(nbk)
continue
LOG.info("Processing %s", nbk)
page = open(url)
soup = BeautifulSoup(page.read(), "html.parser")
# sec0 == clinical description
clin_summary = soup.find('div', id=clin_des_regx)
if clin_summary is not None:
ptext = clin_summary.find('p').text
ptext = re.sub(r'\s+', ' ', ptext)
unlst = clin_summary.find('ul')
if unlst is not None:
item_text = list()
for lst_itm in unlst.find_all('li'):
item_text.append(re.sub(r'\s+', ' ', lst_itm.text))
ptext += ' '.join(item_text)
# add in the copyright and citation info to description
ptext = ' '.join((
ptext, '[GeneReviews:NBK1116, GeneReviews:NBK138602, ' +
nbk_id + ']'))
model.addDefinition(nbk_id, ptext.strip())
# get the pubs
pmid_set = set()
pub_div = soup.find('div', id=lit_cite_regex)
if pub_div is not None:
ref_list = pub_div.find_all('div', attrs={'class': "bk_ref"})
for ref in ref_list:
for anchor in ref.find_all(
'a', attrs={'href': pubmed_regex}):
if re.match(r'PubMed:', anchor.text):
pmnum = re.sub(r'PubMed:\s*', '', anchor.text)
else:
pmnum = re.search(
r'\/pubmed\/(\d+)$', anchor['href']).group(1)
if pmnum is not None:
pmid = 'PMID:'+str(pmnum)
self.graph.addTriple(
pmid, self.globaltt['is_about'], nbk_id)
pmid_set.add(pmnum)
reference = Reference(
self.graph, pmid, self.globaltt['journal article'])
reference.addRefToGraph()
# TODO add author history, copyright, license to dataset
# TODO get PMID-NBKID equivalence (near foot of page),
# and make it "is about" link
# self.gu.addTriple(
# self.graph, pmid,
# self.globaltt['is_about'], nbk_id)
# for example: NBK1191 PMID:20301370
# add the book to the dataset
self.dataset.setFileAccessUrl(book_item['url'])
if limit is not None and cnt > limit:
break
# finish looping through books
bknfd = len(books_not_found)
if len(books_not_found) > 0:
if bknfd > 100:
LOG.warning("There were %d books not found.", bknfd)
else:
LOG.warning(
"The following %d books were not found locally: %s", bknfd,
str(books_not_found))
LOG.info(
"Finished processing %d books for clinical descriptions", cnt - bknfd)
return | [
"\n Here we process the gene reviews books to fetch\n the clinical descriptions to include in the ontology.\n We only use books that have been acquired manually,\n as NCBI Bookshelf does not permit automated downloads.\n This parser will only process the books that are found in\n the ```raw/genereviews/books``` directory,\n permitting partial completion.\n\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def find_omim_type(self):
'''
This f(x) needs to be rehomed and shared.
Use OMIM's description of their identifiers
to heuristically partition them into genes | phenotypes-diseases
type could be
- `obsolete` Check `omim_replaced` populated as side effect
- 'Suspected' (phenotype) Ignoring thus far
- 'gene'
- 'Phenotype'
- 'heritable_phenotypic_marker' Probable phenotype
- 'has_affected_feature' Use as both a gene and a phenotype
:return hash of omim_number to ontology_curie
'''
myfile = '/'.join((self.rawdir, self.files['mimtitles']['file']))
LOG.info("Looping over: %s", myfile)
omim_type = {}
col = [
'Prefix', # prefix
'Mim Number', # omim_id
'Preferred Title; symbol', # pref_label
'Alternative Title(s); symbol(s)', # alt_label
'Included Title(s); symbols' # inc_label
]
with open(myfile, 'r') as fh:
reader = csv.reader(fh, delimiter='\t')
# Copyright (c) ...
header = next(reader)
# Generated: 2018-11-29
header = next(reader)
# date_generated = header[0].split(':')[1].strip()
header = next(reader)
header[0] = header[0][2:]
if header != col:
LOG.error(
'Header is not as expected: %s',
set(header).symmetric_difference(set(col)))
exit(-1)
for row in reader:
if row[0][0] == '#':
continue
prefix = row[col.index('Prefix')]
pref_label = row[col.index('Preferred Title; symbol')]
omim_id = row[col.index('Mim Number')]
if prefix == 'Caret': # moved|removed|split -> moved twice
# populating a dict from an omim to a set of omims
# here as a side effect which is less than ideal
destination = pref_label # they overload the column semantics
if destination[:9] == 'MOVED TO ':
token = destination.split(' ')
rep = token[2]
if not re.match(r'^[0-9]{6}$', rep):
LOG.error(
'Malformed omim replacement %s in %s line %i',
rep, myfile, reader.line_num)
# clean up oddities I know about
if rep[0] == '{' and rep[7] == '}':
rep = rep[1:6]
if len(rep) == 7 and rep[6] == ',':
rep = rep[:5]
# assuming splits are typically to both gene & phenotype
if len(token) > 3:
self.omim_replaced[omim_id] = {rep, token[4]}
else:
self.omim_replaced[omim_id] = {rep}
elif prefix == 'Asterisk': # declared as gene
omim_type[omim_id] = self.globaltt['gene']
elif prefix == 'NULL':
# potential model of disease?
omim_type[omim_id] = self.globaltt['Suspected'] # NCIT:C71458
elif prefix == 'Number Sign':
omim_type[omim_id] = self.globaltt['Phenotype']
elif prefix == 'Percent':
omim_type[omim_id] = self.globaltt['heritable_phenotypic_marker']
elif prefix == 'Plus':
# to be interpreted as a gene and/or a phenotype
omim_type[omim_id] = self.globaltt['has_affected_feature']
else:
LOG.error('Unknown OMIM type line %i', reader.line_num)
return omim_type | []
|
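The branch logic above reduces to a small prefix-to-type table plus the omim_replaced side table for 'Caret' (moved/removed/split) records; condensed:

# OMIM 'Prefix' value -> global translation table key used above
prefix_to_type = {
    'Asterisk': 'gene',
    'NULL': 'Suspected',                       # potential model of disease
    'Number Sign': 'Phenotype',
    'Percent': 'heritable_phenotypic_marker',  # probable phenotype
    'Plus': 'has_affected_feature',            # both gene and phenotype
}
# 'Caret' rows instead populate self.omim_replaced[old_mim] = {new_mim, ...}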
Please provide a description of the function:def addPathway(
self, pathway_id, pathway_label, pathway_type=None,
pathway_description=None):
if pathway_type is None:
pathway_type = self.globaltt['cellular_process']
self.model.addClassToGraph(
pathway_id, pathway_label, pathway_type, pathway_description)
self.model.addSubClass(pathway_id, self.globaltt['pathway'])
return | [
"\n Adds a pathway as a class. If no specific type is specified, it will\n default to a subclass of \"GO:cellular_process\" and \"PW:pathway\".\n :param pathway_id:\n :param pathway_label:\n :param pathway_type:\n :param pathway_description:\n :return:\n "
]
|
Please provide a description of the function:def addGeneToPathway(self, gene_id, pathway_id):
gene_product = '_:'+re.sub(r':', '', gene_id) + 'product'
self.model.addIndividualToGraph(
gene_product, None, self.globaltt['gene_product'])
self.graph.addTriple(
gene_id, self.globaltt['has gene product'], gene_product)
self.addComponentToPathway(gene_product, pathway_id)
return | [
"\n When adding a gene to a pathway, we create an intermediate\n 'gene product' that is involved in\n the pathway, through a blank node.\n\n gene_id RO:has_gene_product _gene_product\n _gene_product RO:involved_in pathway_id\n\n :param pathway_id:\n :param gene_id:\n :return:\n "
]
|
Please provide a description of the function:def addComponentToPathway(self, component_id, pathway_id):
self.graph.addTriple(component_id, self.globaltt['involved in'], pathway_id)
return | [
"\n This can be used directly when the component is directly involved in\n the pathway. If a transforming event is performed on the component\n first, then the addGeneToPathway should be used instead.\n\n :param pathway_id:\n :param component_id:\n :return:\n "
]
|
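A usage sketch for the three pathway helpers above; the owning model class name and the curies are illustrative assumptions:

pw = Pathway(graph)  # hypothetical: the model class exposing these methods
pw.addPathway('KEGG-path:map00010', 'Glycolysis / Gluconeogenesis')
# gene -> 'has gene product' -> blank node -> 'involved in' -> pathway
pw.addGeneToPathway('NCBIGene:3098', 'KEGG-path:map00010')
# a protein participates directly, so no intermediate gene product is minted
pw.addComponentToPathway('UniProtKB:P19367', 'KEGG-path:map00010')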
Please provide a description of the function:def fetch(self, is_dl_forced=False):
dir_path = Path(self.rawdir)
aeolus_file = dir_path / self.files['aeolus']['file']
if self.checkIfRemoteIsNewer(aeolus_file):
aeolis_fh = aeolus_file.open('w')
aeolis_fh.write("[\n")
params = {
'q': '_exists_:aeolus',
'from': 0,
'rows': 10
}
result_count = params['rows']
while params['from'] < result_count:
solr_request = requests.get(self.MY_DRUG_API, params=params)
response = solr_request.json()
for index, doc in enumerate(response['hits']):
if params['from'] == 0 and index == 0:
aeolis_fh.write("{}".format(json.dumps(doc)))
else:
aeolis_fh.write(",\n{}".format(json.dumps(doc)))
if params['from'] % 500 == 0:
LOG.info("Fetched %s documents", params['from'])
result_count = response['total']
params['from'] += params['rows']
aeolis_fh.write("\n]")
aeolis_fh.close() | [
"\n Note there is a unpublished mydrug client that works like this:\n from mydrug import MyDrugInfo\n md = MyDrugInfo()\n r = list(md.query('_exists_:aeolus', fetch_all=True))\n\n :param is_dl_forced: boolean, force download\n :return:\n "
]
|
Please provide a description of the function:def parse(self, limit=None, or_limit=1):
dir_path = Path(self.rawdir)
aeolus_file = dir_path / self.files['aeolus']['file']
aeolus_fh = aeolus_file.open('r')
count = 0
for line in aeolus_fh.readlines():
if limit is not None and count >= limit:
break
line = line.rstrip("\n,")
if line != '[' and line != ']':
self._parse_aeolus_data(document=json.loads(line),
or_limit=or_limit)
count += 1
if count % 500 == 0:
LOG.info("Processed %i documents", count)
aeolus_fh.close()
return | [
"\n Parse mydrug files\n :param limit: int limit json docs processed\n :param or_limit: int odds ratio limit\n :return: None\n "
]
|
Please provide a description of the function:def _add_outcome_provenance(self, association, outcome):
provenance = Provenance(self.graph)
base = self.curie_map.get_base()
provenance.add_agent_to_graph(base, 'Monarch Initiative')
self.graph.addTriple(association, self.globaltt['asserted_by'], base) | [
"\n :param association: str association curie\n :param outcome: dict (json)\n :return: None\n "
]
|
Please provide a description of the function:def _add_outcome_evidence(self, association, outcome):
evidence = Evidence(self.graph, association)
source = {
'curie': "DOI:10.5061/dryad.8q0s4/1",
'label': "Data from: A curated and standardized adverse "
"drug event resource to accelerate drug safety research",
'type': self.globaltt['data set']
}
reference = {
'curie': "PMID:27193236",
'label': None,
'type': self.globaltt['publication']
}
evidence_curie = self.make_id("{0}{1}{2}".format(
association, outcome['id'], self.name
))
evidence_type = self.globaltt['clinical study evidence']
evidence.add_supporting_evidence(evidence_curie, evidence_type)
evidence.add_supporting_publication(
evidence_curie, reference['curie'], reference['label'],
reference['type'])
evidence.add_source(
evidence_curie, source['curie'], source['label'], source['type'])
count_bnode = self.make_id(
"{0}{1}{2}".format(evidence_curie,
outcome['case_count'], self.name), prefix="_")
pr_ratio_bnode = self.make_id(
"{0}{1}{2}{3}".format(evidence_curie, outcome['prr'], self.name, 'prr'),
prefix="_")
odds_ratio_bnode = self.make_id(
"{0}{1}{2}{3}".format(evidence_curie, outcome['ror'], self.name, 'ror'),
prefix="_")
evidence.add_data_individual(
count_bnode, ind_type=self.globaltt['count'])
evidence.add_data_individual(
pr_ratio_bnode,
ind_type=self.globaltt['proportional_reporting_ratio'])
evidence.add_data_individual(
odds_ratio_bnode, ind_type=self.globaltt['odds_ratio'])
value_map = {
count_bnode: outcome['case_count'],
pr_ratio_bnode: outcome['prr'],
odds_ratio_bnode: outcome['ror']
}
evidence.add_supporting_data(evidence_curie, value_map)
return | [
"\n :param association: str association curie\n :param outcome: dict (json)\n :return: None\n "
]
|
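The outcome dicts consumed above come from the parsed aeolus JSON; only the keys referenced here are required, and the values shown are hypothetical:

outcome = {
    'id': '36516812',   # outcome identifier, used to mint the evidence id
    'case_count': 12,
    'prr': 1.8,         # proportional reporting ratio
    'ror': 2.1,         # reporting odds ratio
}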
Please provide a description of the function:def checkIfRemoteIsNewer(self, localfile):
is_remote_newer = False
if localfile.exists() and localfile.stat().st_size > 0:
LOG.info("File exists locally, using cache")
else:
is_remote_newer = True
LOG.info("No cache file, fetching entries")
return is_remote_newer | [
"\n Need to figure out how biothings records releases,\n for now if the file exists we will assume it is\n a fully downloaded cache\n :param localfile: str file path\n :return: boolean True if remote file is newer else False\n "
]
|
Please provide a description of the function:def write(self, fmt='turtle', stream=None):
fmt_ext = {
'rdfxml': 'xml',
'turtle': 'ttl',
'nt': 'nt', # ntriples
'nquads': 'nq',
'n3': 'n3' # notation3
}
# make the regular graph output file
dest = None
if self.name is not None:
dest = '/'.join((self.outdir, self.name))
if fmt in fmt_ext:
dest = '.'.join((dest, fmt_ext.get(fmt)))
else:
dest = '.'.join((dest, fmt))
LOG.info("Setting outfile to %s", dest)
# make the dataset_file name, always format as turtle
self.datasetfile = '/'.join(
(self.outdir, self.name + '_dataset.ttl'))
LOG.info("Setting dataset file to %s", self.datasetfile)
if self.dataset is not None and self.dataset.version is None:
self.dataset.set_version_by_date()
LOG.info("No version for %s setting to date issued.", self.name)
else:
LOG.warning("No output file set. Using stdout")
stream = 'stdout'
gu = GraphUtils(None)
# the _dataset description is always turtle
gu.write(self.dataset.getGraph(), 'turtle', filename=self.datasetfile)
if self.test_mode:
# unless we stop hardcoding, the test dataset is always turtle
LOG.info("Setting testfile to %s", self.testfile)
gu.write(self.testgraph, 'turtle', filename=self.testfile)
# print graph out
if stream is None:
outfile = dest
elif stream.lower().strip() == 'stdout':
outfile = None
else:
LOG.error("I don't understand our stream.")
return
gu.write(self.graph, fmt, filename=outfile) | [
"\n This convenience method will write out all of the graphs\n associated with the source.\n Right now these are hardcoded to be a single \"graph\"\n and a \"src_dataset.ttl\" and a \"src_test.ttl\"\n If you do not supply stream='stdout'\n it will default write these to files.\n\n In addition, if the version number isn't yet set in the dataset,\n it will be set to the date on file.\n :return: None\n\n "
]
|
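A usage sketch, assuming a concrete Source subclass has already populated its graph:

src.write(fmt='turtle')               # writes <outdir>/<name>.ttl and <outdir>/<name>_dataset.ttl
src.write(fmt='nt', stream='stdout')  # prints the main graph instead of writing a file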
Please provide a description of the function:def checkIfRemoteIsNewer(self, remote, local, headers):
LOG.info(
"Checking if remote file \n(%s)\n is newer than local \n(%s)",
remote, local)
# check if local file exists
# if no local file, then remote is newer
if os.path.exists(local):
LOG.info("Local File exists as %s", local)
else:
LOG.info("Local File does NOT exist as %s", local)
return True
# get remote file details
if headers is None:
headers = self._get_default_request_headers()
req = urllib.request.Request(remote, headers=headers)
LOG.info("Request header: %s", str(req.header_items()))
response = urllib.request.urlopen(req)
try:
resp_headers = response.info()
size = resp_headers.get('Content-Length')
last_modified = resp_headers.get('Last-Modified')
except urllib.error.URLError as err:
resp_headers = None
size = 0
last_modified = None
LOG.error(err)
if size is not None and size != '':
size = int(size)
else:
size = 0
fstat = os.stat(local)
LOG.info(
"Local File date: %s",
datetime.utcfromtimestamp(fstat[ST_CTIME]))
if last_modified is not None:
# Thu, 07 Aug 2008 16:20:19 GMT
dt_obj = datetime.strptime(
last_modified, "%a, %d %b %Y %H:%M:%S %Z")
# get local file details
# check date on local vs remote file
if dt_obj > datetime.utcfromtimestamp(fstat[ST_CTIME]):
# check if file size is different
if fstat[ST_SIZE] < size:
LOG.info("New Remote File exists")
return True
if fstat[ST_SIZE] > size:
LOG.warning("New Remote File exists but it is SMALLER")
return True
# filesize is a fairly imperfect metric here
LOG.info("New Remote fFle has same filesize--will not download")
elif fstat[ST_SIZE] != size:
LOG.info(
"Remote File is %i \t Local File is %i", size, fstat[ST_SIZE])
return True
return False | [
"\n Given a remote file location, and the corresponding local file\n this will check the datetime stamp on the files to see if the remote\n one is newer.\n This is a convenience method to be used so that we don't have to\n re-fetch files that we already have saved locally\n :param remote: URL of file to fetch from remote server\n :param local: pathname to save file to locally\n :return: True if the remote file is newer and should be downloaded\n\n "
]
|
Please provide a description of the function:def get_files(self, is_dl_forced, files=None):
fstat = None
if files is None:
files = self.files
for fname in files:
LOG.info("Getting %s", fname)
headers = None
filesource = files[fname]
if 'headers' in filesource:
headers = filesource['headers']
self.fetch_from_url(
filesource['url'], '/'.join((self.rawdir, filesource['file'])),
is_dl_forced, headers)
# if the key 'clean' exists in the sources `files` dict
# expose that instead of the longer url
if 'clean' in filesource and filesource['clean'] is not None:
self.dataset.setFileAccessUrl(filesource['clean'])
else:
self.dataset.setFileAccessUrl(filesource['url'])
fstat = os.stat('/'.join((self.rawdir, filesource['file'])))
# only keeping the date from the last file
filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d")
# FIXME
# change this so the date is attached only to each file, not the entire dataset
self.dataset.set_date_issued(filedate) | [
"\n Given a set of files for this source, it will go fetch them, and\n set a default version by date. If you need to set the version number\n by another method, then it can be set again.\n :param is_dl_forced - boolean\n :param files dict - override instance files dict\n :return: None\n "
]
|
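The entries in the files dict that this method iterates over have roughly this shape; the values below are illustrative:

files = {
    'idmap': {
        'url': 'https://example.org/downloads/idmap.txt',  # hypothetical source URL
        'file': 'idmap.txt',                                # saved under self.rawdir
        'headers': {'User-Agent': 'Mozilla/5.0'},           # optional per-source request headers
        'clean': 'https://example.org/downloads/',          # optional shorter URL exposed in the dataset
    },
}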
Please provide a description of the function:def fetch_from_url(
self, remotefile, localfile=None, is_dl_forced=False, headers=None):
# The 'file' dict in the ingest script is where 'headers' may be found
# e.g. OMIM.py has: 'headers': {'User-Agent': 'Mozilla/5.0'}
response = None
if ((is_dl_forced is True) or localfile is None or
(self.checkIfRemoteIsNewer(remotefile, localfile, headers))):
LOG.info("Fetching from %s", remotefile)
# TODO url verification, etc
if headers is None:
headers = self._get_default_request_headers()
request = urllib.request.Request(remotefile, headers=headers)
response = urllib.request.urlopen(request)
if localfile is not None:
with open(localfile, 'wb') as binwrite:
while True:
chunk = response.read(CHUNK)
if not chunk:
break
binwrite.write(chunk)
LOG.info("Finished. Wrote file to %s", localfile)
if self.compare_local_remote_bytes(remotefile, localfile, headers):
LOG.debug("local file is same size as remote after download")
else:
raise Exception(
"Error downloading file: local file size != remote file size")
fstat = os.stat(localfile)
LOG.info("file size: %s", fstat[ST_SIZE])
LOG.info(
"file created: %s", time.asctime(time.localtime(fstat[ST_CTIME])))
else:
LOG.error('Local filename is required')
exit(-1)
else:
LOG.info("Using existing file %s", localfile)
return response | [
"\n Given a remote url and a local filename, attempt to determine\n if the remote file is newer; if it is,\n fetch the remote file and save it to the specified localfile,\n reporting the basic file information once it is downloaded\n :param remotefile: URL of remote file to fetch\n :param localfile: pathname of file to save locally\n :return: None\n\n "
]
|
Please provide a description of the function:def process_xml_table(self, elem, table_name, processing_function, limit):
line_counter = 0
table_data = elem.find("[@name='" + table_name + "']")
if table_data is not None:
LOG.info("Processing " + table_name)
row = {}
for line in table_data.findall('row'):
for field in line.findall('field'):
atts = dict(field.attrib)
row[atts['name']] = field.text
processing_function(row)
line_counter += 1
if self.test_mode and limit is not None and line_counter > limit:
break
elem.clear() | [
"\n This is a convenience function to process the elements of an xml dump of\n a mysql relational database.\n The \"elem\" is akin to a mysql table, with it's name of ```table_name```.\n It will process each ```row``` given the ```processing_function``` supplied.\n :param elem: The element data\n :param table_name: The name of the table to process\n :param processing_function: The row processing function\n :param limit:\n\n Appears to be making calls to the elementTree library\n although it not explicitly imported here.\n\n :return:\n\n "
]
|
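The helper expects elements shaped like a mysqldump --xml export: a table element whose name attribute matches table_name, containing row elements with named field children. A sketch of the expected input and a row handler, with illustrative names:

import xml.etree.ElementTree as ET

sample = (
    '<database name="example">'
    '  <table_data name="phenotype">'
    '    <row>'
    '      <field name="id">1</field>'
    '      <field name="label">abnormal gait</field>'
    '    </row>'
    '  </table_data>'
    '</database>'
)
elem = ET.fromstring(sample)

def handle_row(row):
    print(row)  # a dict of field name -> text, e.g. {'id': '1', 'label': 'abnormal gait'}

# inside a Source subclass one would then call:
# self.process_xml_table(elem, 'phenotype', handle_row, limit=None)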
Please provide a description of the function:def _check_list_len(row, length):
if len(row) != length:
raise Exception(
"row length does not match expected length of " +
str(length) + "\nrow: " + str(row)) | [
"\n Sanity check for csv parser\n :param row\n :param length\n :return:None\n "
]
|