Code | Summary
---|---
Please provide a description of the function:def get_remote_content_len(self, remote, headers=None):
if headers is None:
headers = self._get_default_request_headers()
req = urllib.request.Request(remote, headers=headers)
try:
response = urllib.request.urlopen(req)
resp_header = response.info()
byte_size = resp_header.get('Content-length')
except OSError as err:
byte_size = None
LOG.error(err)
return byte_size | [
"\n :param remote:\n :return: size of remote file\n "
]
|
Please provide a description of the function:def compare_local_remote_bytes(self, remotefile, localfile, remote_headers=None):
is_equal = True
remote_size = self.get_remote_content_len(remotefile, remote_headers)
local_size = self.get_local_file_size(localfile)
if remote_size is not None and local_size != int(remote_size):
is_equal = False
LOG.error(
'local file and remote file have different sizes\n'
'%s has size %s, %s has size %s',
localfile, local_size, remotefile, remote_size)
return is_equal | [
"\n test to see if fetched file is the same size as the remote file\n using information in the content-length field in the HTTP header\n :return: True or False\n "
]
|
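A minimal usage sketch of the two size-check helpers above; `fetcher` is assumed to be a Source-like instance exposing both methods, and the URL and path are illustrative only:

remote = 'https://example.org/data/genes.tsv.gz'
local = '/tmp/raw/genes.tsv.gz'
if not fetcher.compare_local_remote_bytes(remote, local):
    # sizes differ, so the local copy is likely stale or truncated
    print('re-fetch needed for', local)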
Please provide a description of the function:def get_eco_map(url):
# this would go in a translation table but it is generated dynamically
# maybe when we move to a make-driven system
eco_map = {}
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
for line in response:
line = line.decode('utf-8').rstrip()
if re.match(r'^#', line):
continue
(code, go_ref, eco_curie) = line.split('\t')
if go_ref != 'Default':
eco_map["{}-{}".format(code, go_ref)] = eco_curie
else:
eco_map[code] = eco_curie
return eco_map | [
"\n To conver the three column file to\n a hashmap we join primary and secondary keys,\n for example\n IEA\tGO_REF:0000002\tECO:0000256\n IEA\tGO_REF:0000003\tECO:0000501\n IEA\tDefault\tECO:0000501\n\n becomes\n IEA-GO_REF:0000002: ECO:0000256\n IEA-GO_REF:0000003: ECO:0000501\n IEA: ECO:0000501\n\n :return: dict\n "
]
|
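A hedged lookup example against the map built by get_eco_map; the fallback-to-default pattern mirrors how the compound keys are constructed, and the URL is an assumption about the GAF ECO mapping file this is usually pointed at:

eco_map = get_eco_map('http://purl.obolibrary.org/obo/eco/gaf-eco-mapping.txt')
code, go_ref = 'IEA', 'GO_REF:0000002'
# try the compound key first, then fall back to the evidence code's default
eco_curie = eco_map.get('{}-{}'.format(code, go_ref), eco_map.get(code))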
Please provide a description of the function:def declareAsOntology(self, graph):
# <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ;
# owl:versionInfo
# <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl>
model = Model(graph)
# is self.outfile suffix set yet???
ontology_file_id = 'MonarchData:' + self.name + ".ttl"
model.addOntologyDeclaration(ontology_file_id)
# add timestamp as version info
cur_time = datetime.now()
t_string = cur_time.strftime("%Y-%m-%d")
ontology_version = t_string
# TEC this means the MonarchArchive IRI needs the release updated
# maybe extract the version info from there
# should not hardcode the suffix as it may change
archive_url = 'MonarchArchive:' + 'ttl/' + self.name + '.ttl'
model.addOWLVersionIRI(ontology_file_id, archive_url)
model.addOWLVersionInfo(ontology_file_id, ontology_version) | [
"\n The file we output needs to be declared as an ontology,\n including it's version information.\n\n TEC: I am not convinced dipper reformating external data as RDF triples\n makes an OWL ontology (nor that it should be considered a goal).\n\n Proper ontologies are built by ontologists. Dipper reformats data\n and anotates/decorates it with a minimal set of carefully arranged\n terms drawn from from multiple proper ontologies.\n Which allows the whole (dipper's RDF triples and parent ontologies)\n to function as a single ontology we can reason over when combined\n in a store such as SciGraph.\n\n Including more than the minimal ontological terms in dipper's RDF\n output constitutes a liability as it allows greater divergence\n between dipper artifacts and the proper ontologies.\n\n Further information will be augmented in the dataset object.\n :param version:\n :return:\n\n "
]
|
Please provide a description of the function:def remove_backslash_r(filename, encoding):
with open(filename, 'r', encoding=encoding, newline='\n') as filereader:
contents = filereader.read()
contents = re.sub(r'\r', '', contents)
with open(filename, "w") as filewriter:
filewriter.truncate()
filewriter.write(contents) | [
"\n A helpful utility to remove Carriage Return from any file.\n This will read a file into memory,\n and overwrite the contents of the original file.\n\n TODO: This function may be a liability\n\n :param filename:\n\n :return:\n\n "
]
|
Please provide a description of the function:def open_and_parse_yaml(yamlfile):
# ??? what if the yaml file does not contain a dict datastructure?
mapping = dict()
if os.path.exists(os.path.join(os.path.dirname(__file__), yamlfile)):
map_file = open(os.path.join(os.path.dirname(__file__), yamlfile), 'r')
mapping = yaml.safe_load(map_file)
map_file.close()
else:
LOG.warning("file: %s not found", yamlfile)
return mapping | [
"\n :param file: String, path to file containing label-id mappings in\n the first two columns of each row\n :return: dict where keys are labels and values are ids\n "
]
|
Please provide a description of the function:def parse_mapping_file(file):
id_map = {}
if os.path.exists(os.path.join(os.path.dirname(__file__), file)):
with open(os.path.join(os.path.dirname(__file__), file)) as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
for row in reader:
key = row[0]
value = row[1]
id_map[key] = value
return id_map | [
"\n :param file: String, path to file containing label-id mappings\n in the first two columns of each row\n :return: dict where keys are labels and values are ids\n "
]
|
Please provide a description of the function:def load_local_translationtable(self, name):
'''
Load "ingest specific" translation from whatever they called something
to the ontology label we need to map it to.
To facilitate seeing more ontology labels in dipper ingests,
a reverse mapping from ontology labels to external strings is also generated
and available as a dict localtcid
'''
localtt_file = 'translationtable/' + name + '.yaml'
try:
with open(localtt_file):
pass
except IOError:
# write a stub file as a place holder if none exists
with open(localtt_file, 'w') as write_yaml:
yaml.dump({name: name}, write_yaml)
finally:
with open(localtt_file, 'r') as read_yaml:
localtt = yaml.safe_load(read_yaml)
# inverse local translation.
# note: keeping this invertible will take work.
# Useful to not litter an ingest with external syntax
self.localtcid = {v: k for k, v in localtt.items()}
return localtt | []
|
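A sketch of the round trip this sets up, assuming an ingest named 'sgd' whose translationtable/sgd.yaml maps an ingest-specific string to an ontology label (the entry shown is hypothetical, and `source` is assumed to be the ingest instance):

localtt = source.load_local_translationtable('sgd')
label = localtt['classical genetics']   # ingest string -> ontology label
original = source.localtcid[label]      # inverse map back to the ingest string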
Please provide a description of the function:def resolve(self, word, mandatory=True):
'''
composite mapping
given f(x) and g(x)
here: localtt & globaltt respectivly
return g(f(x))|g(x)||f(x)|x in order of preference
returns x on fall through if finding a mapping
is not mandatory (by default finding is mandatory).
This may be specialized further from any mapping
to a global mapping only; if need be.
:param word: the srting to find as a key in translation tables
:param mandatory: boolean to cauae failure when no key exists
:return
value from global translation table,
or value from local translation table,
or the query key if finding a value is not mandatory (in this order)
'''
assert word is not None
# we may not agree with a remote source's use of a global term we have;
# this provides an opportunity for us to override it
if word in self.localtt:
label = self.localtt[word]
if label in self.globaltt:
term_id = self.globaltt[label]
else:
logging.info(
"Translated to '%s' but no global term_id for: '%s'", label, word)
term_id = label
elif word in self.globaltt:
term_id = self.globaltt[word]
else:
if mandatory:
raise KeyError("Mapping required for: ", word)
logging.warning("We have no translation for: '%s'", word)
term_id = word
return term_id | []
|
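An illustrative walk through the preference order, with made-up table contents (`source` is assumed to carry the two dicts):

# localtt  = {'cerebral': 'brain'}        # f(x): ingest term -> label
# globaltt = {'brain': 'UBERON:0000955'}  # g(x): label -> term id
source.resolve('cerebral')                 # -> 'UBERON:0000955'  g(f(x))
source.resolve('brain')                    # -> 'UBERON:0000955'  g(x)
source.resolve('spleen', mandatory=False)  # -> 'spleen' (fall through)
source.resolve('spleen')                   # raises KeyError (mandatory by default)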
Please provide a description of the function:def addGenotype(
self, genotype_id, genotype_label,
genotype_type=None,
genotype_description=None
):
if genotype_type is None:
genotype_type = self.globaltt['intrinsic_genotype']
self.model.addIndividualToGraph(
genotype_id, genotype_label, genotype_type, genotype_description)
return | [
"\n If a genotype_type is not supplied,\n we will default to 'intrinsic_genotype'\n :param genotype_id:\n :param genotype_label:\n :param genotype_type:\n :param genotype_description:\n :return:\n\n "
]
|
Please provide a description of the function:def addAllele(
self, allele_id, allele_label, allele_type=None,
allele_description=None):
# TODO should we accept a list of allele types?
if allele_type is None:
allele_type = self.globaltt['allele'] # TODO is this a good idea?
self.model.addIndividualToGraph(
allele_id, allele_label, allele_type, allele_description)
return | [
"\n Make an allele object.\n If no allele_type is added, it will default to a geno:allele\n :param allele_id: curie for allele (required)\n :param allele_label: label for allele (required)\n :param allele_type: id for an allele type (optional,\n recommended SO or GENO class)\n :param allele_description: a free-text description of the allele\n :return:\n\n "
]
|
Please provide a description of the function:def addGene(
self, gene_id, gene_label, gene_type=None, gene_description=None
):
''' genes are classes '''
if gene_type is None:
gene_type = self.globaltt['gene']
self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description)
return | []
|
Please provide a description of the function:def addDerivesFrom(self, child_id, parent_id):
self.graph.addTriple(
child_id, self.globaltt['derives_from'], parent_id)
return | [
"\n We add a derives_from relationship between the child and parent id.\n Examples of uses include between:\n an allele and a construct or strain here,\n a cell line and it's parent genotype. Adding the parent and child to\n the graph should happen outside of this function call to ensure graph\n integrity.\n :param child_id:\n :param parent_id:\n :return:\n\n "
]
|
Please provide a description of the function:def addAlleleOfGene(self, allele_id, gene_id, rel_id=None):
if rel_id is None:
rel_id = self.globaltt["is_allele_of"]
self.graph.addTriple(allele_id, rel_id, gene_id)
return | [
"\n We make the assumption here that if the relationship is not provided,\n it is a\n GENO:is_allele_of.\n\n Here, the allele should be a variant_locus, not a sequence alteration.\n :param allele_id:\n :param gene_id:\n :param rel_id:\n :return:\n\n "
]
|
Please provide a description of the function:def addAffectedLocus(
self, allele_id, gene_id, rel_id=None):
if rel_id is None:
rel_id = self.globaltt['has_affected_feature']
self.graph.addTriple(allele_id, rel_id, gene_id)
return | [
"\n We make the assumption here that if the relationship is not provided,\n it is a\n GENO:is_allele_of.\n\n Here, the allele should be a variant_locus, not a sequence alteration.\n :param allele_id:\n :param gene_id:\n :param rel_id:\n :return:\n\n "
]
|
Please provide a description of the function:def addGeneProduct(
self, sequence_id, product_id, product_label=None, product_type=None):
if product_label is not None and product_type is not None:
self.model.addIndividualToGraph(
product_id, product_label, product_type)
self.graph.addTriple(
sequence_id, self.globaltt['has gene product'], product_id)
return | [
"\n Add gene/variant/allele has_gene_product relationship\n Can be used to either describe a gene to transcript relationship\n or gene to protein\n :param sequence_id:\n :param product_id:\n :param product_label:\n :param product_type:\n :return:\n\n "
]
|
Please provide a description of the function:def addPolypeptide(
self, polypeptide_id, polypeptide_label=None,
transcript_id=None, polypeptide_type=None):
if polypeptide_type is None:
polypeptide_type = self.globaltt['polypeptide']
self.model.addIndividualToGraph(
polypeptide_id, polypeptide_label, polypeptide_type)
if transcript_id is not None:
self.graph.addTriple(
transcript_id, self.globaltt['translates_to'], polypeptide_id)
return | [
"\n :param polypeptide_id:\n :param polypeptide_label:\n :param polypeptide_type:\n :param transcript_id:\n :return:\n\n "
]
|
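A hedged sketch wiring a gene -> transcript -> polypeptide chain with the two methods above; all curies and labels are illustrative, and 'transcript' is assumed to be a key in the global translation table:

geno.addGeneProduct(
    'ENSEMBL:ENSG00000139618', 'ENSEMBL:ENST00000380152',
    product_label='BRCA2-201', product_type=geno.globaltt['transcript'])
geno.addPolypeptide(
    'UniProtKB:P51587', 'BRCA2_HUMAN',
    transcript_id='ENSEMBL:ENST00000380152')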
Please provide a description of the function:def addPartsToVSLC(
self, vslc_id, allele1_id, allele2_id, zygosity_id=None,
allele1_rel=None, allele2_rel=None):
# vslc has parts allele1/allele2
if allele1_id is not None:
self.addParts(allele1_id, vslc_id, allele1_rel)
if allele2_id is not None and allele2_id.strip() != '':
self.addParts(allele2_id, vslc_id, allele2_rel)
# figure out zygosity if it's not supplied
if zygosity_id is None:
if allele1_id == allele2_id:
zygosity_id = self.globaltt['homozygous']
else:
zygosity_id = self.globaltt['heterozygous']
if zygosity_id is not None:
self.graph.addTriple(vslc_id, self.globaltt['has_zygosity'], zygosity_id)
return | [
"\n Here we add the parts to the VSLC. While traditionally alleles\n (reference or variant loci) are traditionally added, you can add any\n node (such as sequence_alterations for unlocated variations) to a vslc\n if they are known to be paired. However, if a sequence_alteration's\n loci is unknown, it probably should be added directly to the GVC.\n :param vslc_id:\n :param allele1_id:\n :param allele2_id:\n :param zygosity_id:\n :param allele1_rel:\n :param allele2_rel:\n :return:\n\n "
]
|
Please provide a description of the function:def addVSLCtoParent(self, vslc_id, parent_id):
self.addParts(vslc_id, parent_id, self.globaltt['has_variant_part'])
return | [
"\n The VSLC can either be added to a genotype or to a GVC.\n The vslc is added as a part of the parent.\n :param vslc_id:\n :param parent_id:\n :return:\n "
]
|
Please provide a description of the function:def addParts(self, part_id, parent_id, part_relationship=None):
# Fail loudly if parent or child identifiers are None
if parent_id is None:
raise TypeError('Attempt to pass None as parent')
if part_id is None:
raise TypeError('Attempt to pass None as child')
if part_relationship is None:
part_relationship = self.globaltt['has_part']
self.graph.addTriple(parent_id, part_relationship, part_id)
return | [
"\n This will add a has_part (or subproperty) relationship between\n a parent_id and the supplied part.\n By default the relationship will be BFO:has_part,\n but any relationship could be given here.\n :param part_id:\n :param parent_id:\n :param part_relationship:\n :return:\n\n "
]
|
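A quick sketch of the fail-loud contract above; the identifiers are illustrative and `geno` is a Genotype instance as in the surrounding methods:

geno.addParts('ZFIN:ZDB-ALT-980203-1091', 'ZFIN:ZDB-GENO-070219-2')  # defaults to has_part
try:
    geno.addParts(None, 'ZFIN:ZDB-GENO-070219-2')
except TypeError as err:
    print(err)  # Attempt to pass None as child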
Please provide a description of the function:def addTaxon(self, taxon_id, genopart_id):
self.graph.addTriple(
genopart_id, self.globaltt['in taxon'], taxon_id)
return | [
"\n The supplied geno part will have the specified taxon added with\n RO:in_taxon relation.\n Generally the taxon is associated with a genomic_background,\n but could be added to any genotype part (including a gene,\n regulatory element, or sequence alteration).\n :param taxon_id:\n :param genopart_id:\n\n :return:\n\n "
]
|
Please provide a description of the function:def addGeneTargetingReagent(
self, reagent_id, reagent_label, reagent_type, gene_id,
description=None):
# TODO add default type to reagent_type
self.model.addIndividualToGraph(
reagent_id, reagent_label, reagent_type, description)
self.graph.addTriple(reagent_id, self.globaltt['targets_gene'], gene_id)
return | [
"\n Here, a gene-targeting reagent is added.\n The actual targets of this reagent should be added separately.\n :param reagent_id:\n :param reagent_label:\n :param reagent_type:\n\n :return:\n\n "
]
|
Please provide a description of the function:def addReagentTargetedGene(
self, reagent_id, gene_id, targeted_gene_id=None,
targeted_gene_label=None, description=None):
# akin to a variant locus
if targeted_gene_id is None:
targeted_gene_id = '_' + gene_id + '-' + reagent_id
targeted_gene_id = targeted_gene_id.replace(":", "")
self.model.addIndividualToGraph(
targeted_gene_id, targeted_gene_label,
self.globaltt['reagent_targeted_gene'], description)
if gene_id is not None:
self.graph.addTriple(
targeted_gene_id, self.globaltt['is_expression_variant_of'],
gene_id)
self.graph.addTriple(
targeted_gene_id, self.globaltt['is_targeted_by'], reagent_id)
return | [
"\n This will create the instance of a gene that is targeted by a molecular\n reagent (such as a morpholino or rnai).\n If an instance id is not supplied,\n we will create it as an anonymous individual which is of the type\n GENO:reagent_targeted_gene.\n We will also add the targets relationship between the reagent and\n gene class.\n\n <targeted_gene_id> a GENO:reagent_targeted_gene\n rdf:label targeted_gene_label\n dc:description description\n <reagent_id> GENO:targets_gene <gene_id>\n\n :param reagent_id:\n :param gene_id:\n :param targeted_gene_id:\n :return:\n\n "
]
|
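A sketch pairing the two reagent methods above; the curies are illustrative and 'morpholino_oligo' is an assumed globaltt key:

reagent = 'ZFIN:ZDB-MRPHLNO-060317-4'
gene = 'ZFIN:ZDB-GENE-980526-166'
geno.addGeneTargetingReagent(reagent, 'MO1-shha', geno.globaltt['morpholino_oligo'], gene)
geno.addReagentTargetedGene(reagent, gene)
# with no targeted_gene_id supplied, a blank-node id is minted by
# concatenation with the colons stripped:
# '_ZFINZDB-GENE-980526-166-ZFINZDB-MRPHLNO-060317-4'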
Please provide a description of the function:def addChromosome(
self, chrom, tax_id, tax_label=None, build_id=None, build_label=None):
family = Family(self.graph)
# first, make the chromosome class, at the taxon level
chr_id = makeChromID(str(chrom), tax_id)
if tax_label is not None:
chr_label = makeChromLabel(chrom, tax_label)
else:
chr_label = makeChromLabel(chrom)
genome_id = self.makeGenomeID(tax_id)
self.model.addClassToGraph(chr_id, chr_label, self.globaltt['chromosome'])
self.addTaxon(tax_id, genome_id) # add the taxon to the genome
if build_id is not None:
# the build-specific chromosome
chrinbuild_id = makeChromID(chrom, build_id)
if build_label is None:
build_label = build_id
chrinbuild_label = makeChromLabel(chrom, build_label)
# add the build-specific chromosome as an instance of the chr class
self.model.addIndividualToGraph(chrinbuild_id, chrinbuild_label, chr_id)
# add the build-specific chromosome
# as a member of the build (both ways)
family.addMember(build_id, chrinbuild_id)
family.addMemberOf(chrinbuild_id, build_id)
return | [
"\n if it's just the chromosome, add it as an instance of a SO:chromosome,\n and add it to the genome. If a build is included,\n punn the chromosome as a subclass of SO:chromsome, and make the\n build-specific chromosome an instance of the supplied chr.\n The chr then becomes part of the build or genome.\n "
]
|
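An illustrative call showing the punning the docstring describes: chromosome 5 is declared as a class at the taxon level, and as an instance within a build (the build id and label are assumptions):

geno.addChromosome(
    '5', 'NCBITaxon:7955', tax_label='Danio rerio',
    build_id='UCSC:danRer11', build_label='danRer11')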
Please provide a description of the function:def addChromosomeInstance(
self, chr_num, reference_id, reference_label, chr_type=None):
family = Family(self.graph)
chr_id = makeChromID(str(chr_num), reference_id, 'MONARCH')
chr_label = makeChromLabel(str(chr_num), reference_label)
self.model.addIndividualToGraph(chr_id, chr_label, self.globaltt['chromosome'])
if chr_type is not None:
self.model.addType(chr_id, chr_type)
# add the build-specific chromosome
# as a member of the build (both ways)
family.addMember(reference_id, chr_id)
family.addMemberOf(chr_id, reference_id) # usage dependent, todo: omit
return | [
"\n Add the supplied chromosome as an instance within the given reference\n :param chr_num:\n :param reference_id: for example, a build id like UCSC:hg19\n :param reference_label:\n :param chr_type: this is the class that this is an instance of.\n typically a genome-specific chr\n\n :return:\n\n "
]
|
Please provide a description of the function:def make_vslc_label(self, gene_label, allele1_label, allele2_label):
vslc_label = ''
if gene_label is None and allele1_label is None and allele2_label is None:
LOG.error("Not enough info to make vslc label")
return None
top = self.make_variant_locus_label(gene_label, allele1_label)
bottom = ''
if allele2_label is not None:
bottom = self.make_variant_locus_label(gene_label, allele2_label)
vslc_label = '/'.join((top, bottom))
return vslc_label | [
"\n Make a Variant Single Locus Complement (VSLC) in monarch-style.\n :param gene_label:\n :param allele1_label:\n :param allele2_label:\n :return:\n "
]
|
Please provide a description of the function:def get_ncbi_taxon_num_by_label(label):
req = {'db': 'taxonomy', 'retmode': 'json', 'term': label}
req.update(EREQ)
request = SESSION.get(ESEARCH, params=req)
LOG.info('fetching: %s', request.url)
request.raise_for_status()
result = request.json()['esearchresult']
# Occasionally eutils returns the json blob
# {'ERROR': 'Invalid db name specified: taxonomy'}
if 'ERROR' in result:
request = SESSION.get(ESEARCH, params=req)
LOG.info('fetching: %s', request.url)
request.raise_for_status()
result = request.json()['esearchresult']
tax_num = None
if 'count' in result and str(result['count']) == '1':
tax_num = result['idlist'][0]
else:
# TODO throw errors
LOG.warning('ESEARCH for taxon label "%s" returns %s', label, str(result))
return tax_num | [
"\n Here we want to look up the NCBI Taxon id using some kind of label.\n It will only return a result if there is a unique hit.\n\n :return:\n\n "
]
|
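Expected behavior, assuming this is exposed as a DipperUtil static helper (it makes a live eutils call, so the value shown is illustrative):

tax_num = DipperUtil.get_ncbi_taxon_num_by_label('Danio rerio')
# -> '7955' when esearch finds exactly one hit; None otherwise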
Please provide a description of the function:def is_omim_disease(gene_id):
SCIGRAPH_BASE = 'https://scigraph-ontology-dev.monarchinitiative.org/scigraph/graph/'
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
session.mount('https://', adapter)
isOmimDisease = False
url = SCIGRAPH_BASE + gene_id + '.json'
response = session.get(url)
try:
results = response.json()
if 'nodes' in results and len(results['nodes']) > 0:
if 'meta' in results['nodes'][0] \
and 'category' in results['nodes'][0]['meta'] \
and 'disease' in results['nodes'][0]['meta']['category']:
LOG.info("%s is a disease, skipping", gene_id)
isOmimDisease = True
except ValueError:
pass
return isOmimDisease | [
"\n Process omim equivalencies by examining the monarch ontology scigraph\n As an alternative we could examine mondo.owl, since the ontology\n scigraph imports the output of this script which creates an odd circular\n dependency (even though we're querying mondo.owl through scigraph)\n\n :param graph: rdfLib graph object\n :param gene_id: ncbi gene id as curie\n :param omim_id: omim id as curie\n :return: None\n "
]
|
Please provide a description of the function:def get_ncbi_id_from_symbol(gene_symbol):
monarch_url = 'https://solr.monarchinitiative.org/solr/search/select'
params = DipperUtil._get_solr_weight_settings()
params["q"] = "{0} \"{0}\"".format(gene_symbol)
params["fq"] = ["taxon:\"NCBITaxon:9606\"", "category:\"gene\""]
gene_id = None
try:
monarch_request = requests.get(monarch_url, params=params)
response = monarch_request.json()
count = response['response']['numFound']
if count > 0:
gene_id = response['response']['docs'][0]['id']
except requests.ConnectionError:
print("error fetching {0}".format(monarch_url))
return gene_id | [
"\n Get ncbi gene id from symbol using monarch and mygene services\n :param gene_symbol:\n :return:\n "
]
|
Please provide a description of the function:def set_association_id(self, assoc_id=None):
if assoc_id is None:
self.assoc_id = self.make_association_id(
self.definedby, self.sub, self.rel, self.obj)
else:
self.assoc_id = assoc_id
return self.assoc_id | [
"\n This will set the association ID based on the internal parts\n of the association.\n To be used in cases where an external association identifier\n should be used.\n\n :param assoc_id:\n\n :return:\n\n "
]
|
Please provide a description of the function:def make_association_id(definedby, sub, pred, obj, attributes=None):
items_to_hash = [definedby, sub, pred, obj]
if attributes is not None and len(attributes) > 0:
items_to_hash += attributes
items_to_hash = [x for x in items_to_hash if x is not None]
assoc_id = ':'.join(('MONARCH', GraphUtils.digest_id('+'.join(items_to_hash))))
assert assoc_id is not None
return assoc_id | [
"\n A method to create unique identifiers for OBAN-style associations,\n based on all the parts of the association\n If any of the items is empty or None, it will convert it to blank.\n It effectively digests the string of concatonated values.\n Subclasses of Assoc can submit an additional array of attributes\n that will be appeded to the ID.\n\n Note this is equivalent to a RDF blank node\n\n :param definedby: The (data) resource that provided the annotation\n :param subject:\n :param predicate:\n :param object:\n :param attributes:\n\n :return:\n\n "
]
|
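Because the id is a deterministic digest of its parts, the same inputs always mint the same curie; the arguments below are illustrative:

a1 = Assoc.make_association_id('zfin', 'ZFIN:ZDB-FISH-150901-10750', 'RO:0002200', 'ZP:0000001')
a2 = Assoc.make_association_id('zfin', 'ZFIN:ZDB-FISH-150901-10750', 'RO:0002200', 'ZP:0000001')
assert a1 == a2 and a1.startswith('MONARCH:')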
Please provide a description of the function:def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
sgd_file = '/'.join((self.rawdir, self.files['sgd_phenotype']['file']))
columns = [
'Feature Name', 'Feature Type', 'Gene Name', 'SGDID', 'Reference',
'Experiment Type', 'Mutant Type', 'Allele', 'Strain Background',
'Phenotype', 'Chemical', 'Condition', 'Details', 'Reporter']
sgd_df = pd.read_csv(sgd_file, sep='\t', names=columns)
records = sgd_df.to_dict(orient='records')
for index, assoc in enumerate(records):
if isinstance(assoc['Gene Name'], str):
if limit is not None and index > limit:
break
self.make_association(assoc)
return | [
"\n Override Source.parse()\n Args:\n :param limit (int, optional) limit the number of rows processed\n Returns:\n :return None\n "
]
|
Please provide a description of the function:def make_association(self, record):
# prep record
# remove description and map Experiment Type to apo term
experiment_type = record['Experiment Type'].split('(')[0]
experiment_type = experiment_type.split(',')
record['experiment_type'] = list()
for exp_type in experiment_type:
exp_type = exp_type.lstrip().rstrip()
record['experiment_type'].append(
{
'id': self.apo_term_id[exp_type],
'term': exp_type,
})
sgd_phenotype = record['Phenotype']
pheno_obj = {
'entity': {
'term': None,
'apo_id': None
},
'quality': {
'term': None,
'apo_id': None
},
'has_quality': False # descriptive and don't bother looking for a quality
}
phenotype = record['Phenotype']
if ':' in phenotype:
pheno_obj['has_quality'] = True
ent_qual = sgd_phenotype.split(': ')
entity = ent_qual[0]
quality = ent_qual[1]
pheno_obj['entity']['term'] = entity
pheno_obj['entity']['apo_id'] = self.apo_term_id[entity]
pheno_obj['quality']['term'] = quality
pheno_obj['quality']['apo_id'] = self.apo_term_id[quality]
else:
pheno_obj['entity']['term'] = phenotype
pheno_obj['entity']['apo_id'] = self.apo_term_id[phenotype]
record['pheno_obj'] = pheno_obj
# begin modeling
model = Model(self.graph)
# define the triple
gene = 'SGD:{}'.format(record['SGDID'])
relation = self.globaltt['has phenotype']
if record['pheno_obj']['has_quality']:
pheno_label = '{0}:{1}'.format(
record['pheno_obj']['entity']['term'],
record['pheno_obj']['quality']['term'])
pheno_id = 'MONARCH:{0}{1}'.format(
record['pheno_obj']['entity']['apo_id'].replace(':', '_'),
record['pheno_obj']['quality']['apo_id'].replace(':', '_')
)
g2p_assoc = Assoc(
self.graph, self.name, sub=gene, obj=pheno_id, pred=relation)
else:
pheno_label = record['pheno_obj']['entity']['term']
pheno_id = record['pheno_obj']['entity']['apo_id']
g2p_assoc = Assoc(
self.graph, self.name, sub=gene, obj=pheno_id, pred=relation)
assoc_id = g2p_assoc.make_association_id(
'yeastgenome.org', gene, relation, pheno_id)
g2p_assoc.set_association_id(assoc_id=assoc_id)
# add to graph to mint assoc id
g2p_assoc.add_association_to_graph()
model.addLabel(subject_id=gene, label=record['Gene Name'])
# add the association triple
model.addTriple(subject_id=gene, predicate_id=relation, obj=pheno_id)
model.addTriple(
subject_id=pheno_id,
predicate_id=self.globaltt['subclass_of'],
obj=self.globaltt['Phenotype'])
# label nodes
# pheno label
model.addLabel(subject_id=pheno_id, label=pheno_label)
g2p_assoc.description = self._make_description(record)
# add the references
references = record['Reference']
references = references.replace(' ', '')
references = references.split('|')
# created Ref prefix in curie map to route to proper reference URL in SGD
if len(references) > 0:
# make first ref in list the source
g2p_assoc.add_source(identifier=references[0])
ref_model = Reference(
self.graph, references[0],
self.globaltt['publication']
)
ref_model.addRefToGraph()
if len(references) > 1:
# create equivalent source for any other refs in list
for ref in references[1:]:
model.addSameIndividual(sub=references[0], obj=ref)
# add experiment type as evidence
for exp_type in record['experiment_type']:
g2p_assoc.add_evidence(exp_type['id'])
model.addLabel(subject_id=exp_type['id'], label=exp_type['term'])
try:
g2p_assoc.add_association_to_graph()
except Exception as e:
print(e)
return | [
"\n contstruct the association\n :param record:\n :return: modeled association of genotype to mammalian??? phenotype\n "
]
|
Please provide a description of the function:def setVersion(self, date_issued, version_id=None):
if date_issued is not None:
self.set_date_issued(date_issued)
elif version_id is not None:
self.set_version_by_num(version_id)
else:
LOG.error("date or version not set!")
# TODO throw error
return
if version_id is not None:
self.set_version_by_num(version_id)
else:
LOG.info("set version to %s", self.version)
self.set_version_by_date(date_issued)
LOG.info("set version to %s", self.version)
return | [
"\n Legacy function...\n should use the other set_* for version and date\n\n as of 2016-10-20 used in:\n\n dipper/sources/HPOAnnotations.py 139:\n dipper/sources/CTD.py 99:\n dipper/sources/BioGrid.py 100:\n dipper/sources/MGI.py 255:\n dipper/sources/EOM.py 93:\n dipper/sources/Coriell.py 200:\n dipper/sources/MMRRC.py 77:\n\n # TODO set as deprecated\n\n :param date_issued:\n :param version_id:\n :return:\n\n "
]
|
Please provide a description of the function:def set_version_by_date(self, date_issued=None):
if date_issued is not None:
dat = date_issued
elif self.date_issued is not None:
dat = self.date_issued
else:
dat = self.date_accessed
LOG.info(
"No date supplied, using download timestamp for date_issued")
LOG.info("setting version by date to: %s", dat)
self.set_version_by_num(dat)
return | [
"\n This will set the version by the date supplied,\n the date already stored in the dataset description,\n or by the download date (today)\n :param date_issued:\n :return:\n "
]
|
Please provide a description of the function:def toRoman(num):
if not 0 < num < 5000:
raise ValueError("number %n out of range (must be 1..4999)", num)
if int(num) != num:
raise TypeError("decimals %n can not be converted", num)
result = ""
for numeral, integer in romanNumeralMap:
while num >= integer:
result += numeral
num -= integer
return result | [
"convert integer to Roman numeral"
]
|
Please provide a description of the function:def fromRoman(strng):
if not strng:
raise TypeError('Input can not be blank')
if not romanNumeralPattern.search(strng):
raise ValueError('Invalid Roman numeral: %s' % strng)
result = 0
index = 0
for numeral, integer in romanNumeralMap:
while strng[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result | [
"convert Roman numeral to integer"
]
|
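A round-trip check over the supported range, tying the two converters together:

assert toRoman(1994) == 'MCMXCIV'
assert fromRoman('MCMXCIV') == 1994
assert all(fromRoman(toRoman(n)) == n for n in range(1, 5000))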
Please provide a description of the function:def fetch(self, is_dl_forced=False):
file_paths = self._get_file_paths(self.tax_ids, 'protein_links')
self.get_files(is_dl_forced, file_paths)
self.get_files(is_dl_forced, self.id_map_files) | [
"\n Override Source.fetch()\n Fetches resources from String\n\n We also fetch ensembl to determine if protein pairs are from\n the same species\n Args:\n :param is_dl_forced (bool): Force download\n Returns:\n :return None\n "
]
|
Please provide a description of the function:def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
protein_paths = self._get_file_paths(self.tax_ids, 'protein_links')
col = ['NCBI taxid', 'entrez', 'STRING']
for taxon in protein_paths:
ensembl = Ensembl(self.graph_type, self.are_bnodes_skized)
string_file_path = '/'.join((
self.rawdir, protein_paths[taxon]['file']))
with gzip.open(string_file_path, 'rb') as reader:
dataframe = pd.read_csv(reader, sep=r'\s+')
p2gene_map = dict()
if taxon in self.id_map_files:
LOG.info("Using string provided id_map files")
map_file = '/'.join((self.rawdir, self.id_map_files[taxon]['file']))
with gzip.open(map_file, 'rt') as reader:
line = next(reader).strip()
if line != '# NCBI taxid / entrez / STRING':
LOG.error(
'Expected Headers:\t%s\nReceived Headers:\t%s\n', col, line)
exit(-1)
for line in reader.readlines():
row = line.rstrip('\n').split('\t')
# tax = row[col.index('NCBI taxid')].strip()
gene = row[col.index('entrez')].strip()
prot = row[col.index('STRING')].strip()
genes = gene.split('|')
p2gene_map[prot.replace(taxon + '.', '')] = [
"NCBIGene:" + entrez_id for entrez_id in genes]
else:
LOG.info("Fetching ensembl proteins for taxon %s", taxon)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
for key in p2gene_map:
for phen, gene in enumerate(p2gene_map[key]):
p2gene_map[key][phen] = "ENSEMBL:{}".format(gene)
LOG.info(
"Finished fetching ENSP ID mappings, fetched %i proteins",
len(p2gene_map))
LOG.info(
"Fetching protein protein interactions for taxon %s", taxon)
self._process_protein_links(dataframe, p2gene_map, taxon, limit) | [
"\n Override Source.parse()\n Args:\n :param limit (int, optional) limit the number of rows processed\n Returns:\n :return None\n "
]
|
Please provide a description of the function:def _get_file_paths(self, tax_ids, file_type):
file_paths = dict()
if file_type not in self.files:
raise KeyError("file type {} not configured".format(file_type))
for taxon in tax_ids:
file_paths[taxon] = {
'file': "{}.{}".format(taxon, self.files[file_type]['pattern']),
'url': "{}{}.{}".format(
self.files[file_type]['path'], taxon,
self.files[file_type]['pattern']),
'headers': {'User-Agent': USER_AGENT}
}
return file_paths | [
"\n Assemble file paths from tax ids\n Args:\n :param tax_ids (list) list of taxa\n Returns:\n :return file dict\n "
]
|
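The shape of the returned mapping, assuming self.files['protein_links'] carries a 'path' of 'https://stringdb-static.org/download/protein.links.v11.0/' and a 'pattern' of 'protein.links.v11.0.txt.gz' (both values are assumptions, as is the `string_source` instance):

paths = string_source._get_file_paths(['9606'], 'protein_links')
# {'9606': {'file': '9606.protein.links.v11.0.txt.gz',
#           'url': 'https://stringdb-static.org/download/protein.links.v11.0/9606.protein.links.v11.0.txt.gz',
#           'headers': {'User-Agent': USER_AGENT}}}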
Please provide a description of the function:def process_fish(self, limit=None):
LOG.info("Processing Fish Parts")
raw = '/'.join((self.rawdir, self.files['fish_components']['file']))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
taxon_id = self.globaltt['Danio rerio']
geno = Genotype(graph)
allele_to_construct_hash = {}
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
(fish_num, fish_name, gene_num, gene_symbol, affector_num,
affector_symbol, construct_num, construct_symbol,
background_num, background_symbol, genotype_num,
genotype_name
# , empty
) = row
# fish have the following components:
# * genotype, which is the intrinsic genotype;
# this may be a genetic background (WT)
# * an optional background for the intrinsic genotype
# * affectors == alleles or morphants
# * constructs which may give rise to the affectors
# * affected genes
if fish_num not in self.fish_parts:
self.fish_parts[fish_num] = {}
self.fish_parts[fish_num] = {
'intrinsic_genotype': genotype_num,
'affectors': set(),
'fish_label': fish_name
}
# HACK - bad allele id - replace it with the new one FIXME
if affector_num == 'ZDB-ALT-090504-1':
affector_num = 'ZDB-ALT-040723-4'
self.fish_parts[fish_num]['affectors'].add(affector_num)
# add the constructs that the allele comes from
if construct_num != '':
if affector_num not in allele_to_construct_hash:
allele_to_construct_hash[affector_num] = set()
allele_to_construct_hash[affector_num].add(construct_num)
# ### finish looping through fish file
# given the components of a fish,
# subtract out the intrinsic parts to just leave the extrinsic
# to create the extrinsic genotypes.
line_counter = 0
for fish_num in self.fish_parts:
if self.test_mode and fish_num not in self.test_ids['fish']:
continue
line_counter += 1
fish_id = 'ZFIN:'+fish_num
fish = self.fish_parts[fish_num]
# get the intrinsic parts
intrinsic_genotype_num = fish['intrinsic_genotype']
intrinsic_genotype_id = 'ZFIN:'+intrinsic_genotype_num
intrinsic_genotype_label = self.id_label_map.get(
intrinsic_genotype_id)
if intrinsic_genotype_num not in self.geno_alleles:
intrinsic_parts = set()
else:
intrinsic_parts = self.geno_alleles[intrinsic_genotype_num]
# subtract out the intrinsic parts, to get the extrinsic parts
extrinsic_parts = fish['affectors'] - intrinsic_parts
extrinsic_list = list(sorted(extrinsic_parts))
# build up the extrinsic genotype from its parts.
# these will be reagents/morphants.
if len(extrinsic_list) > 0:
list_of_targeted_genes = []
gene_to_reagent_hash = {}
for eid in extrinsic_list:
# link the morpholino to the genes that it affects
eid = 'ZFIN:' + eid
# just in case, skip over the ALTs
if re.search(r'ALT', eid):
continue
ag = self.variant_loci_genes.get(eid)
# LOG.debug("%s affected genes %s", eid, str(ag))
if ag is None:
pass
# LOG.warn("No affected genes for %s", eid)
else:
# turn the gene-targeting-reagents inside out,
# such that instead of morph -> set(genes)
# we make a gene -> set(morphs)
for gid in ag:
if gid not in gene_to_reagent_hash:
gene_to_reagent_hash[gid] = set()
gene_to_reagent_hash[gid].add(eid)
# end loop through each extrinsic component
for gid in gene_to_reagent_hash:
reagent_list = sorted(list(gene_to_reagent_hash.get(gid)))
# create variant gene(s) that have been targeted
# by the reagent
if gid not in self.id_label_map:
# should not happen, except maybe in testing
LOG.error("%s not in id-label-hash", gid)
glabel = gid
else:
glabel = self.id_label_map[gid]
eid = '-'.join(reagent_list)
targeted_gene_id = self.make_targeted_gene_id(
gid, eid)
# get the reagent labels
elabel = ', '.join(
self.id_label_map.get(l) for l in reagent_list)
if elabel is None:
elabel = eid # should not happen, but just in case
targeted_gene_label = glabel + '<' + elabel + '>'
for r in reagent_list:
geno.addReagentTargetedGene(r, gid, targeted_gene_id,
targeted_gene_label)
self.id_label_map[targeted_gene_id] = targeted_gene_label
list_of_targeted_genes += [targeted_gene_id]
# end loop through each gene that is targeted
list_of_targeted_genes = sorted(list_of_targeted_genes)
extrinsic_id = '_:'+re.sub(
r':?_?', '', '-'.join(list_of_targeted_genes))
extrinsic_label = '; '.join(
str(self.id_label_map.get(l))
for l in list_of_targeted_genes)
self.id_label_map[extrinsic_id] = extrinsic_label
# add the parts
for tg in list_of_targeted_genes:
if tg != extrinsic_id:
geno.addParts(
tg, extrinsic_id, self.globaltt['has_variant_part'])
else:
extrinsic_id = None
extrinsic_label = None
if extrinsic_id is not None:
geno.addGenotype(
extrinsic_id, extrinsic_label, self.globaltt['extrinsic_genotype'])
geno.addParts(
extrinsic_id, fish_id, self.globaltt['has_variant_part'])
# check if the intrinsic is in the wildtype genotypes,
# then it's a genomic background
if intrinsic_genotype_id in self.wildtype_genotypes:
intrinsic_rel = self.globaltt['has_reference_part']
intrinsic_type = self.globaltt['genomic_background']
else:
intrinsic_rel = self.globaltt['has_variant_part']
intrinsic_type = self.globaltt['intrinsic_genotype']
geno.addGenotype(
intrinsic_genotype_id, intrinsic_genotype_label, intrinsic_type)
# add the intrinsic to the fish
geno.addParts(intrinsic_genotype_id, fish_id, intrinsic_rel)
# build the fish label
if extrinsic_id is None:
fish_label = intrinsic_genotype_label
else:
fish_label = '; '.join((
str(intrinsic_genotype_label), extrinsic_label))
fish_type = self.globaltt['effective_genotype']
geno.addGenotype(fish_id, fish_label, fish_type)
geno.addTaxon(taxon_id, fish_id)
# since we re-create a label,
# add the zfin fish label as the synonym
model.addSynonym(fish_id, fish['fish_label'])
self.id_label_map[fish_id] = fish_label
if not self.test_mode and limit is not None and line_counter > limit:
break
# ###finish iterating over fish
# iterate over the alleles and attach the constructs to them
LOG.info("Adding Allele/Construct relationships")
for a in allele_to_construct_hash:
if self.test_mode and a not in self.test_ids['allele']:
continue
allele_id = 'ZFIN:' + a
constructs = allele_to_construct_hash.get(a)
if len(constructs) > 0:
for c in constructs:
cid = 'ZFIN:' + c
geno.addSequenceDerivesFrom(allele_id, cid)
# LOG.info("constructs for %s: %s", allele_id,
# str(constructs))
# migrate the transgenic features to be alternate parts
# of the transgene insertion/alteration
if cid in self.transgenic_parts:
tg_parts = self.transgenic_parts.get(cid)
if tg_parts is not None:
for p in tg_parts:
# HACK - if it's a promoter part,
# then make it a simple has_part
if re.search(r'promoter', p):
r = self.globaltt['has_part']
else:
r = self.globaltt['has_variant_part']
geno.addParts(p, allele_id, r)
return | [
"\n Fish give identifiers to the \"effective genotypes\" that we create.\n We can match these by:\n Fish = (intrinsic) genotype + set of morpholinos\n\n We assume here that the intrinsic genotypes and their parts\n will be processed separately, prior to calling this function.\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_genotype_features(self, limit=None):
raw = '/'.join((self.rawdir, self.files['geno']['file']))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
taxon_id = self.globaltt['Danio rerio']
geno_hash = {} # This is used to store the genotype partonomy
gvc_hash = {}
LOG.info("Processing Genotypes")
line_counter = 0
geno = Genotype(graph)
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(genotype_num, genotype_name, genotype_unique_name, allele_num,
allele_name, allele_ab, allele_type, allele_disp_type,
gene_symbol, gene_num, zygosity, construct_name,
construct_num
# , empty
) = row
if self.test_mode and genotype_num not in self.test_ids['genotype']:
continue
# add the genotype to the graph
# not adding the genotype label here,
# since it doesn't include the background
# that will be done in another method
genotype_id = 'ZFIN:' + genotype_num.strip()
geno.addGenotype(genotype_id, None)
# add the given name and uniquename as synonyms
model.addSynonym(genotype_id, genotype_name)
model.addSynonym(genotype_id, genotype_unique_name)
# store the alleles of the genotype,
# in order to use when processing fish
if genotype_num not in self.geno_alleles:
self.geno_alleles[genotype_num] = set()
self.geno_alleles[genotype_num].add(allele_num)
if genotype_id not in geno_hash:
geno_hash[genotype_id] = {}
genoparts = geno_hash[genotype_id]
# reassign the allele_type to a proper GENO or SO class
# allele_type = self._map_allele_type_to_geno(allele_type)
allele_type_id = self.resolve(allele_type, False)
if allele_type_id == allele_type:
allele_type_id = self.globaltt['unspecified'] # is geno: not zfa:
allele_id = 'ZFIN:' + allele_num.strip()
if allele_num != '':
self.id_label_map[allele_id] = allele_name
# alleles in zfin are really sequence alterations in our system
geno.addSequenceAlteration(allele_id, allele_name, allele_type_id)
model.addSynonym(allele_id, allele_ab)
# here, we assemble the items into a genotype hash
# we need to do this because each row only holds one allele
# of a gene but a genotype may have many alleles and therefore
# many rows so we loop through the file once to build a hash of
# genotype components
if gene_num is not None and gene_num.strip() != '':
# add the gene to the graph, along with it's symbol
# as the primary label
gene_id = 'ZFIN:' + gene_num.strip()
geno.addGene(gene_id, gene_symbol)
self.id_label_map[gene_id] = gene_symbol
# if it's a transgenic construct,
# then we'll have to get the other bits
if construct_num is not None and construct_num.strip() != '':
construct_id = 'ZFIN:' + construct_num.strip()
geno.addSequenceDerivesFrom(allele_id, construct_id)
self.id_label_map[construct_id] = construct_name
# allele to gene
if allele_id not in self.variant_loci_genes:
self.variant_loci_genes[allele_id] = [gene_id]
else:
if gene_id not in self.variant_loci_genes[allele_id]:
self.variant_loci_genes[allele_id] += [gene_id]
if gene_id not in genoparts:
genoparts[gene_id] = [allele_id]
else:
genoparts[gene_id] += [allele_id]
other_allele = self._get_other_allele_by_zygosity(
allele_id, zygosity)
if other_allele is not None:
genoparts[gene_id] += [other_allele]
else:
# if the gene is not known,
# still need to add the allele to the genotype hash
# these will be added as sequence alterations.
genoparts[allele_id] = [allele_id]
other_allele = self._get_other_allele_by_zygosity(
allele_id, zygosity)
if other_allele is not None:
genoparts[allele_id] += [other_allele]
geno_hash[genotype_id] = genoparts
# fetch the other affected genes,
# and make sure they are in the geno hash
# we have to do this because some of the affected genes
# are not listed in this file
genes_from_hash = None
if allele_id in self.variant_loci_genes:
genes_from_hash = self.variant_loci_genes[allele_id]
else:
pass
# LOG.info('no gene found for %s', allele_id)
if genes_from_hash is not None \
and genes_from_hash != [gene_id] \
and gene_id not in genes_from_hash:
LOG.info(
"***Found genes not in genotype_features for %s: %s",
allele_id, genes_from_hash)
for gh in genes_from_hash:
if gh not in genoparts:
genoparts[gh] = [allele_id]
else:
genoparts[gh] += [allele_id]
other_allele = self._get_other_allele_by_zygosity(
allele_id, zygosity)
if other_allele is not None:
genoparts[gh].append(other_allele)
if not self.test_mode and limit is not None and line_counter > limit:
break
# end loop through file
csvfile.close()
LOG.info("Finished parsing file")
# ############## BUILD THE INTRINSIC GENOTYPES ###############
# using the geno_hash, build the genotype parts,
# and add them to the graph
# the hash is organized like:
# genotype_id : {
# gene_id : [list, of, alleles], # for located things
# allele_id : [list, of, alleles] # for unlocated things
# }
# now loop through the geno_hash, and build the vslcs
LOG.info("Building intrinsic genotypes from partonomy")
for gt in geno_hash:
if self.test_mode and re.sub(r'ZFIN:', '', gt) \
not in self.test_ids['genotype']:
print('skipping ', gt)
continue
if gt not in gvc_hash:
gvc_hash[gt] = []
gvcparts = gvc_hash[gt]
for locus_id in geno_hash[gt]:
# LOG.info("locus id %s",locus_id)
locus_label = self.id_label_map[locus_id]
variant_locus_parts = geno_hash.get(gt).get(locus_id)
# LOG.info(
# 'vl parts: %s',pprint.pformat(variant_locus_parts))
# if the locus == part, then it isn't a gene,
# rather a variant not in a specific gene
if locus_id in variant_locus_parts:
# set the gene_id to none
gene_id = None
else:
gene_id = locus_id
allele1_id = variant_locus_parts[0]
if allele1_id not in self.id_label_map:
allele1_label = allele1_id
LOG.error('allele1 %s not in hash', allele1_id)
else:
allele1_label = self.id_label_map[allele1_id]
allele2_id = None
allele2_label = None
zygosity_id = None
if len(variant_locus_parts) > 2:
LOG.error(
"There may be a problem. >2 parts for this locus (%s): %s",
locus_id, variant_locus_parts)
elif len(variant_locus_parts) > 1:
allele2_id = variant_locus_parts[1]
if allele2_id not in ['0', '?']:
allele2_label = self.id_label_map[allele2_id]
else:
allele2_label = allele2_id
if allele2_id is not None:
if allele2_id == '?':
zygosity_id = self.globaltt['indeterminate']
allele2_id = 'UN'
elif allele2_id == '0':
zygosity_id = self.globaltt['hemizygous']
elif allele1_id != allele2_id:
zygosity_id = self.globaltt['compound heterozygous']
elif allele1_id == allele2_id:
zygosity_id = self.globaltt['homozygous']
else:
zygosity_id = self.globaltt['simple heterozygous']
allele2_label = '+'
allele2_id = 'WT'
# make variant_loci
vloci2 = vloci2_label = None
if gene_id is not None:
vloci1 = self._make_variant_locus_id(gene_id, allele1_id)
vloci1_label = geno.make_variant_locus_label(
locus_label, allele1_label)
geno.addSequenceAlterationToVariantLocus(
allele1_id, vloci1)
geno.addAlleleOfGene(vloci1, gene_id)
model.addIndividualToGraph(
vloci1, vloci1_label, self.globaltt['variant_locus'])
if allele2_id is not None and allele2_id not in ['WT', '0', 'UN']:
vloci2 = self._make_variant_locus_id(
gene_id, allele2_id)
vloci2_label = geno.make_variant_locus_label(
locus_label, allele2_label)
geno.addSequenceAlterationToVariantLocus(
allele2_id, vloci2)
model.addIndividualToGraph(
vloci2, vloci2_label, self.globaltt['variant_locus'])
geno.addAlleleOfGene(vloci2, gene_id)
else:
vloci1 = allele1_id
vloci1_label = allele1_label
vloci2 = None
if allele2_id not in ['WT', '0', 'UN']:
vloci2 = allele2_id
vloci2_label = allele2_label
# create the vslc
gene_label = ''
if gene_id is None:
gn = 'UN'
else:
gn = gene_id
gene_label = self.id_label_map[gene_id]
# TODO also consider adding this to Genotype.py
vslc_id = '-'.join((gn, allele1_id, allele2_id))
vslc_id = '_:' + re.sub(r'(ZFIN)?:', '', vslc_id)
vslc_label = geno.make_vslc_label(
gene_label, allele1_label, allele2_label)
# add to global hash
self.id_label_map[vslc_id] = vslc_label
model.addIndividualToGraph(
vslc_id, vslc_label,
self.globaltt['variant single locus complement'])
geno.addPartsToVSLC(
vslc_id, vloci1, vloci2, zygosity_id,
self.globaltt['has_variant_part'],
self.globaltt['has_variant_part'])
gvcparts += [vslc_id]
gvc_hash[gt] = gvcparts
# end loop through geno_hash
LOG.info('Finished finding all the intrinsic genotype parts')
LOG.info('Build pretty genotype labels')
# now loop through the gvc_hash, and build the gvc
for gt in gvc_hash:
if self.test_mode and re.sub(r'ZFIN:', '', gt) \
not in self.test_ids['genotype']:
continue
gvc_parts = gvc_hash[gt]
# only need to make a gvc specifically if there's >1 vslc
if len(gvc_parts) > 1:
gvc_labels = []
# put these in order so they will always make the same id
gvc_parts.sort()
gvc_id = '-'.join(gvc_parts)
gvc_id = re.sub(r'(ZFIN)?:', '', gvc_id)
gvc_id = '_:' + re.sub(r'^_*', '', gvc_id)
for vslc_id in gvc_parts:
# add the vslc to the gvc
geno.addVSLCtoParent(vslc_id, gvc_id)
# build the gvc label
vslc_label = self.id_label_map[vslc_id]
if vslc_label is not None:
gvc_labels += [vslc_label]
else:
gvc_labels += [vslc_id]
gvc_labels.sort()
gvc_label = '; '.join(gvc_labels)
# add the gvc to the id-label hash
self.id_label_map[gvc_id] = gvc_label
# add the gvc
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
elif len(gvc_parts) == 1:
# assign the vslc to be also a gvc
vslc_id = gvc_parts[0]
gvc_id = vslc_id
gvc_label = self.id_label_map[vslc_id]
model.addType(vslc_id, self.globaltt['genomic_variation_complement'])
else:
gvc_id = None
gvc_label = ''
LOG.error("No GVC parts for %s", gt)
if gt in self.genotype_backgrounds:
background_id = self.genotype_backgrounds[gt]
if background_id in self.id_label_map:
background_label = self.id_label_map[background_id]
else:
background_label = background_id
LOG.error("We don't have the label for %s stored", background_id)
else:
background_num = re.sub(r'ZFIN:', '', gt)
background_id = '_:bkgd-'+background_num
background_label = 'n.s. (' + background_num + ')'
background_desc = 'This genomic background is unknown. ' +\
'This is a placeholder background for ' + gt + '.'
# there is no background for this genotype;
# need to add the taxon to this one!
# make an anonymous background for this genotype
geno.addGenomicBackground(
background_id, background_label, None, background_desc)
geno.addGenomicBackgroundToGenotype(background_id, gt)
background_label = 'n.s.'
geno.addTaxon(taxon_id, background_id)
genotype_name = gvc_label + ' [' + background_label + ']'
geno.addGenotype(gt, genotype_name)
self.id_label_map[gt] = genotype_name
# Add the GVC to the genotype
geno.addParts(gvc_id, gt, self.globaltt['has_variant_part'])
# end of gvc loop
# end of genotype loop
# TODO this is almost complete;
# deficiencies with >1 locus deleted are still not right
LOG.info("Finished building genotype labels")
LOG.info("Done with genotypes")
return | [
"\n Here we process the genotype_features file, which lists genotypes\n together with any intrinsic sequence alterations, their zygosity,\n and affected gene.\n Because we don't necessarily get allele pair (VSLC) ids\n in a single row, we iterate through the file and build up a hash\n that contains all of a genotype's partonomy.\n We then assemble a genotype based on that partonomy.\n This table does not list the background genotype/strain:\n that is listed elsewhere.\n\n ZFIN \"ALT\" objects are mapped to sequence alterations in our system.\n\n By the end of this method, we have built up the intrinsic genotype,\n with Monarch-style labels.\n All ZFIN labels are added as synonyms (including the \"sup\" html tags).\n\n We make assumptions here that any variants that affect the same locus\n are in trans.\n All of the genotype parts are created as BNodes at the moment,\n to avoid minting new Monarch ids, which means for anyone consuming this\n data they are inherently unstable. This may change in the future.\n\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_genotype_backgrounds(self, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing genotype backgrounds")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['backgrounds']['file']))
geno = Genotype(graph)
# Add the taxon as a class
taxon_id = self.globaltt['Danio rerio']
model.addClassToGraph(taxon_id, None)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
# Genotype_ID Genotype_Name Background Background_Name
(genotype_id, genotype_name, background_id, unused) = row
if self.test_mode and genotype_id not in self.test_ids['genotype']:
continue
genotype_id = 'ZFIN:' + genotype_id.strip()
background_id = 'ZFIN:' + background_id.strip()
# store this in the hash for later lookup
# when building fish genotypes
self.genotype_backgrounds[genotype_id] = background_id
# add the background into the graph,
# in case we haven't seen it before
geno.addGenomicBackground(background_id, None)
# hang the taxon from the background
geno.addTaxon(taxon_id, background_id)
# add the intrinsic genotype to the graph
# we DO NOT ADD THE LABEL here
# as it doesn't include the background
geno.addGenotype(genotype_id, None, self.globaltt['intrinsic_genotype'])
# Add background to the intrinsic genotype
geno.addGenomicBackgroundToGenotype(background_id, genotype_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with genotype backgrounds")
return | [
"\n This table provides a mapping of genotypes to background genotypes\n Note that the background_id is also a genotype_id.\n\n Makes these triples:\n <ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id>\n <ZFIN:background_id> a GENO:genomic_background\n <ZFIN:background_id> in_taxon <taxon_id>\n <taxon_id> a class\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_wildtypes(self, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
# model = Model(graph) # unused
LOG.info("Processing wildtype genotypes")
line_counter = 0
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['wild']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(fish_num, fish_name, fish_abbreviation, genotype_num
# , empty
) = row
# ZDB-FISH-150901-10750 INDO INDO ZDB-GENO-980210-32
fish_id = 'ZFIN:'+fish_num
genotype_id = 'ZFIN:' + genotype_num.strip()
background_type = self.globaltt['genomic_background']
# Add genotype to graph with label and description,
# as a genomic_background genotype
unspecified_background = 'ZDB-GENO-030619-2'
if genotype_num.strip() == unspecified_background:
background_type = self.globaltt['unspecified_genomic_background']
geno.addGenomicBackground(
genotype_id, fish_abbreviation, background_type, fish_name)
graph.addTriple(fish_id, self.globaltt['has_genotype'], genotype_id)
# Build the hash for the wild type genotypes.
self.id_label_map[genotype_id] = fish_abbreviation
# store these in a special hash to look up later
self.wildtype_genotypes += [genotype_id]
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with wildtype genotypes")
return | [
"\n This table provides the genotype IDs, name,\n and abbreviation of the wildtype genotypes.\n These are the typical genomic backgrounds...there's about 20 of them.\n http://zfin.org/downloads/wildtypes_fish.txt\n\n Triples created:\n <genotype id> a GENO:wildtype\n <genotype id> rdfs:label genotype_abbreviation\n <genotype id> dc:description genotype_name\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_stages(self, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing stages")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['stage']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(stage_id, stage_obo_id, stage_name, begin_hours, end_hours
# ,empty # till next time
) = row
# Add the stage as a class, and its obo equivalent
stage_id = 'ZFIN:' + stage_id.strip()
model.addClassToGraph(stage_id, stage_name)
model.addEquivalentClass(stage_id, stage_obo_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with stages")
return | [
"\n This table provides mappings between ZFIN stage IDs and ZFS terms,\n and includes the starting and ending hours for the developmental stage.\n Currently only processing the mapping from the ZFIN stage ID\n to the ZFS ID.\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_g2p(self, limit=None):
LOG.info("Processing G2P")
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
missing_zpids = list()
mapped_zpids = list()
model = Model(graph)
eco_id = self.globaltt['experimental phenotypic evidence']
raw = '/'.join((self.rawdir, self.files['pheno']['file']))
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(fish_num, fish_name, start_stage_id, start_stage_name,
end_stage_id, end_stage_name, subterm1_id, subterm1_name,
postcomp1_rel_id, postcomp1_rel_name, superterm1_id,
superterm1_name, quality_id, quality_name, modifier,
subterm2_id, subterm2_name, postcomp2_rel_id,
postcomp2_rel_name, superterm2_id, superterm2_name, pub_id,
env_id
# , empty # till next time
) = row
if self.test_mode and (
fish_num not in self.test_ids['fish'] or
env_id not in self.test_ids['environment']):
continue
fish_id = 'ZFIN:' + fish_num.strip()
env_id = 'ZFIN:' + env_id.strip()
# ########### PHENOTYPES ##########
phenotype_id = self._map_sextuple_to_phenotype(superterm1_id,
subterm1_id,
quality_id,
superterm2_id,
subterm2_id,
modifier)
if phenotype_id is None:
# check to see if it is a "normal" phenotype;
# if so, then
# check to see if the "abnormal" version is found
# if the abnormal version is not found, then report it
if modifier == 'normal':
p2 = self._map_sextuple_to_phenotype(
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, 'abnormal')
if p2 is None:
missing_zpids.append([
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, modifier])
else:
pass
# LOG.info("Normal phenotype found,
# and abnormal version exists")
else:
missing_zpids.append([
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, modifier])
else:
mapped_zpids.append([
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, modifier])
if pub_id != '':
pub_id = 'ZFIN:' + pub_id.strip()
ref = Reference(graph, pub_id)
ref.addRefToGraph()
if not re.match(r'^normal', modifier):
if phenotype_id is None:
continue
if start_stage_id != '':
start_stage_id = 'ZFIN:' + start_stage_id.strip()
if end_stage_id != '':
end_stage_id = 'ZFIN:' + end_stage_id.strip()
# add association
assoc = G2PAssoc(graph, self.name, fish_id, phenotype_id)
# only add the environment if there's components to it
if env_id in self.environment_hash \
and len(self.environment_hash.get(env_id)) > 0:
assoc.set_environment(env_id)
assoc.set_stage(start_stage_id, end_stage_id)
assoc.add_evidence(eco_id)
assoc.add_source(pub_id)
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
if env_id not in self.environment_hash \
or len(self.environment_hash.get(env_id)) == 0:
model.addComment(
assoc_id, 'Legacy environment id ' + env_id)
else:
# TODO add normal phenotypes as associations #134 when
# https://github.com/sba1/bio-ontology-zp/issues/9
# is finished, we can use these add normal phenotypes
# as a comment on the genotype for now
clist = []
for x in [superterm1_name, subterm1_name, quality_name,
superterm2_name, subterm2_name, modifier]:
if x != '':
clist += [x]
c = '+'.join(clist)
c = ' '.join(("Normal phenotype observed:", c, "(" + pub_id + ")"))
if pub_id != '':
graph.addTriple(pub_id, self.globaltt['mentions'], fish_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
myset = set([','.join(x) for x in mapped_zpids])
myset2 = set([','.join(x) for x in missing_zpids])
LOG.info(
"Phenotype-sextuples: %d mapped : %d unmapped", len(myset), len(myset2))
self._write_missing_zp_report(missing_zpids)
return | [
"\n Here, we process the fish-to-phenotype associations,\n which also include environmental perturbations.\n The phenotypes may also be recorded as observed at specific stages.\n We create association objects with as much of the information\n as possible.\n\n A full association object may include:\n\n assoc hasSubject effective genotype\n assoc hasObject phenotype\n assoc hasSource publication (PMID or ZFIN pub id)\n assoc hasEnvironment environment\n assoc hasStartStage start_stage_id\n assoc hasEndStage end_stage_id\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _write_missing_zp_report(self, missing_zpids, include_normal=True):
f = '/'.join((self.outdir, 'missing_zps.txt'))
myset = set([','.join(x) for x in missing_zpids])
# missing_zpids = set(missing_zpids) # make it a unique set
with open(f, 'w', newline='\n') as csvfile:
writer = csv.writer(csvfile, delimiter='\t')
# write header
h = ['superterm1_id', 'subterm1_id', 'quality_id',
'superterm2_id', 'subterm2_id', 'modifier']
writer.writerow(h)
for x in myset:
writer.writerow(x.split(','))
csvfile.close()
LOG.info("Wrote %d missing zp defs to %s", len(myset), f)
return | [
"\n This will write the sextuples of anatomy+quality to a file\n if they do not map to any current ZP definition.\n Set include_normal to False if you do not want to log\n the unmatched \"normal\" phenotypes.\n :param missing_zpids:\n :param include_normal:\n :return:\n\n "
]
|
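Note that the include_normal flag documented above is accepted but never consulted in the body shown. A minimal sketch of the filtering it implies, assuming the modifier is the sixth element of each sextuple (it is, per the append calls in _process_g2p):

# Hypothetical pre-filter, not part of the method as shown:
if not include_normal:
    missing_zpids = [x for x in missing_zpids if x[5] != 'normal']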
Please provide a description of the function:def _process_genes(self, limit=None):
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['gene']['file']))
geno = Genotype(graph)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, ncbi_gene_id
# , empty # till next time
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip()
self.id_label_map[gene_id] = gene_symbol
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
geno.addGene(gene_id, gene_symbol)
model.addEquivalentClass(gene_id, ncbi_gene_id)
LOG.info("Done with genes")
return | [
"\n This table provides the ZFIN gene id, the SO type of the gene,\n the gene symbol, and the NCBI Gene ID.\n\n Triples created:\n <gene id> a class\n <gene id> rdfs:label gene_symbol\n <gene id> equivalent class <ncbi_gene_id>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_features(self, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing features")
line_counter = 0
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['features']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(genomic_feature_id, feature_so_id,
genomic_feature_abbreviation, genomic_feature_name,
genomic_feature_type, mutagen, mutagee, construct_id,
construct_name, construct_so_id, talen_crispr_id,
talen_crispr_name
# , empty
) = row
if self.test_mode and (
genomic_feature_id not in self.test_ids['allele']):
continue
genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip()
model.addIndividualToGraph(
genomic_feature_id, genomic_feature_name, feature_so_id)
model.addSynonym(
genomic_feature_id, genomic_feature_abbreviation)
if construct_id is not None and construct_id != '':
construct_id = 'ZFIN:' + construct_id.strip()
geno.addConstruct(
construct_id, construct_name, construct_so_id)
geno.addSequenceDerivesFrom(
genomic_feature_id, construct_id)
# Note, we don't really care about how the variant was derived.
# so we skip that.
# add to the id-label map
self.id_label_map[
genomic_feature_id] = genomic_feature_abbreviation
self.id_label_map[construct_id] = construct_name
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with features")
return | [
"\n This module provides information for the intrinsic\n and extrinsic genotype features of zebrafish.\n All items here are 'alterations', and are therefore instances.\n\n sequence alteration ID, SO type, abbreviation, and relationship to\n the affected gene, with the gene's ID, symbol,\n and SO type (gene/pseudogene).\n\n Triples created:\n <gene id> a class:\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_feature_affected_genes(self, limit=None):
# can use this to process and build the variant locus.
# but will need to process through some kind of helper hash,
# just like we do with the genotype file.
# that's because each gene is on a separate line
# for example, ZDB-ALT-021021-2 is a deficiency that affects 4 genes
# that case is when the relationship is != 'is allele of'
LOG.info("Processing feature affected genes")
line_counter = 0
raw = '/'.join(
(self.rawdir, self.files['feature_affected_gene']['file']))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(genomic_feature_id, feature_so_id,
genomic_feature_abbreviation, gene_symbol, gene_id,
gene_so_id, genomic_feature_marker_relationship) = row[0:7]
# Sequence alteration types present in file:
# SO:0000159 - deletion,
# SO:0000199 - translocation,
# SO:0000667 - insertion,
# SO:0001059 - sequence_alteration,
# SO:0001060 - sequence_variant,
# SO:0001218 - transgenic insertion,
# SO:1000005 - complex_substitution,
# SO:1000008 - point_mutation,
# SO:1000029 - chromosomal_deletion,
# SO:1000032 - indel
genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip()
gene_id = 'ZFIN:' + gene_id.strip()
self.id_label_map[
genomic_feature_id] = genomic_feature_abbreviation
self.id_label_map[gene_id] = gene_symbol
if self.test_mode and (
re.sub(r'ZFIN:', '', gene_id) not in self.test_ids['gene'] and
re.sub(r'ZFIN:', '', genomic_feature_id)
not in self.test_ids['allele']):
continue
geno.addGene(gene_id, gene_symbol, gene_so_id)
# add the gene to the list of things altered by this thing
if genomic_feature_id not in self.variant_loci_genes:
self.variant_loci_genes[genomic_feature_id] = [gene_id]
else:
if gene_id not in self.variant_loci_genes[genomic_feature_id]:
self.variant_loci_genes[genomic_feature_id] += [gene_id]
sequence_alteration_type = feature_so_id
# Add the sequence alteration id, label, and type
geno.addSequenceAlteration(
genomic_feature_id,
genomic_feature_abbreviation,
sequence_alteration_type)
if genomic_feature_marker_relationship == 'is allele of':
vl_label = geno.make_variant_locus_label(
gene_symbol, genomic_feature_abbreviation)
vl_id = self._make_variant_locus_id(gene_id, genomic_feature_id)
self.id_label_map[vl_id] = vl_label
# create the variant locus,
# add it's parts and relationship to the gene
geno.addSequenceAlterationToVariantLocus(
genomic_feature_id, vl_id)
model.addIndividualToGraph(
vl_id, vl_label, self.globaltt['variant_locus'])
geno.addAlleleOfGene(vl_id, gene_id)
# note that deficiencies or translocations
# that affect only one gene are considered alleles here
# by zfin, which is appropriate.
# I don't yet see duplications
else:
# don't make the variant loci for the other things
# which include deficiencies, translocations, transgenes
# TODO review this
pass
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with feature affected genes")
return | [
"\n This table lists (intrinsic) genomic sequence alterations\n and their affected gene(s).\n It provides the sequence alteration ID, SO type, abbreviation,\n and relationship to the affected gene, with the gene's ID, symbol,\n and SO type (gene/pseudogene).\n\n Triples created:\n <gene id> a class:\n <gene id> rdfs:label gene_symbol\n <gene id> subclass of gene/pseudogene\n\n <variant locus id> is a GENO:allele\n <variant locus id> rdfs:label <variant_locus_label>\n <variant locus id> is an allele of <gene id>\n <variant locus id> has alternate part <sequence alteration id>\n\n <sequence alteration id> is an allele of <gene id>\n <sequence alteration id> rdf:type <sequence alteration type>\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_gene_marker_relationships(self, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing gene marker relationships")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['gene_marker_rel']['file']))
geno = Genotype(graph)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, marker_id, marker_so_id,
marker_symbol, relationship
# , empty
) = row
if self.test_mode and not (
gene_id in self.test_ids['gene'] or
marker_id in self.test_ids['allele'] or
marker_id in self.test_ids['morpholino']):
continue
# there are many relationships, but we only take a few for now
if relationship in [
'knockdown reagent targets gene',
'coding sequence of',
'gene product recognized by antibody',
'promoter of',
'transcript targets gene']:
gene_id = 'ZFIN:' + gene_id.strip()
geno.addGene(gene_id, gene_symbol, gene_so_id)
marker_id = 'ZFIN:' + marker_id.strip()
if relationship == 'knockdown reagent targets gene':
geno.addGeneTargetingReagent(
marker_id, marker_symbol, marker_so_id, gene_id)
# waiting to add the reagent_targeted_gene
# until processing environments
elif relationship == 'coding sequence of':
# we add the partonomy
# directly to the allele in the process_fish method
geno.addConstruct(
marker_id, marker_symbol, marker_so_id)
transgene_part_id = self._make_transgene_part_id(
marker_id, gene_id, relationship)
transgene_part_label = 'Tg('+relationship+' '+gene_symbol+')'
model.addIndividualToGraph(
transgene_part_id, transgene_part_label,
self.globaltt['coding_transgene_feature'])
geno.addSequenceDerivesFrom(transgene_part_id, gene_id)
# save the transgenic parts in a hashmap for later
if marker_id not in self.transgenic_parts:
self.transgenic_parts[marker_id] = set()
self.transgenic_parts[marker_id].add(transgene_part_id)
self.id_label_map[transgene_part_id] = transgene_part_label
elif relationship == 'gene product recognized by antibody':
# TODO for ticket #32
pass
elif relationship == 'promoter of':
# transgenic constructs with promoters regions
# we add the partonomy
# directly to the allele in the process_fish method
geno.addConstruct(marker_id, marker_symbol, marker_so_id)
transgene_part_id = self._make_transgene_part_id(
marker_id, gene_id, relationship)
transgene_part_label = 'Tg(' + relationship + ' ' +\
gene_symbol + ')'
model.addIndividualToGraph(
transgene_part_id, transgene_part_label,
self.globaltt['regulatory_transgene_feature'])
geno.addSequenceDerivesFrom(transgene_part_id, gene_id)
# save the transgenic parts in a hashmap for later
if marker_id not in self.transgenic_parts:
self.transgenic_parts[marker_id] = set()
self.transgenic_parts[marker_id].add(transgene_part_id)
elif relationship == 'transcript targets gene': # miRNAs
# TODO should this be an interaction
# instead of this special relationship?
model.addIndividualToGraph(
marker_id, marker_symbol, marker_so_id)
graph.addTriple(
marker_id, self.globaltt['targets_gene'], gene_id)
else:
pass
self.id_label_map[marker_id] = marker_symbol
# just in case we haven't seen it before
self.id_label_map[gene_id] = gene_symbol
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with gene marker relationships")
return | [
"\n Gene-marker relationships include:\n clone contains gene,\n coding sequence of,\n contains polymorphism,\n gene contains small segment,\n gene encodes small segment,\n gene has artifact,\n gene hybridized by small segment,\n gene produces transcript,\n gene product recognized by antibody,\n knockdown reagent targets gene,\n promoter of,\n transcript targets gene\n\n Here, we only process the following:\n knockdown reagent targets gene,\n coding sequence of,\n promoter of,\n transcript targets gene\n\n We only take a fraction of these here...\n we are interested in the knockdown reagents, promoters, and\n the transgenic constructs with coding bits.\n\n :param limit:\n :return:\n\n "
]
|
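For orientation, the transgene-part labels built above follow a fixed pattern; a tiny worked example with a hypothetical gene symbol:

relationship = 'promoter of'
gene_symbol = 'gata1'  # hypothetical
transgene_part_label = 'Tg(' + relationship + ' ' + gene_symbol + ')'
# -> 'Tg(promoter of gata1)'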
Please provide a description of the function:def _process_pubinfo(self, limit=None):
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['pubs']['file']))
with open(raw, 'r', encoding="latin-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
try:
(pub_id, pubmed_id, authors, title,
journal, year, vol, pages) = row
except ValueError:
# some rows carry a trailing empty column; retry with it
try:
(pub_id, pubmed_id, authors, title,
journal, year, vol, pages,
empty) = row
except ValueError:
LOG.warning("Error parsing row: %s", row)
continue
if self.test_mode and (
'ZFIN:' + pub_id not in self.test_ids['pub'] and
'PMID:' + pubmed_id not in self.test_ids['pub']):
continue
pub_id = 'ZFIN:' + pub_id.strip()
# trim the author list for ease of reading
alist = re.split(r',', authors)
if len(alist) > 1:
astring = ' '.join((alist[0].strip(), 'et al'))
else:
astring = authors
pub_label = '; '.join((astring, title, journal, year, vol, pages))
ref = Reference(graph, pub_id)
ref.setShortCitation(pub_label)
ref.setYear(year)
ref.setTitle(title)
if pubmed_id is not None and pubmed_id != '':
# let's make an assumption that if there's a pubmed id,
# that it is a journal article
ref.setType(self.globaltt['journal article'])
pubmed_id = 'PMID:' + pubmed_id.strip()
rpm = Reference(graph, pubmed_id, self.globaltt['journal article'])
rpm.addRefToGraph()
model.addSameIndividual(pub_id, pubmed_id)
model.makeLeader(pubmed_id)
ref.addRefToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return | [
"\n This will pull the zfin internal publication information,\n and map them to their equivalent pmid, and make labels.\n\n Triples created:\n <pub_id> is an individual\n <pub_id> rdfs:label <pub_label>\n <pubmed_id> is an individual\n <pubmed_id> rdfs:label <pub_label>\n\n <pub_id> sameIndividual <pubmed_id>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_pub2pubmed(self, limit=None):
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['pub2pubmed']['file']))
with open(raw, 'r', encoding="latin-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(pub_id, pubmed_id
# , empty
) = row
if self.test_mode and (
'ZFIN:' + pub_id not in self.test_ids['pub'] and
'PMID:' + pubmed_id not in self.test_ids['pub']):
continue
pub_id = 'ZFIN:' + pub_id.strip()
rtype = None
if pubmed_id != '' and pubmed_id is not None:
pubmed_id = 'PMID:' + pubmed_id.strip()
rtype = self.globaltt['journal article']
rpm = Reference(graph, pubmed_id, rtype)
rpm.addRefToGraph()
model.addSameIndividual(pub_id, pubmed_id)
ref = Reference(graph, pub_id, rtype)
ref.addRefToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return | [
"\n This will pull the zfin internal publication to pubmed mappings.\n Somewhat redundant with the process_pubinfo method,\n but this includes additional mappings.\n\n <pub_id> is an individual\n <pub_id> rdfs:label <pub_label>\n <pubmed_id> is an individual\n <pubmed_id> rdfs:label <pub_label>\n\n <pub_id> sameIndividual <pubmed_id>\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def _process_targeting_reagents(self, reagent_type, limit=None):
LOG.info("Processing Gene Targeting Reagents")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
if reagent_type not in ['morph', 'talen', 'crispr']:
LOG.error("You didn't specify the right kind of file type.")
return
raw = '/'.join((self.rawdir, self.files[reagent_type]['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
if reagent_type in ['morph', 'crispr']:
try:
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
publication, note) = row
except ValueError:
# Catch lines without a note column
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
publication) = row
note = ''
elif reagent_type == 'talen':
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
reagent_sequence2, publication, note) = row
else:
# should not get here
return
reagent_id = 'ZFIN:' + reagent_num.strip()
gene_id = 'ZFIN:' + gene_num.strip()
self.id_label_map[reagent_id] = reagent_symbol
if self.test_mode and (
reagent_num not in self.test_ids['morpholino'] and
gene_num not in self.test_ids['gene']):
continue
geno.addGeneTargetingReagent(reagent_id, reagent_symbol,
reagent_so_id, gene_id)
# The reagent targeted gene is added
# in the pheno_environment processing function.
# Add publication
# note that the publications can be comma-delimited,
# like: ZDB-PUB-100719-4,ZDB-PUB-130703-22
if publication != '':
pubs = re.split(r',', publication.strip())
for pub in pubs:
pub_id = 'ZFIN:' + pub.strip()
ref = Reference(graph, pub_id)
ref.addRefToGraph()
graph.addTriple(pub_id, self.globaltt['mentions'], reagent_id)
# Add comment?
if note != '':
model.addComment(reagent_id, note)
# use the variant hash for reagents to list the affected genes
if reagent_id not in self.variant_loci_genes:
self.variant_loci_genes[reagent_id] = [gene_id]
else:
if gene_id not in self.variant_loci_genes[reagent_id]:
self.variant_loci_genes[reagent_id] += [gene_id]
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with Reagent type %s", reagent_type)
return | [
"\n This method processes the gene targeting knockdown reagents,\n such as morpholinos, talens, and crisprs.\n We create triples for the reagents and pass the data into a hash map\n for use in the pheno_enviro method.\n\n Morpholinos work similar to RNAi.\n TALENs are artificial restriction enzymes\n that can be used for genome editing in situ.\n CRISPRs are knockdown reagents, working similar to RNAi\n but at the transcriptional level instead of mRNA level.\n\n You can read more about TALEN and CRISPR techniques in review\n [Gaj et al]\n http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5\n\n TODO add sequences\n\n Triples created:\n <reagent_id> is a gene_targeting_reagent\n <reagent_id> rdfs:label <reagent_symbol>\n <reagent_id> has type <reagent_so_id>\n <reagent_id> has comment <note>\n\n <publication_id> is an individual\n <publication_id> mentions <morpholino_id>\n :param reagent_type: should be one of: morph, talen, crispr\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_pheno_enviro(self, limit=None):
LOG.info("Processing environments")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
env_hash = {}
envo = Environment(graph)
# pp = pprint.PrettyPrinter(indent=4)
raw = '/'.join((self.rawdir, self.files['enviro']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
# (environment_num, condition_group,
# condition, description, blank) = row
(environment_id,
zeco_term_name, zeco_term_id,
chebi_term_name, chebi_term_id,
zfa_term_name, zfa_term_id,
altered_structure_name, altered_structure_id,
ncbi_taxon_name, ncbi_taxon_id) = row
environment_id = 'ZFIN:' + environment_id.strip()
if self.test_mode and environment_id not in self.test_ids['environment']:
continue
# We can start to build the extrinsic genotype using this file.
# A single environment can have >1 row in the file,
# so we build a hash to store all the components of
# the environment first.
# Then we can build a label containing all the parts.
# Using a strategy similar to what is used for genotypes
# to get the VSLCs and GVCs.
if environment_id not in env_hash:
env_hash[environment_id] = []
# create environmental components, and add to the hash
# cleanup the "condition" to remove non-id-friendly chars
cond_id = zeco_term_id.strip()
cond_id = re.sub(r'\W+', '-', cond_id)
# TODO Matt re model
# description is gone
# condition is gone
# condition_group is gone
# subcond_id = description.strip()
# subcond_id = re.sub(r'\W+', '-', subcond_id)
# env_component_id = '-'.join((condition_group.strip(),
# cond_id.strip()))
# if subcond_id != '':
# env_component_id = '-'.join((env_component_id, subcond_id))
# make them blank nodes
# env_component_id = '_:' + env_component_id
# env_condition = condition.strip()
# env_component_label = condition_group + '[' + condition + ']'
# if description != '':
# env_component_label += ': ' + description
# self.id_label_map[env_component_id] = env_component_label
# env_hash[environment_id] += [env_component_id]
# if environment_id not in enviro_label_hash:
# enviro_label_hash[environment_id] = [env_component_id]
# else:
# enviro_label_hash[environment_id].append(env_component_id)
# add each component to the environment as a part
# envo.addEnvironmentalCondition(
# env_component_id, env_component_label)
if not self.test_mode and limit is not None and line_counter > limit:
break
# End of loop through pheno_env file
csvfile.close()
LOG.info("Building complex environments from components")
self.environment_hash = env_hash
# iterate through the env hash to build the full environment label
for env_id in env_hash:
environment_labels = []
env_hash[env_id].sort()
env_component_list = env_hash[env_id]
for env_comp_id in env_component_list:
env_comp_label = self.id_label_map[env_comp_id]
environment_labels += [env_comp_label]
envo.addComponentToEnvironment(env_id, env_comp_id)
environment_labels.sort()
env_label = 'Environment that includes: ' + '; '.join(environment_labels)
envo.addEnvironment(env_id, env_label)
self.id_label_map[env_id] = env_label
LOG.info("Done with environments")
return | [
"\n The pheno_environment.txt (became pheno_environment_fish.txt?)\n file ties experimental conditions\n to an environment ID.\n An environment ID may have one or more associated conditions.\n Condition groups present:\n * chemical, physical, physiological,\n * salinity, temperature, _Generic-control\n\n First, we build a nice human-readable label\n of all of the components of the environment.\n This is added to our global id-label hash.\n TODO\n Eventually, we will add each component of the environment\n to an environmental object, but needs to be modeled first\n\n Triples created:\n <environment_id> is an Individual\n <environment_id> rdfs:label <environment_label>\n <environment_id> has type GENO:environment\n\n :param limit:\n :return:\n\n "
]
|
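Once the environmental-component modeling (currently commented out above) is restored, the label assembly at the end behaves like this sketch; the component ids and labels here are hypothetical:

env_hash = {'ZFIN:ZDB-EXP-041102-1': ['_:chemical-x', '_:temperature-y']}
id_label_map = {
    '_:chemical-x': 'chemical[x]',
    '_:temperature-y': 'temperature[y]',
}
labels = sorted(id_label_map[c] for c in env_hash['ZFIN:ZDB-EXP-041102-1'])
env_label = 'Environment that includes: ' + '; '.join(labels)
# -> 'Environment that includes: chemical[x]; temperature[y]'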
Please provide a description of the function:def _process_mappings(self, limit=None):
LOG.info("Processing chromosome mappings")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['mappings']['file']))
taxon_num = '7955'
taxon_id = 'NCBITaxon:' + taxon_num
taxon_label = 'Danio rerio'
# genome_id = geno.makeGenomeID(taxon_id)
geno.addGenome(taxon_id, taxon_label)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(zfin_num, symbol, so_id, panel_symbol,
chromosome, location, metric
# , empty
) = row
if self.test_mode and zfin_num \
not in self.test_ids['gene'] + self.test_ids['allele']:
continue
zfin_id = 'ZFIN:' + zfin_num.strip()
if re.match(r'ZDB-GENE.*', zfin_num):
# assume type and label get added elsewhere
model.addClassToGraph(zfin_id, None)
geno.addTaxon(taxon_id, zfin_id)
elif re.match(r'ZDB-ALT.*', zfin_num):
# assume type and label get added elsewhere
model.addIndividualToGraph(zfin_id, None)
geno.addTaxon(taxon_id, zfin_id)
else:
continue
# skip any of the others
# ZFIN don't catalog non-fish things, thankfully
model.makeLeader(zfin_id)
# make the chromosome class
chr_id = makeChromID(chromosome, taxon_id, 'CHR')
# chr_label = makeChromLabel(chromosome, taxon_label)
geno.addChromosomeClass(chromosome, taxon_id, taxon_label)
pinfo = self._get_mapping_panel_info(panel_symbol)
if pinfo is not None:
panel_label = ' '.join((panel_symbol, pinfo['type'], 'map'))
# add the panel as a genome build
panel_id = 'ZFIN:' + pinfo['id']
geno.addReferenceGenome(panel_id, panel_label, taxon_id)
model.addSynonym(panel_id, panel_symbol)
model.addDescription(panel_id, pinfo['name'])
# add the mapping-panel chromosome
chr_inst_id = makeChromID(chromosome, panel_id, 'MONARCH')
geno.addChromosomeInstance(
chromosome, panel_id, panel_label, chr_id)
# add the feature to the mapping-panel chromosome
feat = Feature(graph, zfin_id, None, None)
feat.addSubsequenceOfFeature(chr_inst_id)
# TODO add the coordinates see:
# https://github.com/JervenBolleman/FALDO/issues/24
else:
LOG.error(
"There's a panel (%s) we don't have info for", panel_symbol)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with chromosome mappings")
return | [
"\n This function imports linkage mappings of various entities\n to genetic locations in cM or cR.\n Entities include sequence variants, BAC ends, cDNA, ESTs, genes,\n PAC ends, RAPDs, SNPs, SSLPs, and STSs.\n Status: NEEDS REVIEW\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_uniprot_ids(self, limit=None):
LOG.info("Processing UniProt IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['uniprot']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, uniprot_id
# , empty
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
uniprot_id = 'UniProtKB:' + uniprot_id.strip()
geno.addGene(gene_id, gene_symbol)
# TODO: Abstract to one of the model utilities
model.addIndividualToGraph(
uniprot_id, None, self.globaltt['polypeptide'])
graph.addTriple(
gene_id, self.globaltt['has gene product'], uniprot_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with UniProt IDs")
return | [
"\n This method processes the mappings from ZFIN gene IDs to UniProtKB IDs.\n\n Triples created:\n <zfin_gene_id> a class\n <zfin_gene_id> rdfs:label gene_symbol\n\n <uniprot_id> is an Individual\n <uniprot_id> has type <polypeptide>\n\n <zfin_gene_id> has_gene_product <uniprot_id>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_human_orthos(self, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("Processing human orthos")
line_counter = 0
geno = Genotype(graph)
# model = Model(graph) # unused
raw = '/'.join((self.rawdir, self.files['human_orthos']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(zfin_id, zfin_symbol, zfin_name, human_symbol, human_name,
omim_id, gene_id, hgnc_id, evidence_code, pub_id
# , empty
) = row
if self.test_mode and zfin_id not in self.test_ids['gene']:
continue
# Add the zebrafish gene.
zfin_id = 'ZFIN:' + zfin_id.strip()
geno.addGene(zfin_id, zfin_symbol, None, zfin_name)
# Add the human gene.
gene_id = 'NCBIGene:' + gene_id.strip()
geno.addGene(gene_id, human_symbol, None, human_name)
# make the association
assoc = OrthologyAssoc(graph, self.name, zfin_id, gene_id)
# we don't know anything about the orthology type,
# so we just use the default
if re.match(r'ZDB', pub_id):
assoc.add_source('ZFIN:'+pub_id)
eco_id = self.get_orthology_evidence_code(evidence_code)
if eco_id is not None:
assoc.add_evidence(eco_id)
assoc.add_association_to_graph()
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with human orthos")
return | [
"\n This table provides ortholog mappings between zebrafish and humans.\n ZFIN has their own process of creating orthology mappings,\n that we take in addition to other orthology-calling sources\n (like PANTHER). We ignore the omim ids, and only use the gene_id.\n\n Triples created:\n <zfin gene id> a class\n <zfin gene id> rdfs:label gene_symbol\n <zfin gene id> dc:description gene_name\n\n <human gene id> a class\n <human gene id> rdfs:label gene_symbol\n <human gene id> dc:description gene_name\n <human gene id> equivalent class <omim id>\n\n <zfin gene id> orthology association <human gene id>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _map_sextuple_to_phenotype(
self, superterm1_id, subterm1_id, quality_id, superterm2_id,
subterm2_id, modifier):
zp_id = None
# zfin uses free-text modifiers,
# but we need to convert them to proper PATO classes for the mapping
mod_id = self.resolve(modifier, False)
if modifier == mod_id:
LOG.warning("no mapping for pato modifier " + modifier)
key = self._make_zpkey(
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, mod_id)
mapping = self.zp_map.get(key)
if mapping is None:
if modifier == 'normal':
pass
# LOG.info("Normal phenotypes not yet supported")
else:
LOG.warning(
"Couldn't map ZP id to %s with modifier %s",
"_".join((
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, mod_id)), modifier)
else:
zp_id = mapping['zp_id']
return zp_id | [
"\n This will take the 6-part EQ-style annotation\n used by ZFIN and return the ZP id.\n Currently relies on an external mapping file,\n but the method may be swapped out in the future\n :param superterm1_id:\n :param subterm1_id:\n :param quality_id:\n :param superterm2_id:\n :param subterm2_id:\n :param modifier:\n :return: ZP id\n "
]
|
Please provide a description of the function:def _load_zp_mappings(self, file):
zp_map = {}
LOG.info("Loading ZP-to-EQ mappings")
line_counter = 0
with open(file, 'r', encoding="utf-8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(zp_id, zp_label, superterm1_id, subterm1_id, quality_id,
modifier, superterm2_id, subterm2_id) = row
key = self._make_zpkey(
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, modifier)
zp_map[key] = {
'zp_id': zp_id,
'label': zp_label,
'superterm1_id': superterm1_id,
'subterm1_id': subterm1_id,
'quality_id': quality_id,
'modifier': modifier,
'superterm2_id': superterm2_id,
'subterm2_id': subterm2_id,
}
LOG.info("Loaded %s zp terms", zp_map.__len__())
return zp_map | [
"\n Given a file that defines the mapping between\n ZFIN-specific EQ definitions and the automatically derived ZP ids,\n create a mapping here.\n This may be deprecated in the future\n :return:\n\n "
]
|
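Each mapping-file row yields one zp_map entry keyed on the six EQ components. The key format below is an assumption (the _make_zpkey helper is not shown here; an underscore join matches the display format used in the warning in _map_sextuple_to_phenotype above), and the sample values are illustrative:

key = '_'.join(('ZFA:0000008', '', 'PATO:0000587', '', '', 'PATO:0000460'))
zp_map[key] = {
    'zp_id': 'ZP:0000175',  # illustrative ZP term
    'label': 'abnormal(ly) decreased size brain',
    'superterm1_id': 'ZFA:0000008',  # brain
    'subterm1_id': '',
    'quality_id': 'PATO:0000587',    # decreased size
    'modifier': 'PATO:0000460',      # abnormal
    'superterm2_id': '',
    'subterm2_id': '',
}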
Please provide a description of the function:def _get_other_allele_by_zygosity(allele_id, zygosity):
other_allele = None
if zygosity == 'homozygous':
other_allele = allele_id
elif zygosity == 'hemizygous':
other_allele = '0'
elif zygosity == 'unknown': # we'll use this as a convention
other_allele = '?'
elif zygosity == 'complex': # transgenics
other_allele = '0'
elif zygosity == 'heterozygous':
# passing on hets until a different fxn
pass
else:
LOG.warning("Unconfigured zygosity: %s", zygosity)
return other_allele | [
"\n A helper function to switch on the zygosity,\n and return the appropriate allele id, or symbol.\n :param allele_id:\n :param zygosity:\n :return:\n "
]
|
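Illustrative calls (the allele id is hypothetical; this assumes the function is a static helper on the ZFIN source class, since it takes no self):

assert ZFIN._get_other_allele_by_zygosity('ZFIN:ZDB-ALT-0001', 'homozygous') == 'ZFIN:ZDB-ALT-0001'
assert ZFIN._get_other_allele_by_zygosity('ZFIN:ZDB-ALT-0001', 'hemizygous') == '0'
assert ZFIN._get_other_allele_by_zygosity('ZFIN:ZDB-ALT-0001', 'unknown') == '?'
assert ZFIN._get_other_allele_by_zygosity('ZFIN:ZDB-ALT-0001', 'heterozygous') is None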
Please provide a description of the function:def _make_variant_locus_id(gene_id, allele_id):
varloci = '-'.join((gene_id, allele_id))
varloci = '_:' + re.sub(r'(ZFIN)?:', '', varloci)
return varloci | [
"\n A convenience method to uniformly create variant loci.\n If we want to materialize these in the monarch space,\n then we wrap with the self.make_id function.\n :param gene_id:\n :param allele_id:\n :return:\n\n "
]
|
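A worked example of the id convention (hypothetical identifiers): the ZFIN prefixes are stripped and the result is a blank node.

import re
gene_id = 'ZFIN:ZDB-GENE-980526-166'
allele_id = 'ZFIN:ZDB-ALT-980203-412'
varloci = '_:' + re.sub(r'(ZFIN)?:', '', '-'.join((gene_id, allele_id)))
# -> '_:ZDB-GENE-980526-166-ZDB-ALT-980203-412'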
Please provide a description of the function:def get_orthology_sources_from_zebrafishmine(self):
# For further documentation you can visit:
# http://www.intermine.org/wiki/PythonClient
# The following two lines will be needed in every python script:
service = Service("http://zebrafishmine.org/service")
# Get a new query on the class (table) you will be querying:
query = service.new_query("Gene")
# The view specifies the output columns
query.add_view(
"primaryIdentifier", "symbol", "homologues.homologue.symbol",
"homologues.evidence.evidenceCode.abbreviation",
"homologues.evidence.publications.primaryIdentifier",
"homologues.evidence.publications.pubMedId",
"homologues.crossReferences.identifier",
# only "orthologue" is used
# "homologues.crossReferences.linkType",
# only needed if >1 source
# "homologues.crossReferences.source.name"
)
# This query's custom sort order is specified below:
query.add_sort_order("Gene.name", "ASC")
# You can edit the constraint values below
query.add_constraint(
"homologues.dataSets.name", "=",
"ZFIN Curated Human, Mouse, Fly, Yeast Orthologue Data Set",
code="A")
query.add_constraint(
"homologues.homologue.organism.name", "=",
"Homo sapiens", code="B")
query.add_constraint(
"homologues.crossReferences.source.name", "=",
"Gene", code="D") # NCBIGene
query.add_constraint("symbol", "=", "*", code="C")
# Uncomment and edit the code below to specify your own custom logic:
# query.set_logic("C and A and B and D and D")
self.files['zmine_ortho_evidence'] = {}
self.files['zmine_ortho_evidence']['file'] = 'zmine_ortho_evidence.txt'
file = '/'.join(
(self.rawdir, self.files['zmine_ortho_evidence']['file']))
with open(file, 'w', encoding="utf-8", newline='\n') as csvfile:
filewriter = csv.writer(csvfile, delimiter='\t', quotechar='\"')
for row in query.rows():
stuff = [
row["primaryIdentifier"],
row["symbol"],
row["homologues.homologue.symbol"],
row["homologues.crossReferences.identifier"],
row["homologues.evidence.evidenceCode.abbreviation"],
row["homologues.evidence.publications.primaryIdentifier"],
row["homologues.evidence.publications.pubMedId"],
# row["homologues.crossReferences.linkType"],
# row["homologues.crossReferences.source.name"]
]
filewriter.writerow(stuff)
return | [
"\n Fetch the zfin gene to other species orthology annotations,\n together with the evidence for the assertion.\n Write the file locally to be read in a separate function.\n :return:\n\n "
]
|
Please provide a description of the function:def get_orthology_evidence_code(self, abbrev):
'''
move to localtt & globltt
'''
# AA Amino acid sequence comparison.
# CE Coincident expression.
# CL Conserved genome location (synteny).
# FC Functional complementation.
# FH Formation of functional heteropolymers.
# IX Immunological cross-reaction.
# NS Not specified.
# NT Nucleotide sequence comparison.
# SI Similar response to inhibitors.
# SL Similar subcellular location.
# SS Similar substrate specificity.
# SU Similar subunit structure.
# XH Cross-hybridization to same molecular probe.
# PT Phylogenetic Tree.
# OT Other
eco_abbrev_map = {
'AA': 'ECO:0000031', # BLAST protein sequence similarity evidence
'CE': 'ECO:0000008', # expression evidence
'CL': 'ECO:0000044', # sequence similarity FIXME
'FC': 'ECO:0000012', # functional complementation
# functional complementation in a heterologous system
'FH': 'ECO:0000064',
'IX': 'ECO:0000040', # immunological assay evidence
'NS': None,
'NT': 'ECO:0000032', # nucleotide blast
'SI': 'ECO:0000094', # biological assay evidence FIXME
'SL': 'ECO:0000122', # protein localization evidence FIXME
'SS': 'ECO:0000024', # protein binding evidence FIXME
'SU': 'ECO:0000027', # structural similarity evidence
'XH': 'ECO:0000002', # direct assay evidence FIXME
'PT': 'ECO:0000080', # phylogenetic evidence
'OT': None,
}
if abbrev not in eco_abbrev_map:
LOG.warning("Evidence code for orthology (%s) not mapped", str(abbrev))
return eco_abbrev_map.get(abbrev) | []
|
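Usage within the source class is straightforward; 'NS' and 'OT' map to None by design, and unknown abbreviations return None after logging a warning:

self.get_orthology_evidence_code('AA')  # -> 'ECO:0000031'
self.get_orthology_evidence_code('NS')  # -> None (mapped, no ECO term)
self.get_orthology_evidence_code('ZZ')  # -> None, plus a warning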
Please provide a description of the function:def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
self._process_diseases(limit)
self._process_genes(limit)
self._process_genes_kegg2ncbi(limit)
self._process_omim2gene(limit)
self._process_omim2disease(limit)
self._process_kegg_disease2gene(limit)
self._process_pathways(limit)
self._process_pathway_pubmed(limit)
# self._process_pathway_pathway(limit)
self._process_pathway_disease(limit)
self._process_pathway_ko(limit)
self._process_ortholog_classes(limit)
# TODO add in when refactoring for #141
# for f in ['hsa_orthologs', 'mmu_orthologs', 'rno_orthologs',
# 'dme_orthologs','dre_orthologs','cel_orthologs']:
# file = '/'.join((self.rawdir, self.files[f]['file']))
# self._process_orthologs(file, limit) # DONE #
LOG.info("Finished parsing")
return | [
"\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_pathways(self, limit=None):
LOG.info("Processing pathways")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
path = Pathway(graph)
raw = '/'.join((self.rawdir, self.files['pathway']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(pathway_id, pathway_name) = row
if self.test_mode and pathway_id not in self.test_ids['pathway']:
continue
pathway_id = 'KEGG-'+pathway_id.strip()
path.addPathway(pathway_id, pathway_name)
# we know that the pathway images from kegg map 1:1 here.
# so add those
image_filename = re.sub(r'KEGG-path:', '', pathway_id) + '.png'
image_url = 'http://www.genome.jp/kegg/pathway/map/'+image_filename
model.addDepiction(pathway_id, image_url)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with pathways")
return | [
"\n This method adds the KEGG pathway IDs.\n These are the canonical pathways as defined in KEGG.\n We also encode the graphical depiction\n which maps 1:1 with the identifier.\n\n Triples created:\n <pathway_id> is a GO:signal_transduction\n <pathway_id> rdfs:label <pathway_name>\n <gene_id> RO:involved_in <pathway_id>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_diseases(self, limit=None):
LOG.info("Processing diseases")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['disease']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(disease_id, disease_name) = row
disease_id = 'KEGG-'+disease_id.strip()
if disease_id not in self.label_hash:
self.label_hash[disease_id] = disease_name
if self.test_mode and disease_id not in self.test_ids['disease']:
continue
# Add the disease as a class.
# we don't get all of these from MONDO yet see:
# https://github.com/monarch-initiative/human-disease-ontology/issues/3
model.addClassToGraph(disease_id, disease_name)
# not typing the diseases as DOID:4 yet because
# I don't want to bulk up the graph unnecessarily
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with diseases")
return | [
"\n This method processes the KEGG disease IDs.\n\n Triples created:\n <disease_id> is a class\n <disease_id> rdfs:label <disease_name>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_genes(self, limit=None):
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
family = Family(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['hsa_genes']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_name) = row
gene_id = 'KEGG-'+gene_id.strip()
# the gene listing has a bunch of labels
# that are delimited, as:
# DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT,
# EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin
# it looks like the list is semicolon delimited
# (symbol, name, gene_class)
# where the symbol is a comma-delimited list
# here, we split them up.
# we will take the first abbreviation and make it the symbol
# then take the rest as synonyms
gene_stuff = re.split(r';', gene_name)
symbollist = re.split(r',', gene_stuff[0])
first_symbol = symbollist[0].strip()
if gene_id not in self.label_hash:
self.label_hash[gene_id] = first_symbol
if self.test_mode and gene_id not in self.test_ids['genes']:
continue
# Add the gene as a class.
geno.addGene(gene_id, first_symbol)
# add the long name as the description
if len(gene_stuff) > 1:
description = gene_stuff[1].strip()
model.addDefinition(gene_id, description)
# add the rest of the symbols as synonyms
for symbol in symbollist[1:]:
model.addSynonym(gene_id, symbol.strip())
if len(gene_stuff) > 2:
ko_part = gene_stuff[2]
ko_match = re.search(r'(K\d+)', ko_part)
if ko_match is not None:
ko = 'KEGG-ko:' + ko_match.group(1)
family.addMemberOf(gene_id, ko)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with genes")
return | [
"\n This method processes the KEGG gene IDs.\n The label for the gene is pulled as\n the first symbol in the list of gene symbols;\n the rest are added as synonyms.\n The long-form of the gene name is added as a definition.\n This is hardcoded to just processes human genes.\n\n Triples created:\n <gene_id> is a SO:gene\n <gene_id> rdfs:label <gene_name>\n\n :param limit:\n :return:\n\n "
]
|
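A worked sketch of the label parsing above (using the corrected r';' split), fed the sample from the inline comments:

import re
gene_name = ('DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT, '
             'EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin')
gene_stuff = re.split(r';', gene_name)             # [symbols, name, KO part]
symbollist = re.split(r',', gene_stuff[0])
first_symbol = symbollist[0].strip()               # 'DST' -> rdfs:label
synonyms = [s.strip() for s in symbollist[1:]]     # 'BP240', 'BPA', ...
description = gene_stuff[1].strip()                # 'dystonin'
ko = re.search(r'(K\d+)', gene_stuff[2]).group(1)  # 'K10382'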
Please provide a description of the function:def _process_ortholog_classes(self, limit=None):
LOG.info("Processing ortholog classes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(orthology_class_id, orthology_class_name) = row
if self.test_mode and orthology_class_id \
not in self.test_ids['orthology_classes']:
continue
# The orthology class is essentially a KEGG gene ID
# that is species agnostic.
# Add the ID and label as a gene family class
other_labels = re.split(r'[;,]', orthology_class_name)
# the first one is the label we'll use
orthology_label = other_labels[0]
orthology_class_id = 'KEGG-'+orthology_class_id.strip()
orthology_type = self.globaltt['gene_family']
model.addClassToGraph(
orthology_class_id, orthology_label, orthology_type)
if len(other_labels) > 1:
# add the rest as synonyms, skipping the first (already the label)
for s in other_labels[1:]:
model.addSynonym(orthology_class_id, s.strip())
# add the last one as the description
d = other_labels[-1]
model.addDescription(orthology_class_id, d)
# add the enzyme commission number (EC:1.2.99.5)as an xref
# sometimes there's two, like [EC:1.3.5.1 1.3.5.4]
# can also have a dash, like EC:1.10.3.-
ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d)
if ec_matches:
for ecm in ec_matches:
model.addXref(orthology_class_id, 'EC:' + ecm)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with ortholog classes")
return | [
"\n This method add the KEGG orthology classes to the graph.\n\n If there's an embedded enzyme commission number,\n that is added as an xref.\n\n Triples created:\n <orthology_class_id> is a class\n <orthology_class_id> has label <orthology_symbols>\n <orthology_class_id> has description <orthology_description>\n :param limit:\n\n :return:\n "
]
|
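The EC extraction above tolerates the bracketed forms mentioned in the comments, including multiple numbers and trailing dashes:

import re
for d in ('oxidase [EC:1.3.5.1 1.3.5.4]', 'oxidase [EC:1.10.3.-]'):
    print(re.findall(r'((?:\d+|\.|-){5,7})', d))
# ['1.3.5.1', '1.3.5.4']
# ['1.10.3.-']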
Please provide a description of the function:def _process_orthologs(self, raw, limit=None):
LOG.info("Processing orthologs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, orthology_class_id) = row
orthology_class_id = 'KEGG-' + orthology_class_id.strip()
gene_id = 'KEGG-' + gene_id.strip()
# note that the KO id references a group of orthologs,
# and is not 1:1 with the rest
# add the KO id as a gene-family grouping class
OrthologyAssoc(
graph, self.name, gene_id, None).add_gene_family_to_graph(
orthology_class_id)
# add gene and orthology class to graph;
# assume labels will be taken care of elsewhere
model.addClassToGraph(gene_id, None)
model.addClassToGraph(orthology_class_id, None)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with orthologs")
return | [
"\n This method maps orthologs for a species to the KEGG orthology classes.\n\n Triples created:\n <gene_id> is a class\n <orthology_class_id> is a class\n\n <assoc_id> has subject <gene_id>\n <assoc_id> has object <orthology_class_id>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_omim2gene(self, limit=None):
LOG.info("Processing OMIM to KEGG gene")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['omim2gene']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(kegg_gene_id, omim_id, link_type) = row
if self.test_mode and kegg_gene_id not in self.test_ids['genes']:
continue
kegg_gene_id = 'KEGG-' + kegg_gene_id.strip()
omim_id = re.sub(r'omim', 'OMIM', omim_id)
if link_type == 'equivalent':
# these are genes!
# so add them as a class then make equivalence
model.addClassToGraph(omim_id, None)
geno.addGene(kegg_gene_id, None)
if not DipperUtil.is_omim_disease(omim_id):
model.addEquivalentClass(kegg_gene_id, omim_id)
elif link_type == 'reverse':
# make an association between an OMIM ID & the KEGG gene ID
# we do this with omim ids because
# they are more atomic than KEGG ids
alt_locus_id = self._make_variant_locus_id(kegg_gene_id, omim_id)
alt_label = self.label_hash[alt_locus_id]
model.addIndividualToGraph(
alt_locus_id, alt_label, self.globaltt['variant_locus'])
geno.addAffectedLocus(alt_locus_id, kegg_gene_id)
model.addBlankNodeAnnotation(alt_locus_id)
# Add the disease to gene relationship.
rel = self.globaltt['is marker for']
assoc = G2PAssoc(graph, self.name, alt_locus_id, omim_id, rel)
assoc.add_association_to_graph()
elif link_type == 'original':
# these are sometimes a gene, and sometimes a disease
LOG.info(
'Unable to handle original link for %s-%s',
kegg_gene_id, omim_id)
else:
# don't know what these are
LOG.warning(
'Unhandled link type for %s-%s: %s',
kegg_gene_id, omim_id, link_type)
if (not self.test_mode) and (
limit is not None and line_counter > limit):
break
LOG.info("Done with OMIM to KEGG gene")
return | [
"\n This method maps the OMIM IDs and KEGG gene ID.\n Currently split based on the link_type field.\n Equivalent link types are mapped as gene XRefs.\n Reverse link types are mapped as disease to gene associations.\n Original link types are currently skipped.\n\n Triples created:\n <kegg_gene_id> is a Gene\n <omim_gene_id> is a Gene\n <kegg_gene_id>> hasXref <omim_gene_id>\n\n <assoc_id> has subject <omim_disease_id>\n <assoc_id> has object <kegg_gene_id>\n :param limit:\n\n :return:\n "
]
|
Please provide a description of the function:def _process_omim2disease(self, limit=None):
LOG.info("Processing 1:1 KEGG disease to OMIM disease mappings")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['omim2disease']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
(omim_disease_id, kegg_disease_id, link_type) = row
kegg_disease_id = 'KEGG-' + kegg_disease_id.strip()
omim_disease_id = re.sub(r'omim', 'OMIM', omim_disease_id)
# Create hash for the links from OMIM ID -> KEGG ID
if omim_disease_id not in self.omim_disease_hash:
self.omim_disease_hash[omim_disease_id] = [kegg_disease_id]
else:
self.omim_disease_hash[omim_disease_id].append(kegg_disease_id)
# Create hash for the links from KEGG ID -> OMIM ID
if kegg_disease_id not in self.kegg_disease_hash:
self.kegg_disease_hash[kegg_disease_id] = [omim_disease_id]
else:
self.kegg_disease_hash[kegg_disease_id].append(omim_disease_id)
# Now process the disease hashes
# and only pass 1:1 omim disease:KEGG disease entries.
for omim_disease_id in self.omim_disease_hash:
if self.test_mode and omim_disease_id not in self.test_ids['disease']:
continue
if (not self.test_mode) and (limit is not None and line_counter > limit):
break
line_counter += 1
if len(self.omim_disease_hash[omim_disease_id]) == 1:
kegg_disease_id = self.omim_disease_hash[omim_disease_id][0]
if len(self.kegg_disease_hash[kegg_disease_id]) == 1:
# add ids, and deal with the labels separately
model.addClassToGraph(kegg_disease_id, None)
model.addClassToGraph(omim_disease_id, None)
# TODO is this safe?
model.addEquivalentClass(kegg_disease_id, omim_disease_id)
else:
pass
# gu.addXref(g, omim_disease_id, kegg_disease_id)
# TODO add xrefs if >1:1 mapping?
LOG.info("Done with KEGG disease to OMIM disease mappings.")
return | [
"\n This method maps the KEGG disease IDs to\n the corresponding OMIM disease IDs.\n Currently this only maps KEGG diseases and OMIM diseases that are 1:1.\n\n Triples created:\n <kegg_disease_id> is a class\n <omim_disease_id> is a class\n <kegg_disease_id> hasXref <omim_disease_id>\n :param limit:\n\n :return:\n\n "
]
|
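The two hashes implement a bidirectional 1:1 filter; a compact equivalent with hypothetical mappings:

omim2kegg = {'OMIM:104300': ['KEGG-ds:H00056'],
             'OMIM:600807': ['KEGG-ds:H00079', 'KEGG-ds:H00080']}
kegg2omim = {'KEGG-ds:H00056': ['OMIM:104300'],
             'KEGG-ds:H00079': ['OMIM:600807'],
             'KEGG-ds:H00080': ['OMIM:600807']}
one_to_one = [(o, k[0]) for o, k in omim2kegg.items()
              if len(k) == 1 and len(kegg2omim[k[0]]) == 1]
# -> [('OMIM:104300', 'KEGG-ds:H00056')]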
Please provide a description of the function:def _process_genes_kegg2ncbi(self, limit=None):
LOG.info("Processing KEGG gene IDs to NCBI gene IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['ncbi']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(kegg_gene_id, ncbi_gene_id, link_type) = row
if self.test_mode and kegg_gene_id not in self.test_ids['genes']:
continue
# Adjust the NCBI gene ID prefix.
ncbi_gene_id = re.sub(r'ncbi-geneid', 'NCBIGene', ncbi_gene_id)
kegg_gene_id = 'KEGG-' + kegg_gene_id
# Adding the KEGG gene ID to the graph here is redundant,
# unless there happens to be additional gene IDs in this table
# not present in the genes table.
model.addClassToGraph(kegg_gene_id, None)
model.addClassToGraph(ncbi_gene_id, None)
model.addEquivalentClass(kegg_gene_id, ncbi_gene_id)
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with KEGG gene IDs to NCBI gene IDs")
return | [
"\n This method maps the KEGG human gene IDs\n to the corresponding NCBI Gene IDs.\n\n Triples created:\n <kegg_gene_id> is a class\n <ncbi_gene_id> is a class\n <kegg_gene_id> equivalentClass <ncbi_gene_id>\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_pathway_pubmed(self, limit):
LOG.info("Processing KEGG pathways to pubmed ids")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
raw = '/'.join((self.rawdir, self.files['pathway_pubmed']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(pubmed_id, kegg_pathway_num) = row
if self.test_mode and kegg_pathway_num not in self.test_ids['pathway']:
continue
pubmed_id = pubmed_id.upper()
# will look like KEGG-path:map04130
kegg_id = 'KEGG-' + kegg_pathway_num
r = Reference(graph, pubmed_id, self.globaltt['journal article'])
r.addRefToGraph()
graph.addTriple(pubmed_id, self.globaltt['is_about'], kegg_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
return | [
"\n Indicate that a pathway is annotated directly to a paper (is about)\n via it's pubmed id.\n :param limit:\n :return:\n "
]
|
Please provide a description of the function:def _process_pathway_disease(self, limit):
LOG.info("Processing KEGG pathways to disease ids")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
raw = '/'.join((self.rawdir, self.files['pathway_disease']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(disease_id, kegg_pathway_num) = row
if self.test_mode and kegg_pathway_num not in self.test_ids['pathway']:
continue
disease_id = 'KEGG-' + disease_id
# will look like KEGG-path:map04130 or KEGG-path:hsa04130
pathway_id = 'KEGG-' + kegg_pathway_num
graph.addTriple(
pathway_id,
self.globaltt['causally upstream of or within'],
disease_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
return | [
"\n We make a link between the pathway identifiers,\n and any diseases associated with them.\n Since we model diseases as processes, we make a triple saying that\n the pathway may be causally upstream of or within the disease process.\n\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_pathway_pathway(self, limit):
LOG.info("Processing KEGG pathways to other ids")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['pathway_pathway']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(pathway_id_1, pathway_id_2) = row
if self.test_mode and pathway_id_1 not in self.test_ids['pathway']:
continue
pathway_id_1 = 'KEGG-' + pathway_id_1
# will look like KEGG-path:map04130 or KEGG-path:ko04130
pathway_id_2 = 'KEGG-' + pathway_id_2
if pathway_id_1 != pathway_id_2:
model.addEquivalentClass(pathway_id_1, pathway_id_2)
if not self.test_mode and limit is not None and line_counter > limit:
break
return | [
"\n There are \"map\" and \"ko\" identifiers for pathways.\n This makes equivalence mapping between them, where they exist.\n :param limit:\n :return:\n\n "
]
|
Please provide a description of the function:def _process_pathway_ko(self, limit):
LOG.info("Processing KEGG pathways to kegg ortholog classes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
raw = '/'.join((self.rawdir, self.files['pathway_ko']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(ko_id, pathway_id) = row
if self.test_mode and pathway_id not in self.test_ids['pathway']:
continue
pathway_id = 'KEGG-' + pathway_id
ko_id = 'KEGG-' + ko_id
p = Pathway(graph)
p.addGeneToPathway(ko_id, pathway_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
return | [
"\n This adds the kegg orthologous group (gene) to the canonical pathway.\n :param limit:\n\n :return:\n "
]
|
Please provide a description of the function:def _make_variant_locus_id(self, gene_id, disease_id):
alt_locus_id = '_:'+re.sub(
r':', '', gene_id) + '-' + re.sub(r':', '', disease_id) + 'VL'
alt_label = self.label_hash.get(gene_id)
disease_label = self.label_hash.get(disease_id)
if alt_label is not None and alt_label != '':
alt_label = 'some variant of ' + str(alt_label)
if disease_label is not None and disease_label != '':
alt_label += ' that is associated with ' + str(disease_label)
else:
alt_label = None
self.label_hash[alt_locus_id] = alt_label
return alt_locus_id | [
"\n We actually want the association between the gene and the disease\n to be via an alternate locus not the \"wildtype\" gene itself.\n so we make an anonymous alternate locus,\n and put that in the association\n We also make the label for the anonymous class,\n and add it to the label hash\n\n :param gene_id:\n :param disease_id:\n :return:\n\n "
]
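A small, hedged illustration of the blank-node id and label construction above, using made-up gene/disease ids and labels:

import re

# Hypothetical inputs; only the id/label pattern is the point here.
gene_id, disease_id = 'KEGG-hsa:3101', 'KEGG-ds:H00056'
label_hash = {'KEGG-hsa:3101': 'HK3', 'KEGG-ds:H00056': 'Alzheimer disease'}
alt_locus_id = '_:' + re.sub(r':', '', gene_id) \
    + '-' + re.sub(r':', '', disease_id) + 'VL'
alt_label = 'some variant of ' + label_hash[gene_id] \
    + ' that is associated with ' + label_hash[disease_id]
print(alt_locus_id)  # _:KEGG-hsa3101-KEGG-dsH00056VL
print(alt_label)     # some variant of HK3 that is associated with Alzheimer disease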
|
Please provide a description of the function:def add_gene_family_to_graph(self, family_id):
family = Family(self.graph)
gene_family = self.globaltt['gene_family']
# make the assumption that the genes
# have already been added as classes previously
self.model.addIndividualToGraph(family_id, None, gene_family)
# add each gene to the family
family.addMember(family_id, self.sub)
family.addMember(family_id, self.obj)
return | [
"\n Make an association between a group of genes and some grouping class.\n We make the assumption that the genes in the association\n are part of the supplied family_id, and that the genes have\n already been declared as classes elsewhere.\n The family_id is added as an individual of type DATA:gene_family.\n\n Triples:\n <family_id> a EDAM-DATA:gene_family\n <family_id> RO:has_member <gene1>\n <family_id> RO:has_member <gene2>\n\n :param family_id:\n :param g: the graph to modify\n :return:\n "
]
|
Please provide a description of the function:def parse(self, limit=None):
if self.test_only:
self.test_mode = True
if self.tax_ids is None:
LOG.info("No taxon filter set; Dumping all orthologous associations.")
else:
LOG.info("Only the following taxa will be dumped: %s", self.tax_ids)
self._get_orthologs(limit)
return | [
"\n :return: None\n "
]
|
Please provide a description of the function:def _get_orthologs(self, limit):
LOG.info("getting orthologs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
unprocessed_gene_ids = set() # may be faster to make a set after
for k in self.files.keys():
f = '/'.join((self.rawdir, self.files[k]['file']))
matchcounter = 0
mytar = tarfile.open(f, 'r:gz')
# assume that the first entry is the item
fname = mytar.getmembers()[0]
LOG.info("Parsing %s", fname.name)
line_counter = 0
with mytar.extractfile(fname) as csvfile:
for line in csvfile:
# skip comment lines
if re.match(r'^#', line.decode()):
LOG.info("Skipping header line")
continue
line_counter += 1
# a little feedback to the user since there's so many
if line_counter % 1000000 == 0:
LOG.info(
"Processed %d lines from %s",
line_counter, fname.name)
line = line.decode().strip()
# parse each row. ancestor_taxon is unused
# HUMAN|Ensembl=ENSG00000184730|UniProtKB=Q0VD83
# MOUSE|MGI=MGI=2176230|UniProtKB=Q8VBT6
# LDO Euarchontoglires PTHR15964
(a, b, orthology_class, ancestor_taxon,
panther_id) = line.split('\t')
(species_a, gene_a, protein_a) = a.split('|')
(species_b, gene_b, protein_b) = b.split('|')
# skip the entries that don't have homolog relationships
# with the test ids
if self.test_mode and not (
re.sub(r'UniProtKB=', '',
protein_a) in self.test_ids or
re.sub(r'UniProtKB=', '', protein_b)
in self.test_ids):
continue
# map the taxon abbreviations to ncbi taxon id numbers
taxon_a = self.resolve(species_a).split(':')[1].strip()
taxon_b = self.resolve(species_b).split(':')[1].strip()
# ###uncomment the following code block
# if you want to filter based on taxid of favorite animals
# taxids = [9606,10090,10116,7227,7955,6239,8355]
# taxids = [9606] #human only
# retain only those orthologous relationships to genes
# in the specified taxids
# using AND will get you only those associations where
# gene1 AND gene2 are in the taxid list (most-filter)
# using OR will get you any associations where
# gene1 OR gene2 are in the taxid list (some-filter)
if self.tax_ids is not None and \
(taxon_a not in self.tax_ids) and \
(taxon_b not in self.tax_ids):
continue
else:
matchcounter += 1
if limit is not None and matchcounter > limit:
break
# ### end code block for filtering on taxon
# fix the gene identifiers
gene_a = re.sub(r'=', ':', gene_a)
gene_b = re.sub(r'=', ':', gene_b)
clean_gene = self._clean_up_gene_id(
gene_a, species_a, self.curie_map)
if clean_gene is None:
unprocessed_gene_ids.add(gene_a)
gene_a = clean_gene
clean_gene = self._clean_up_gene_id(
gene_b, species_b, self.curie_map)
if clean_gene is None:
unprocessed_gene_ids.add(gene_b)
gene_b = clean_gene
# a special case here; mostly some rat genes
# they use symbols instead of identifiers. will skip
if gene_a is None or gene_b is None:
continue
rel = self.resolve(orthology_class)
evidence_id = self.globaltt['phylogenetic evidence']
# add the association and relevant nodes to graph
assoc = OrthologyAssoc(graph, self.name, gene_a, gene_b, rel)
assoc.add_evidence(evidence_id)
# add genes to graph;
# assume labels will be taken care of elsewhere
model.addClassToGraph(gene_a, None)
model.addClassToGraph(gene_b, None)
# might as well add the taxon info for completeness
graph.addTriple(
gene_a, self.globaltt['in taxon'], 'NCBITaxon:' + taxon_a)
graph.addTriple(
gene_b, self.globaltt['in taxon'], 'NCBITaxon:' + taxon_b)
assoc.add_association_to_graph()
# note this is incomplete...
# it won't construct the full family hierarchy,
# just the top-grouping
assoc.add_gene_family_to_graph(
':'.join(('PANTHER', panther_id)))
if not self.test_mode \
and limit is not None and line_counter > limit:
break
# make report on unprocessed_gene_ids
LOG.info("finished processing %s", f)
LOG.warning(
"The following gene ids were unable to be processed: %s",
str(unprocessed_gene_ids))
return | [
"\n This will process each of the specified pairwise orthology files,\n creating orthology associations based on the specified orthology code.\n this currently assumes that each of the orthology files is identically\n formatted. Relationships are made between genes here.\n\n There is also a nominal amount of identifier re-formatting:\n MGI:MGI --> MGI\n Ensembl --> ENSEMBL\n\n we skip any genes where we don't know how to map the gene identifiers.\n For example, Gene:Huwe1 for RAT is not an identifier, so we skip any\n mappings to this identifier. Often, the there are two entries for the\n same gene (base on equivalent Uniprot id), and so we are not actually\n losing any information.\n\n We presently have a hard-coded filter to select only orthology\n relationships where one of the pair is in our species of interest\n (Mouse and Human, for the moment).\n This will be added as a configurable parameter in the future.\n\n Genes are also added to a grouping class defined with a PANTHER id.\n\n Triples:\n <gene1_id> RO:othologous <gene2_id>\n <assoc_id> :hasSubject <gene1_id>\n <assoc_id> :hasObject <gene2_id>\n <assoc_id> :hasPredicate <RO:orthologous>\n <assoc_id> dc:evidence ECO:phylogenetic_evidence\n\n <panther_id> a DATA:gene_family\n <panther_id> RO:has_member <gene1_id>\n <panther_id> RO:has_member <gene2_id>\n\n :param limit:\n :return:\n\n "
]
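To make the row handling above concrete, here is a hedged sketch of parsing a single PANTHER record; the line is adapted from the comment in the source and may not match a real record exactly:

import re

line = ('HUMAN|Ensembl=ENSG00000184730|UniProtKB=Q0VD83\t'
        'MOUSE|MGI=MGI=2176230|UniProtKB=Q8VBT6\t'
        'LDO\tEuarchontoglires\tPTHR15964')
a, b, orthology_class, ancestor_taxon, panther_id = line.split('\t')
species_a, gene_a, protein_a = a.split('|')
species_b, gene_b, protein_b = b.split('|')
gene_a = re.sub(r'=', ':', gene_a)  # Ensembl=ENSG... -> Ensembl:ENSG...
gene_b = re.sub(r'=', ':', gene_b)  # MGI=MGI=2176230 -> MGI:MGI:2176230
print(species_a, gene_a, orthology_class, species_b, gene_b, panther_id)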
|
Please provide a description of the function:def _clean_up_gene_id(geneid, sp, curie_map):
# special case for MGI
geneid = re.sub(r'MGI:MGI:', 'MGI:', geneid)
# rewrite Ensembl --> ENSEMBL
geneid = re.sub(r'Ensembl', 'ENSEMBL', geneid)
# rewrite Gene:CELE --> WormBase
        # these are old-school cosmid identifiers
geneid = re.sub(r'Gene:CELE', 'WormBase:', geneid)
if sp == 'CAEEL':
if re.match(r'(Gene|ENSEMBLGenome):\w+\.\d+', geneid):
geneid = re.sub(
r'(?:Gene|ENSEMBLGenome):(\w+\.\d+)',
r'WormBase:\1', geneid)
if sp == 'DROME':
if re.match(r'(ENSEMBLGenome):\w+\.\d+', geneid):
geneid = re.sub(
r'(?:ENSEMBLGenome):(\w+\.\d+)', r'FlyBase:\1', geneid)
# rewrite GeneID --> NCBIGene
geneid = re.sub(r'GeneID', 'NCBIGene', geneid)
# rewrite Gene:Dmel --> FlyBase
geneid = re.sub(r'Gene:Dmel_', 'FlyBase:', geneid)
# rewrite Gene:CG --> FlyBase:CG
geneid = re.sub(r'Gene:CG', 'FlyBase:CG', geneid)
# rewrite ENSEMBLGenome:FBgn --> FlyBase:FBgn
geneid = re.sub(r'ENSEMBLGenome:FBgn', 'FlyBase:FBgn', geneid)
# rewrite Gene:<ensembl ids> --> ENSEMBL:<id>
geneid = re.sub(r'Gene:ENS', 'ENSEMBL:ENS', geneid)
# rewrite Gene:<Xenbase ids> --> Xenbase:<id>
geneid = re.sub(r'Gene:Xenbase:', 'Xenbase:', geneid)
# TODO this would be much better done as
        # if foo not in self.curie_map:
# if re.match(r'(Gene|ENSEMBLGenome):', geneid) or \
# re.match(r'Gene_ORFName', geneid) or \
# re.match(r'Gene_Name', geneid):
# # LOG.warning(
# #"Found an identifier I don't know how to fix (species %s): %s",
# # sp, geneid)
pfxlcl = re.split(r':', geneid)
pfx = pfxlcl[0]
if pfx is None or pfx not in curie_map:
# LOG.warning( "No curie prefix for (species %s): %s", sp, geneid)
geneid = None
return geneid | [
"\n A series of identifier rewriting to conform with\n standard gene identifiers.\n :param geneid:\n :param sp:\n :return:\n "
]
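A runnable demo of a few of the rewrite rules above on made-up identifiers (the real method applies more rules plus species-specific cases):

import re

def demo_clean(geneid):
    # a subset of the rewrites from _clean_up_gene_id, for illustration only
    geneid = re.sub(r'MGI:MGI:', 'MGI:', geneid)
    geneid = re.sub(r'GeneID', 'NCBIGene', geneid)
    geneid = re.sub(r'Gene:Dmel_', 'FlyBase:', geneid)
    geneid = re.sub(r'Gene:ENS', 'ENSEMBL:ENS', geneid)
    return geneid

for gid in ('MGI:MGI:2176230', 'GeneID:348932',
            'Gene:Dmel_CG5671', 'Gene:ENSG00000184730'):
    print(gid, '->', demo_clean(gid))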
|
Please provide a description of the function:def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
# pub_map = dict()
# file_path = '/'.join((self.rawdir,
# self.static_files['publications']['file']))
# if os.path.exists(file_path) is True:
# pub_map = self._parse_publication_file(
# self.static_files['publications']['file']
# )
if self.test_only:
self.test_mode = True
self.geno = Genotype(self.graph)
self.pathway = Pathway(self.graph)
self._parse_ctd_file(
limit, self.files['chemical_disease_interactions']['file'])
self._parse_ctd_file(limit, self.files['gene_pathway']['file'])
self._parse_ctd_file(limit, self.files['gene_disease']['file'])
self._parse_curated_chem_disease(limit)
LOG.info("Done parsing files.")
return | [
"\n Override Source.parse()\n Parses version and interaction information from CTD\n Args:\n :param limit (int, optional) limit the number of rows processed\n Returns:\n :return None\n "
]
|
Please provide a description of the function:def _parse_ctd_file(self, limit, file):
row_count = 0
version_pattern = re.compile(r'^# Report created: (.+)$')
is_versioned = False
file_path = '/'.join((self.rawdir, file))
with gzip.open(file_path, 'rt') as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
for row in reader:
# Scan the header lines until we get the version
                # There is no official version so we are using
                # the upload timestamp instead
if is_versioned is False:
match = re.match(version_pattern, ' '.join(row))
if match:
version = re.sub(r'\s|:', '-', match.group(1))
# TODO convert this timestamp to a proper timestamp
self.dataset.setVersion(version)
is_versioned = True
elif re.match(r'^#', ' '.join(row)):
pass
else:
row_count += 1
if file == self.files[
'chemical_disease_interactions']['file']:
self._process_interactions(row)
elif file == self.files['gene_pathway']['file']:
self._process_pathway(row)
elif file == self.files['gene_disease']['file']:
self._process_disease2gene(row)
if not self.test_mode and limit is not None and row_count >= limit:
break
return | [
"\n Parses files in CTD.files dictionary\n Args:\n :param limit (int): limit the number of rows processed\n :param file (str): file name (must be defined in CTD.file)\n Returns:\n :return None\n "
]
|
Please provide a description of the function:def _process_pathway(self, row):
model = Model(self.graph)
self._check_list_len(row, 4)
(gene_symbol, gene_id, pathway_name, pathway_id) = row
if self.test_mode and (int(gene_id) not in self.test_geneids):
return
entrez_id = 'NCBIGene:' + gene_id
pathways_to_scrub = [
'REACT:REACT_116125', # disease
"REACT:REACT_111045", # developmental biology
"REACT:REACT_200794", # Mus musculus biological processes
"REACT:REACT_13685"] # neuronal system ?
if pathway_id in pathways_to_scrub:
# these are lame "pathways" like generic
# "disease" and "developmental biology"
return
# convert KEGG pathway ids... KEGG:12345 --> KEGG-path:map12345
if re.match(r'KEGG', pathway_id):
pathway_id = re.sub(r'KEGG:', 'KEGG-path:map', pathway_id)
# just in case, add it as a class
model.addClassToGraph(entrez_id, None)
self.pathway.addPathway(pathway_id, pathway_name)
self.pathway.addGeneToPathway(entrez_id, pathway_id)
return | [
"\n Process row of CTD data from CTD_genes_pathways.tsv.gz\n and generate triples\n Args:\n :param row (list): row of CTD data\n Returns:\n :return None\n "
]
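A hedged example of the KEGG pathway id rewrite described above; the numeric id is made up:

import re

pathway_id = 'KEGG:04130'
if re.match(r'KEGG', pathway_id):
    pathway_id = re.sub(r'KEGG:', 'KEGG-path:map', pathway_id)
print(pathway_id)  # KEGG-path:map04130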
|
Please provide a description of the function:def _fetch_disambiguating_assoc(self):
disambig_file = '/'.join(
(self.rawdir, self.static_files['publications']['file']))
assoc_file = '/'.join(
(self.rawdir, self.files['chemical_disease_interactions']['file']))
# check if there is a local association file,
# and download if it's dated later than the original intxn file
if os.path.exists(disambig_file):
            # compare modification times; whole os.stat tuples don't order by date
            dfile_dt = os.stat(disambig_file).st_mtime
            afile_dt = os.stat(assoc_file).st_mtime
if dfile_dt < afile_dt:
LOG.info(
"Local file date before chem-disease assoc file. "
" Downloading...")
else:
LOG.info(
"Local file date after chem-disease assoc file. "
" Skipping download.")
return
all_pubs = set()
dual_evidence = re.compile(r'^marker\/mechanism\|therapeutic$')
# first get all the unique publications
with gzip.open(assoc_file, 'rt') as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
for row in reader:
if re.match(r'^#', ' '.join(row)):
continue
self._check_list_len(row, 10)
(chem_name, chem_id, cas_rn, disease_name, disease_id,
direct_evidence, inferred_gene_symbol, inference_score,
omim_ids, pubmed_ids) = row
if direct_evidence == '' or not \
re.match(dual_evidence, direct_evidence):
continue
if pubmed_ids is not None and pubmed_ids != '':
all_pubs.update(set(re.split(r'\|', pubmed_ids)))
sorted_pubs = sorted(list(all_pubs))
# now in batches of 4000, we fetch the chemical-disease associations
batch_size = 4000
params = {
'inputType': 'reference',
'report': 'diseases_curated',
'format': 'tsv',
'action': 'Download'
}
url = 'http://ctdbase.org/tools/batchQuery.go?q'
start = 0
        end = min((batch_size, len(sorted_pubs)))  # get them in batches of 4000
with open(disambig_file, 'wb') as dmbf:
while start < len(sorted_pubs):
params['inputTerms'] = '|'.join(sorted_pubs[start:end])
# fetch the data from url
LOG.info(
'fetching %d (%d-%d) refs: %s',
len(re.split(r'\|', params['inputTerms'])),
start, end, params['inputTerms'])
data = urllib.parse.urlencode(params)
encoding = 'utf-8'
binary_data = data.encode(encoding)
req = urllib.request.Request(url, binary_data)
resp = urllib.request.urlopen(req)
dmbf.write(resp.read())
start = end
end = min((start + batch_size, len(sorted_pubs)))
return | [
"\n For any of the items in the chemical-disease association file that have\n ambiguous association types we fetch the disambiguated associations\n using the batch query API, and store these in a file. Elsewhere, we can\n loop through the file and create the appropriate associations.\n\n :return:\n\n "
]
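The batch windowing above is easier to see at a toy scale; this sketch uses stand-in pubmed ids and a small batch size:

sorted_pubs = [str(10000 + i) for i in range(10)]  # stand-ins for real ids
batch_size = 4
start, end = 0, min(batch_size, len(sorted_pubs))
while start < len(sorted_pubs):
    # in the real method this string becomes params['inputTerms']
    print('inputTerms:', '|'.join(sorted_pubs[start:end]))
    start = end
    end = min(start + batch_size, len(sorted_pubs))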
|
Please provide a description of the function:def _process_interactions(self, row):
model = Model(self.graph)
self._check_list_len(row, 10)
(chem_name, chem_id, cas_rn, disease_name, disease_id, direct_evidence,
inferred_gene_symbol, inference_score, omim_ids, pubmed_ids) = row
if direct_evidence == '':
return
        # group the alternation so both anchors apply to each alternative
        evidence_pattern = re.compile(r'^(therapeutic|marker/mechanism)$')
# dual_evidence = re.compile(r'^marker\/mechanism\|therapeutic$')
# filter on those diseases that are mapped to omim ids in the test set
intersect = list(
set(['OMIM:' + str(i) for i in omim_ids.split('|')] +
[disease_id]) & set(self.test_diseaseids))
if self.test_mode and len(intersect) < 1:
return
chem_id = 'MESH:' + chem_id
reference_list = self._process_pubmed_ids(pubmed_ids)
if re.match(evidence_pattern, direct_evidence):
rel_id = self.resolve(direct_evidence)
model.addClassToGraph(chem_id, chem_name)
model.addClassToGraph(disease_id, None)
self._make_association(chem_id, disease_id, rel_id, reference_list)
else:
# there's dual evidence, but haven't mapped the pubs
pass
# LOG.debug(
# "Dual evidence for %s (%s) and %s (%s)",
# chem_name, chem_id, disease_name, disease_id)
return | [
"\n Process row of CTD data from CTD_chemicals_diseases.tsv.gz\n and generate triples. Only create associations based on direct evidence\n (not using the inferred-via-gene), and unambiguous relationships.\n (Ambiguous ones will be processed in the sister method using the\n disambiguated file). There are no OMIM ids for diseases in these cases,\n so we associate with only the mesh disease ids.\n Args:\n :param row (list): row of CTD data\n Returns:\n :return None\n "
]
|
Please provide a description of the function:def _process_disease2gene(self, row):
# if self.test_mode:
# graph = self.testgraph
# else:
# graph = self.graph
# self._check_list_len(row, 9)
# geno = Genotype(graph)
# gu = GraphUtils(curie_map.get())
model = Model(self.graph)
(gene_symbol, gene_id, disease_name, disease_id, direct_evidence,
inference_chemical_name, inference_score, omim_ids, pubmed_ids) = row
# we only want the direct associations; skipping inferred for now
if direct_evidence == '' or direct_evidence != 'marker/mechanism':
return
# scrub some of the associations...
# it seems odd to link human genes to the following "diseases"
diseases_to_scrub = [
'MESH:D004283', # dog diseases
'MESH:D004195', # disease models, animal
'MESH:D030342', # genetic diseases, inborn
            'MESH:D040181',  # genetic diseases, X-linked
'MESH:D020022'] # genetic predisposition to a disease
if disease_id in diseases_to_scrub:
LOG.info(
"Skipping association between NCBIGene:%s and %s",
str(gene_id), disease_id)
return
intersect = list(
set(['OMIM:' + str(i) for i in omim_ids.split('|')] +
[disease_id]) & set(self.test_diseaseids))
if self.test_mode and (
int(gene_id) not in self.test_geneids or len(intersect) < 1):
return
# there are three kinds of direct evidence:
# (marker/mechanism | marker/mechanism|therapeutic | therapeutic)
# we are only using the "marker/mechanism" for now
# TODO what does it mean for a gene to be therapeutic for disease?
# a therapeutic target?
gene_id = 'NCBIGene:' + gene_id
preferred_disease_id = disease_id
if omim_ids is not None and omim_ids != '':
omim_id_list = re.split(r'\|', omim_ids)
# If there is only one OMIM ID for the Disease ID
# or in the omim_ids list,
# use the OMIM ID preferentially over any MeSH ID.
if re.match(r'OMIM:.*', disease_id):
if len(omim_id_list) > 1:
# the disease ID is an OMIM ID and
# there is more than one OMIM entry in omim_ids.
# Currently no entries satisfy this condition
pass
elif disease_id != ('OMIM:' + omim_ids):
# the disease ID is an OMIM ID and
# there is only one non-equiv OMIM entry in omim_ids
# we preferentially use the disease_id here
LOG.warning(
"There may be alternate identifier for %s: %s",
disease_id, omim_ids)
# TODO: What should be done with the alternate disease IDs?
else:
if len(omim_id_list) == 1:
# the disease ID is not an OMIM ID
# and there is only one OMIM entry in omim_ids.
preferred_disease_id = 'OMIM:' + omim_ids
elif len(omim_id_list) > 1:
# This is when the disease ID is not an OMIM ID and
# there is more than one OMIM entry in omim_ids.
pass
model.addClassToGraph(gene_id, None)
# not sure if MESH is getting added separately.
# adding labels here for good measure
dlabel = None
if re.match(r'MESH', preferred_disease_id):
dlabel = disease_name
model.addClassToGraph(preferred_disease_id, dlabel)
# Add the disease to gene relationship.
rel_id = self.resolve(direct_evidence)
refs = self._process_pubmed_ids(pubmed_ids)
self._make_association(gene_id, preferred_disease_id, rel_id, refs)
return | [
"\n Here, we process the disease-to-gene associations.\n Note that we ONLY process direct associations\n (not inferred through chemicals).\n Furthermore, we also ONLY process \"marker/mechanism\" associations.\n\n We preferentially utilize OMIM identifiers over MESH identifiers\n for disease/phenotype.\n Therefore, if a single OMIM id is listed under the \"omim_ids\" list,\n we will choose this over any MeSH id that might be listed as\n the disease_id. If multiple OMIM ids are listed in the omim_ids column,\n we toss this for now.\n (Mostly, we are not sure what to do with this information.)\n\n We also pull in the MeSH labels here (but not OMIM) to ensure that\n we have them (as they may not be brought in separately).\n :param row:\n :return:\n\n "
]
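A hedged sketch of the OMIM-over-MeSH preference rule above; the ids are illustrative, and the real method also warns about OMIM/OMIM mismatches:

import re

def preferred(disease_id, omim_ids):
    omim_id_list = re.split(r'\|', omim_ids) if omim_ids else []
    if not re.match(r'OMIM:.*', disease_id) and len(omim_id_list) == 1:
        return 'OMIM:' + omim_ids  # a single OMIM entry wins over MeSH
    return disease_id              # ambiguous or already OMIM: keep as-is

print(preferred('MESH:D000544', '104300'))         # OMIM:104300
print(preferred('MESH:D000544', '104300|608907'))  # MESH:D000544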
|
Please provide a description of the function:def _make_association(self, subject_id, object_id, rel_id, pubmed_ids):
# TODO pass in the relevant Assoc class rather than relying on G2P
assoc = G2PAssoc(self.graph, self.name, subject_id, object_id, rel_id)
if pubmed_ids is not None and len(pubmed_ids) > 0:
for pmid in pubmed_ids:
ref = Reference(
self.graph, pmid, self.globaltt['journal article'])
ref.addRefToGraph()
assoc.add_source(pmid)
assoc.add_evidence(self.globaltt['traceable author statement'])
assoc.add_association_to_graph()
return | [
"\n Make a reified association given an array of pubmed identifiers.\n\n Args:\n :param subject_id id of the subject of the association (gene/chem)\n :param object_id id of the object of the association (disease)\n :param rel_id relationship id\n :param pubmed_ids an array of pubmed identifiers\n Returns:\n :return None\n\n "
]
|
Please provide a description of the function:def _process_pubmed_ids(pubmed_ids):
if pubmed_ids.strip() == '':
id_list = []
else:
id_list = pubmed_ids.split('|')
for (i, val) in enumerate(id_list):
id_list[i] = 'PMID:' + val
return id_list | [
"\n Take a list of pubmed IDs and add PMID prefix\n Args:\n :param pubmed_ids - string representing publication\n ids seperated by a | symbol\n Returns:\n :return list: Pubmed curies\n\n "
]
|
Please provide a description of the function:def _getnode(self, curie):
if re.match(r'^_:', curie):
if self.are_bnodes_skized is True:
node = self.skolemizeBlankNode(curie)
else:
node = curie
elif re.match(r'^http|^ftp', curie):
node = curie
elif len(curie.split(':')) == 2:
node = StreamedGraph.curie_util.get_uri(curie)
else:
raise TypeError("Cannot process curie {}".format(curie))
return node | [
"\n Returns IRI, or blank node curie/iri depending on\n self.skolemize_blank_node setting\n\n :param curie: str id as curie or iri\n :return:\n "
]
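A hedged sketch of the dispatch order above; skolemization is omitted and the curie expansion is a toy stand-in, not the real CurieUtil behavior:

import re

CURIE_MAP = {'OMIM': 'http://omim.org/entry/'}  # stand-in for the real map

def getnode(curie):
    if re.match(r'^_:', curie):
        return curie  # blank node (the real code may skolemize it)
    if re.match(r'^http|^ftp', curie):
        return curie  # already an IRI
    if len(curie.split(':')) == 2:
        prefix, local = curie.split(':')
        return CURIE_MAP[prefix] + local  # curie -> IRI
    raise TypeError("Cannot process curie {}".format(curie))

print(getnode('OMIM:104300'))  # http://omim.org/entry/104300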
|
Please provide a description of the function:def _getLiteralXSDType(self, literal):
        if isinstance(literal, int):
            return self._getnode("xsd:integer")
        if isinstance(literal, float):
            return self._getnode("xsd:double")
        # strings and anything else get no explicit xsd type
        return None
"\n This could be much more nuanced, but for now\n if a literal is not a str, determine if it's\n a xsd int or double\n :param literal:\n :return: str - xsd full iri\n "
]
|
Please provide a description of the function:def add_assertion(self, assertion, agent, agent_label, date=None):
self.model.addIndividualToGraph(assertion, None, self.globaltt['assertion'])
self.add_agent_to_graph(agent, agent_label, self.globaltt['organization'])
self.graph.addTriple(
assertion, self.globaltt['created_by'], agent)
if date is not None:
            self.graph.addTriple(
                assertion, self.globaltt['date_created'], date)
return | [
"\n Add assertion to graph\n :param assertion:\n :param agent:\n :param evidence_line:\n :param date:\n :return: None\n "
]
|
Please provide a description of the function:def fetch(self, is_dl_forced=False):
(files_to_download, ftp) = self._get_file_list(
self.files['anat_entity']['path'],
self.files['anat_entity']['pattern'])
LOG.info(
'Will Check \n%s\nfrom %s',
'\n'.join(list(files_to_download)), ftp.getwelcome())
for dlname in files_to_download:
localfile = '/'.join((self.rawdir, dlname))
info = ftp.sendcmd("MLST {}".format(dlname)) # fetch remote file stats
info = info.split('\n')[1].strip() # drop pre & post script
info = info.split(';') # partition fields
            info = [item.strip() for item in info[:-1]]  # cleanup and drop final name
info = [item.split('=') for item in info] # make pairs
info = {item[0]: item[1] for item in info} # transform list to dict
LOG.info(
'%s\n'
'Remote File Size: %i\n'
'Remote timestamp: %s',
dlname, int(info['size']),
self._convert_ftp_time_to_iso(info['modify']))
if not os.path.exists(localfile) or is_dl_forced or \
self.checkIfRemoteIsNewer(
localfile, int(info['size']), info['modify']):
LOG.info("Fetching %s", dlname)
LOG.info("Writing to %s", localfile)
ftp.retrbinary('RETR {}'.format(dlname), open(localfile, 'wb').write)
remote_dt = Bgee._convert_ftp_time_to_iso(info['modify'])
os.utime(
localfile,
(time.mktime(remote_dt.timetuple()),
time.mktime(remote_dt.timetuple())))
ftp.quit()
return | [
"\n :param is_dl_forced: boolean, force download\n :return:\n "
]
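A hedged sketch of the MLST fact-line parsing done in fetch(); the reply string is a plausible example, not one captured from the Bgee server:

reply = ('250-Listing\n'
         ' size=12345;modify=20160705042714;type=file; some_file.tsv.gz\n'
         '250 End')
facts = reply.split('\n')[1].strip()                # drop pre & post script
facts = [f.strip() for f in facts.split(';')[:-1]]  # drop the file name
facts = dict(f.split('=') for f in facts)           # key=value pairs -> dict
print(facts)  # {'size': '12345', 'modify': '20160705042714', 'type': 'file'}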
|
Please provide a description of the function:def parse(self, limit=None):
files_to_download, ftp = self._get_file_list(
self.files['anat_entity']['path'],
self.files['anat_entity']['pattern'])
for dlname in files_to_download:
localfile = '/'.join((self.rawdir, dlname))
with gzip.open(localfile, 'rt', encoding='ISO-8859-1') as fh:
LOG.info("Processing %s", localfile)
self._parse_gene_anatomy(fh, limit)
return | [
"\n Given the input taxa, expects files in the raw directory\n with the name {tax_id}_anat_entity_all_data_Pan_troglodytes.tsv.zip\n\n :param limit: int Limit to top ranked anatomy associations per group\n :return: None\n "
]
|
Please provide a description of the function:def _parse_gene_anatomy(self, fh, limit):
dataframe = pd.read_csv(fh, sep='\t')
col = self.files['anat_entity']['columns']
if list(dataframe) != col:
LOG.warning(
                '\nExpected headers: %s\nReceived headers: %s', col, list(dataframe))
gene_groups = dataframe.sort_values(
'rank score', ascending=False).groupby('Ensembl gene ID')
if limit is None:
limit = 20
gene_groups = gene_groups.head(limit).groupby('Ensembl gene ID')
for gene, group in gene_groups:
for index, row in group.iterrows():
self._add_gene_anatomy_association(
row['Ensembl gene ID'].strip(),
row['anatomical entity ID'].strip(),
row['rank score']
)
        # uberon <==> bto equivalence?
return | [
"\n Process anat_entity files with columns:\n Ensembl gene ID,gene name, anatomical entity ID,\n anatomical entity name, rank score, XRefs to BTO\n\n :param fh: filehandle\n :param limit: int, limit per group\n :return: None\n "
]
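A hedged sketch of the sort-then-group-then-head logic with a tiny made-up frame (the real file carries more columns):

import pandas as pd

df = pd.DataFrame({
    'Ensembl gene ID': ['ENSG01', 'ENSG01', 'ENSG01', 'ENSG02'],
    'anatomical entity ID': ['UBERON:0000955', 'UBERON:0002107',
                             'UBERON:0002048', 'UBERON:0000955'],
    'rank score': [3.1, 9.7, 5.0, 4.2]})
top = (df.sort_values('rank score', ascending=False)
       .groupby('Ensembl gene ID')
       .head(2))  # keep the 2 best-ranked anatomy terms per gene
print(top)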
|
Please provide a description of the function:def _add_gene_anatomy_association(self, gene_id, anatomy_curie, rank):
g2a_association = Assoc(self.graph, self.name)
model = Model(self.graph)
gene_curie = "ENSEMBL:{}".format(gene_id)
        rank = re.sub(r',', '', str(rank))  # strip thousands separators; re needs a str
model.addIndividualToGraph(gene_curie, None)
g2a_association.sub = gene_curie
g2a_association.obj = anatomy_curie
g2a_association.rel = self.globaltt['expressed in']
g2a_association.add_association_to_graph()
g2a_association.add_predicate_object(
self.globaltt['has_quantifier'], float(rank), 'Literal', 'xsd:float')
return | [
"\n :param gene_id: str Non curified ID\n :param gene_label: str Gene symbol\n :param anatomy_curie: str curified anatomy term\n :param rank: str rank\n :return: None\n "
]
|
Please provide a description of the function:def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify):
is_remote_newer = False
status = os.stat(localfile)
LOG.info(
"\nLocal file size: %i"
"\nLocal Timestamp: %s",
status[ST_SIZE], datetime.fromtimestamp(status.st_mtime))
remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify)
if remote_dt != datetime.fromtimestamp(status.st_mtime) or \
status[ST_SIZE] != int(remote_size):
is_remote_newer = True
LOG.info(
"Object on server is has different size %i and/or date %s",
remote_size, remote_dt)
return is_remote_newer | [
"\n Overrides checkIfRemoteIsNewer in Source class\n\n :param localfile: str file path\n :param remote_size: str bytes\n :param remote_modify: str last modify date in the form 20160705042714\n :return: boolean True if remote file is newer else False\n "
]
|
Please provide a description of the function:def _convert_ftp_time_to_iso(ftp_time):
date_time = datetime(
int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]),
int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14]))
return date_time | [
"\n Convert datetime in the format 20160705042714 to a datetime object\n\n :return: datetime object\n "
]
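A usage example of the conversion above, with the timestamp format from the docstring:

from datetime import datetime

ftp_time = '20160705042714'
date_time = datetime(
    int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]),
    int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14]))
print(date_time)  # 2016-07-05 04:27:14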
|
Please provide a description of the function:def _get_file_list(self, working_dir, file_regex=re.compile(r'.*'), ftp=None):
if ftp is None:
ftp = ftplib.FTP(BGEE_FTP)
ftp.login("anonymous", "[email protected]")
working_dir = "{}{}".format(self.version, working_dir)
LOG.info('Looking for remote files in %s', working_dir)
ftp.cwd(working_dir)
remote_files = ftp.nlst()
# LOG.info('All remote files \n%s', '\n'.join(remote_files))
files_to_download = [
dnload for dnload in remote_files if re.match(file_regex, dnload) and
re.findall(r'^\d+', dnload)[0] in self.tax_ids]
# LOG.info('Choosing remote files \n%s', '\n'.join(list(files_to_download)))
return files_to_download, ftp | [
"\n Get file list from ftp server filtered by taxon\n :return: Tuple of (Generator object with Tuple(\n file name, info object), ftp object)\n "
]
|