Please provide a description of the function:

def fetch_from_pgdb(self, tables, cxn, limit=None, force=False):
    """
    Will fetch all Postgres tables from the specified database
    in the cxn connection parameters.
    This will save them to a local file named the same as the table,
    in tab-delimited format, including a header.
    :param tables: Names of tables to fetch
    :param cxn: database connection details
    :param limit: A max row count to fetch for each table
    :return: None
    """
    con = None
    try:
        con = psycopg2.connect(
            host=cxn['host'], database=cxn['database'], port=cxn['port'],
            user=cxn['user'], password=cxn['password'])
        cur = con.cursor()
        for tab in tables:
            LOG.info("Fetching data from table %s", tab)
            self._getcols(cur, tab)
            query = ' '.join(("SELECT * FROM", tab))
            countquery = ' '.join(("SELECT COUNT(*) FROM", tab))
            if limit is not None:
                query = ' '.join((query, "LIMIT", str(limit)))
                countquery = ' '.join((countquery, "LIMIT", str(limit)))

            outfile = '/'.join((self.rawdir, tab))

            filerowcount = -1
            tablerowcount = -1
            if not force:
                # check local copy.  assume that if the # rows are the same,
                # that the table is the same
                # TODO may want to fix this assumption
                if os.path.exists(outfile):
                    # get rows in the file
                    filerowcount = self.file_len(outfile)
                    LOG.info(
                        "(%s) rows in local file for table %s",
                        filerowcount, tab)

                # get rows in the table
                # tablerowcount=cur.rowcount
                cur.execute(countquery)
                tablerowcount = cur.fetchone()[0]

            # rowcount-1 because there's a header
            if force or filerowcount < 0 or (filerowcount - 1) != tablerowcount:
                if force:
                    LOG.info("Forcing download of %s", tab)
                else:
                    LOG.info(
                        "%s local (%d) different from remote (%d); fetching.",
                        tab, filerowcount, tablerowcount)
                # download the file
                LOG.info("COMMAND:%s", query)
                outputquery = (
                    "COPY ({0}) TO STDOUT WITH DELIMITER AS '\t' "
                    "CSV HEADER").format(query)
                with open(outfile, 'w') as f:
                    cur.copy_expert(outputquery, f)
            else:
                LOG.info("local data same as remote; reusing.")
    finally:
        if con:
            con.close()
    return
Please provide a description of the function:

def fetch_query_from_pgdb(self, qname, query, con, cxn, limit=None, force=False):
    """
    Supply either an already established connection, or connection parameters.
    The supplied connection will override any separate cxn parameter.
    :param qname: The name of the query to save the output to
    :param query: The SQL query itself
    :param con: The already-established connection
    :param cxn: The postgres connection information
    :param limit: If you only want a subset of rows from the query
    :return:
    """
    if con is None and cxn is None:
        LOG.error("ERROR: you need to supply connection information")
        return
    if con is None and cxn is not None:
        con = psycopg2.connect(
            host=cxn['host'], database=cxn['database'], port=cxn['port'],
            user=cxn['user'], password=cxn['password'])

    outfile = '/'.join((self.rawdir, qname))
    cur = con.cursor()
    # wrap the query to get the count
    countquery = ' '.join(("SELECT COUNT(*) FROM (", query, ") x"))
    if limit is not None:
        countquery = ' '.join((countquery, "LIMIT", str(limit)))

    # check local copy.
    # assume that if the # rows are the same, that the table is the same.
    # TEC - opinion:
    # the only thing to assume is that if the counts are different,
    # the data could not be the same.
    # i.e: for MGI, the dbinfo table has a single row that changes;
    # to check if they are the same, sort & compare digests.
    filerowcount = -1
    tablerowcount = -1
    if not force:
        if os.path.exists(outfile):
            # get rows in the file
            filerowcount = self.file_len(outfile)
            LOG.info("INFO: rows in local file: %s", filerowcount)

        # get rows in the table
        # tablerowcount=cur.rowcount
        cur.execute(countquery)
        tablerowcount = cur.fetchone()[0]

    # rowcount-1 because there's a header
    if force or filerowcount < 0 or (filerowcount - 1) != tablerowcount:
        if force:
            LOG.info("Forcing download of %s", qname)
        else:
            LOG.info(
                "%s local (%s) different from remote (%s); fetching.",
                qname, filerowcount, tablerowcount)
        # download the file
        LOG.debug("COMMAND:%s", query)
        outputquery = (
            "COPY ({0}) TO STDOUT WITH DELIMITER AS '\t' "
            "CSV HEADER").format(query)
        with open(outfile, 'w') as f:
            cur.copy_expert(outputquery, f)
        # regenerate row count to check integrity
        filerowcount = self.file_len(outfile)
        if (filerowcount - 1) < tablerowcount:
            raise Exception(
                "Download from {} failed, {} != {}".format(
                    cxn['host'] + ':' + cxn['database'],
                    filerowcount - 1, tablerowcount))
        elif (filerowcount - 1) > tablerowcount:
            LOG.warning(
                "Fetched from %s more rows in file (%s) than reported in count (%s)",
                cxn['host'] + ':' + cxn['database'],
                filerowcount - 1, tablerowcount)
    else:
        LOG.info("local data same as remote; reusing.")
    return
Please provide a description of the function:

def _getcols(cur, table):
    """
    Will execute a pg query to get the column names for the given table.
    :param cur:
    :param table:
    :return:
    """
    query = ' '.join(("SELECT * FROM", table, "LIMIT 0"))  # for testing
    cur.execute(query)
    colnames = [desc[0] for desc in cur.description]
    LOG.info("COLS (%s): %s", table, colnames)
    return
Please provide a description of the function:

def fetch(self, is_dl_forced=False):
    '''connection details for DISCO'''
    cxn = {}
    cxn['host'] = 'nif-db.crbs.ucsd.edu'
    cxn['database'] = 'disco_crawler'
    cxn['port'] = '5432'
    cxn['user'] = config.get_config()['user']['disco']
    cxn['password'] = config.get_config()['keys'][cxn['user']]

    self.dataset.setFileAccessUrl(
        'jdbc:postgresql://' + cxn['host'] + ':' + cxn['port'] + '/' + cxn['database'],
        is_object_literal=True)

    # process the tables
    # self.fetch_from_pgdb(self.tables, cxn, 100)  # for testing
    self.fetch_from_pgdb(self.tables, cxn)

    self.get_files(is_dl_forced)

    # FIXME: Everything needed for data provenance?
    fstat = os.stat('/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')))
    filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d")
    self.dataset.setVersion(filedate)
    return
Please provide a description of the function:

def parse(self, limit=None):
    '''Override Source.parse inherited via PostgreSQLSource'''
    if limit is not None:
        LOG.info("Only parsing first %s rows of each file", limit)

    if self.test_only:
        self.test_mode = True

    LOG.info("Parsing files...")

    self._process_nlx_157874_1_view(
        '/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')), limit)
    self._map_eom_terms(
        '/'.join((self.rawdir, self.files['map']['file'])), limit)

    LOG.info("Finished parsing.")

    # since it's so small,
    # we default to copying the entire graph to the test set
    self.testgraph = self.graph
    return
Please provide a description of the function:

def _process_nlx_157874_1_view(self, raw, limit=None):
    """
    This table contains the Elements of Morphology data that has been
    screen-scraped into DISCO.
    Note that foaf:depiction is the inverse of the foaf:depicts relationship.

    Since it is bad form to have two definitions,
    we concatenate the two into one string.

    Turtle:
        <eom id> a owl:Class
            rdf:label Literal(eom label)
            oboInOwl:has_related_synonym Literal(synonym list)
            IAO:definition Literal(objective_def. subjective def)
            foaf:depiction Literal(small_image_url),
                Literal(large_image_url)
            foaf:page Literal(page_url)
            rdfs:comment Literal(long commented text)

    :param raw:
    :param limit:
    :return:
    """
    model = Model(self.graph)
    with open(raw, 'r') as f1:
        f1.readline()  # read the header row; skip
        reader = csv.reader(f1, delimiter='\t', quotechar='\"')
        for line in reader:
            (morphology_term_id, morphology_term_num,
             morphology_term_label, morphology_term_url,
             terminology_category_label, terminology_category_url,
             subcategory, objective_definition, subjective_definition,
             comments, synonyms, replaces, small_figure_url,
             large_figure_url, e_uid, v_uid, v_uuid,
             v_last_modified, v_status, v_lastmodified_epoch) = line
            # note:
            # e_uid v_uuid v_last_modified terminology_category_url
            # subcategory v_uid morphology_term_num
            # terminology_category_label hp_label notes
            # are currently unused.

            # Add morphology term to graph as a class
            # with label, type, and description.
            model.addClassToGraph(morphology_term_id, morphology_term_label)

            # Assemble the description text
            if subjective_definition != '' and not (
                    re.match(r'.+\.$', subjective_definition)):
                # add a trailing period.
                subjective_definition = subjective_definition.strip() + '.'
            if objective_definition != '' and not (
                    re.match(r'.+\.$', objective_definition)):
                # add a trailing period.
                objective_definition = objective_definition.strip() + '.'

            definition = ' '.join(
                (objective_definition, subjective_definition)).strip()
            model.addDefinition(morphology_term_id, definition)

            # <term id> FOAF:depicted_by literal url
            # <url> type foaf:depiction

            # do we want both images?
            # morphology_term_id has depiction small_figure_url
            if small_figure_url != '':
                model.addDepiction(morphology_term_id, small_figure_url)
            # morphology_term_id has depiction large_figure_url
            if large_figure_url != '':
                model.addDepiction(morphology_term_id, large_figure_url)

            # morphology_term_id has comment comments
            if comments != '':
                model.addComment(morphology_term_id, comments.strip())

            for syn in synonyms.split(';'):
                model.addSynonym(
                    morphology_term_id, syn.strip(),
                    self.globaltt['has_exact_synonym'])

            # morphology_term_id has_related_synonym replaces (; delimited)
            if replaces != '' and replaces != synonyms:
                for syn in replaces.split(';'):
                    model.addSynonym(
                        morphology_term_id, syn.strip(),
                        self.globaltt['has_related_synonym'])

            # <morphology_term_id> <foaf:page> morphology_term_url
            if morphology_term_id is not None and morphology_term_url is not None:
                reference = Reference(
                    self.graph, morphology_term_url, self.globaltt['web page'])
                reference.addPage(morphology_term_id, morphology_term_url)

            if limit is not None and reader.line_num > limit:
                break
    return
Please provide a description of the function:

def _map_eom_terms(self, raw, limit=None):
    """
    This table contains the HP ID mappings from the local tsv file.
    Triples:
        <eom id> owl:equivalentClass <hp id>
    :param raw:
    :param limit:
    :return:
    """
    model = Model(self.graph)
    line_counter = 0
    with open(raw, 'r') as f1:
        f1.readline()  # read the header row; skip
        for line in f1:
            line_counter += 1
            row = line.split('\t')
            (morphology_term_id, morphology_term_label,
             hp_id, hp_label, notes) = row

            # Sub out the underscores for colons.
            hp_id = re.sub('_', ':', hp_id)
            if re.match(".*HP:.*", hp_id):
                # add the HP term as a class
                model.addClassToGraph(hp_id, None)
                # Add the HP ID as an equivalent class
                model.addEquivalentClass(morphology_term_id, hp_id)
            else:
                LOG.warning('No matching HP term for %s', morphology_term_label)

            if limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def get_symbol_id_map(self):
    """
    A convenience method to create a mapping between the HGNC
    symbols and their identifiers.
    :return:
    """
    symbol_id_map = {}
    f = '/'.join((self.rawdir, self.files['genes']['file']))
    with open(f, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            (hgnc_id, symbol, name, locus_group, locus_type, status,
             location, location_sortable, alias_symbol, alias_name,
             prev_symbol, prev_name, gene_family, gene_family_id,
             date_approved_reserved, date_symbol_changed, date_name_changed,
             date_modified, entrez_id, ensembl_gene_id, vega_id, ucsc_id,
             ena, refseq_accession, ccds_id, uniprot_ids, pubmed_id, mgd_id,
             rgd_id, lsdb, cosmic, omim_id, mirbase, homeodb, snornabase,
             bioparadigms_slc, orphanet, pseudogene_org, horde_id, merops,
             imgt, iuphar, kznf_gene_catalog, mamit_trnadb, cd, lncrnadb,
             enzyme_id, intermediate_filament_db) = row

            symbol_id_map[symbol.strip()] = hgnc_id
    return symbol_id_map
Please provide a description of the function:

def fetch(self, is_dl_forced=False):
    """
    For the MGI resource, we connect to the remote database,
    and pull the tables into local files.
    We'll check the local table versions against the remote version.
    :return:
    """
    # check if config exists; if it doesn't, error out and let user know
    if 'dbauth' not in config.get_config() or \
            'mgi' not in config.get_config()['dbauth']:
        LOG.error("not configured with PG user/password.")

    # create the connection details for MGI
    cxn = config.get_config()['dbauth']['mgi']
    self.dataset.setFileAccessUrl(''.join((
        'jdbc:postgresql://', cxn['host'], ':', str(cxn['port']), '/',
        cxn['database'])), is_object_literal=True)

    # process the tables
    # self.fetch_from_pgdb(self.tables, cxn, 100)  # for testing only
    # self.fetch_from_pgdb(self.tables, cxn, None, is_dl_forced)

    for query_map in self.resources['query_map']:
        query_fh = open(os.path.join(
            os.path.dirname(__file__), query_map['query']), 'r')
        query = query_fh.read()
        force = False
        if 'Force' in query_map:
            force = query_map['Force']
        self.fetch_query_from_pgdb(
            query_map['outfile'], query, None, cxn, force=force)

    # always get this - it has the version info
    self.fetch_transgene_genes_from_db(cxn)

    datestamp = ver = None
    # get the resource version information from
    # table mgi_dbinfo, already fetched above
    outfile = '/'.join((self.rawdir, 'mgi_dbinfo'))

    if os.path.exists(outfile):
        with open(outfile, 'r') as f:
            f.readline()  # read the header row; skip
            info = f.readline()
            cols = info.split('\t')
            ver = cols[0]  # col 0 is public_version
            ver = ver.replace('MGI ', '')  # MGI 5.20 --> 5.20
            # MGI has a datestamp for the data within the database;
            # use it instead of the download date
            # datestamp in the table: 2014-12-23 00:14:20[.12345]
            # modification date without microseconds
            dat = cols[1].strip().split('.')[0]
            datestamp = datetime.strptime(
                dat, "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
            f.close()

    self.dataset.setVersion(datestamp, ver)
    return
Please provide a description of the function:

def parse(self, limit=None):
    """
    We process each of the postgres tables in turn.
    The order of processing is important here, as we build
    up a hashmap of internal vs external identifiers
    (unique keys by type to MGI id).  These include allele, marker (gene),
    publication, strain, genotype, annotation (association),
    and descriptive notes.
    :param limit: Only parse this many lines of each table
    :return:
    """
    if limit is not None:
        LOG.info("Only parsing first %d rows of each file", limit)
    LOG.info("Parsing files...")

    if self.test_only:
        self.test_mode = True

    # the following will provide us the hash-lookups
    # These must be processed in a specific order
    self._process_prb_strain_acc_view(limit)
    self._process_mrk_acc_view()
    self._process_all_summary_view(limit)
    self._process_bib_acc_view(limit)
    self._process_gxd_genotype_summary_view(limit)

    # The following will use the hash populated above
    # to lookup the ids when filling in the graph
    self._process_prb_strain_view(limit)
    # self._process_prb_strain_genotype_view(limit)
    self._process_gxd_genotype_view(limit)
    self._process_mrk_marker_view(limit)
    self._process_mrk_acc_view_for_equiv(limit)
    self._process_mrk_summary_view(limit)
    self._process_all_allele_view(limit)
    self._process_all_allele_mutation_view(limit)
    self._process_gxd_allele_pair_view(limit)
    self._process_voc_annot_view(limit)
    self._process_evidence_view(limit)
    self._process_mgi_note_vocevidence_view(limit)
    self._process_mrk_location_cache(limit)
    self.process_mgi_relationship_transgene_genes(limit)
    self.process_mgi_note_allele_view(limit)

    LOG.info("Finished parsing.")
    LOG.info("Loaded %d nodes", len(self.graph))
    return
Please provide a description of the function:

def _process_gxd_genotype_view(self, limit=None):
    """
    This table indicates the relationship between a genotype
    and its background strain.  It leverages the Genotype class methods
    to do this.

    Makes these triples:
        <MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
        <MGI:strainid> a GENO:genomic_background

    If the genotype id isn't in the hashmap, it adds it here
    (but this shouldn't happen):
        <MGI:genotypeid> a GENO:genotype

    If the strain isn't in the hashmap, it also adds it here with a
    monarchized identifier using the unique key of the strain,
    formatted like: :_mgistrainkey12345

    :param limit:
    :return:
    """
    line_counter = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    geno = Genotype(graph)
    model = Model(graph)
    raw = '/'.join((self.rawdir, 'gxd_genotype_view'))
    LOG.info("getting genotypes and their backgrounds")
    with open(raw, 'r') as f1:
        f1.readline()  # read the header row; skip
        for line in f1:
            line = line.rstrip("\n")
            line_counter += 1
            (genotype_key, strain_key, strain, mgiid) = line.split('\t')
            if self.test_mode is True:
                if int(genotype_key) not in self.test_keys.get('genotype'):
                    continue

            if self.idhash['genotype'].get(genotype_key) is None:
                # just in case we haven't seen it before,
                # catch and add the id mapping here
                self.idhash['genotype'][genotype_key] = mgiid
                geno.addGenotype(mgiid, None)
                # the label is elsewhere...
                # need to add the MGI label as a synonym

            # if it's in the hash,
            # assume that the individual was created elsewhere
            strain_id = self.idhash['strain'].get(strain_key)
            background_type = self.globaltt['genomic_background']
            if strain_id is None or int(strain_key) < 0:
                if strain_id is None:
                    # some of the strains don't have public identifiers!
                    # so we make one up, and add it to the hash
                    strain_id = self._makeInternalIdentifier('strain', strain_key)
                    self.idhash['strain'].update({strain_key: strain_id})
                    model.addComment(strain_id, "strain_key:" + strain_key)
                elif int(strain_key) < 0:
                    # these are ones that are unidentified/unknown.
                    # so add instances of each.
                    strain_id = self._makeInternalIdentifier(
                        'strain', re.sub(r':', '', str(strain_id)))
                    strain_id += re.sub(r':', '', str(mgiid))
                    strain_id = re.sub(r'^_', '_:', strain_id)
                    strain_id = re.sub(r'::', ':', strain_id)
                    model.addDescription(
                        strain_id,
                        "This genomic background is unknown. " +
                        "This is a placeholder background for " + mgiid + ".")
                    background_type = self.globaltt[
                        'unspecified_genomic_background']
                # add it back to the idhash
                LOG.info(
                    "adding background as internal id: %s %s: %s",
                    strain_key, strain, strain_id)

            geno.addGenomicBackgroundToGenotype(
                strain_id, mgiid, background_type)
            self.label_hash[strain_id] = strain

            # add BG to a hash so we can build the genotype label later
            self.geno_bkgd[mgiid] = strain_id

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_gxd_genotype_summary_view(self, limit=None):
    """
    Add the genotype internal id to mgiid mapping to the idhashmap.
    Also, add them as individuals to the graph.
    We re-format the label to put the background strain in brackets
    after the gvc.

    We must pass through the file once to get the ids and
    aggregate the vslcs into a hashmap into the genotype.

    Triples created:
        <genotype id> a GENO:intrinsic_genotype
        <genotype id> rdfs:label "<gvc> [bkgd]"

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    geno_hash = {}
    raw = '/'.join((self.rawdir, 'gxd_genotype_summary_view'))
    LOG.info("building labels for genotypes")
    with open(raw, 'r') as f:
        f.readline()  # read the header row; skip
        for line in f:
            line = line.rstrip("\n")
            line_counter += 1
            (object_key, preferred, mgiid, subtype, short_description) = \
                line.split('\t')
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('genotype'):
                    continue

            # add the internal genotype to mgi mapping
            self.idhash['genotype'][object_key] = mgiid

            if preferred == '1':
                d = re.sub(r'\,', '/', short_description.strip())
                if mgiid not in geno_hash:
                    geno_hash[mgiid] = {
                        'vslcs': [d], 'subtype': subtype, 'key': object_key}
                else:
                    vslcs = geno_hash[mgiid].get('vslcs')
                    vslcs.append(d)
            else:
                pass  # TODO what to do with != preferred

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    # now, loop through the hash and add the genotypes as individuals;
    # we add the mgi genotype as a synonym
    # (we generate our own label later)
    geno = Genotype(graph)
    for gt in geno_hash:
        genotype = geno_hash.get(gt)
        gvc = sorted(genotype.get('vslcs'))
        label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
        geno.addGenotype(gt, None)
        model.addComment(
            gt, self._makeInternalIdentifier('genotype', genotype.get('key')))
        model.addSynonym(gt, label.strip())
    return
Please provide a description of the function:

def _process_all_summary_view(self, limit):
    """
    Here, we get the allele definitions: id, label, description, type.
    We also add the id to this source's global idhash for lookup later.

        <alleleid> a OWL:NamedIndividual
            rdf:label "allele symbol"
            dc:description "long allele name"

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    raw = '/'.join((self.rawdir, 'all_summary_view'))
    LOG.info("alleles with labels and descriptions from all_summary_view")
    with open(raw, 'r') as f:
        col_count = f.readline().count('\t')  # read the header row; skip
        # head -1 workspace/build-mgi-ttl/dipper/raw/mgi/all_summary_view | \
        #   tr '\t' '\n' | grep -n . | \
        #   awk -F':' '{col=$1;$1="";print $0,",\t #" col}'
        for line in f:
            line = line.rstrip("\n")
            line_counter += 1
            cols = line.count('\t')
            # bail if the row is malformed
            if cols != col_count:
                LOG.warning('Expected ' + str(col_count) + ' columns.')
                LOG.warning('Received ' + str(cols) + ' columns.')
                LOG.warning(line.format())
                continue
            # no stray tab in the description column
            (object_key, preferred, mgiid, description, short_description) = \
                line.split('\t')
            # NOTE: May want to filter alleles based on the preferred field
            # (preferred = 1) or will get duplicates
            # (24288, to be exact...
            # Reduced to 480 if filtered on preferred = 1)

            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('allele'):
                    continue

            # we are setting the allele type to None,
            # so that we can add the type later,
            # since we don't actually know
            # if it's a reference or altered allele
            altype = None  # temporary; we'll assign the type later

            # If we want to filter on preferred:
            if preferred == '1':
                # add the allele key to the hash for later lookup
                self.idhash['allele'][object_key] = mgiid
                # TODO consider not adding the individuals in this one
                model.addIndividualToGraph(
                    mgiid, short_description.strip(), altype,
                    description.strip())
                self.label_hash[mgiid] = short_description.strip()

            # TODO deal with non-preferreds, are these deprecated?

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_all_allele_view(self, limit):
    """
    Add the allele as a variant locus (or reference locus if wild-type).
    If the marker is specified, we add the link to the marker.
    We assume that the MGI ids are available in the idhash,
    added in all_summary_view.
    We add the sequence alteration as a BNode here, if there is a marker.
    Otherwise, the allele itself is a sequence alteration.

    Triples:
        <MGI:allele_id> a GENO:variant_locus
            OR GENO:reference_locus
            OR GENO:sequence_alteration IF no marker_id specified.

            [GENO:has_variant_part OR GENO:has_reference_part] <MGI:marker_id>
            GENO:derived_from <MGI:strain_id>
            GENO:has_variant_part <_seq_alt_id>
        <_seq_alt_id> a GENO:sequence_alteration
            derives_from <strain_id>

    :param limit:
    :return:
    """
    # transmission_key -> inheritance? Need to locate related table.
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    geno = Genotype(graph)
    line_counter = 0
    LOG.info(
        "adding alleles, mapping to markers, "
        "extracting their sequence alterations from all_allele_view")
    raw = '/'.join((self.rawdir, 'all_allele_view'))
    with open(raw, 'r') as f:
        col_count = f.readline().count('\t')  # read the header row; skip
        for line in f:
            line = line.rstrip("\n")
            line_counter += 1
            cols = line.count('\t')
            # bail if the row is malformed
            if cols != col_count:
                LOG.warning('Expected ' + str(col_count) + ' columns.')
                LOG.warning('Received ' + str(cols) + ' columns.')
                LOG.warning(line.format())
                continue
            (allele_key, marker_key, strain_key, symbol, name, iswildtype) = \
                line.split('\t')
            # TODO update processing to use this view better,
            # including jnums!
            if self.test_mode is True:
                if int(allele_key) not in self.test_keys.get('allele'):
                    continue

            allele_id = self.idhash['allele'].get(allele_key)
            if allele_id is None:
                LOG.error(
                    "what to do! can't find allele_id. skipping %s %s",
                    allele_key, symbol)
                continue

            marker_id = None
            if marker_key is not None and marker_key != '':
                # we make the assumption here that the markers
                # have already been added to the table
                marker_id = self.idhash['marker'].get(marker_key)
                if marker_id is None:
                    LOG.error(
                        "what to do! can't find marker_id. skipping %s %s",
                        marker_key, symbol)
                    continue

            iseqalt_id = self._makeInternalIdentifier('seqalt', allele_key)

            # for non-wild type alleles:
            if iswildtype == '0':
                locus_type = self.globaltt['variant_locus']
                locus_rel = self.globaltt['is_allele_of']
            # for wild type alleles:
            elif iswildtype == '1':
                locus_type = self.globaltt['reference_locus']
                locus_rel = self.globaltt['is_reference_allele_of']
                # add the allele to the wildtype set for lookup later
                self.wildtype_alleles.add(allele_id)
            else:
                locus_rel = None
                locus_type = None

            model.addIndividualToGraph(allele_id, symbol, locus_type)
            model.makeLeader(allele_id)
            self.label_hash[allele_id] = symbol
            self.idhash['seqalt'][allele_key] = iseqalt_id

            # HACK - if the label of the allele == marker,
            # then make the thing a seq alt
            allele_label = self.label_hash.get(allele_id)
            marker_label = self.label_hash.get(marker_id)
            if allele_label is not None and allele_label == marker_label:
                model.addSameIndividual(allele_id, marker_id)
                self.idhash['seqalt'][allele_key] = allele_id
                model.addComment(
                    allele_id,
                    self._makeInternalIdentifier('allele', allele_key))
            elif marker_id is not None:
                # marker_id will be none if the allele
                # is not linked to a marker
                # (as in, it's not mapped to a locus)
                geno.addAlleleOfGene(allele_id, marker_id, locus_rel)

            # sequence alteration in strain
            if iswildtype == '0':
                sa_label = symbol
                sa_id = iseqalt_id

                if marker_key is not None \
                        and allele_label != marker_label and marker_key != '':
                    # sequence alteration has label reformatted(symbol)
                    if re.match(r".*<.*>.*", symbol):
                        sa_label = re.sub(r".*<", "<", symbol)
                    elif re.match(r"\+", symbol):
                        # TODO: Check to see if this is the proper handling,
                        # as while symbol is just +,
                        # marker symbol has entries without any <+>.
                        sa_label = '<+>'
                    geno.addSequenceAlterationToVariantLocus(
                        iseqalt_id, allele_id)
                else:
                    # make the sequence alteration == allele
                    sa_id = allele_id

                # else this will end up adding the non-located transgenes
                # as sequence alterations; also removing the < and > from sa
                sa_label = re.sub(r'[\<\>]', '', sa_label)
                # gu.addIndividualToGraph(graph, sa_id, sa_label, None, name)
                geno.addSequenceAlteration(sa_id, sa_label, None, name)
                self.label_hash[sa_id] = sa_label

            strain_id = self.idhash['strain'].get(strain_key)
            # scrub out if the strain is "not specified"
            if strain_id is not None and \
                    strain_id not in ['MGI:4867032', 'MGI:5649511']:
                geno.addSequenceDerivesFrom(allele_id, strain_id)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_all_allele_mutation_view(self, limit):
    """
    This fetches the mutation type for the alleles,
    and maps them to the sequence alteration.
    Note that we create a BNode for the sequence alteration because
    it isn't publicly identified.
        <sequence alteration id> a <SO:mutation_type>

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    raw = '/'.join((self.rawdir, 'all_allele_mutation_view'))
    LOG.info("getting mutation types for sequence alterations")
    with open(raw, 'r') as f:
        f.readline()  # read the header row; skip
        for line in f:
            line = line.rstrip("\n")
            line_counter += 1
            (allele_key, mutation) = line.split('\t')

            iseqalt_id = self.idhash['seqalt'].get(allele_key)
            if iseqalt_id is None:
                iseqalt_id = self._makeInternalIdentifier('seqalt', allele_key)

            if self.test_mode and int(allele_key) \
                    not in self.test_keys.get('allele'):
                continue

            # TODO we might need to map the seq alteration to the MGI id
            # for unlocated things; need to use hashmap

            # map the sequence_alteration_type
            seq_alt_type_id = self.resolve(mutation, False)
            if seq_alt_type_id == mutation:
                LOG.error("No mapping found for seq alt '%s'", mutation)
                LOG.info("Defaulting to 'sequence_alteration'")
                seq_alt_type_id = self.globaltt['sequence_alteration']

            # HACK - if the seq alteration is a transgene,
            # then make sure it is a transgenic insertion
            allele_id = self.idhash['allele'].get(allele_key)
            if allele_id is not None:
                allele_label = self.label_hash.get(allele_id)
                if allele_label is not None and re.search(r'Tg\(', allele_label):
                    LOG.info(
                        "Found a transgenic insertion for %s", allele_label)
                    # transgenic_insertion, instead of plain old insertion
                    seq_alt_type_id = self.globaltt["transgenic_insertion"]

            model.addIndividualToGraph(iseqalt_id, None, seq_alt_type_id)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_voc_annot_view(self, limit):
    """
    This MGI table represents associations between things.

    We add the internal annotation id to the idhashmap.
    It is expected that the genotypes have already been added to the idhash.

    :param limit:
    :return:
    """
    # TODO also get Strain/Attributes (annottypekey = 1000)
    # TODO what is Phenotype (Derived) vs
    #   non-derived? (annottypekey = 1015)
    # TODO is evidence in this table? what is the evidence vocab key?
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    LOG.info("getting G2P associations")
    raw = '/'.join((self.rawdir, 'voc_annot_view'))
    col = [
        'annot_key', 'annot_type', 'object_key', 'term_key',
        'qualifier_key', 'qualifier', 'term', 'accid']
    with open(raw, 'r') as f:
        header = f.readline().rstrip('\n').split('\t')
        if header != col:
            LOG.error("\nExpected header: %s\nReceived header: %s", col, header)
        for line in f:
            row = line.rstrip('\n').split('\t')
            annot_key = row[col.index('annot_key')]
            annot_type = row[col.index('annot_type')]
            object_key = row[col.index('object_key')]
            term_key = row[col.index('term_key')]
            qualifier_key = row[col.index('qualifier_key')]
            # qualifier,
            # term,
            accid = row[col.index('accid')]

            if self.test_mode is True:
                if int(annot_key) not in self.test_keys.get('annot'):
                    continue

            # iassoc_id = self._makeInternalIdentifier('annot', annot_key)
            # assoc_id = self.make_id(iassoc_id)
            assoc_id = None
            # Mammalian Phenotype/Genotype are curated G2P assoc
            if annot_type == 'Mammalian Phenotype/Genotype':
                line_counter += 1
                # We expect the label for the phenotype
                # to be taken care of elsewhere
                model.addClassToGraph(accid, None)

                genotype_id = self.idhash['genotype'].get(object_key)
                if genotype_id is None:
                    LOG.error("can't find genotype id for %s", object_key)
                else:
                    # add the association
                    assoc = G2PAssoc(graph, self.name, genotype_id, accid)
                    assoc.add_association_to_graph()
                    assoc_id = assoc.get_association_id()
            # OMIM/Genotype are disease-models
            elif annot_type == 'DO/Genotype':
                # skip NOT annotations for now FIXME
                if qualifier_key == '1614157':
                    continue

                genotype_id = self.idhash['genotype'].get(object_key)
                if genotype_id is None:
                    LOG.error("can't find genotype id for %s", object_key)
                else:
                    # add the association
                    assoc = Assoc(graph, self.name)
                    # TODO PYLINT
                    # Redefinition of assoc type from
                    # dipper.models.assoc.G2PAssoc.G2PAssoc to
                    # dipper.models.assoc.Association.Assoc
                    assoc.set_subject(genotype_id)
                    assoc.set_object(accid)
                    assoc.set_relationship(self.globaltt['is model of'])
                    assoc.add_association_to_graph()
                    assoc_id = assoc.get_association_id()
            elif annot_type == 'MCV/Marker':
                # marker category == type
                marker_id = self.idhash['marker'].get(object_key)
                if str(term_key).strip() in self.localtt:
                    term_id = self.resolve(str(term_key).strip())
                else:
                    term_id = None
                    LOG.warning('No type mapping for: %s', term_key)

                # note that the accid here is an internal mouse cv term,
                # and we don't use it.
                if term_id is not None and marker_id is not None:
                    # do something special for transgenics -
                    # make sure these are transgenic insertions
                    model.addType(marker_id, term_id)
            elif annot_type == 'DO/Allele':  # allele/Disease
                allele_id = self.idhash['allele'].get(object_key)
                if allele_id is None:
                    LOG.error("can't find allele id for %s", object_key)
                else:
                    # add the association
                    assoc = Assoc(graph, self.name)
                    assoc.set_subject(allele_id)
                    assoc.set_object(accid)
                    assoc.set_relationship(self.globaltt['is model of'])
                    assoc.add_association_to_graph()
                    assoc_id = assoc.get_association_id()

            if assoc_id is not None:
                # add the assoc to the hashmap (using the monarch id)
                self.idhash['annot'][annot_key] = assoc_id
                model.addComment(assoc_id, "annot_key:" + annot_key)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_evidence_view(self, limit):
    """
    Here we fetch the evidence (code and publication) for the associations.
    The evidence codes are mapped from the standard GO codes to ECO.
    J numbers are added for publications.
    We will only add the evidence if the annotation is in our idhash.

    We also pull in evidence qualifiers; as of June 2018 they are
        Data Interpretation Center (eg IMPC)
        external ref (eg UniProtKB:Q9JHI2-3 for Proteoform/Marker assoc)
        Phenotyping Center (eg WTSI)
        Resource Name (eg MGP)
        MP-Sex-Specificity (eg NA, M, F)

    Triples:
        <annot_id> dc:evidence <evidence_id>
        <pub_id> a owl:NamedIndividual
        <annot_id> dc:source <pub_id>

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    LOG.info("getting evidence and pubs for annotations")
    raw = '/'.join((self.rawdir, 'evidence_view'))
    col = [
        'annot_evidence_key', 'annot_key', 'evidence_code', 'jnumid',
        'qualifier', 'qualifier_value', 'annotation_type']
    with open(raw, 'r') as reader:
        reader.readline()  # read the header row; skip
        for line in reader:
            line = line.rstrip("\n")
            line_counter += 1
            row = line.split('\t')
            annot_evidence_key = row[col.index('annot_evidence_key')]
            annot_key = int(row[col.index('annot_key')])
            evidence_code = row[col.index('evidence_code')]
            jnumid = row[col.index('jnumid')]
            qualifier = row[col.index('qualifier')]
            qualifier_value = row[col.index('qualifier_value')]
            # annotation_type = row[col.index('annotation_type')]

            if self.test_mode and annot_key not in self.test_keys.get('annot'):
                continue

            # add the association id to map to the evidence key
            # (to attach the right note to the right assn)
            self.idhash['notes'][annot_evidence_key] = annot_key

            assoc_id = self.idhash['annot'].get(annot_key)

            if assoc_id is None:
                # assume that we only want to add the evidence/source
                # for annots that we have in our db
                continue

            evidence_id = self.resolve(evidence_code)

            reference = Reference(graph, jnumid)
            reference.addRefToGraph()

            # add the ECO and citation information to the annot
            model.addTriple(assoc_id, self.globaltt['has evidence'], evidence_id)
            model.addTriple(assoc_id, self.globaltt['source'], jnumid)

            # For Mammalian Phenotype/Genotype annotation types,
            # MGI adds sex specificity qualifiers here
            if qualifier == 'MP-Sex-Specificity' and qualifier_value in ('M', 'F'):
                model._addSexSpecificity(assoc_id, self.resolve(qualifier_value))

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_bib_acc_view(self, limit):
    """
    This traverses the table twice:
    once to look up the internal key to J number mapping
    for the id hashmap, then again to make the equivalences.
    All internal keys have both a J and MGI identifier.
    This will make equivalences between the different pub ids.
    Triples:
        <pub_id> a owl:NamedIndividual
        <other_pub_id> a owl:NamedIndividual
        <pub_id> owl:sameAs <other_pub_id>
    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)

    # first pass, get the J number mapping, and add to the global hash
    LOG.info('populating pub id hash')
    raw = '/'.join((self.rawdir, 'bib_acc_view'))
    col = [
        'accid', 'prefixpart', 'numericpart', 'object_key',
        'logical_db', 'logicaldb_key']
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        header = next(filereader)
        if header != col:
            LOG.error('bib_acc_view expected:\n%s\n\tBut got:\n%s', col, header)
        for row in filereader:
            accid = row[col.index('accid')]
            prefixpart = row[col.index('prefixpart')]
            # 'numericpart'
            object_key = int(row[col.index('object_key')])
            # logical_db = row[col.index('logical_db')]
            # logicaldb_key = row[col.index('logicaldb_key')]

            if self.test_mode and object_key not in self.test_keys.get('pub'):
                continue

            # we use the J number here because
            # it is the externally-accessible identifier
            if prefixpart != 'J:':
                continue
            self.idhash['publication'][object_key] = accid
            reference = Reference(graph, accid)
            reference.addRefToGraph()

            if not self.test_mode and limit is not None and \
                    filereader.line_num > limit:
                break

    # 2nd pass, look up the MGI identifier in the hash
    LOG.info("getting pub equivalent ids")
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        header = next(filereader)
        for row in filereader:
            accid = row[col.index('accid')]
            prefixpart = row[col.index('prefixpart')]
            # 'numericpart'
            object_key = int(row[col.index('object_key')])
            logical_db = row[col.index('logical_db')]
            logicaldb_key = row[col.index('logicaldb_key')]

            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('pub'):
                    continue

            logical_db = logical_db.strip()

            jid = self.idhash['publication'].get(object_key)

            pub_id = None
            if logicaldb_key == '29':  # pubmed
                pub_id = 'PMID:' + accid
            elif logicaldb_key == '1' and re.match(r'MGI:', prefixpart):
                # don't get the J numbers,
                # because we don't need to make the equiv to itself.
                pub_id = accid
            elif logical_db == 'Journal Link':
                # some DOIs seem to have spaces
                # FIXME MGI needs to FIX THESE UPSTREAM!!!!
                # we'll scrub them here for the time being
                accid = re.sub(r'\s+', '', accid)
                # some DOIs have un-urlencoded brackets <>
                accid = re.sub(r'<', '%3C', accid)
                accid = re.sub(r'>', '%3E', accid)
                pub_id = 'DOI:' + accid
            elif logicaldb_key == '1' and re.match(r'J:', prefixpart):
                # we can skip the J numbers
                continue

            if pub_id is not None:
                # only add these to the graph if
                # it's mapped to something we understand
                reference = Reference(graph, pub_id)

                # make the assumption that if it is a PMID, it is a journal
                if re.match(r'PMID', pub_id):
                    reference.setType(self.globaltt['journal article'])
                    model.makeLeader(pub_id)

                reference.addRefToGraph()
                model.addSameIndividual(jid, pub_id)
            else:
                LOG.warning(
                    "Publication from (%s) not mapped for %s",
                    logical_db, object_key)

            if not self.test_mode and limit is not None and \
                    filereader.line_num > limit:
                break
    return
Please provide a description of the function:

def _process_prb_strain_view(self, limit):
    """
    Process a table to get strains (with internal ids), and their labels.
    These strains are created as instances of the species that they are.
    Triples:
        <strain id> a GENO:intrinsic_genotype
            rdf:label "strain label"
            RO:in_taxon <NCBI taxon id>

    :param limit:
    :return:
    """
    # Only 9 strain types if we want to map them:
    #   recombinant congenic, inbred strain, NA,
    #   congenic, consomic, coisogenic,
    #   recombinant inbred, NS, conplastic
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    geno = Genotype(graph)
    raw = '/'.join((self.rawdir, 'prb_strain_view'))
    LOG.info("getting strains and adding their taxa")
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for line in filereader:
            line_counter += 1
            if line_counter == 1:
                continue  # skip the header row
            (strain_key, strain, species) = line
            if self.test_mode is True:
                if int(strain_key) not in self.test_keys.get('strain'):
                    continue

            strain_id = self.idhash['strain'].get(strain_key)
            if strain_id is not None:
                self.label_hash[strain_id] = strain

                # add the species to the graph as a class
                species = species.strip()
                sp = self.resolve(species, False)
                if sp == species:
                    LOG.error("No taxon mapping for " + species)
                    LOG.warning("defaulting to Mus genus")
                    sp = self.globaltt['Mus']
                model.addClassToGraph(sp, None)
                geno.addTaxon(sp, strain_id)
                model.addIndividualToGraph(strain_id, strain, sp)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_mrk_marker_view(self, limit):
    """
    This is the definition of markers
    (as in genes, but other genomic loci types as well).
    It looks up the identifiers in the hashmap.
    This includes their labels, specific class, and identifiers.
    TODO should we use the mrk_mouse_view instead?

    Triples:
        <marker_id> a owl:Class OR owl:NamedIndividual
            GENO:marker_type
            rdf:label <symbol>
            RO:in_taxon <NCBITaxon_id>

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    geno = Genotype(graph)
    line_counter = 0
    raw = '/'.join((self.rawdir, 'mrk_marker_view'))
    LOG.info("getting markers and assigning types")
    with open(raw, 'r') as f:
        f.readline()  # read the header row; skip
        for line in f:
            line = line.rstrip("\n")
            line_counter += 1
            (marker_key, organism_key, marker_status_key, symbol, name,
             latin_name, marker_type) = line.split('\t')

            if self.test_mode is True:
                if int(marker_key) not in self.test_keys.get('marker'):
                    continue

            # use only non-withdrawn markers
            if marker_status_key != '2':
                marker_id = self.idhash['marker'].get(marker_key)

                # only pull info for mouse genes for now;
                # other species should come from other dbs
                if organism_key != '1':
                    continue

                if marker_id is None:
                    LOG.error(
                        "can't find %s %s in the id hash", marker_key, symbol)

                mapped_marker_type = self.resolve(marker_type.strip())

                # if it's unlocated, or is not a gene,
                # then don't add it as a class because
                # it's not added as a gene.
                # everything except for genes are modeled as individuals
                if mapped_marker_type in [
                        self.globaltt['gene'], self.globaltt['pseudogene']]:
                    model.addClassToGraph(
                        marker_id, symbol, mapped_marker_type, name)
                    model.addSynonym(
                        marker_id, name, self.globaltt['has_exact_synonym'])
                    self.markers['classes'].append(marker_id)
                else:
                    model.addIndividualToGraph(
                        marker_id, symbol, mapped_marker_type, name)
                    model.addSynonym(
                        marker_id, name, self.globaltt['has_exact_synonym'])
                    self.markers['indiv'].append(marker_id)

                self.label_hash[marker_id] = symbol

                # add the taxon
                taxon_id = self.resolve(latin_name)  # not always proper binomial
                geno.addTaxon(taxon_id, marker_id)

                # make MGI the leader for mouse genes.
                if taxon_id == self.globaltt['Mus musculus']:
                    model.makeLeader(marker_id)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_mrk_summary_view(self, limit):
    """
    Here we pull the mgiid of the features, and make equivalent (or sameAs)
    associations to referenced ids.
    Only adding the ENSEMBL genes and NCBI gene ids.
    Will wait on other ids later.

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("getting markers and equivalent ids from mrk_summary_view")
    line_counter = 0
    raw = '/'.join((self.rawdir, 'mrk_summary_view'))
    with open(raw, 'r') as fh:
        fh.readline()  # read the header row; skip
        for line in fh:
            line = line.rstrip("\n")
            line_counter += 1
            (accid, logicaldb_key, object_key, preferred, mgiid, subtype,
             short_description) = line.split('\t')

            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('marker'):
                    continue

            if preferred == '1':
                if self.idhash['marker'].get(object_key) is None:
                    # can't find the marker in the hash; add it here:
                    self.idhash['marker'][object_key] = mgiid
                    LOG.error(
                        "this marker hasn't been seen before %s %s",
                        mgiid, short_description)

                if accid == mgiid:
                    # don't need to make equivalences to itself
                    continue

                mapped_id = None
                if logicaldb_key == '60':
                    mapped_id = 'ENSEMBL:' + accid
                elif logicaldb_key == '1':
                    # don't need to add the equivalence to itself.
                    continue
                elif logicaldb_key == '55':
                    mapped_id = 'NCBIGene:' + accid

                if mapped_id is not None:
                    if mgiid in self.markers['classes'] \
                            or subtype in ['Gene', 'Pseudogene']:
                        model.addClassToGraph(mapped_id, None)
                        model.addEquivalentClass(mgiid, mapped_id)
                    elif mgiid in self.markers['indiv']:
                        model.addIndividualToGraph(mapped_id, None)
                        model.addSameIndividual(mgiid, mapped_id)
                # could parse the "subtype" string
                # to get the kind of thing the marker is

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_mrk_acc_view(self):
    """
    Use this table to create the idmap between the internal marker id and
    the public mgiid.
    No triples are produced in this process.
    :return:
    """
    # make a pass through the table first,
    # to create the mapping between the external and internal identifiers
    line_counter = 0
    LOG.info("mapping markers to internal identifiers")
    raw = '/'.join((self.rawdir, 'mrk_acc_view'))
    col = [
        'accid', 'prefix_part', 'logicaldb_key', 'object_key',
        'preferred', 'organism_key']
    with open(raw, 'r') as fh:
        fh.readline()  # read the header row; skip
        for line in fh:
            line = line.rstrip('\n')
            line_counter += 1
            row = line.split('\t')
            accid = row[col.index('accid')]
            prefix_part = row[col.index('prefix_part')]
            logicaldb_key = row[col.index('logicaldb_key')]
            object_key = row[col.index('object_key')]
            preferred = row[col.index('preferred')]
            # organism_key

            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('marker'):
                    continue

            # get the hashmap of the identifiers
            if logicaldb_key == '1' and prefix_part == 'MGI:' and preferred == '1':
                self.idhash['marker'][object_key] = accid
    return
Please provide a description of the function:

def _process_mrk_acc_view_for_equiv(self, limit):
    """
    Add the equivalences, either sameAs or equivalentClass,
    depending on the nature of the marker.
    We only process the ENSEMBL genes and NCBI gene ids.
    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    # pass through the file again,
    # and make the equivalence statements to a subset of the idspaces.
    # TODO verify the difference between what the
    # mrk_acc_view vs mrk_summary_view buys us here.
    # if nothing, then we should remove one or the other.
    LOG.info("mapping marker equivalent identifiers in mrk_acc_view")
    line_counter = 0
    with open('/'.join((self.rawdir, 'mrk_acc_view')), 'r') as f:
        f.readline()  # read the header row; skip
        for line in f:
            line = line.rstrip("\n")
            line_counter += 1
            (accid, prefix_part, logicaldb_key, object_key, preferred,
             organism_key) = line.split('\t')

            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('marker'):
                    continue

            # right now not caring about other organisms
            # (fields are strings here, so compare against '1')
            if organism_key != '1':
                continue

            mgiid = self.idhash['marker'].get(object_key)
            if mgiid is None:
                # presumably we've already added the relevant MGI ids,
                # so skip those that we can't find
                LOG.debug("can't find mgiid for %s", object_key)
                continue

            marker_id = None
            if preferred == '1':  # TODO what does it mean if it's 0?
                if logicaldb_key == '55':  # entrez/ncbi
                    marker_id = 'NCBIGene:' + accid
                elif logicaldb_key == '1' and prefix_part != 'MGI:':
                    marker_id = accid
                elif logicaldb_key == '60':
                    marker_id = 'ENSEMBL:' + accid
                # TODO get non-preferred ids==deprecated?

            if marker_id is not None:
                if mgiid in self.markers['classes']:
                    model.addClassToGraph(marker_id, None)
                    model.addEquivalentClass(mgiid, marker_id)
                elif mgiid in self.markers['indiv']:
                    model.addIndividualToGraph(marker_id, None)
                    model.addSameIndividual(mgiid, marker_id)
                else:
                    LOG.error("mgiid not in class or indiv hash %s", mgiid)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_prb_strain_acc_view(self, limit):
    """
    Use this table to create the idmap between
    the internal marker id and the public mgiid.
    Also, add the equivalence statements between strains for MGI and JAX.
    Triples:
        <strain_id> a GENO:intrinsic_genotype
        <other_strain_id> a GENO:intrinsic_genotype
        <strain_id> owl:sameAs <other_strain_id>

    :param limit:
    :return:
    """
    # make a pass through the table first,
    # to create the mapping between the external and internal identifiers
    line_counter = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("mapping strains to internal identifiers")
    raw = '/'.join((self.rawdir, 'prb_strain_acc_view'))
    tax_id = self.globaltt["Mus musculus"]

    with open(raw, 'r') as fh:
        fh.readline()  # read the header row; skip
        for line in fh:
            line = line.rstrip("\n")
            line_counter += 1
            (accid, prefixpart, logicaldb_key, object_key, preferred) = \
                line.split('\t')
            # scrub out the backticks from accids
            # TODO notify the source upstream
            accid = re.sub(r'`', '', accid).strip()
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('strain'):
                    continue

            # get the hashmap of the identifiers
            if logicaldb_key == '1' and prefixpart == 'MGI:' and preferred == '1':
                self.idhash['strain'][object_key] = accid
                model.addIndividualToGraph(accid, None, tax_id)

    # The following are the stock centers for the strains
    # (asterisk indicates complete)
    # *1    MGI     Mouse Genome Informatics
    # *22   JAX Registry    (null)
    # *37   EMMA    European Mutant Mouse Archive
    # *38   MMRRC   Mutant Mouse Regional Resource Center
    # 39    Harwell Mammalian Genome Unit Stock List
    # *40   ORNL    Oak Ridge National Lab mutant resource
    # *54   NCIMR   NCI Mouse Repository
    # *56   NMICE   Neuromice.org, a consortium of three NIH-sponsored
    #               mutagenesis projects designed to search for
    #               neurological mutations
    # 57    CARD    Center for Animal Resources and Development @ Kumamoto U
    # *70   RIKEN BRC   RIKEN BioResource Center
    # *71   CMMR    Canadian Mouse Mutant Resource
    # 84    JPGA    The Center for New Mouse Models of
    #               Heart, Lung, BLood and Sleep Disorders,
    #               JAX-PGA at The Jackson Laboratory
    # *87   MUGEN   Network of Excellence in Integrated Functional Genomics
    #               in Mutant Mouse Models as Tools to Investigate the
    #               Complexity of Human Immunological Disease
    # *90   APB     Australian Phenomics Bank
    # ? 91  EMS     Elizabeth M. Simpson
    # ? 93  NIG     National Institute of Genetics,
    #               Mammalian Genetics Laboratory, Japan
    # 94    TAC     Taconic
    # 154   OBS     Oriental BioService, Inc.
    # 161   RMRC-NLAC   National Applied Research Laboratories, Taiwan, R.O.C.

    # pass through the file again,
    # and make the equivalence statements to a subset of the idspaces
    LOG.info("mapping strain equivalent identifiers")
    line_counter = 0
    with open(raw, 'r') as fh:
        fh.readline()  # read the header row; skip
        for line in fh:
            line = line.rstrip("\n")
            line_counter += 1
            (accid, prefixpart, logicaldb_key, object_key, preferred) = \
                line.split('\t')
            # scrub out the backticks from accids
            # TODO notify the source upstream
            accid = re.sub(r'`', '', accid).strip()
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('strain'):
                    continue

            mgiid = self.idhash['strain'].get(object_key)
            if mgiid is None:
                # presumably we've already added the relevant MGI ids,
                # so skip those that we can't find
                # LOG.info("can't find mgiid for %s", object_key)
                continue

            strain_id = None
            deprecated = False
            comment = None
            if preferred == '1':  # what does it mean if it's 0?
                if logicaldb_key == '22':  # JAX
                    # scrub out the backticks from accids
                    # TODO notify the source upstream
                    accid = re.sub(r'`', '', accid).strip()
                    strain_id = 'JAX:' + accid
                elif logicaldb_key == '38':  # MMRRC
                    strain_id = accid
                    if not re.match(r'MMRRC:', strain_id):
                        strain_id = 'MMRRC:' + strain_id
                elif logicaldb_key == '37':  # EMMA
                    strain_id = re.sub(r'EM:', 'EMMA:', accid)
                elif logicaldb_key == '90':  # APB
                    strain_id = 'APB:' + accid  # Check
                elif logicaldb_key == '40':  # ORNL
                    # ORNL is not in existence any more.
                    # these are deprecated, and we will prefix with JAX
                    strain_id = 'JAX:' + accid
                    comment = "Originally from ORNL."
                    deprecated = True
                    # add these as synonyms of the MGI mouse
                    model.addSynonym(mgiid, accid)
                elif logicaldb_key == '54':  # NCIMR
                    strain_id = 'NCIMR:' + accid
                # CMMR not great - doesn't resolve well
                # elif logicaldb_key == '71':
                #     strain_id = 'CMMR:' + accid
                elif logicaldb_key == '56':  # neuromice
                    # neuromice.org doesn't exist any more,
                    # but all these are actually MGI ids
                    strain_id = accid
                elif logicaldb_key == '70':  # RIKEN
                    # like http://www2.brc.riken.jp/lab/animal/detail.php?brc_no=RBRC00160
                    strain_id = 'RBRC:' + accid
                elif logicaldb_key == '87':
                    strain_id = 'MUGEN:' + accid
                    # I can't figure out how to get to some of the strains

            # TODO get non-preferred ids==deprecated?
            # TODO make these strains, rather than instance of taxon?
            if strain_id is not None:
                model.addIndividualToGraph(strain_id, None, tax_id)
                if deprecated:
                    model.addDeprecatedIndividual(strain_id, [mgiid])
                    model.addSynonym(mgiid, accid)
                else:
                    model.addSameIndividual(mgiid, strain_id)
                if re.match(r'MMRRC', strain_id):
                    model.makeLeader(strain_id)
                if comment is not None:
                    model.addComment(strain_id, comment)

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def _process_mgi_note_vocevidence_view(self, limit):
    """
    Here we fetch the free text descriptions of the phenotype associations.
    Triples:
        <annot_id> dc:description "description text"
    :param limit:
    :return:
    """
    line_counter = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("getting free text descriptions for annotations")
    raw = '/'.join((self.rawdir, 'mgi_note_vocevidence_view'))
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for line in filereader:
            line_counter += 1
            if line_counter == 1:
                continue  # skip the header row
            (object_key, note) = line

            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('notes'):
                    continue

            # object_key == evidence._annotevidence_key
            annotkey = self.idhash['notes'].get(object_key)
            annot_id = self.idhash['annot'].get(annotkey)
            # only add the description for the annotations
            # we have captured through processing
            if annot_id is not None:
                model.addDescription(annot_id, note.strip())

            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
Please provide a description of the function:

def process_mgi_relationship_transgene_genes(self, limit=None):
    """
    Here, we have the relationship between MGI transgene alleles,
    and the non-mouse gene ids that are part of them.
    We augment the allele with the transgene parts.

    :param limit:
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    LOG.info("getting transgene genes")
    raw = '/'.join((self.rawdir, 'mgi_relationship_transgene_genes'))
    geno = Genotype(graph)
    col = [
        'rel_key', 'allele_key', 'allele_id', 'allele_label',
        'category_key', 'category_name', 'property_key', 'property_name',
        'gene_num']
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        header = next(filereader)
        if header != col:
            LOG.error('expected columns: %s\n\tBut got:\n%s', col, header)
        for row in filereader:
            # rel_key,
            allele_key = int(row[col.index('allele_key')])
            allele_id = row[col.index('allele_id')]
            # allele_label,
            # category_key,
            # category_name,
            # property_key,
            # property_name,
            gene_num = int(row[col.index('gene_num')])

            if self.test_mode and allele_key not in self.test_keys.get('allele') \
                    and gene_num not in self.test_ids:
                continue

            gene_id = 'NCBIGene:' + str(gene_num)

            # geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])
            seqalt_id = self.idhash['seqalt'].get(allele_key)
            if seqalt_id is None:
                seqalt_id = allele_id
            geno.addSequenceDerivesFrom(seqalt_id, gene_id)

            if not self.test_mode and limit is not None and \
                    filereader.line_num > limit:
                break
    return
Please provide a description of the function:

def process_mgi_note_allele_view(self, limit=None):
    """
    These are the descriptive notes about the alleles.
    Note that these notes have embedded HTML -
    should we do anything about that?
    :param limit:
    :return:
    """
    line_counter = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("Assembling notes on alleles")
    raw = '/'.join((self.rawdir, 'mgi_note_allele_view'))
    notehash = {}
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for line in filereader:
            line_counter += 1
            if line_counter == 1:
                continue  # skip the header row
            (object_key, notetype, note, sequencenum) = line

            # read all the notes into a hash to concatenate
            if object_key not in notehash:
                notehash[object_key] = {}
            if notetype not in notehash[object_key]:
                notehash[object_key][notetype] = []
            if len(notehash[object_key][notetype]) < int(sequencenum):
                # pad the list so that the 1-based sequencenum
                # can be assigned at index sequencenum-1,
                # even when chunks arrive out of order
                for i in range(
                        len(notehash[object_key][notetype]),
                        int(sequencenum)):
                    notehash[object_key][notetype].append('')
            notehash[object_key][notetype][int(sequencenum) - 1] = note.strip()

    # finish iteration over notes
    line_counter = 0
    for allele_key in notehash:
        if self.test_mode is True:
            if int(allele_key) not in self.test_keys.get('allele'):
                continue
        line_counter += 1
        allele_id = self.idhash['allele'].get(allele_key)
        if allele_id is None:
            continue
        for n in notehash[allele_key]:
            LOG.info(
                "found %d %s notes for %s",
                len(notehash[allele_key]), n, allele_id)
            notes = ''.join(notehash[allele_key][n])
            notes += ' [' + n + ']'
            model.addDescription(allele_id, notes)

        if not self.test_mode and limit is not None and line_counter > limit:
            break
    return
Please provide a description of the function:def _process_prb_strain_genotype_view(self, limit=None): line_counter = 0 if self.test_mode: graph = self.testgraph else: graph = self.graph LOG.info("Getting genotypes for strains") raw = '/'.join((self.rawdir, 'prb_strain_genotype_view')) with open(raw, 'r', encoding="utf8") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 if line_counter == 1: continue (strain_key, genotype_key) = line if self.test_mode is True: if int(genotype_key) not in self.test_keys.get('genotype') \ and int(strain_key) not in self.test_keys.get('strain'): continue strain_id = self.idhash['strain'].get(strain_key) if strain_id is None: strain_id = self._makeInternalIdentifier( 'strain', strain_key) genotype_id = self.idhash['genotype'].get(genotype_key) if genotype_id is None: genotype_id = self._makeInternalIdentifier( 'genotype', genotype_key) if strain_id is not None and genotype_id is not None: self.strain_to_genotype_map[strain_id] = genotype_id graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id) # TODO # verify if this should be contingent on the exactness or not # if qualifier == 'Exact': # gu.addTriple( # graph, strain_id, # self.globaltt['has_genotype'], # genotype_id) # else: # gu.addXref(graph, strain_id, genotype_id) if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n Here we fetch the free text descriptions of the phenotype associations.\n Triples:\n <annot_id> dc:description \"description text\"\n :param limit:\n\n :return:\n " ]
Please provide a description of the function:def update_wsnum_in_files(self, vernum): self.version_num = vernum # replace the WSNUMBER in the url paths with the real WS### for f in self.files: url = self.files[f].get('url') url = re.sub(r'WSNUMBER', self.version_num, url) self.files[f]['url'] = url LOG.debug( "Replacing WSNUMBER in %s with %s", f, self.version_num) # also the letter file - keep this so we know the version number # self.files['checksums']['file'] = re.sub( # r'WSNUMBER', self.version_num, self.files['checksums']['file']) return
[ "\n With the given version number ```vernum```,\n update the source's version number, and replace in the file hashmap.\n the version number is in the CHECKSUMS file.\n :param vernum:\n :return:\n\n " ]
Please provide a description of the function:def process_allele_phenotype(self, limit=None): raw = '/'.join((self.rawdir, self.files['allele_pheno']['file'])) if self.test_mode: graph = self.testgraph else: graph = self.graph LOG.info("Processing Allele phenotype associations") line_counter = 0 geno = Genotype(graph) with open(raw, 'r') as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: if re.match(r'!', ''.join(row)): # header continue line_counter += 1 (db, gene_num, gene_symbol, is_not, phenotype_id, ref, eco_symbol, with_or_from, aspect, gene_name, gene_synonym, gene_class, taxon, date, assigned_by, blank, blank2) = row if self.test_mode and gene_num not in self.test_ids['gene']: continue # TODO add NOT phenotypes if is_not == 'NOT': continue eco_symbol = eco_symbol.strip() eco_id = None if eco_symbol.strip() != '': eco_id = self.resolve(eco_symbol) # according to the GOA spec, persons are not allowed to be # in the reference column, therefore the variant and # person identifiers are swapped between the reference and with # columns. we unswitch them here. temp_var = temp_ref = None if re.search(r'WBVar|WBRNAi', ref): temp_var = ref # move the paper from the with column into the ref if re.search(r'WBPerson', with_or_from): temp_ref = with_or_from if temp_var is not None or temp_ref is not None: with_or_from = temp_var ref = temp_ref allele_list = re.split(r'\|', with_or_from) if len(allele_list) == 0: LOG.error( "Missing alleles from phenotype assoc at line %d", line_counter) continue else: for allele in allele_list: allele_num = re.sub(r'WB:', '', allele.strip()) allele_id = 'WormBase:' + allele_num gene_id = 'WormBase:' + gene_num if re.search(r'WBRNAi', allele_id): # make the reagent-targeted gene, # & annotate that instead of the RNAi item directly rnai_num = re.sub(r'WormBase:', '', allele_id) rnai_id = allele_id rtg_id = self.make_reagent_targeted_gene_id( gene_num, rnai_num) geno.addReagentTargetedGene( rnai_id, 'WormBase:' + gene_num, rtg_id) geno.addGeneTargetingReagent( rnai_id, None, self.globaltt['RNAi_reagent'], gene_id) allele_id = rtg_id elif re.search(r'WBVar', allele_id): # this may become deprecated by using wormmine # make the allele to gene relationship # the WBVars are really sequence alterations # the public name will come from elsewhere geno.addSequenceAlteration(allele_id, None) vl_id = '_:'+'-'.join((gene_num, allele_num)) geno.addSequenceAlterationToVariantLocus( allele_id, vl_id) geno.addAlleleOfGene(vl_id, gene_id) else: LOG.warning( "Some kind of allele I don't recognize: %s", allele_num) continue assoc = G2PAssoc(graph, self.name, allele_id, phenotype_id) if eco_id is not None: assoc.add_evidence(eco_id) if ref is not None and ref != '': ref = re.sub(r'(WB:|WB_REF:)', 'WormBase:', ref) reference = Reference(graph, ref) if re.search(r'Person', ref): reference.setType(self.globaltt['person']) assoc.add_evidence( self.globaltt[ 'inference from background scientific knowledge']) reference.addRefToGraph() assoc.add_source(ref) assoc.add_association_to_graph() # finish looping through all alleles if not self.test_mode \ and limit is not None and line_counter > limit: break return
[ "\n This file compactly lists variant to phenotype associations,\n such that in a single row, there may be >1 variant listed\n per phenotype and paper. This indicates that each variant is\n individually assocated with the given phenotype,\n as listed in 1+ papers.\n (Not that the combination of variants is producing the phenotype.)\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def process_gene_interaction(self, limit): raw = '/'.join((self.rawdir, self.files['gene_interaction']['file'])) if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing gene interaction associations") line_counter = 0 with gzip.open(raw, 'rb') as csvfile: filereader = csv.reader( io.TextIOWrapper(csvfile, newline=""), delimiter='\t', quotechar="'") for row in filereader: line_counter += 1 if re.match(r'#', ''.join(row)): continue (interaction_num, interaction_type, interaction_subtype, summary, citation) = row[0:5] # print(row) interaction_id = 'WormBase:'+interaction_num # TODO deal with subtypes interaction_type_id = None if interaction_type == 'Genetic': interaction_type_id = self.globaltt['genetically interacts with'] elif interaction_type == 'Physical': interaction_type_id = self.globaltt['molecularly_interacts_with'] elif interaction_type == 'Regulatory': interaction_type_id = self.globaltt['regulates'] else: LOG.info( "An interaction type I don't understand %s", interaction_type) num_interactors = (len(row) - 5) / 3 if num_interactors != 2: LOG.info( "Skipping interactions with !=2 participants:\n %s", str(row)) continue gene_a_id = 'WormBase:'+row[5] gene_b_id = 'WormBase:'+row[8] if self.test_mode \ and gene_a_id not in self.test_ids['gene'] \ and gene_b_id not in self.test_ids['gene']: continue assoc = InteractionAssoc( graph, self.name, gene_a_id, gene_b_id, interaction_type_id) assoc.set_association_id(interaction_id) assoc.add_association_to_graph() assoc_id = assoc.get_association_id() # citation is not a pmid or WBref - get this some other way model.addDescription(assoc_id, summary) if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n The gene interaction file includes identified interactions,\n that are between two or more gene (products).\n In the case of interactions with >2 genes, this requires creating\n groups of genes that are involved in the interaction.\n From the wormbase help list: In the example WBInteraction000007779\n it would likely be misleading to suggest that lin-12 interacts with\n (suppresses in this case) smo-1 ALONE or that lin-12 suppresses let-60\n ALONE; the observation in the paper; see Table V in paper PMID:15990876\n was that a lin-12 allele (heterozygous lin-12(n941/+)) could suppress\n the \"multivulva\" phenotype induced synthetically by simultaneous\n perturbation of BOTH smo-1 (by RNAi) AND let-60 (by the n2021 allele).\n So this is necessarily a three-gene interaction.\n\n Therefore, we can create groups of genes based on their \"status\" of\n Effector | Effected.\n\n Status: IN PROGRESS\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def main(): parser = argparse.ArgumentParser() parser.add_argument('--input', '-i', type=str, required=True, help='Location of input file') parser.add_argument('--yaml', '-y', type=str, required=True, help='Location of yaml mapping file') parser.add_argument('--output', '-o', type=str, required=True, help='Location of output file') args = parser.parse_args() output_fh = open(args.output, 'w') procedure_list = json.load(open(args.input, 'r')) param_map = yaml.safe_load(open(args.yaml, 'r')) for procedure_map in procedure_list: for code in procedure_map: if code not in param_map: param_map[code] = procedure_map[code] elif procedure_map[code] != param_map[code]: if re.search(r'protocol', procedure_map[code]): param_map[code] = procedure_map[code] elif re.search(r'protocol', param_map[code]): logger.info("Found dupe, keeping {0} over {1}". format(param_map[code], procedure_map[code])) json.dump(param_map, output_fh) output_fh.close()
[ "\n Collapse results of scrape-impc.py and manual mappings from impc_procedures.yaml\n Note the manual map exists due to some procedures being served as pdf and not\n parsable by our web scraper. There are also duplicate pages for certain iDs,\n for example:\n {\"JAX_LDT_001\": \"https://www.mousephenotype.org/impress/parameters/159/12\"},\n {\"JAX_LDT_001\": \"https://www.mousephenotype.org/impress/protocol/159/12\"},\n {\"JAX_LDT_001\": \"https://www.mousephenotype.org/impress/parameters/159\"}\n\n In these cases we take the protocol page\n " ]
Please provide a description of the function:def parse(self, limit=None): if limit is not None: LOG.info("Only parsing first %d rows", limit) ensembl_file = '/'.join((self.rawdir, self.files['ensembl2pathway']['file'])) self._parse_reactome_association_file( ensembl_file, limit, subject_prefix='ENSEMBL', object_prefix='REACT') chebi_file = '/'.join((self.rawdir, self.files['chebi2pathway']['file'])) self._parse_reactome_association_file( chebi_file, limit, subject_prefix='CHEBI', object_prefix='REACT') return
[ "\n Override Source.parse()\n Args:\n :param limit (int, optional) limit the number of rows processed\n Returns:\n :return None\n " ]
Please provide a description of the function:def _parse_reactome_association_file( self, file, limit=None, subject_prefix=None, object_prefix=None): eco_map = Reactome.get_eco_map(Reactome.map_files['eco_map']) count = 0 with open(file, 'r') as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: (component, pathway_id, pathway_iri, pathway_label, go_ecode, species_name) = row count += 1 self._add_component_pathway_association( eco_map, component, subject_prefix, pathway_id, object_prefix, pathway_label, go_ecode) if limit is not None and count >= limit: break return
[ "\n Parse ensembl gene to reactome pathway file\n :param file: file path (not handle)\n :param limit: limit (int, optional) limit the number of rows processed\n :return: None\n " ]
Please provide a description of the function:def _getnode(self, curie): # convention is lowercase names node = None if curie[0] == '_': if self.are_bnodes_skized is True: node = self.skolemizeBlankNode(curie) else: # delete the leading underscore to make it cleaner node = BNode(re.sub(r'^_:|^_', '', curie, 1)) # Check if curie string is actually an IRI elif curie[:4] == 'http' or curie[:3] == 'ftp': node = URIRef(curie) else: iri = RDFGraph.curie_util.get_uri(curie) if iri is not None: node = URIRef(RDFGraph.curie_util.get_uri(curie)) # Bind prefix map to graph prefix = curie.split(':')[0] if prefix not in self.namespace_manager.namespaces(): mapped_iri = self.curie_map[prefix] self.bind(prefix, Namespace(mapped_iri)) else: LOG.error("couldn't make URI for %s", curie) return node
[ "\n This is a wrapper for creating a URIRef or Bnode object\n with a given a curie or iri as a string.\n\n If an id starts with an underscore, it assigns it to a BNode, otherwise\n it creates it with a standard URIRef.\n Alternatively, self.skolemize_blank_node is True,\n it will skolemize the blank node\n\n :param curie: str identifier formatted as curie or iri\n :return: node: RDFLib URIRef or BNode object\n " ]
Please provide a description of the function:def bind_all_namespaces(self): ''' Results in the RDF @prefix directives for every ingest being added to this ingest. ''' for prefix in self.curie_map.keys(): iri = self.curie_map[prefix] self.bind(prefix, Namespace(iri)) return
[]
Please provide a description of the function:def add_association_to_graph(self): # add the basic association nodes # if rel == self.globaltt['has disposition']: Assoc.add_association_to_graph(self) # anticipating trouble with onset ranges that look like curies if self.onset is not None and self.onset != '': self.graph.addTriple(self.assoc_id, self.globaltt['onset'], self.onset) if self.frequency is not None and self.frequency != '': self.graph.addTriple( self.assoc_id, self.globaltt['frequency'], self.frequency) return
[ "\n The reified relationship between a disease and a phenotype is decorated\n with some provenance information.\n This makes the assumption that both the disease and phenotype\n are classes.\n\n :param g:\n\n :return:\n\n " ]
Please provide a description of the function:def process_catalog(self, limit=None): raw = '/'.join((self.rawdir, self.files['catalog']['file'])) LOG.info("Processing Data from %s", raw) efo_ontology = RDFGraph(False, "EFO") LOG.info("Loading EFO ontology in separate rdf graph") efo_ontology.parse(self.files['efo']['url'], format='xml') efo_ontology.bind_all_namespaces() LOG.info("Finished loading EFO ontology") so_ontology = RDFGraph(False, "SO") LOG.info("Loading SO ontology in separate rdf graph") so_ontology.parse(self.files['so']['url'], format='xml') so_ontology.bind_all_namespaces() LOG.info("Finished loading SO ontology") with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t') header = next(filereader, None) # the header row header_len = len(header) LOG.info('header length:\t %i', header_len) for row in filereader: if not row: pass else: if header_len != len(row): LOG.error('BadRow: %i has %i columns', filereader.line_num, len(row)) (date_added_to_catalog, pubmed_num, first_author, pub_date, journal, link, study_name, disease_or_trait, initial_sample_description, replicate_sample_description, region, chrom_num, chrom_pos, reported_gene_nums, mapped_gene, upstream_gene_num, downstream_gene_num, snp_gene_nums, upstream_gene_distance, downstream_gene_distance, strongest_snp_risk_allele, snps, merged, snp_id_current, context, intergenic_flag, risk_allele_frequency, pvalue, pvalue_mlog, pvalue_text, or_or_beta, confidence_interval_95, platform_with_snps_passing_qc, cnv_flag, mapped_trait, mapped_trait_uri, study_accession, GENOTYPING_TECHNOLOGY ) = row if self.test_mode: continue # 06-May-2015 25917933 # Zai CC 20-Nov-2014 J Psychiatr Res http://europepmc.org/abstract/MED/25917933 # A genome-wide association study of suicide severity scores in bipolar disorder. # Suicide in bipolar disorder # 959 European ancestry individuals NA # 10p11.22 10 32704340 C10orf68, CCDC7, ITGB1 CCDC7 # rs7079041-A rs7079041 0 7079041 intron 0 2E-6 5.698970 variant_curie, variant_type = self._get_curie_and_type_from_id( strongest_snp_risk_allele) if strongest_snp_risk_allele.strip() == '': LOG.debug( "No strongest SNP risk allele for %s:\n%s", pubmed_num, str(row)) # still consider adding in the EFO terms # for what the study measured? continue if variant_type == 'snp': self._add_snp_to_graph( variant_curie, strongest_snp_risk_allele, chrom_num, chrom_pos, context, risk_allele_frequency) self._add_deprecated_snp( variant_curie, snp_id_current, merged, chrom_num, chrom_pos) self._add_snp_gene_relation( variant_curie, snp_gene_nums, upstream_gene_num, downstream_gene_num) elif variant_type == 'haplotype': self._process_haplotype( variant_curie, strongest_snp_risk_allele, chrom_num, chrom_pos, context, risk_allele_frequency, mapped_gene, so_ontology) elif variant_type is None: LOG.warning( "There's a snp id i can't manage: %s", strongest_snp_risk_allele) continue description = self._make_description( disease_or_trait, initial_sample_description, replicate_sample_description, platform_with_snps_passing_qc, pvalue) self._add_variant_trait_association( variant_curie, mapped_trait_uri, efo_ontology, pubmed_num, description) if not self.test_mode and ( limit is not None and filereader.line_num > limit): break # TODO loop through the location hash, # and make all snps at that location equivalent for l in self.id_location_map: snp_ids = self.id_location_map[l] if len(snp_ids) > 1: LOG.info("%s has >1 snp id: %s", l, str(snp_ids)) return
[ "\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _get_curie_and_type_from_id(variant_id): curie = None variant_type = None # remove space before hyphens variant_id = re.sub(r' -', '-', variant_id).strip() if re.search(r' x ', variant_id) or re.search(r',', variant_id): # TODO deal with rs1234 x rs234... (haplotypes?) LOG.warning("Cannot parse variant groups of this format: %s", variant_id) elif re.search(r';', variant_id): curie = ':haplotype_' + Source.hash_id(variant_id) # deliberate 404 variant_type = "haplotype" elif variant_id[:2] == 'rs': curie = 'dbSNP:' + variant_id.split('-')[0] # curie = re.sub(r'-.*$', '', curie).strip() variant_type = "snp" # remove the alteration elif variant_id[:3] == 'kgp': # http://www.1000genomes.org/faq/what-are-kgp-identifiers curie = ':kgp-' + variant_id # deliberate 404 variant_type = "snp" elif variant_id[:3] == 'chr': # like: chr10:106180121-G # variant_id = re.sub(r'-?', '-N', variant_id) variant_id = re.sub(r' ', '', variant_id) curie = ':gwas-' + re.sub(r':', '-', variant_id) # deliberate 404 variant_type = "snp" elif variant_id.strip() == '': pass else: LOG.warning("There's a snp id i can't manage: %s", variant_id) return curie, variant_type
[ "\n Given a variant id, our best guess at its curie and type (snp, haplotype, etc)\n 'None' will be used for both curie and type for IDs that we can't process\n :param variant_id:\n :return:\n " ]
Please provide a description of the function:def getChrPartTypeByNotation(notation, graph=None): # Note that for mouse, # they don't usually include the "q" in their notation, # though UCSC does. We may need to adjust for that here # test the more specific sub-band pattern before the band pattern, # since the band pattern also matches the prefix of sub-band strings if re.match(r'p$', notation): rti = graph.globaltt['short_chromosome_arm'] elif re.match(r'q$', notation): rti = graph.globaltt['long_chromosome_arm'] elif re.match(r'[pq][A-H\d]$', notation): rti = graph.globaltt['chromosome_region'] elif re.match(r'[pq][A-H\d]\d\.\d+', notation): rti = graph.globaltt['chromosome_subband'] elif re.match(r'[pq][A-H\d]\d', notation): rti = graph.globaltt['chromosome_band'] else: rti = graph.globaltt['chromosome_part'] return rti
[ "\n This method will figure out the kind of feature that a given band\n is based on pattern matching to standard karyotype notation.\n (e.g. 13q22.2 ==> chromosome sub-band)\n\n This has been validated against human, mouse, fish, and rat nomenclature.\n :param notation: the band (without the chromosome prefix)\n :return:\n\n " ]
Please provide a description of the function:def _get_chrbands(self, limit, taxon): model = Model(self.graph) line_counter = 0 myfile = '/'.join((self.rawdir, self.files[taxon]['file'])) LOG.info("Processing Chr bands from FILE: %s", myfile) geno = Genotype(self.graph) # build the organism's genome from the taxon genome_label = self.files[taxon]['genome_label'] taxon_id = 'NCBITaxon:' + taxon # add the taxon as a class. adding the class label elsewhere model.addClassToGraph(taxon_id, None) model.addSynonym(taxon_id, genome_label) genome_id = geno.makeGenomeID(taxon_id) geno.addGenome(taxon_id, genome_label) model.addOWLPropertyClassRestriction( genome_id, self.globaltt['in taxon'], taxon_id) placed_scaffold_pattern = r'chr(\d+|X|Y|Z|W|MT|M)' # currently unused patterns # unlocalized_scaffold_pattern = placed_scaffold_pattern + r'_(\w+)_random' # unplaced_scaffold_pattern = r'chrUn_(\w+)' col = ['chrom', 'start', 'stop', 'band', 'rtype'] with gzip.open(myfile, 'rb') as reader: for line in reader: line_counter += 1 # skip comments line = line.decode().strip() if line[0] == '#': continue # chr13 4500000 10000000 p12 stalk row = line.split('\t') chrom = row[col.index('chrom')] band = row[col.index('band')] rtype = row[col.index('rtype')] # NOTE # some less-finished genomes have placed and unplaced scaffolds # * Placed scaffolds: # Scaffold has an oriented location within a chromosome. # * Unlocalized scaffolds: # scaffold 's chromosome is known, # scaffold's position, orientation or both is not known. # *Unplaced scaffolds: # it is not known which chromosome the scaffold belongs to. # find out if the thing is a full on chromosome, or a scaffold: # ex: unlocalized scaffold: chr10_KL568008v1_random # ex: unplaced scaffold: chrUn_AABR07022428v1 mch = re.match(placed_scaffold_pattern+r'$', chrom) if mch is not None and len(mch.groups()) == 1: # the chromosome is the first match of the pattern # chrom = m.group(1) # TODO unused pass else: # let's skip over anything that isn't a placed_scaffold LOG.info("Skipping non-placed chromosome %s", chrom) continue # the chrom class, taxon as the reference cclassid = makeChromID(chrom, taxon, 'CHR') # add the chromosome as a class geno.addChromosomeClass(chrom, taxon_id, genome_label) model.addOWLPropertyClassRestriction( cclassid, self.globaltt['member of'], genome_id) # add the band(region) as a class maplocclass_id = cclassid+band maplocclass_label = makeChromLabel(chrom+band, genome_label) if band is not None and band.strip() != '': region_type_id = self.map_type_of_region(rtype) model.addClassToGraph( maplocclass_id, maplocclass_label, region_type_id) else: region_type_id = self.globaltt['chromosome'] # add the staining intensity of the band if re.match(r'g(neg|pos|var)', rtype): if region_type_id in [ self.globaltt['chromosome_band'], self.globaltt['chromosome_subband']]: stain_type = self.resolve(rtype) if stain_type is not None: model.addOWLPropertyClassRestriction( maplocclass_id, self.globaltt['has_sequence_attribute'], self.resolve(rtype)) else: # usually happens if it's a chromosome because # they don't actually have banding info LOG.info("feature type %s != chr band", region_type_id) else: LOG.warning('staining type not found: %s', rtype) # get the parent bands, and make them unique parents = list(self.make_parent_bands(band, set())) # alphabetical sort will put them in smallest to biggest parents.sort(reverse=True) # print("PARENTS of", maplocclass_id, "=", parents) # add the parents to the graph, in hierarchical order # TODO this is somewhat inefficient due to # re-adding upper-level nodes when iterating over the file for prnt in parents: parent = prnt.strip() if parent is None or parent == "": continue pclassid = cclassid + parent # class chr parts pclass_label = makeChromLabel(chrom + parent, genome_label) rti = getChrPartTypeByNotation(parent, self.graph) model.addClassToGraph(pclassid, pclass_label, rti) # for canonical chromosomes, # then the subbands are subsequences of the full band # add the subsequence stuff as restrictions if prnt != parents[-1]: grandparent = 1 + parents.index(prnt) pid = cclassid + parents[grandparent] # the instance model.addOWLPropertyClassRestriction( pclassid, self.globaltt['is subsequence of'], pid) model.addOWLPropertyClassRestriction( pid, self.globaltt['has subsequence'], pclassid) else: # add the last one (p or q usually) # as attached to the chromosome model.addOWLPropertyClassRestriction( pclassid, self.globaltt['is subsequence of'], cclassid) model.addOWLPropertyClassRestriction( cclassid, self.globaltt['has subsequence'], pclassid) # connect the band here to the first one in the parent list if len(parents) > 0: model.addOWLPropertyClassRestriction( maplocclass_id, self.globaltt['is subsequence of'], cclassid + parents[0]) model.addOWLPropertyClassRestriction( cclassid + parents[0], self.globaltt['has subsequence'], maplocclass_id) if limit is not None and line_counter > limit: break # TODO figure out the staining intensities for the encompassing bands return
[ "\n For the given taxon, it will fetch the chr band file.\n We will not deal with the coordinate information with this parser.\n Here, we only are concerned with building the partonomy.\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def make_parent_bands(self, band, child_bands): m = re.match(r'([pq][A-H\d]+(?:\.\d+)?)', band) if len(band) > 0: if m: p = str(band[0:len(band)-1]) p = re.sub(r'\.$', '', p) # stop before adding the empty string produced when the # bare arm letter is truncated; otherwise '' lands in the # parent list and breaks the reverse-sorted hierarchy walk if p != '': child_bands.add(p) self.make_parent_bands(p, child_bands) else: child_bands = set() return child_bands
[ "\n this will determine the grouping bands that it belongs to, recursively\n 13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31\n\n :param band:\n :param child_bands:\n :return:\n\n " ]
Please provide a description of the function:def map_type_of_region(self, regiontype): if regiontype in self.localtt: so_id = self.resolve(regiontype) else: so_id = self.globaltt['chromosome_part'] LOG.warning( "Unmapped code %s. Defaulting to chromosome_part '%s'.", regiontype, so_id) return so_id
[ "\n Note that \"stalk\" refers to the short arm of acrocentric chromosomes\n chr13,14,15,21,22 for human.\n :param regiontype:\n :return:\n\n " ]
Please provide a description of the function:def get_curie(self, uri): '''Get a CURIE from a URI ''' prefix = self.get_curie_prefix(uri) if prefix is not None: key = self.curie_map[prefix] return '%s:%s' % (prefix, uri[len(key):len(uri)]) return None
[]
Please provide a description of the function:def get_curie_prefix(self, uri): ''' Return the CURIE's prefix:''' for key, value in self.uri_map.items(): if uri.startswith(key): return value return None
[]
Please provide a description of the function:def get_uri(self, curie): ''' Get a URI from a CURIE ''' if curie is None: return None parts = curie.split(':') if len(parts) == 1: if curie != '': LOG.error("Not a properly formed curie: \"%s\"", curie) return None prefix = parts[0] if prefix in self.curie_map: return '%s%s' % (self.curie_map.get(prefix), curie[(curie.index(':') + 1):]) LOG.error("Curie prefix not defined for %s", curie) return None
[]
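A quick round trip through the two helpers above (the import path and the HP mapping are assumptions for the demo):

from dipper.utils.CurieUtil import CurieUtil  # assumed import path

cu = CurieUtil({'HP': 'http://purl.obolibrary.org/obo/HP_'})
print(cu.get_uri('HP:0000118'))
# http://purl.obolibrary.org/obo/HP_0000118
print(cu.get_curie('http://purl.obolibrary.org/obo/HP_0000118'))
# HP:0000118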
Please provide a description of the function:def fetch(self, is_dl_forced=True): username = config.get_config()['dbauth']['udp']['user'] password = config.get_config()['dbauth']['udp']['password'] credentials = (username, password) # Get patient map file: patient_id_map = self.open_and_parse_yaml(self.map_files['patient_ids']) udp_internal_ids = patient_id_map.keys() phenotype_fields = ['Patient', 'HPID', 'Present'] # Get phenotype ids for each patient phenotype_params = { 'method': 'search_subjects', 'subject_type': 'Phenotype', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'values': ','.join(udp_internal_ids), 'user_fields': ','.join(phenotype_fields) } prioritized_variants = [ 'Patient', 'Gene', 'Chromosome Position', 'Variant Allele', 'Transcript'] prioritized_params = { 'method': 'search_subjects', 'subject_type': 'Variant Prioritization', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'values': ','.join(udp_internal_ids), 'user_fields': ','.join(prioritized_variants), 'format': 'json'} variant_fields = [ 'Patient', 'Family', 'Chr', 'Build', 'Chromosome Position', 'Reference Allele', 'Variant Allele', 'Parent of origin', 'Allele Type', 'Mutation Type', 'Gene', 'Transcript', 'Original Amino Acid', 'Variant Amino Acid', 'Amino Acid Change', 'Segregates with', 'Position', 'Exon', 'Inheritance model', 'Zygosity', 'dbSNP ID', '1K Frequency', 'Number of Alleles'] variant_params = { 'method': 'search_subjects', 'subject_type': 'Exome Analysis Results', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'user_fields': ','.join(variant_fields), 'format': 'json'} pheno_file = open( '/'.join((self.rawdir, self.files['patient_phenotypes']['file'])), 'w') variant_file = open( '/'.join((self.rawdir, self.files['patient_variants']['file'])), 'w') pheno_file.write('{0}\n'.format('\t'.join(phenotype_fields))) variant_file.write('{0}\n'.format('\t'.join(variant_fields))) variant_gene = self._fetch_data_from_udp( udp_internal_ids, prioritized_params, prioritized_variants, credentials) variant_gene_map = dict() for line in variant_gene: variant_gene_map.setdefault(line[0], []).append( # Try to make a unique value based on gene-pos-variantAlele-transcript # TODO make this a dict for readability purposes "{0}-{1}-{2}-{3}".format(line[1], line[2], line[3], line[4])) variant_info = self._fetch_data_from_udp( udp_internal_ids, variant_params, variant_fields, credentials) for line in variant_info: variant = "{0}-{1}-{2}-{3}".format(line[10], line[4], line[6], line[11]) if variant in variant_gene_map[line[0]]: line[0] = patient_id_map[line[0]] line[4] = re.sub(r'\.0$', '', line[4]) variant_file.write('{0}\n'.format('\t'.join(line))) phenotype_info = self._fetch_data_from_udp( udp_internal_ids, phenotype_params, phenotype_fields, credentials) for line in phenotype_info: line[0] = patient_id_map[line[0]] pheno_file.write('{0}\n'.format('\t'.join(line))) variant_file.close() pheno_file.close() return
[ "\n Fetches data from udp collaboration server,\n see top level comments for class for more information\n :return:\n " ]
Please provide a description of the function:def parse(self, limit=None): if limit is not None: LOG.info("Only parsing first %d rows", limit) phenotype_file = '/'.join( (self.rawdir, self.files['patient_phenotypes']['file'])) variant_file = '/'.join((self.rawdir, self.files['patient_variants']['file'])) phenotype_file_handler = open(phenotype_file, 'r') variant_file_handler = open(variant_file, 'r') self._parse_patient_phenotypes(phenotype_file_handler, limit) self._parse_patient_variants(variant_file_handler) return
[ "\n Override Source.parse()\n Args:\n :param limit (int, optional) limit the number of rows processed\n Returns:\n :return None\n " ]
Please provide a description of the function:def _parse_patient_variants(self, file): patient_var_map = self._convert_variant_file_to_dict(file) gene_coordinate_map = self._parse_gene_coordinates( self.map_files['gene_coord_map']) rs_map = self._parse_rs_map_file(self.map_files['dbsnp_map']) genotype = Genotype(self.graph) model = Model(self.graph) self._add_variant_gene_relationship(patient_var_map, gene_coordinate_map) for patient in patient_var_map: patient_curie = ':{0}'.format(patient) # make intrinsic genotype for each patient intrinsic_geno_bnode = self.make_id( "{0}-intrinsic-genotype".format(patient), "_") genotype_label = "{0} genotype".format(patient) genotype.addGenotype( intrinsic_geno_bnode, genotype_label, model.globaltt['intrinsic_genotype']) self.graph.addTriple( patient_curie, model.globaltt['has_genotype'], intrinsic_geno_bnode) for variant_id, variant in patient_var_map[patient].items(): build = variant['build'] chromosome = variant['chromosome'] position = variant['position'] reference_allele = variant['reference_allele'] variant_allele = variant['variant_allele'] genes_of_interest = variant['genes_of_interest'] rs_id = variant['rs_id'] variant_label = '' variant_bnode = self.make_id("{0}".format(variant_id), "_") # maybe should have these look like the elif statements below if position and reference_allele and variant_allele: variant_label = self._build_variant_label( build, chromosome, position, reference_allele, variant_allele, genes_of_interest) elif not position and reference_allele and variant_allele \ and len(genes_of_interest) == 1: variant_label = self._build_variant_label( build, chromosome, position, reference_allele, variant_allele, genes_of_interest) elif position and (not reference_allele or not variant_allele) \ and len(genes_of_interest) == 1: variant_label = "{0}{1}({2}):g.{3}".format( build, chromosome, genes_of_interest[0], position) elif len(genes_of_interest) == 1: variant_label = 'variant of interest in {0} gene of patient' \ ' {1}'.format(genes_of_interest[0], patient) else: variant_label = 'variant of interest in patient {0}'.format(patient) genotype.addSequenceAlteration(variant_bnode, None) # check if it we have built the label # in _add_variant_gene_relationship() labels = self.graph.objects( BNode(re.sub(r'^_:', '', variant_bnode, 1)), RDFS['label']) label_list = list(labels) if len(label_list) == 0: model.addLabel(variant_bnode, variant_label) self.graph.addTriple( variant_bnode, self.globaltt['in taxon'], self.globaltt['Homo sapiens']) self.graph.addTriple( intrinsic_geno_bnode, self.globaltt['has_variant_part'], variant_bnode) if rs_id: dbsnp_curie = 'dbSNP:{0}'.format(rs_id) model.addSameIndividual(variant_bnode, dbsnp_curie) self._add_variant_sameas_relationships(patient_var_map, rs_map) return
[ "\n :param file: file handler\n :return:\n " ]
Please provide a description of the function:def _add_variant_gene_relationship(self, patient_var_map, gene_coordinate_map): # genotype = Genotype(self.graph) dipper_util = DipperUtil() model = Model(self.graph) # Note this could be compressed in some way to remove one level of for looping for patient in patient_var_map: for variant_id, variant in patient_var_map[patient].items(): variant_bnode = self.make_id("{0}".format(variant_id), "_") genes_of_interest = variant['genes_of_interest'] if len(genes_of_interest) == 1: # Assume variant is variant allele of gene gene = genes_of_interest[0] gene_id = dipper_util.get_ncbi_id_from_symbol(gene) self._add_gene_to_graph( gene, variant_bnode, gene_id, self.globaltt['has_affected_feature']) elif re.search(r'upstream|downstream', variant['type'], flags=re.I): # Attempt to disambiguate ref_gene = [] up_down_gene = [] unmatched_genes = [] for gene in variant['genes_of_interest']: # resolve the NCBI gene id for this symbol # before the coordinate check gene_id = dipper_util.get_ncbi_id_from_symbol(gene) if gene_id and gene_id != '' and gene_id in gene_coordinate_map: # compare coordinates as integers; the map and the # variant store them as strings if int(gene_coordinate_map[gene_id]['start']) \ <= int(variant['position'])\ <= int(gene_coordinate_map[gene_id]['end']): gene_info = { 'symbol': gene, 'gene_id': gene_id, 'strand': gene_coordinate_map[gene_id]['strand'] } ref_gene.append(gene_info) else: up_down_gene.append({'symbol': gene, 'gene_id': gene_id}) else: unmatched_genes.append({'symbol': gene, 'gene_id': gene_id}) if len(ref_gene) == 1: self._add_gene_to_graph( ref_gene[0]['symbol'], variant_bnode, ref_gene[0]['gene_id'], self.globaltt['has_affected_feature']) # update label with gene gene_list = [ref_gene[0]['symbol']] # build label expects list variant_label = self._build_variant_label( variant['build'], variant['chromosome'], variant['position'], variant['reference_allele'], variant['variant_allele'], gene_list) model.addLabel(variant_bnode, variant_label) # In some cases there are multiple instances # of same gene from dupe rows in the source # Credit http://stackoverflow.com/a/3844832 elif len(ref_gene) > 0 and ref_gene[1:] == ref_gene[:-1]: self._add_gene_to_graph( ref_gene[0]['symbol'], variant_bnode, ref_gene[0]['gene_id'], self.globaltt['has_affected_feature']) # build label function expects list gene_list = [ref_gene[0]['symbol']] variant_label = self._build_variant_label( variant['build'], variant['chromosome'], variant['position'], variant['reference_allele'], variant['variant_allele'], gene_list) model.addLabel(variant_bnode, variant_label) # Check if reference genes are on different strands elif len(ref_gene) == 2: strands = [st['strand'] for st in ref_gene] if "minus" in strands and "plus" in strands: for r_gene in ref_gene: self._add_gene_to_graph( r_gene['symbol'], variant_bnode, r_gene['gene_id'], self.globaltt['has_affected_feature']) else: LOG.warning( "unable to map intron variant to gene coordinates: %s", variant) for r_gene in ref_gene: self._add_gene_to_graph( r_gene['symbol'], variant_bnode, r_gene['gene_id'], self.globaltt['causally_influences']) elif re.search(r'intron', variant['type'], flags=re.I): LOG.warning( "unable to map intron variant to gene coordinates_2: %s", variant) for neighbor in up_down_gene: self._add_gene_to_graph( neighbor['symbol'], variant_bnode, neighbor['gene_id'], self.globaltt['causally_influences']) # Unmatched genes are likely because we cannot map to an NCBIGene # or we do not have coordinate information for unmatched_gene in unmatched_genes: self._add_gene_to_graph( unmatched_gene['symbol'], variant_bnode, unmatched_gene['gene_id'], self.globaltt['causally_influences']) return
[ "\n Right now it is unclear the best approach on how to connect\n variants to genes. In most cases has_affected_locus/GENO:0000418\n is accurate; however, there are cases where a variant is in the intron\n on one gene and is purported to causally affect another gene down or\n upstream. In these cases we must first disambiguate which gene\n is the affected locus, and which gene(s) are predicated to be\n causully influenced by (RO:0002566)\n\n UPDATE 8-30: In the latest dataset we no longer have 1-many mappings\n between variants and genes, but leaving this here in case we see\n these in the future\n\n The logic followed here is:\n if mutation type contains downstream/upstream and more than one\n gene of interest, investigate coordinates of all genes to\n see if we can disambiguate which genes are which\n :return: None\n " ]
Please provide a description of the function:def _convert_variant_file_to_dict(varfile): patient_variant_map = {} # line_num = 0 note this is builtin to the reader as reader.line_num reader = csv.reader(varfile, delimiter="\t") # one name per column; keep this list aligned with the variant_fields # written out by fetch() col = [ 'patient', 'family', 'chromosome', 'build', 'position', 'reference_allele', 'variant_allele', 'parent_of_origin', 'allele_type', 'mutation_type', 'gene_symbol', 'transcript', 'reference_aa', 'variant_aa', 'aa_change', 'segregates_with', 'locus', 'exon', 'inheritance_model', 'zygosity', 'dbsnp_id', 'frequency', 'num_of_alleles' ] # row = next(reader) # there is no header # if len(row) != len(col): # LOG.error('Got:\n\t%s\nExpected:\n\t%s\n', row, col) # raise TypeError('header does not match expected format') for row in reader: patient = row[col.index('patient')] # family, chromosome = row[col.index('chromosome')] build = row[col.index('build')] position = row[col.index('position')] reference_allele = row[col.index('reference_allele')] variant_allele = row[col.index('variant_allele')] # parent_of_origin, # allele_type, mutation_type = row[col.index('mutation_type')] gene_symbol = row[col.index('gene_symbol')] # transcript, # reference_aa, # variant_aa, # aa_change, # segregates_with, # locus, # exon, # inheritance_model, # zygosity, dbsnp_id = row[col.index('dbsnp_id')] # frequency, # num_of_alleles if patient not in patient_variant_map: patient_variant_map[patient] = {} formatted_chr = re.sub(r'^CHR', 'chr', chromosome, flags=re.I) if re.fullmatch(r'[XY]|[0-9]{1,2}', chromosome, flags=re.I): formatted_chr = "chr{0}".format(chromosome.upper()) formatted_build = re.sub(r'^HG', 'hg', build, flags=re.I) ref_base = reference_allele.upper() var_base = variant_allele.upper() rs_id = '' # Catch misformatted data if re.search(r'LEFT FLANK|NM_|EXON', ref_base): ref_base = '' if re.search(r'LEFT FLANK|NM_|EXON', var_base): var_base = '' if dbsnp_id != '': match = re.fullmatch(r'^(rs\d+).*', dbsnp_id) if match: rs_id = match.group(1) # Format variant object variant_info = [formatted_chr, formatted_build, position, ref_base, var_base] if '' in variant_info: filt_list = [info for info in variant_info if info != ''] variant_id = str(reader.line_num) + '-' + '-'.join(filt_list) else: variant_id = '-'.join(variant_info) if variant_id in patient_variant_map[patient]: patient_variant_map[patient][variant_id]['genes_of_interest'].append( gene_symbol) else: patient_variant_map[patient][variant_id] = { 'build': formatted_build, 'position': position, 'chromosome': formatted_chr, 'reference_allele': ref_base, 'variant_allele': var_base, 'type': mutation_type, 'rs_id': '' } if rs_id: patient_variant_map[patient][variant_id]['rs_id'] = rs_id patient_variant_map[patient][variant_id]['genes_of_interest']\ = [gene_symbol] return patient_variant_map
[ "\n Converts tsv to dicts with this structure\n {\n 'patient_1': {\n 'variant-id': {\n 'build': 'hg19'\n 'chromosome': 'chr7',\n 'reference_allele': 'A',\n 'variant_allele': 'G',\n 'position': '1234',\n 'rs_id' : 'RS1234',\n 'type': 'SNV',\n 'genes_of_interest' : [SHH, BRCA1]\n }\n }\n }\n TODO the above structure can be simplified as it results\n in duplicated variant dicts, the join-by-id approach\n below would be an improvement:\n {\n 'patient_1': {\n 'variants': [variant-id1,variant-id2, 3, 4]\n }\n 'variants : [\n 'variant-id1': {\n 'build': hg19\n 'chromosome': 'chr7',\n 'reference_allele': 'A',\n 'variant_allele': 'G',\n 'position': '1234'\n 'rs_id' : 'RS1234',\n 'type': 'SNV\",\n 'genes_of_interest' : [SHH, BRCA1]\n }\n ]\n }\n\n If any part of the core variant information is missing\n (build, chr, bp change(s), the line number will be used\n to make the variant unique\n\n Variant id will be used downstream to form blank nodes (checksumed)\n\n See docstring for _add_variant_gene_relationship for explanation\n on why there is a one to many mapping between variants and genes\n\n Values are normalized with these rules:\n 1. Basepairs are upper case\n 2. HG19 -> hg19\n 3. X -> chrX\n :return: dict\n " ]
Please provide a description of the function:def _parse_patient_phenotypes(self, file, limit=None): model = Model(self.graph) line_counter = 0 reader = csv.reader(file, delimiter="\t") for row in reader: (patient_id, hpo_curie, present) = row patient_curie = ':{0}'.format(patient_id) if patient_id == 'Patient': # skip header line_counter += 1 continue model.addPerson(patient_curie, patient_id) self.graph.addTriple( patient_curie, self.globaltt['has phenotype'], self.globaltt['disease']) if present == 'yes': self.graph.addTriple( patient_curie, self.globaltt['has phenotype'], hpo_curie) line_counter += 1 if not self.test_mode and limit is not None \ and line_counter >= limit: break
[ "\n :param file: file handler\n :param limit: limit rows processed\n :return:\n " ]
Please provide a description of the function:def _parse_gene_coordinates(file): id_map = {} col = ['gene_curie', 'start', 'end', 'strand', 'build'] if os.path.exists(os.path.join(os.path.dirname(__file__), file)): with open(os.path.join(os.path.dirname(__file__), file)) as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: id_map[row[col.index('gene_curie')]] = { 'start': row[col.index('start')], 'end': row[col.index('end')], 'strand': row[col.index('strand')], 'build': row[col.index('build')] } return id_map
[ "\n :param file: file path\n :param limit: limit (int, optional) limit the number of rows processed\n :return: dict\n " ]
Please provide a description of the function:def _parse_rs_map_file(rsfile): rs_map = {} col = ['chromosome', 'position', 'rs_id', 'var_type', 'alleles'] if os.path.exists(os.path.join(os.path.dirname(__file__), rsfile)): with open(os.path.join(os.path.dirname(__file__), rsfile)) as tsvfile: reader = csv.reader(tsvfile, delimiter="\t") for row in reader: chromosome = row[col.index('chromosome')] position = row[col.index('position')] # rs_id, var_type, alleles) = row map_key = "chr{0}-{1}".format(chromosome, position) rs_info = { 'type': row[col.index('var_type')], 'rs_id': row[col.index('rs_id')], 'alleles': row[col.index('alleles')] } if map_key in rs_map: rs_map[map_key].append(rs_info) else: rs_map[map_key] = [rs_info] return rs_map
[ "\n Parses rsID mapping file from dbSNP\n Outputs dict where keys are coordinates in the format\n {chromsome}-{position}\n\n {\n chr1-1234: [\n {\n 'type': 'snp'\n 'rs_id': 'rs1234'\n 'alleles': 'A/G/T'\n }\n ]\n }\n\n :param file: file path\n :param limit: limit (int, optional) limit the number of rows processed\n :return: dict\n " ]
Please provide a description of the function:def _build_variant_label( build, chromosome, position, reference_allele, variant_allele, gene_symbols=None ): variant_label = '' prefix = '' if gene_symbols and len(gene_symbols) == 1 and gene_symbols[0]: prefix = "{0}{1}({2})".format(build, chromosome, gene_symbols[0]) else: prefix = "{0}{1}".format(build, chromosome) if reference_allele == '-': variant_label = "{0}:g.{1}ins{2}".format(prefix, position, variant_allele) elif variant_allele == '-': variant_label = "{0}:g.{1}del{2}".format( prefix, position, reference_allele) else: variant_label = "{0}:g.{1}{2}>{3}".format( prefix, position, reference_allele, variant_allele) return variant_label
[ "\n Function to build HGVS variant labels\n :param build: {str} build id\n :param chromosome: {str} chromosome\n :param position: {str} variation position as string or int\n :param reference_allele: {str} single letter ref bp\n :param variant_allele: {str} single letter bp change\n :param gene_symbol: {str} gene symbol (hgvs)\n :return: {str} variant label\n " ]
Please provide a description of the function:def _add_gene_to_graph(self, gene, variant_bnode, gene_id, relation): model = Model(self.graph) if gene_id: self.graph.addTriple(variant_bnode, relation, gene_id) elif gene: LOG.info("gene %s not mapped to NCBI gene, making blank node", gene) gene_bnode = self.make_id("{0}".format(gene), "_") model.addIndividualToGraph(gene_bnode, gene) self.graph.addTriple(variant_bnode, relation, gene_bnode)
[ "\n :param gene:\n :param variant_bnode:\n :return:\n " ]
Please provide a description of the function:def _add_variant_sameas_relationships(self, patient_var_map, rs_map): model = Model(self.graph) for patient in patient_var_map: for variant_id, variant in patient_var_map[patient].items(): variant_bnode = self.make_id("{0}".format(variant_id), "_") build = variant['build'] chromosome = variant['chromosome'] position = variant['position'] reference_allele = variant['reference_allele'] variant_allele = variant['variant_allele'] if build and chromosome and position\ and reference_allele and variant_allele: if re.fullmatch(r'[ATCG]', reference_allele)\ and re.fullmatch(r'[ATCG]', variant_allele): # variation is snp rs_id = self._get_rs_id(variant, rs_map, 'snp') if rs_id: dbsnp_curie = 'dbSNP:rs{0}'.format(rs_id) model.addSameIndividual(variant_bnode, dbsnp_curie) elif re.fullmatch(r'\-', reference_allele)\ or re.fullmatch(r'\-', variant_allele): rs_id = self._get_rs_id(variant, rs_map, 'indel') if rs_id is not None: dbsnp_curie = 'dbSNP:rs{0}'.format(rs_id) model.addSameIndividual(variant_bnode, dbsnp_curie) else: rs_id = self.\ _get_rs_id(variant, rs_map, 'indel') if rs_id is not None: dbsnp_curie = 'dbSNP:rs{0}'.format(rs_id) model.addSameIndividual(variant_bnode, dbsnp_curie) return
[ "\n Adds same as relationships between udp variant bnodes and dbsnp ids\n :param patient_var_map:\n :param rs_map:\n :return:\n " ]
Please provide a description of the function:def _get_rs_id(variant, rs_map, variant_type): rs_id = None if variant_type == 'snp': variant_key = "{0}-{1}".format(variant['chromosome'], variant['position']) if variant_key in rs_map: snp_candidates = [ rs_dict for rs_dict in rs_map[variant_key] if rs_dict['type'] == 'snp'] if len(snp_candidates) == 1: rs_id = snp_candidates[0]["rs_id"] elif variant_type == 'indel': rs_candidates = [] variant_key = "{0}-{1}".format(variant['chromosome'], variant['position']) if variant_key in rs_map: snp_candidates = [ rs_dict for rs_dict in rs_map[variant_key] if rs_dict['type'] == 'in-del'] for candidate in snp_candidates: alleles = candidate['alleles'].split('/') if variant['reference_allele'] in alleles \ and variant['variant_allele'] in alleles: rs_candidates.append(candidate['rs_id']) if len(rs_candidates) == 1: rs_id = rs_candidates[0] elif len(rs_candidates) > 1: LOG.info( "ambiguous rs mapping for: %s\ncandidate ids: %s", variant, rs_candidates) else: LOG.info( "rs at coordinate but no match found" " for variant %s\n candidate ids: %s", variant, rs_map[variant_key]) else: LOG.warning("type: %s unsupported", variant_type) return rs_id
[ "\n Given a variant dict, return unambiguous RS ID\n TODO\n Some sequence alterations appear to have mappings to dbsnp's notation\n for example,\n reference allele: TTTTTTTTTTTTTT\n variant allele: TTTTTTTTTTTTTTT\n Is theoretically the same as -/T, we should clarify with UDP and then add\n functionality to map this notation to the more common -/T\n :param variant:\n :param rs_map:\n :param type: snp or indel\n :return:\n " ]
Please provide a description of the function:def fetch(self, is_dl_forced=False): host = config.get_config()['dbauth']['coriell']['host'] key = config.get_config()['dbauth']['coriell']['private_key'] user = config.get_config()['user']['coriell'] passwd = config.get_config()['keys'][user] with pysftp.Connection( host, username=user, password=passwd, private_key=key) as sftp: # check to make sure each file is in there # get the remote files remote_files = sftp.listdir_attr() files_by_repo = {} for attr in remote_files: # for each catalog, get the most-recent filename mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename) if mch is not None and len(mch.groups()) > 0: # there should just be one now files_by_repo[mch.group(1)] = attr # sort each array in hash, # & get the name and time of the most-recent file for each catalog for rmt in self.files: LOG.info("Checking on %s catalog file", rmt) fname = self.files[rmt]['file'] remotef = files_by_repo[rmt] target_name = '/'.join((self.rawdir, fname)) # check if the local file is out of date, if so, download. # otherwise, skip. # we rename (for simplicity) the original file fstat = None if os.path.exists(target_name): fstat = os.stat(target_name) LOG.info( "Local file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]: if fstat is None: LOG.info("File does not exist locally; downloading...") else: LOG.info( "New version of %s catalog available; downloading...", rmt) sftp.get(remotef.filename, target_name) LOG.info( "Fetched remote %s -> %s", remotef.filename, target_name) fstat = os.stat(target_name) filedate = datetime.utcfromtimestamp( remotef.st_mtime).strftime("%Y-%m-%d") LOG.info( "New file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) else: LOG.info("File %s exists; using local copy", fname) filedate = datetime.utcfromtimestamp( fstat[stat.ST_CTIME]).strftime("%Y-%m-%d") self.dataset.setFileAccessUrl(remotef.filename, True) self.dataset.setVersion(filedate) return
[ "\n Here we connect to the coriell sftp server using private connection\n details. They dump bi-weekly files with a timestamp in the filename.\n For each catalog, we ping the remote site and pull the most-recently\n updated file, renaming it to our local latest.csv.\n\n Be sure to have pg user/password connection details in your conf.yaml\n file, like:\n dbauth : {\"coriell\" : {\n \"user\" : \"<username>\", \"password\" : \"<password>\",\n \"host\" : <host>, \"private_key\"=path/to/rsa_key}\n }\n\n :param is_dl_forced:\n :return:\n\n " ]
Please provide a description of the function:def _process_data(self, src_key, limit=None): raw = '/'.join((self.rawdir, self.files[src_key]['file'])) LOG.info("Processing Data from %s", raw) if self.test_mode: # set the graph to build graph = self.testgraph else: graph = self.graph family = Family(graph) model = Model(graph) line_counter = 1 geno = Genotype(graph) diputil = DipperUtil() col = self.files[src_key]['columns'] # affords access with # x = row[col.index('x')].strip() with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter=',', quotechar=r'"') # we can keep a close watch on changing file formats fileheader = next(filereader, None) fileheader = [c.lower() for c in fileheader] if col != fileheader: # assert LOG.error('Expected %s to have columns: %s', raw, col) LOG.error('But Found %s to have columns: %s', raw, fileheader) raise AssertionError('Incoming data headers have changed.') for row in filereader: line_counter += 1 if len(row) != len(col): LOG.warning( 'Expected %i values but found %i in row %i', len(col), len(row), line_counter) continue # (catalog_id, description, omim_number, sample_type, # cell_line_available, dna_in_stock, dna_ref, gender, age, # race, ethnicity, affected, karyotype, relprob, mutation, # gene, family_id, collection, url, cat_remark, pubmed_ids, # family_member, variant_id, dbsnp_id, species) = row # example: # GM00003,HURLER SYNDROME,607014,Fibroblast,Yes,No, # ,Female,26 YR,Caucasian,,,, # parent,,,39,NIGMS Human Genetic Cell Repository, # http://ccr.coriell.org/Sections/Search/Sample_Detail.aspx?Ref=GM00003, # 46;XX; clinically normal mother of a child with Hurler syndrome; # proband not in Repository,, # 2,,18343,Homo sapiens catalog_id = row[col.index('catalog_id')].strip() if self.test_mode and catalog_id not in self.test_lines: # skip rows not in our test lines, when in test mode continue # ########### BUILD REQUIRED VARIABLES ########### # Make the cell line ID cell_line_id = 'Coriell:' + catalog_id # Map the cell/sample type cell_type = self.resolve(row[col.index('sample_type')].strip()) # on fail cell_type = self.globaltt['cell'] ? # Make a cell line label collection = row[col.index('collection')].strip() line_label = collection.partition(' ')[0] + '-' + catalog_id # Map the repository/collection repository = self.localtt[collection] # patients are uniquely identified by one of: # dbsnp id (which is == an individual haplotype) # family id + family member (if present) OR # probands are usually family member zero # cell line id # since some patients have >1 cell line derived from them, # we must make sure that the genotype is attached to # the patient, and can be inferred to the cell line # examples of repeated patients are: # famid=1159, member=1; fam=152,member=1 # Make the patient ID # make an anonymous patient patient_id = '_:person' fam_id = row[col.index('fam')].strip() fammember = row[col.index('fammember')].strip() if fam_id != '': patient_id = '-'.join((patient_id, fam_id, fammember)) else: # make an anonymous patient patient_id = '-'.join((patient_id, catalog_id)) # properties of the individual patients: sex, family id, # member/relproband, description descriptions are # really long and ugly SCREAMING text, so need to clean up # the control cases are so odd with this labeling scheme; # but we'll deal with it as-is for now. 
description = row[col.index('description')].strip() short_desc = (description.split(';')[0]).capitalize() gender = row[col.index('gender')].strip().lower() affected = row[col.index('affected')].strip() relprob = row[col.index('relprob')].strip() if affected == '': affected = 'unspecified' elif affected in self.localtt: affected = self.localtt[affected] else: LOG.warning( 'Novel Affected status %s at row: %i of %s', affected, line_counter, raw) patient_label = ' '.join((affected, gender, relprob)) if relprob == 'proband': patient_label = ' '.join(( patient_label.strip(), 'with', short_desc)) else: patient_label = ' '.join(( patient_label.strip(), 'of proband with', short_desc)) # ############# BUILD THE CELL LINE ############# # Adding the cell line as a typed individual. cell_line_reagent_id = self.globaltt['cell line'] model.addIndividualToGraph( cell_line_id, line_label, cell_line_reagent_id) # add the equivalent id == dna_ref dna_ref = row[col.index('dna_ref')].strip() if dna_ref != '' and dna_ref != catalog_id: equiv_cell_line = 'Coriell:' + dna_ref # some of the equivalent ids are not defined # in the source data; so add them model.addIndividualToGraph( equiv_cell_line, None, cell_line_reagent_id) model.addSameIndividual(cell_line_id, equiv_cell_line) # Cell line derives from patient geno.addDerivesFrom(cell_line_id, patient_id) geno.addDerivesFrom(cell_line_id, cell_type) # Cell line a member of repository family.addMember(repository, cell_line_id) cat_remark = row[col.index('cat_remark')].strip() if cat_remark != '': model.addDescription(cell_line_id, cat_remark) # Cell age_at_sampling # TODO add the age nodes when modeled properly in #78 # if (age != ''): # this would give a BNode that is an instance of Age. # but i don't know how to connect # the age node to the cell line? we need to ask @mbrush # age_id = '_'+re.sub('\s+','_',age) # gu.addIndividualToGraph( # graph,age_id,age,self.globaltt['age']) # gu.addTriple( # graph,age_id,self.globaltt['has measurement value'],age, # True) # ############# BUILD THE PATIENT ############# # Add the patient ID as an individual. model.addPerson(patient_id, patient_label) # TODO map relationship to proband as a class # (what ontology?) # Add race of patient # FIXME: Adjust for subcategories based on ethnicity field # EDIT: There are 743 different entries for ethnicity... # Too many to map? # Add ethnicity as literal in addition to the mapped race? # Adjust the ethnicity txt (if using) # to initial capitalization to remove ALLCAPS # TODO race should go into the individual's background # and abstracted out to the Genotype class punting for now. # if race != '': # mapped_race = self.resolve(race) # if mapped_race is not None: # gu.addTriple( # g,patient_id,self.globaltt['race'], mapped_race) # model.addSubClass( # mapped_race,self.globaltt['ethnic_group']) # ############# BUILD THE FAMILY ############# # Add triples for family_id, if present. if fam_id != '': family_comp_id = 'CoriellFamily:' + fam_id family_label = ' '.join(('Family of proband with', short_desc)) # Add the family ID as a named individual model.addIndividualToGraph( family_comp_id, family_label, self.globaltt['family']) # Add the patient as a member of the family family.addMemberOf(patient_id, family_comp_id) # ############# BUILD THE GENOTYPE ############# # the important things to pay attention to here are: # karyotype = chr rearrangements (somatic?) 
# mutation = protein-level mutation as a label, # often from omim # gene = gene symbol - TODO get id # variant_id = omim variant ids (; delimited) # dbsnp_id = snp individual ids = full genotype? # note GM00633 is a good example of chromosomal variation # - do we have enough to capture this? # GM00325 has both abnormal karyotype and variation # make an assumption that if the taxon is blank, # that it is human! species = row[col.index('species')].strip() if species is None or species == '': species = 'Homo sapiens' taxon = self.resolve(species) # if there's a dbSNP id, # this is actually the individual's genotype genotype_id = None genotype_label = None dbsnp_id = row[col.index('dbsnp_id')].strip() if dbsnp_id != '': genotype_id = 'dbSNPIndividual:' + dbsnp_id omim_map = {} gvc_id = None # some of the karyotypes are encoded # with terrible hidden codes. remove them here # i've seen a <98> character karyotype = row[col.index('karyotype')].strip() karyotype = diputil.remove_control_characters(karyotype) karyotype_id = None if karyotype.strip() != '': karyotype_id = '_:'+re.sub( 'MONARCH:', '', self.make_id(karyotype)) # add karyotype as karyotype_variation_complement model.addIndividualToGraph( karyotype_id, karyotype, self.globaltt['karyotype_variation_complement']) # TODO break down the karyotype into parts # and map into GENO. depends on #77 # place the karyotype in a location(s). karyo_chrs = self._get_affected_chromosomes_from_karyotype( karyotype) for chrom in karyo_chrs: chr_id = makeChromID(chrom, taxon, 'CHR') # add an anonymous sequence feature, # each located on chr karyotype_feature_id = '-'.join((karyotype_id, chrom)) karyotype_feature_label = \ 'some karyotype alteration on chr' + str(chrom) feat = Feature( graph, karyotype_feature_id, karyotype_feature_label, self.globaltt['sequence_alteration']) feat.addFeatureStartLocation(None, chr_id) feat.addFeatureToGraph() geno.addParts( karyotype_feature_id, karyotype_id, self.globaltt['has_variant_part']) gene = row[col.index('gene')].strip() mutation = row[col.index('mutation')].strip() if gene != '': varl = gene + '(' + mutation + ')' # fix the variant_id so it's always in the same order variant_id = row[col.index('variant_id')].strip() vids = variant_id.split(';') variant_id = ';'.join(sorted(list(set(vids)))) if karyotype.strip() != '' and not self._is_normal_karyotype( karyotype): gvc_id = karyotype_id if variant_id != '': gvc_id = '_:' + variant_id.replace(';', '-') + '-' \ + re.sub(r'\w*:', '', karyotype_id) if mutation.strip() != '': gvc_label = '; '.join((varl, karyotype)) else: gvc_label = karyotype elif variant_id.strip() != '': gvc_id = '_:' + variant_id.replace(';', '-') gvc_label = varl else: # wildtype? pass # add the karyotype to the gvc. 
# use reference if normal karyotype karyo_rel = self.globaltt['has_variant_part'] if self._is_normal_karyotype(karyotype): karyo_rel = self.globaltt['has_reference_part'] if karyotype_id is not None \ and not self._is_normal_karyotype(karyotype) \ and gvc_id is not None and karyotype_id != gvc_id: geno.addParts(karyotype_id, gvc_id, karyo_rel) if variant_id.strip() != '': # split the variants & add them as part of the genotype # we don't necessarily know their zygosity, # just that they are part of the genotype variant ids # are from OMIM, so prefix as such we assume that the # sequence alts will be defined in OMIM not here # TODO sort the variant_id list, if the omim prefix is # the same, then assume it's the locus make a hashmap # of the omim id to variant id list; # then build the genotype hashmap is also useful for # removing the "genes" from the list of "phenotypes" # will hold gene/locus id to variant list omim_map = {} locus_num = None for var in variant_id.split(';'): # handle omim-style and odd var ids # like 610661.p.R401X mch = re.match(r'(\d+)\.+(.*)', var.strip()) if mch is not None and len(mch.groups()) == 2: (locus_num, var_num) = mch.groups() if locus_num is not None and locus_num not in omim_map: omim_map[locus_num] = [var_num] else: omim_map[locus_num] += [var_num] for omim in omim_map: # gene_id = 'OMIM:' + omim # TODO unused vslc_id = '_:' + '-'.join( [omim + '.' + a for a in omim_map.get(omim)]) vslc_label = varl # we don't really know the zygosity of # the alleles at all. # so the vslcs are just a pot of them model.addIndividualToGraph( vslc_id, vslc_label, self.globaltt['variant single locus complement']) for var in omim_map.get(omim): # this is actually a sequence alt allele1_id = 'OMIM:' + omim + '.' + var geno.addSequenceAlteration(allele1_id, None) # assume that the sa -> var_loc -> gene # is taken care of in OMIM geno.addPartsToVSLC( vslc_id, allele1_id, None, self.globaltt['indeterminate'], self.globaltt['has_variant_part']) if vslc_id != gvc_id: geno.addVSLCtoParent(vslc_id, gvc_id) if affected == 'unaffected': # let's just say that this person is wildtype model.addType(patient_id, self.globaltt['wildtype']) elif genotype_id is None: # make an anonymous genotype id (aka blank node) genotype_id = '_:geno' + catalog_id.strip() # add the gvc if gvc_id is not None: model.addIndividualToGraph( gvc_id, gvc_label, self.globaltt['genomic_variation_complement']) # add the gvc to the genotype if genotype_id is not None: if affected == 'unaffected': rel = self.globaltt['has_reference_part'] else: rel = self.globaltt['has_variant_part'] geno.addParts(gvc_id, genotype_id, rel) if karyotype_id is not None \ and self._is_normal_karyotype(karyotype): if gvc_label is not None and gvc_label != '': genotype_label = '; '.join((gvc_label, karyotype)) elif karyotype is not None: genotype_label = karyotype if genotype_id is None: genotype_id = karyotype_id else: geno.addParts( karyotype_id, genotype_id, self.globaltt['has_reference_part']) else: genotype_label = gvc_label # use the catalog id as the background genotype_label += ' ['+catalog_id.strip()+']' if genotype_id is not None and gvc_id is not None: # only add the genotype if it has some parts geno.addGenotype( genotype_id, genotype_label, self.globaltt['intrinsic_genotype']) geno.addTaxon(taxon, genotype_id) # add that the patient has the genotype # TODO check if the genotype belongs to # the cell line or to the patient graph.addTriple( patient_id, self.globaltt['has_genotype'], genotype_id) else: geno.addTaxon(taxon, 
patient_id)

                # TODO: Add sex/gender (as part of the karyotype?)
                # = row[col.index('')].strip()

                # #############    DEAL WITH THE DISEASES   #############
                omim_num = row[col.index('omim_num')].strip()

                # we associate the disease to the patient
                if affected == 'affected' and omim_num != '':
                    for disease in omim_num.split(';'):
                        if disease is not None and disease != '':
                            # if the omim number is in omim_map,
                            # then it is a gene not a pheno

                            # TEC - another place to use the mimTitle omim
                            # classifier omia & genereviews are using

                            if disease not in omim_map:
                                disease_id = 'OMIM:' + disease.strip()
                                # assume the label is taken care of in OMIM
                                model.addClassToGraph(disease_id, None)

                                # add the association:
                                # the patient has the disease
                                assoc = G2PAssoc(
                                    graph, self.name, patient_id, disease_id)
                                assoc.add_association_to_graph()

                                # this line is a model of this disease
                                # TODO abstract out model into
                                # its own association class?
                                graph.addTriple(
                                    cell_line_id,
                                    self.globaltt['is model of'],
                                    disease_id)
                            else:
                                LOG.info('drop gene %s from disease list', disease)

                # #############    ADD PUBLICATIONS   #############
                pubmed_ids = row[col.index('pubmed_ids')].strip()
                if pubmed_ids != '':
                    for pmid in pubmed_ids.split(';'):
                        pubmed_id = 'PMID:' + pmid.strip()
                        ref = Reference(graph, pubmed_id)
                        ref.setType(self.globaltt['journal article'])
                        ref.addRefToGraph()
                        graph.addTriple(
                            pubmed_id, self.globaltt['mentions'], cell_line_id)

                if not self.test_mode and (
                        limit is not None and line_counter > limit):
                    break
        return
[ "\n This function will process the data files from Coriell.\n We make the assumption that any alleles listed are variants\n (alternates to w.t.)\n\n Triples: (examples)\n\n :NIGMSrepository a CLO_0000008 #repository\n label : NIGMS Human Genetic Cell Repository\n foaf:page\n https://catalog.coriell.org/0/sections/collections/NIGMS/?SsId=8\n\n line_id a CL_0000057, #fibroblast line\n derives_from patient_id\n part_of :NIGMSrepository\n RO:model_of OMIM:disease_id\n\n patient id a foaf:person,\n label: \"fibroblast from patient 12345 with disease X\"\n member_of family_id #what is the right thing here?\n SIO:race EFO:caucasian #subclass of EFO:0001799\n in_taxon NCBITaxon:9606\n dc:description Literal(remark)\n RO:has_phenotype OMIM:disease_id\n GENO:has_genotype genotype_id\n\n family_id a owl:NamedIndividual\n foaf:page\n \"https://catalog.coriell.org/0/Sections/BrowseCatalog/FamilyTypeSubDetail.aspx?PgId=402&fam=2104&coll=GM\"\n\n genotype_id a intrinsic_genotype\n GENO:has_alternate_part allelic_variant_id\n we don't necessarily know much about the genotype,\n other than the allelic variant. also there's the sex here\n\n pub_id mentions cell_line_id\n\n :param raw:\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_collection(self, collection_id, label, page): # ############# BUILD THE CELL LINE REPOSITORY ############# for graph in [self.graph, self.testgraph]: # TODO: How to devise a label for each repository? model = Model(graph) reference = Reference(graph) repo_id = 'CoriellCollection:' + collection_id repo_label = label repo_page = page model.addIndividualToGraph( repo_id, repo_label, self.globaltt['collection']) reference.addPage(repo_id, repo_page) return
[ "\n This function will process the data supplied internally\n about the repository from Coriell.\n\n Triples:\n Repository a ERO:collection\n rdf:label Literal(label)\n foaf:page Literal(page)\n\n :param collection_id:\n :param label:\n :param page:\n :return:\n " ]
Please provide a description of the function:def _is_normal_karyotype(karyotype): is_normal = True if karyotype is not None: karyotype = karyotype.strip() if karyotype not in ['46;XX', '46;XY', '']: is_normal = False return is_normal
[ "\n This will default to true if no karyotype is provided.\n This is assuming human karyotypes.\n :param karyotype:\n :return:\n " ]
Please provide a description of the function:def fetch(self, is_dl_forced=False): # create the connection details for Flybase cxn = { 'host': 'chado.flybase.org', 'database': 'flybase', 'port': 5432, 'user': 'flybase', 'password': 'no password'} self.dataset.setFileAccessUrl( ''.join(('jdbc:postgresql://', cxn['host'], ':', str(cxn['port']), '/', cxn['database'])), is_object_literal=True) # process the tables # self.fetch_from_pgdb(self.tables,cxn,100) # for testing self.fetch_from_pgdb(self.tables, cxn, None, is_dl_forced) for query_map in self.resources: query_fh = open(os.path.join( os.path.dirname(__file__), query_map['query']), 'r') query = query_fh.read() self.fetch_query_from_pgdb( query_map['outfile'], query, None, cxn) # we want to fetch the features, # but just a subset to reduce the processing time # query = \ # "SELECT " \ # " feature_id, dbxref_id, organism_id, name, uniquename, " \ # " null as residues, seqlen, md5checksum, type_id, is_analysis," \ # " timeaccessioned, timelastmodified, is_obsolete " \ # "FROM feature WHERE is_analysis = false" self.fetch_query_from_pgdb( 'feature', self.querys['feature'], None, cxn, None, is_dl_forced) self._get_human_models_file() self.get_files(False) self.dataset.set_version_by_num(self.version_num) return
[ "\n :return:\n\n " ]
Please provide a description of the function:def parse(self, limit=None): if limit is not None: LOG.info("Only parsing first %d rows of each file", limit) LOG.info("Parsing files...") if self.test_only: self.test_mode = True # the following will provide us the hash-lookups self._process_dbxref() self._process_cvterm() self._process_genotypes(limit) self._process_pubs(limit) # do this before environments to get the external ids self._process_environment_cvterm() self._process_environments() self._process_organisms(limit) # must be done before features self._process_organism_dbxref(limit) self._process_features(limit) self._process_phenotype(limit) self._process_phenotype_cvterm() # gets external mappings for features (genes, variants, etc) self._process_feature_dbxref(limit) # do this after organisms to get the right taxonomy self._process_stocks(limit) # figures out types of some of the features self._get_derived_feature_types(limit) # These are the associations amongst the objects above self._process_stockprop(limit) self._process_pub_dbxref(limit) self._process_phendesc(limit) self._process_feature_genotype(limit) self._process_feature_pub(limit) self._process_stock_genotype(limit) self._process_phenstatement(limit) # these are G2P associations self._process_feature_relationship(limit) self._process_disease_models(limit) # TODO add version info from file somehow # (in parser rather than during fetching) LOG.info("Finished parsing.") LOG.info("Loaded %d nodes", len(self.graph)) return
[ "\n We process each of the postgres tables in turn.\n The order of processing is important here, as we build up a hashmap of\n internal vs external identifers (unique keys by type to FB id).\n These include allele, marker (gene), publication, strain, genotype,\n annotation (association), and descriptive notes.\n :param limit: Only parse this many lines of each table\n :return:\n\n " ]
Please provide a description of the function:def _process_genotypes(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'genotype')) LOG.info("building labels for genotypes") geno = Genotype(graph) fly_tax = self.globaltt['Drosophila melanogaster'] with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (genotype_num, uniquename, description, name) = line # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue # add the internal genotype to pub mapping genotype_id = 'MONARCH:FBgeno'+str(genotype_num) self.idhash['genotype'][genotype_num] = genotype_id if description == '': description = None if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode and int(genotype_num) \ not in self.test_keys['genotype']: continue model.addIndividualToGraph( genotype_id, uniquename, self.globaltt['intrinsic_genotype'], description) # we know all genotypes are in flies # FIXME we assume here they are in melanogaster, # but that isn't necessarily true!!! # TODO should the taxon be == genomic background? geno.addTaxon(fly_tax, genotype_id) genotype_iid = self._makeInternalIdentifier( 'genotype', genotype_num) model.addComment( genotype_id, genotype_iid) if name.strip() != '': model.addSynonym(genotype_id, name) return
[ "\n Add the genotype internal id to flybase mapping to the idhashmap.\n Also, add them as individuals to the graph.\n\n Triples created:\n <genotype id> a GENO:intrinsic_genotype\n <genotype id> rdfs:label \"<gvc> [bkgd]\"\n\n :param limit:\n :return:\n " ]
Please provide a description of the function:def _process_stocks(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'stock')) LOG.info("building labels for stocks") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (stock_id, dbxref_id, organism_id, name, uniquename, description, type_id, is_obsolete) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 stock_num = stock_id stock_id = 'FlyBase:'+uniquename self.idhash['stock'][stock_num] = stock_id stock_label = description organism_key = organism_id taxon = self.idhash['organism'][organism_key] # from what i can tell, the dbxrefs are just more FBst, # so no added information vs uniquename if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode \ and int(stock_num) not in self.test_keys['strain']: continue # tax_label = self.label_hash[taxon] # unused # add the tax in case it hasn't been already model.addClassToGraph(taxon) model.addIndividualToGraph(stock_id, stock_label, taxon) if is_obsolete == 't': model.addDeprecatedIndividual(stock_id) return
[ "\n Stock definitions.\n Here we instantiate them as instances of the given taxon.\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_pubs(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, 'pub')) LOG.info("building labels for pubs") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: (pub_id, title, volumetitle, volume, series_name, issue, pyear, pages, miniref, type_id, is_obsolete, publisher, pubplace, uniquename) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue pub_num = pub_id pub_id = 'FlyBase:'+uniquename.strip() self.idhash['publication'][pub_num] = pub_id # TODO figure out the type of pub by type_id if not re.match(r'(FBrf|multi)', uniquename): continue line_counter += 1 reference = Reference(graph, pub_id) if title != '': reference.setTitle(title) if pyear != '': reference.setYear(str(pyear)) if miniref != '': reference.setShortCitation(miniref) if not self.test_mode and limit is not None and line_counter > limit: pass else: if self.test_mode and int(pub_num) not in self.test_keys['pub']: continue if is_obsolete == 't': model.addDeprecatedIndividual(pub_id) else: reference.addRefToGraph() return
[ "\n Flybase publications.\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_environments(self): if self.test_mode: graph = self.testgraph else: graph = self.graph raw = '/'.join((self.rawdir, 'environment')) LOG.info("building labels for environment") env_parts = {} label_map = {} env = Environment(graph) with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (environment_id, uniquename, description) = line # 22 heat sensitive | tetracycline conditional environment_num = environment_id environment_internal_id = self._makeInternalIdentifier( 'environment', environment_num) if environment_num not in self.idhash['environment']: self.idhash['environment'][environment_num] = \ environment_internal_id environment_id = self.idhash['environment'][environment_num] environment_label = uniquename if environment_label == 'unspecified': environment_label += ' environment' env.addEnvironment(environment_id, environment_label) self.label_hash[environment_id] = environment_label # split up the environment into parts # if there's parts, then add them to the hash; # we'll match the components in a second pass components = re.split(r'\|', uniquename) if len(components) > 1: env_parts[environment_id] = components else: label_map[environment_label] = environment_id # ### end loop through file # build the environmental components for eid in env_parts: eid = eid.strip() for e in env_parts[eid]: # search for the environmental component by label env_id = label_map.get(e.strip()) env.addComponentToEnvironment(eid, env_id) return
[ "\n There's only about 30 environments in which the phenotypes\n are recorded.\n There are no externally accessible identifiers for environments,\n so we make anonymous nodes for now.\n Some of the environments are comprised of >1 of the other environments;\n we do some simple parsing to match the strings of the environmental\n labels to the other atomic components.\n\n :return:\n\n " ]
Please provide a description of the function:def _process_features(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'feature')) LOG.info("building labels for features") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (feature_id, dbxref_id, organism_id, name, uniquename, residues, seqlen, md5checksum, type_id, is_analysis, timeaccessioned, timelastmodified) = line feature_key = feature_id if re.search(r'[\|\s\[\]\{\}\\<\>]', uniquename): # some uniquenames have pipes or other nasty chars! # for example: FB||||FBrf0133242|Hugh-u1 feature_id = self._makeInternalIdentifier('feature', feature_key) else: feature_id = 'FlyBase:' + uniquename self.idhash['feature'][feature_key] = feature_id self.feature_types[feature_key] = type_id self.label_hash[feature_id] = name if feature_key not in self.feature_to_organism_hash: self.feature_to_organism_hash[feature_key] = set() self.feature_to_organism_hash[feature_key].add(organism_id) # HACK - FBgn are genes, and therefore classes, # all else be individuals is_gene = False if re.search(r'(FBgn|FBog)', feature_id): self.idhash['gene'][feature_key] = feature_id is_gene = True elif re.search(r'FBa[lb]', feature_id): self.idhash['allele'][feature_key] = feature_id elif re.search(r'FBt[ip]', feature_id): self.idhash['feature'][feature_key] = feature_id if self.test_mode and \ feature_key not in self.test_keys['gene'] and \ feature_key not in self.test_keys['allele'] and \ feature_key not in self.test_keys['feature']: continue # now do something with it! # switch on type_id if name.strip() == '': name = uniquename type_key = type_id type_id = self.idhash['cvterm'][type_key] # skip some features by type types_to_skip = [ 'SO:0000316', # CDS 'SO:0000696', # oligos 'SO:0000358', # polypeptide 'SO:0000234', # transcripts ] type_keys_to_skip = [ 596, # pcr_product 57096, # mature peptide 57097, # signal_peptide 57270, # repeat masker 58210, # alignment 59643, # cDNA_clone 60006, # uncharacterized_change_in_nucleotide_sequence 61351, # oligo 61467, # polypeptide_domain 257, # exon 286, # intron ] organisms_to_skip = [ 2 # computational result ] if type_id in types_to_skip \ or int(type_key) in type_keys_to_skip\ or int(organism_id) in organisms_to_skip: continue line_counter += 1 if int(type_key) == 604: # RNAi_reagent # TODO add other reagents? 
self.idhash['reagent'][feature_key] = feature_id # deal with the taxonomy # only get taxa for features that are actually used in our set tax_internal_id = self._makeInternalIdentifier( 'organism', organism_id) if organism_id not in self.checked_organisms: # will get the NCBITax if necessary tax_id = self._get_organism_id(organism_id) self.checked_organisms.add(organism_id) else: tax_id = self.idhash['organism'][organism_id] tax_label = self.label_hash.get(tax_id) if not re.search(r'FBog', feature_id) and \ re.search(r'Drosophila', tax_label): # make only fly things leaders model.makeLeader(feature_id) if not self.test_mode and limit is not None and line_counter > limit: pass else: if is_gene: model.addClassToGraph(feature_id, name, type_id) graph.addTriple(feature_id, self.globaltt['in taxon'], tax_id) else: if re.search('FBa[lb]', feature_id): type_id = self.globaltt['allele'] model.addIndividualToGraph(feature_id, name, type_id) # stop adding what we do not appreciate # if is_obsolete == 't': # if is_gene: # model.addDeprecatedClass(feature_id) # else: # model.addDeprecatedIndividual(feature_id) # self.deprecated_features.add(feature_key) model.addClassToGraph(tax_id) if tax_id != tax_internal_id: model.addEquivalentClass(tax_id, tax_internal_id) model.addComment( feature_id, self._makeInternalIdentifier('feature', feature_key)) # TODO save checked_organisms fbid to ncbitax mapping to # a local file to speed up subsequent searches return
[ "\n These are all of the genomic features genes, variations,\n transgenes, etc\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_phendesc(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'phendesc')) LOG.info("processing G2P") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: ( phendesc_id, genotype_id, environment_id, description, type_id, pub_id) = line # 1 2 1 Hemizygous males are wild type, homozygous males are sterile. 60466 209729 line_counter += 1 phendesc_key = phendesc_id phendesc_id = self._makeInternalIdentifier('phendesc', phendesc_key) # for now, just attach the description to the genotype genotype_key = genotype_id genotype_id = self.idhash['genotype'][genotype_key] pub_key = pub_id pub_id = self.idhash['publication'][pub_key] environment_key = environment_id environment_id = self.idhash['environment'][environment_key] if self.test_mode and\ int(genotype_key) not in self.test_keys['genotype']: continue # TODO type id ==> ECO??? # just make associations with abnormal phenotype # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # note this is not "abnormal phenotype" if that was what is wanted # "phenotype": "FBcv:0001347" ~~> "Phenotype": "UPHENO:0001001" # but it is a near collision with an existing term phenotype_id = self.localtt['phenotype'] assoc = G2PAssoc(graph, self.name, genotype_id, phenotype_id) assoc.add_source(pub_id) assoc.set_description(description) assoc.set_environment(environment_id) assoc.add_association_to_graph() assoc_id = assoc.get_association_id() model.addComment(assoc_id, phendesc_id) if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n The description of the resulting phenotypes\n with the genotype+environment\n\n :param limit:\n :return:\n " ]
Please provide a description of the function:def _process_feature_pub(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph raw = '/'.join((self.rawdir, 'feature_pub')) LOG.info("processing feature_pub") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (feature_pub_id, feature_id, pub_id) = line # 1440 3175682 62137 # 2 3160606 99159 feature_key = feature_id if self.test_mode and not ( int(feature_key) in self.test_keys['gene'] + self.test_keys['allele'] and int(pub_id) in self.test_keys['pub']): continue if feature_key not in self.idhash['feature']: continue feature_id = self.idhash['feature'][feature_key] pub_key = pub_id pub_id = self.idhash['publication'][pub_key] graph.addTriple(pub_id, self.globaltt['mentions'], feature_id) line_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n The description of the resulting phenotypes\n with the genotype+environment\n\n :param limit:\n :return:\n " ]
Please provide a description of the function:def _process_stock_genotype(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph raw = '/'.join((self.rawdir, 'stock_genotype')) LOG.info("processing stock genotype") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (stock_genotype_id, stock_id, genotype_id) = line stock_key = stock_id stock_id = self.idhash['stock'][stock_key] genotype_key = genotype_id genotype_id = self.idhash['genotype'][genotype_key] if self.test_mode \ and int(genotype_key) not in self.test_keys['genotype']: continue graph.addTriple(stock_id, self.globaltt['has_genotype'], genotype_id) line_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n The genotypes of the stocks.\n\n :param limit:\n :return:\n " ]
Please provide a description of the function:def _process_pub_dbxref(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'pub_dbxref')) LOG.info("processing pub_dbxref") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (pub_dbxref_id, pub_id, dbxref_id, is_current) = line # 49648 43222 395730 t pub_key = pub_id pub_id = self.idhash['publication'][pub_key] if self.test_mode and int(pub_key) not in self.test_keys['pub']: continue # get any dbxrefs for pubs, including pmids and dois dbxref_key = dbxref_id if str(dbxref_key) in self.dbxrefs: dbxrefs = self.dbxrefs[str(dbxref_key)] # pub_dbs = [75, 51, 76, 95, 126] pmid_ids = [50, 77, 275, 286, 347] # flybase_ids = [4] # TODO unused isbn = [75, 51] for d in dbxrefs: dbxref_id = None if int(d) in pmid_ids: if re.match(r'^PMID', dbxrefs[d]): dbxref_id = dbxrefs[d].strip() else: dbxref_id = 'PMID:'+dbxrefs[d].strip() model.makeLeader(dbxref_id) elif int(d) in isbn: dbxref_id = 'ISBN:'+dbxrefs[d].strip() elif int(d) == 161: dbxref_id = 'DOI:'+dbxrefs[d].strip() # elif int(d) == 4: # dbxref_id = 'FlyBase:'+dbxrefs[d].strip() if dbxref_id is not None: reference = Reference( graph, dbxref_id, self.globaltt['publication']) reference.addRefToGraph() model.addSameIndividual(pub_id, dbxref_id) line_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n Xrefs for publications (ie FBrf = PMID)\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_dbxref(self): raw = '/'.join((self.rawdir, 'dbxref')) LOG.info("processing dbxrefs") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (dbxref_id, db_id, accession, version, description, url) = line # dbxref_id db_id accession version description url # 1 2 SO:0000000 "" accession = accession.strip() db_id = db_id.strip() if accession != '' and db_id in self.localtt: # scrub some identifiers here mch = re.match( r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):', accession) if mch: accession = re.sub(mch.group(1)+r'\:', '', accession) elif re.match( r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)', accession): continue elif re.match(r'\:', accession): # starts with a colon accession = re.sub(r'\:', '', accession) elif re.search(r'\s', accession): # skip anything with a space # LOG.debug( # 'dbxref %s accession has a space: %s', dbxref_id, accession) continue if re.match(r'http', accession): did = accession else: prefix = self.localtt[db_id] did = ':'.join((prefix, accession)) if re.search(r'\:', accession) and prefix != 'DOI': LOG.warning('id %s may be malformed; skipping', did) self.dbxrefs[dbxref_id] = {db_id: did} elif url != '': self.dbxrefs[dbxref_id] = {db_id: url.strip()} else: continue # the following are some special cases that we scrub if int(db_id) == 2 and accession.strip() == 'transgenic_transposon': # transgenic_transposable_element self.dbxrefs[dbxref_id] = { db_id: self.globaltt['transgenic_transposable_element']} line_counter += 1 return
[ "\n We bring in the dbxref identifiers and store them in a hashmap for\n lookup in other functions.\n Note that some dbxrefs aren't mapped to identifiers.\n For example, 5004018 is mapped to a string,\n \"endosome & imaginal disc epithelial cell | somatic clone...\"\n In those cases, there just isn't a dbxref that's used\n when referencing with a cvterm; it'll just use the internal key.\n\n :return:\n\n " ]
Please provide a description of the function:def _process_phenotype(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'phenotype')) LOG.info("processing phenotype") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (phenotype_id, uniquename, observable_id, attr_id, value, cvalue_id, assay_id) = line # 8505 unspecified # 20142 mesothoracic leg disc | somatic clone 87719 60468 60468 60468 # 8507 sex comb | ectopic 88877 60468 60468 60468 # 8508 tarsal segment 83664 60468 60468 60468 # 18404 oocyte | oogenesis stage S9 86769 60468 60468 60468 # for now make these as phenotypic classes # will need to dbxref at some point phenotype_key = phenotype_id phenotype_id = None phenotype_internal_id = self._makeInternalIdentifier( 'phenotype', phenotype_key) phenotype_label = None self.label_hash[phenotype_internal_id] = uniquename cvterm_id = None if observable_id != '' and int(observable_id) == 60468: # undefined - typically these are already phenotypes if cvalue_id in self.idhash['cvterm']: cvterm_id = self.idhash['cvterm'][cvalue_id] phenotype_id = self.idhash['cvterm'][cvalue_id] elif observable_id in self.idhash['cvterm']: # observations to anatomical classes cvterm_id = self.idhash['cvterm'][observable_id] phenotype_id = self.idhash['cvterm'][observable_id] + 'PHENOTYPE' if cvterm_id is not None and cvterm_id in self.label_hash: phenotype_label = self.label_hash[cvterm_id] phenotype_label += ' phenotype' self.label_hash[phenotype_id] = phenotype_label else: LOG.info('cvtermid=%s not in label_hash', cvterm_id) else: LOG.info( "No observable id or label for %s: %s", phenotype_key, uniquename) # TODO store this composite phenotype in some way # as a proper class definition? self.idhash['phenotype'][phenotype_key] = phenotype_id # assay_id is currently only "undefined" key=60468 if not self.test_mode and\ limit is not None and line_counter > limit: pass else: if phenotype_id is not None: # assume that these fit into the phenotypic uberpheno # elsewhere model.addClassToGraph(phenotype_id, phenotype_label) line_counter += 1 return
[ "\n Get the phenotypes, and declare the classes.\n If the \"observable\" is \"unspecified\", then we assign the phenotype to\n the \"cvalue\" id; otherwise we convert the phenotype into a\n uberpheno-style identifier, simply based on the anatomical part that's\n affected...that is listed as the observable_id, concatenated with\n the literal \"PHENOTYPE\"\n\n Note that some of the phenotypes no not have a dbxref to a FBcv;\n for these cases it will make a node with an anonymous node with an\n internal id like, \"_fbcvtermkey100920PHENOTYPE\". This is awkward,\n but not sure how else to construct identifiers.\n Maybe they should be fed back into Upheno and then leveraged by FB?\n\n Note that assay_id is the same for all current items,\n so we do nothing with this.\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_phenstatement(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'phenstatement')) LOG.info("processing phenstatement") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (phenstatement_id, genotype_id, environment_id, phenotype_id, type_id, pub_id) = line # 168549 166695 1 8507 60468 151256 # 168550 166695 1 8508 60468 151256 # 168551 166696 1 8509 60468 151256 # 168552 166696 1 8510 60468 151256 line_counter += 1 phenstatement_key = phenstatement_id phenstatement_id = self._makeInternalIdentifier( 'phenstatement', phenstatement_key) genotype_key = genotype_id if self.test_mode and \ int(genotype_key) not in self.test_keys['genotype']: continue genotype_id = self.idhash['genotype'][genotype_key] environment_key = environment_id environment_id = self.idhash['environment'][environment_key] phenotype_key = phenotype_id phenotype_internal_id = self._makeInternalIdentifier( 'phenotype', phenotype_key) # TEMP phenotype_internal_label = self.label_hash[ phenotype_internal_id] phenotype_id = self.idhash['phenotype'][phenotype_key] pub_key = pub_id pub_id = self.idhash['publication'][pub_key] # figure out if there is a relevant stage assoc = G2PAssoc(graph, self.name, genotype_id, phenotype_id) if phenotype_id in self.phenocv: stages = set( s for s in self.phenocv[phenotype_id] if re.match(r'FBdv', s)) if len(stages) == 1: s = stages.pop() assoc.set_stage(s, s) elif len(stages) > 1: LOG.warning( "There's more than one stage specified per " "phenotype. I don't know what to do. %s", str(stages)) non_stage_ids = self.phenocv[phenotype_id] - stages LOG.debug('Other non-stage bits: %s', str(non_stage_ids)) # TODO do something with the other parts # of a pheno-cv relationship assoc.set_environment(environment_id) # TODO check remove unspecified environments? assoc.add_source(pub_id) assoc.add_association_to_graph() assoc_id = assoc.get_association_id() model.addComment(assoc_id, phenstatement_id) model.addDescription(assoc_id, phenotype_internal_label) if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n The phenstatements are the genotype-to-phenotype associations,\n in the context of an environment.\n These are also curated to a publication. So we make oban associations,\n adding the pubs as a source. We additionally add the internal key as\n a comment for tracking purposes.\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_phenotype_cvterm(self):
        line_counter = 0
        raw = '/'.join((self.rawdir, 'phenotype_cvterm'))
        LOG.info("processing phenotype cvterm mappings")

        with open(raw, 'r') as f:
            f.readline()  # read the header row; skip
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            for line in filereader:
                line_counter += 1
                (phenotype_cvterm_id, phenotype_id, cvterm_id, rank) = line
                # 4532 8507 60793 0
                # 4533 8513 60830 0

                # add the cvterm ids to the phenotype-to-cvterm lookup
                phenotype_key = phenotype_id
                cvterm_key = cvterm_id
                phenotype_id = self.idhash['phenotype'][phenotype_key]
                if cvterm_key in self.idhash['cvterm']:
                    cvterm_id = self.idhash['cvterm'][cvterm_key]
                    if phenotype_id not in self.phenocv:
                        self.phenocv[phenotype_id] = set()
                    self.phenocv[phenotype_id].add(cvterm_id)
                else:
                    LOG.info("Not storing the cvterm info for %s", cvterm_key)

        return
[ "\n These are the qualifiers for the phenotype location itself.\n But are just the qualifiers.\n The actual \"observable\" part of the phenotype is only in\n the phenotype table. These get added to a lookup variable used to\n augment a phenotype association statement.\n :return:\n\n " ]
Please provide a description of the function:def _process_cvterm(self): line_counter = 0 raw = '/'.join((self.rawdir, 'cvterm')) LOG.info("processing cvterms") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (cvterm_id, cv_id, definition, dbxref_id, is_obsolete, is_relationshiptype, name) = line # 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript # 28 5 1663309 0 0 synonym # 455 6 1665920 0 0 tmRNA # not sure the following is necessary # cv_prefixes = { # 6 : 'SO', # 20: 'FBcv', # 28: 'GO', # 29: 'GO', # 30: 'GO', # 31: 'FBcv', # not actually FBcv - I think FBbt. # 32: 'FBdv', # 37: 'GO', # these are relationships # 73: 'DOID' # } # if int(cv_id) not in cv_prefixes: # continue cvterm_key = cvterm_id cvterm_id = self._makeInternalIdentifier('cvterm', cvterm_key) self.label_hash[cvterm_id] = name self.idhash['cvterm'][cvterm_key] = cvterm_id # look up the dbxref_id for the cvterm # hopefully it's one-to-one dbxrefs = self.dbxrefs.get(dbxref_id) if dbxrefs is not None: if len(dbxrefs) > 1: LOG.info( ">1 dbxref for this cvterm (%s: %s): %s", str(cvterm_id), name, dbxrefs.values()) elif len(dbxrefs) == 1: # replace the cvterm with # the dbxref (external) identifier did = dbxrefs.popitem()[1] # get the value self.idhash['cvterm'][cvterm_key] = did # also add the label to the dbxref self.label_hash[did] = name return
[ "\n CVterms are the internal identifiers for any controlled vocab\n or ontology term. Many are xrefd to actual ontologies. The actual\n external id is stored in the dbxref table, which we place into\n the internal hashmap for lookup with the cvterm id. The name of\n the external term is stored in the \"name\" element of this table, and\n we add that to the label hashmap for lookup elsewhere\n\n :return:\n\n " ]
Please provide a description of the function:def _process_environment_cvterm(self): line_counter = 0 raw = '/'.join((self.rawdir, 'environment_cvterm')) LOG.info("processing environment to cvterm mappings") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: line_counter += 1 (environment_cvterm_id, environment_id, cvterm_id) = line # 1 1 60468 environment_key = environment_id cvterm_key = cvterm_id cvterm_id = self.idhash['cvterm'][cvterm_key] # look up the dbxref_id for the cvterm # hopefully it's one-to-one self.idhash['environment'][environment_key] = cvterm_id return
[ "\n This is the mapping between the internal environment id\n and the external ones; here we map the internal environment id to\n the external one in the hashmap.\n :return:\n\n " ]
Please provide a description of the function:def _process_feature_dbxref(self, limit):
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        model = Model(graph)
        line_counter = 0
        raw = '/'.join((self.rawdir, 'feature_dbxref'))
        LOG.info("processing feature_dbxref mappings")
        with open(raw, 'r') as f:
            f.readline()  # read the header row; skip
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            for line in filereader:
                (feature_dbxref_id, feature_id, dbxref_id, is_current) = line

                # 431890 3091292 596211 t
                # 2 9 55044 t
                # 3 9 55045 t
                # 437595 4551668 277309 t
                # 437596 4551662 277307 t

                if is_current == 'f':
                    # not sure what to do with it?
                    continue

                feature_key = feature_id

                if self.test_mode and int(feature_key) not in \
                        self.test_keys['gene'] + self.test_keys['allele']:
                    continue

                if feature_key not in self.idhash['feature']:
                    # some features may not be found in the hash
                    # if they are "analysis features"
                    # LOG.debug("Feature %s not found in hash", feature_key)
                    continue
                feature_id = self.idhash['feature'][feature_key]

                dbxref_key = dbxref_id
                dbxrefs = self.dbxrefs.get(dbxref_key)
                if dbxrefs is not None:
                    for d in dbxrefs:
                        # need to filter based on db ?
                        # TODO make other species' identifiers primary??
                        # instead of flybase?
                        did = dbxrefs.get(d)
                        if did.endswith('&class=protein'):
                            did = did[:-len('&class=protein')]
                        # don't make something sameAs itself
                        if did == feature_id:
                            continue
                        dlabel = self.label_hash.get(did)
                        if re.search(r'FB(gn|og)', feature_id):
                            # only want to add equivalences for fly things
                            if not re.match(r'OMIM', did):
                                # these are only omim diseases, not genes;
                                # we shouldn't be adding these here anyway
                                # model.addClassToGraph(did, dlabel)
                                # model.addXref(feature_id, did)
                                pass
                        elif did is not None and dlabel is not None \
                                and feature_id is not None:
                            model.addIndividualToGraph(did, dlabel)
                            model.addXref(feature_id, did)
                        line_counter += 1

                if not self.test_mode \
                        and limit is not None and line_counter > limit:
                    break

                # FIXME - some flybase genes are xrefed to OMIM diseases!!!!!!
                # for example,
                # FBog0000375495 xref to omim 601181 (gene)
                # and 608033 (phenotype)

        return
[ "\n This is the mapping between the flybase features and external\n repositories. Generally we want to leave the flybase feature id\n as the primary identifier. But we need to make the equivalences/sameAs.\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _get_derived_feature_types(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'feature_relationship')) LOG.info("determining some feature types based on relationships") with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: (feature_relationship_id, subject_id, object_id, name, rank, value) = line if name == 'derived_tp_assoc_alleles': # derived_tp_assoc_alleles self.feature_types[subject_id] = \ self.globaltt['transgenic_insertion'] sid = self.idhash['allele'].get(subject_id) model.addType(sid, self.feature_types[subject_id]) elif name == 'derived_sf_assoc_alleles': # only take the derived_sf_assoc_alleles # my subject is a reagent_targeted_gene # my object is the dsRNA self.feature_types[subject_id] = \ self.globaltt['reagent_targeted_gene'] sid = self.idhash['allele'].get(subject_id) model.addType(sid, self.feature_types[subject_id]) else: continue return
[ "\n Make a pass through the feature table in order to properly type\n the FBal (allele) features, which are derived either from other\n sequence features (which can be things like RNAi products)\n or transgenic-transposons. We'll save the allele type into a hasmap.\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_organisms(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'organism')) LOG.info("processing organisms") line_counter = 0 with open(raw, 'r') as f: filereader = csv.reader(f, delimiter='\t', quotechar='\"') f.readline() # read the header row; skip for line in filereader: (organism_id, abbreviation, genus, species, common_name, comment) = line # 1 Dmel Drosophila melanogaster fruit fly # 2 Comp Computational result line_counter += 1 tax_internal_id = self._makeInternalIdentifier('organism', organism_id) tax_label = ' '.join((genus, species)) tax_id = tax_internal_id self.idhash['organism'][organism_id] = tax_id self.label_hash[tax_id] = tax_label # we won't actually add the organism to the graph, # unless we actually use it therefore it is added outside of # this function if self.test_mode and int(organism_id) not in self.test_keys['organism']: continue if not self.test_mode and limit is not None and line_counter > limit: pass else: model.addClassToGraph(tax_id) for s in [common_name, abbreviation]: if s is not None and s.strip() != '': model.addSynonym(tax_id, s) model.addComment(tax_id, tax_internal_id) return
[ "\n The internal identifiers for the organisms in flybase\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_organism_dbxref(self, limit):
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        model = Model(graph)
        line_counter = 0
        raw = '/'.join((self.rawdir, 'organism_dbxref'))
        LOG.info("processing organism dbxref mappings")
        with open(raw, 'r') as f:
            f.readline()  # read the header row; skip
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            for line in filereader:
                (organism_dbxref_id, organism_id, dbxref_id, is_current) = line

                if self.test_mode \
                        and int(organism_id) not in self.test_keys['organism']:
                    continue

                organism_key = organism_id
                if organism_key not in self.idhash['organism']:
                    continue
                organism_id = self.idhash['organism'][organism_key]

                dbxref_key = dbxref_id
                dbxrefs = self.dbxrefs.get(dbxref_key)
                if dbxrefs is not None:
                    for d in dbxrefs:
                        did = dbxrefs.get(d)
                        # don't make something sameAs itself
                        if did == organism_id:
                            continue
                        dlabel = self.label_hash.get(did)
                        model.addXref(organism_id, did)
                        if re.match(r'NCBITaxon', did):
                            model.makeLeader(did)
                        else:
                            model.addIndividualToGraph(did, dlabel)
                        line_counter += 1

                if not self.test_mode and limit is not None and line_counter > limit:
                    break

        return
[ "\n This is the mapping between the flybase organisms and\n external identifier \"FBsp\". We will want to use the NCBITaxon as\n the primary, if possible, but will default to a blank node/internal id\n if that is all that is available\n But we need to make the equivalences/sameAs.\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_disease_models(self, limit):
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        raw = '/'.join((self.rawdir, self.files['disease_models']['file']))
        LOG.info("processing disease models")

        line_counter = 0
        geno = Genotype(graph)
        fly_taxon = self.globaltt["Drosophila melanogaster"]

        with gzip.open(raw, 'rb') as f:
            filereader = csv.reader(
                io.TextIOWrapper(f, newline=""), delimiter='\t', quotechar='\"')
            for line in filereader:
                # skip comments
                if re.match(r'#', ''.join(line)) or ''.join(line) == '':
                    continue
                (allele_id, allele_symbol, qualifier, doid_label, doid_id,
                 evidence_or_interacting_allele, pub_id) = line
                line_counter += 1

                if self.test_mode and self.test_ids['disease'] is not None \
                        and doid_id not in self.test_ids['disease']:
                    continue

                rel = None
                allele_id = 'FlyBase:' + allele_id
                if qualifier == 'model of':
                    rel = self.globaltt['is model of']
                else:
                    # TODO ameliorates, exacerbates, and DOES NOT *
                    continue

                animal_id = geno.make_experimental_model_with_genotype(
                    allele_id, allele_symbol, fly_taxon, 'fly')

                assoc = G2PAssoc(graph, self.name, animal_id, doid_id, rel)
                if pub_id != '':
                    pub_id = 'FlyBase:'+pub_id
                    assoc.add_source(pub_id)
                if evidence_or_interacting_allele == 'inferred from mutant phenotype':
                    evidence_id = self.globaltt['mutant phenotype evidence']
                    assoc.add_evidence(evidence_id)
                else:
                    assoc.set_description(evidence_or_interacting_allele)

                assoc.add_association_to_graph()

                if not self.test_mode and limit is not None and line_counter > limit:
                    break

        return
[ "\n Here we make associations between a disease and the supplied \"model\".\n In this case it's an allele.\n FIXME consider changing this... are alleles really models?\n Perhaps map these alleles into actual animals/strains or genotypes?\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _process_stockprop(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) raw = '/'.join((self.rawdir, 'stockprop')) LOG.info("processing stock-image depictions") line_counter = 0 with open(raw, 'r') as f: f.readline() # read the header row; skip filereader = csv.reader(f, delimiter='\t', quotechar='\"') for line in filereader: # skip comments if re.match(r'#', ''.join(line)) or ''.join(line) == '': continue (stockprop_id, stock_id, cvterm, value, rank) = line line_counter += 1 if self.test_mode and self.test_keys['strain'] is not None \ and int(stock_id) not in self.test_keys['strain']: continue sid = self.idhash['stock'].get(stock_id) # linked_image if cvterm == "linked_image" and re.match(r'FBim', value): # FIXME make sure this image url is perm image_url = 'http://flybase.org/tmp-shared/reports/'+value+'.png' if sid is not None: model.addDepiction(sid, image_url) # TODO should this be a Reference object? # TODO add the stockprop_pub table when there is data to pull if not self.test_mode and limit is not None and line_counter > limit: break return
[ "\n This will add depiction association between a strain and\n images hosted at flybase.\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _get_human_models_file(self):
        base_url = 'ftp.flybase.net'
        human_disease_dir = 'releases/current/precomputed_files/human_disease'
        from ftplib import FTP
        ftp = FTP(base_url)  # connect to host
        ftp.login()
        ftp.cwd(human_disease_dir)
        remote_files = ftp.nlst()  # get list of files
        ftp.quit()
        f = None
        f_list = [
            i for i, x in enumerate(remote_files)
            if re.match(r'allele_human_disease_model', x)]
        if len(f_list) == 0:
            LOG.error("Can't find the human_disease_model file")
        elif len(f_list) > 1:
            LOG.error(
                "There's >1 human disease model file, " +
                "and I don't know which to choose: %s", str(remote_files))
        else:
            f = remote_files[f_list[0]]

        if f is not None:
            # cat the url together
            file_url = '/'.join(('ftp:/', base_url, human_disease_dir, f))
            self.files['disease_models']['url'] = file_url

            # while we're at it, set the version...
            m = re.match(
                r'allele_human_disease_model_data_fb_(\d+_\d+).tsv.gz', f)
            # allele_human_disease_model_data_fb_2015_03.tsv.gz
            if m:
                ver = 'FB' + m.group(1)
                self.version_num = ver

        return
[ "\n This function uses ftp to probe the FTP site to get the name of\n the current human_models file, and sets it in the files object.\n :return:\n\n " ]
Please provide a description of the function:def _get_gene_info(self, limit):
        src_key = 'gene_info'
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph

        geno = Genotype(graph)
        model = Model(graph)

        # not unzipping the file
        LOG.info("Processing 'Gene Info' records")
        line_counter = 0
        gene_info = '/'.join((self.rawdir, self.files[src_key]['file']))
        LOG.info("FILE: %s", gene_info)

        # Add taxa and genome classes for those in our filter
        band_regex = re.compile(r'[0-9A-Z]+[pq](\d+)?(\.\d+)?$')
        for tax_num in self.tax_ids:
            tax_id = ':'.join(('NCBITaxon', tax_num))
            # tax label can get added elsewhere
            geno.addGenome(tax_id, tax_num)
            # label added elsewhere
            model.addClassToGraph(tax_id, None)
        col = self.files[src_key]['columns']
        with gzip.open(gene_info, 'rb') as tsv:
            row = tsv.readline().decode().strip().split('\t')
            row[0] = row[0][1:]  # strip comment
            if col != row:
                LOG.info(
                    '%s\nExpected Headers:\t%s\nReceived Headers:\t%s\n',
                    src_key, col, row)
                LOG.info(set(col) - set(row))

            for line in tsv:
                line = line.strip()
                line_counter += 1
                if line.startswith(b'#'):  # skip comments
                    continue
                row = line.decode().strip().split('\t')

                # ##set filter=None in init if you don't want to have a filter
                # if self.id_filter is not None:
                #     if ((self.id_filter == 'taxids' and \
                #          (tax_num not in self.tax_ids))
                #             or (self.id_filter == 'geneids' and \
                #                 (int(gene_num) not in self.gene_ids))):
                #         continue
                # #### end filter

                gene_num = row[col.index('GeneID')]

                if self.test_mode and int(gene_num) not in self.gene_ids:
                    continue

                tax_num = row[col.index('tax_id')]
                if not self.test_mode and tax_num not in self.tax_ids:
                    continue

                tax_id = ':'.join(('NCBITaxon', tax_num))
                gene_id = ':'.join(('NCBIGene', gene_num))
                gtype = row[col.index('type_of_gene')].strip()
                gene_type_id = self.resolve(gtype)
                symbol = row[col.index('Symbol')]
                if symbol == 'NEWENTRY':
                    label = None
                else:
                    label = symbol
                # sequence feature, not a gene
                if gene_type_id == self.globaltt['sequence_feature']:
                    self.class_or_indiv[gene_id] = 'I'
                else:
                    self.class_or_indiv[gene_id] = 'C'

                if not self.test_mode and limit is not None and line_counter > limit:
                    continue

                desc = row[col.index('description')]
                if self.class_or_indiv[gene_id] == 'C':
                    model.addClassToGraph(gene_id, label, gene_type_id, desc)
                    # NCBI will be the default leader (for non mods),
                    # so we will not add the leader designation here.
                else:
                    model.addIndividualToGraph(gene_id, label, gene_type_id, desc)
                    # in this case, they aren't genes.
# so we want someone else to be the leader name = row[col.index('Full_name_from_nomenclature_authority')] if name != '-': model.addSynonym(gene_id, name) synonyms = row[col.index('Synonyms')].strip() if synonyms != '-': for syn in synonyms.split('|'): model.addSynonym( gene_id, syn.strip(), model.globaltt['has_related_synonym']) other_designations = row[col.index('Other_designations')].strip() if other_designations != '-': for syn in other_designations.split('|'): model.addSynonym( gene_id, syn.strip(), model.globaltt['has_related_synonym']) dbxrefs = row[col.index('dbXrefs')].strip() if dbxrefs != '-': self._add_gene_equivalencies(dbxrefs, gene_id, tax_id) # edge cases of id | symbol | chr | map_loc: # 263 AMD1P2 X|Y with Xq28 and Yq12 # 438 ASMT X|Y with Xp22.3 or Yp11.3 # in PAR # no idea why there's two bands listed - possibly 2 assemblies # 419 ART3 4 with 4q21.1|4p15.1-p14 # 28227 PPP2R3B X|Y Xp22.33; Yp11.3 # in PAR # this is of "unknown" type == susceptibility # 619538 OMS 10|19|3 10q26.3;19q13.42-q13.43;3p25.3 # unlocated scaffold # 101928066 LOC101928066 1|Un -\ # mouse --> 2C3 # 11435 Chrna1 2 2 C3|2 43.76 cM # mouse --> 11B1.1 # 11548 Adra1b 11 11 B1.1|11 25.81 cM # 11717 Ampd3 7 7 57.85 cM|7 E2-E3 # mouse # 14421 B4galnt1 10 10 D3|10 74.5 cM # mouse # 323212 wu:fb92e12 19|20 - # fish # 323368 ints10 6|18 - # fish # 323666 wu:fc06e02 11|23 - # fish # feel that the chr placement can't be trusted in this table # when there is > 1 listed # with the exception of human X|Y, # we will only take those that align to one chr # FIXME remove the chr mapping below # when we pull in the genomic coords chrom = row[col.index('chromosome')].strip() if chrom != '-' and chrom != '': if re.search(r'\|', chrom) and chrom not in ['X|Y', 'X; Y']: # means that there's uncertainty in the mapping. # so skip it # TODO we'll need to figure out how to deal with # >1 loc mapping LOG.info( '%s is non-uniquely mapped to %s. Skipping for now.', gene_id, chrom) continue # X|Y Xp22.33;Yp11.3 # if(not re.match( # r'(\d+|(MT)|[XY]|(Un)$',str(chr).strip())): # print('odd chr=',str(chr)) if chrom == 'X; Y': chrom = 'X|Y' # rewrite the PAR regions for processing # do this in a loop to allow PAR regions like X|Y for chromosome in re.split(r'\|', chrom): # assume that the chromosome label is added elsewhere geno.addChromosomeClass(chromosome, tax_id, None) mychrom = makeChromID(chromosome, tax_num, 'CHR') # temporarily use taxnum for the disambiguating label mychrom_syn = makeChromLabel(chromosome, tax_num) model.addSynonym(mychrom, mychrom_syn) map_loc = row[col.index('map_location')].strip() band_match = re.match(band_regex, map_loc) if band_match is not None and len(band_match.groups()) > 0: # if tax_num != '9606': # continue # this matches the regular kind of chrs, # so make that kind of band # not sure why this matches? 
# chrX|Y or 10090chr12|Un" # TODO we probably need a different regex # per organism # the maploc_id already has the numeric chromosome # in it, strip it first bid = re.sub(r'^' + chromosome, '', map_loc) # the generic location (no coordinates) maploc_id = makeChromID(chromosome + bid, tax_num, 'CHR') # print(map_loc,'-->',bid,'-->',maploc_id) # Assume it's type will be added elsewhere band = Feature(graph, maploc_id, None, None) band.addFeatureToGraph() # add the band as the containing feature graph.addTriple( gene_id, self.globaltt['is subsequence of'], maploc_id) else: # TODO handle these cases: examples are: # 15q11-q22,Xp21.2-p11.23,15q22-qter,10q11.1-q24, # 12p13.3-p13.2|12p13-p12,1p13.3|1p21.3-p13.1, # 12cen-q21,22q13.3|22q13.3 LOG.debug( 'not regular band pattern for %s: %s', gene_id, map_loc) # add the gene as a subsequence of the chromosome graph.addTriple( gene_id, self.globaltt['is subsequence of'], mychrom) geno.addTaxon(tax_id, gene_id) return
[ "\n Currently loops through the gene_info file and\n creates the genes as classes, typed with SO. It will add their label,\n any alternate labels as synonyms, alternate ids as equivlaent classes.\n HPRDs get added as protein products.\n The chromosome and chr band get added as blank node regions,\n and the gene is faldo:located\n on the chr band.\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _add_gene_equivalencies(self, xrefs, gene_id, taxon): clique_map = self.open_and_parse_yaml(self.resources['clique_leader']) if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) filter_out = ['Vega', 'IMGT/GENE-DB', 'Araport'] # deal with the dbxrefs # MIM:614444|HGNC:HGNC:16851|Ensembl:ENSG00000136828|HPRD:11479|Vega:OTTHUMG00000020696 for dbxref in xrefs.strip().split('|'): prefix = ':'.join(dbxref.split(':')[:-1]).strip() if prefix in self.localtt: prefix = self.localtt[prefix] dbxref_curie = ':'.join((prefix, dbxref.split(':')[-1])) if dbxref_curie is not None and prefix != '': if prefix == 'HPRD': # proteins are not == genes. model.addTriple( gene_id, self.globaltt['has gene product'], dbxref_curie) continue # skip some of these for now based on curie prefix if prefix in filter_out: continue if prefix == 'ENSEMBL': model.addXref(gene_id, dbxref_curie) if prefix == 'OMIM': if DipperUtil.is_omim_disease(dbxref_curie): continue try: if self.class_or_indiv.get(gene_id) == 'C': model.addEquivalentClass(gene_id, dbxref_curie) if taxon in clique_map: if clique_map[taxon] == prefix: model.makeLeader(dbxref_curie) elif clique_map[taxon] == gene_id.split(':')[0]: model.makeLeader(gene_id) else: model.addSameIndividual(gene_id, dbxref_curie) except AssertionError as err: LOG.warning("Error parsing %s: %s", gene_id, err) return
[ "\n Add equivalentClass and sameAs relationships\n\n Uses external resource map located in\n /resources/clique_leader.yaml to determine\n if an NCBITaxon ID space is a clique leader\n " ]
Please provide a description of the function:def _get_gene_history(self, limit): src_key = 'gene_history' if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing Gene records") line_counter = 0 myfile = '/'.join((self.rawdir, self.files[src_key]['file'])) LOG.info("FILE: %s", myfile) col = self.files[src_key]['columns'] with gzip.open(myfile, 'rb') as tsv: row = tsv.readline().decode().strip().split('\t') row[0] = row[0][1:] # strip comment if col != row: LOG.info( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n', src_key, col, row) for line in tsv: # skip comments row = line.decode().strip().split('\t') if row[0][0] == '#': continue # (tax_num, gene_num, discontinued_num, discontinued_symbol, # discontinued_date) = line.split('\t') # set filter=None in init if you don't want to have a filter # if self.id_filter is not None: # if ((self.id_filter == 'taxids' and \ # (int(tax_num) not in self.tax_ids)) # or (self.id_filter == 'geneids' and \ # (int(gene_num) not in self.gene_ids))): # continue # end filter gene_num = row[col.index('GeneID')].strip() discontinued_num = row[col.index('Discontinued_GeneID')].strip() if gene_num == '-' or discontinued_num == '-': continue if self.test_mode and int(gene_num) not in self.gene_ids: continue tax_num = row[col.index('tax_id')].strip() if not self.test_mode and tax_num not in self.tax_ids: continue line_counter += 1 gene_id = ':'.join(('NCBIGene', gene_num)) discontinued_gene_id = ':'.join(('NCBIGene', discontinued_num)) discontinued_symbol = row[col.index('Discontinued_Symbol')].strip() # add the two genes if self.class_or_indiv.get(gene_id) == 'C': model.addClassToGraph(gene_id, None) model.addClassToGraph( discontinued_gene_id, discontinued_symbol) # add the new gene id to replace the old gene id model.addDeprecatedClass(discontinued_gene_id, [gene_id]) else: model.addIndividualToGraph(gene_id, None) model.addIndividualToGraph( discontinued_gene_id, discontinued_symbol) model.addDeprecatedIndividual(discontinued_gene_id, [gene_id]) # also add the old symbol as a synonym of the new gene model.addSynonym(gene_id, discontinued_symbol) if not self.test_mode and (limit is not None and line_counter > limit): break return
[ "\n Loops through the gene_history file and adds the old gene ids\n as deprecated classes, where the new gene id is the replacement for it.\n The old gene symbol is added as a synonym to the gene.\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _get_gene2pubmed(self, limit): src_key = 'gene2pubmed' if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) LOG.info("Processing Gene records") line_counter = 0 myfile = '/'.join((self.rawdir, self.files[src_key]['file'])) LOG.info("FILE: %s", myfile) assoc_counter = 0 col = self.files[src_key]['columns'] with gzip.open(myfile, 'rb') as tsv: row = tsv.readline().decode().strip().split('\t') row[0] = row[0][1:] # strip comment if col != row: LOG.info( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n', src_key, col, row) for line in tsv: line_counter += 1 # skip comments row = line.decode().strip().split('\t') if row[0][0] == '#': continue # (tax_num, gene_num, pubmed_num) = line.split('\t') # ## set id_filter=None in init if you don't want to have a filter # if self.id_filter is not None: # if ((self.id_filter == 'taxids' and \ # (int(tax_num) not in self.tax_ids)) # or (self.id_filter == 'geneids' and \ # (int(gene_num) not in self.gene_ids))): # continue # #### end filter gene_num = row[col.index('GeneID')].strip() if self.test_mode and int(gene_num) not in self.gene_ids: continue tax_num = row[col.index('tax_id')].strip() if not self.test_mode and tax_num not in self.tax_ids: continue pubmed_num = row[col.index('PubMed_ID')].strip() if gene_num == '-' or pubmed_num == '-': continue gene_id = ':'.join(('NCBIGene', gene_num)) pubmed_id = ':'.join(('PMID', pubmed_num)) if self.class_or_indiv.get(gene_id) == 'C': model.addClassToGraph(gene_id, None) else: model.addIndividualToGraph(gene_id, None) # add the publication as a NamedIndividual # add type publication model.addIndividualToGraph(pubmed_id, None, None) reference = Reference( graph, pubmed_id, self.globaltt['journal article']) reference.addRefToGraph() graph.addTriple( pubmed_id, self.globaltt['is_about'], gene_id) assoc_counter += 1 if not self.test_mode and limit is not None and line_counter > limit: break LOG.info( "Processed %d pub-gene associations", assoc_counter) return
[ "\n Loops through the gene2pubmed file and adds a simple triple to say\n that a given publication is_about a gene.\n Publications are added as NamedIndividuals.\n\n These are filtered on the taxon.\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def add_orthologs_by_gene_group(self, graph, gene_ids): src_key = 'gene_group' LOG.info("getting gene groups") src_file = '/'.join((self.rawdir, self.files[src_key]['file'])) found_counter = 0 # because many of the orthologous groups are grouped by human gene, # we need to do this by generating two-way hash # group_id => orthologs # ortholog id => group # this will be the fastest approach, though not memory-efficient. geno = Genotype(graph) model = Model(graph) group_to_orthology = {} gene_to_group = {} gene_to_taxon = {} col = self.files[src_key]['columns'] with gzip.open(src_file, 'rb') as tsv: # none of the other files use csv ... # tsv = csv.reader( # io.TextIOWrapper(tsvfile, newline=""), delimiter='\t', quotechar='\"') # row = tsv.readline() row = tsv.readline().decode().strip().split('\t') row[0] = row[0][1:] # strip octothorp if col != row: LOG.info( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n', src_key, col, row) for row in tsv: row = row.decode().strip().split('\t') tax_a = row[col.index('tax_id')] gene_a = row[col.index('GeneID')] rel = row[col.index('relationship')] tax_b = row[col.index('Other_tax_id')] gene_b = row[col.index('Other_GeneID')] if rel != 'Ortholog': continue if gene_a not in group_to_orthology: group_to_orthology[gene_a] = set() group_to_orthology[gene_a].add(gene_b) if gene_b not in gene_to_group: gene_to_group[gene_b] = set() gene_to_group[gene_b].add(gene_a) gene_to_taxon[gene_a] = tax_a gene_to_taxon[gene_b] = tax_b # also add the group lead as a member of the group group_to_orthology[gene_a].add(gene_a) # end loop through gene_group file LOG.debug("Finished hashing gene groups") LOG.debug("Making orthology associations") for gid in gene_ids: gene_num = re.sub(r'NCBIGene:', '', gid) group_nums = gene_to_group.get(gene_num) if group_nums is not None: for group_num in group_nums: orthologs = group_to_orthology.get(group_num) if orthologs is not None: for orth in orthologs: oid = 'NCBIGene:' + str(orth) model.addClassToGraph(oid, None, self.globaltt['gene']) otaxid = 'NCBITaxon:' + str(gene_to_taxon[orth]) geno.addTaxon(otaxid, oid) assoc = OrthologyAssoc(graph, self.name, gid, oid) assoc.add_source('PMID:24063302') assoc.add_association_to_graph() # todo get gene label for orthologs - # this could get expensive found_counter += 1 # finish loop through annotated genes LOG.info( "Made %d orthology relationships for %d genes", found_counter, len(gene_ids)) return
[ "\n This will get orthologies between human and other vertebrate genomes\n based on the gene_group annotation pipeline from NCBI.\n More information 9can be learned here:\n http://www.ncbi.nlm.nih.gov/news/03-13-2014-gene-provides-orthologs-regions/\n The method for associations is described in\n [PMCID:3882889](http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3882889/)\n == [PMID:24063302](http://www.ncbi.nlm.nih.gov/pubmed/24063302/).\n Because these are only between human and vertebrate genomes,\n they will certainly miss out on very distant orthologies,\n and should not be considered complete.\n\n We do not run this within the NCBI parser itself;\n rather it is a convenience function for others parsers to call.\n\n :param graph:\n :param gene_ids: Gene ids to fetch the orthology\n :return:\n\n " ]
Please provide a description of the function:def _get_omim_ids(self): ''' side effect: populate omim_type map from a omim number to an ontology term the ontology terms's labels as - 'gene' when they declare it as a gene - 'Phenotype' Phenotype, molecular basis known - 'heritable_phenotypic_marker' Phenotype or locus, molecular basis unknown - 'obsolete' when Removed or moved to another entry - 'has_affected_feature' "when declared as "Gene and phenotype, combined" hope being it could be detected and used as either :return a unique list of omim numbers ''' src_key = 'mim2gene' omim_nums = set() # all types line_counter = 0 raw = '/'.join((self.rawdir, self.files[src_key]['file'])) LOG.info("Obtaining OMIM record identifiers from: %s", raw) # TODO check to see if the file is there col = self.files[src_key]['columns'] with open(raw, "r") as reader: reader.readline() # copyright reader.readline() # Generated: YYYY-MM-DD reader.readline() # discription reader.readline() # disclaimer line = reader.readline() # column headers row = line.strip().split('\t') if row != col: # assert LOG.error('Expected %s to have columns: %s', raw, col) LOG.error('But Found %s to have columns: %s', raw, row) raise AssertionError('Incomming data headers have changed.') line_counter = 5 for line in reader: line_counter += 1 row = line.strip().split('\t') if len(row) != len(col): LOG.warning( 'Unexpected input on line: %i got: %s', line_counter, row) continue omim_num = row[col.index('MIM Number')] mimtype = row[col.index( 'MIM Entry Type (see FAQ 1.3 at https://omim.org/help/faq)')] # ncbigene = row[col.index('Entrez Gene ID (NCBI)')] # hgnc = row[col.index('Approved Gene Symbol (HGNC)')] # ensembl = row[col.index('Ensembl Gene ID (Ensembl)')] omim_nums.update({omim_num}) self.omim_type[omim_num] = None if mimtype == 'gene': self.omim_type[omim_num] = self.globaltt['gene'] # Phenotype, molecular basis known elif mimtype == 'phenotype': self.omim_type[omim_num] = self.globaltt['Phenotype'] # Phenotype or locus, molecular basis unknown elif mimtype == 'predominantly phenotypes': self.omim_type[omim_num] = self.globaltt[ 'heritable_phenotypic_marker'] # ? # Removed or moved to another entry elif mimtype == 'moved/removed': self.omim_type[omim_num] = self.globaltt['obsolete'] # "Gene and phenotype, combined" works as both/either. elif mimtype == 'gene/phenotype': self.omim_type[omim_num] = self.globaltt['has_affected_feature'] else: LOG.warning( 'Unknown OMIM TYPE of %s on line %i', mimtype, line_counter) LOG.info("Done. found %d omim ids", len(omim_nums)) return list(omim_nums)
[ "\n Side effect: populates the omim_type map from an omim number\n to an ontology term, using the terms' labels:\n - 'gene' when declared as a gene\n - 'Phenotype' for Phenotype, molecular basis known\n - 'heritable_phenotypic_marker' for Phenotype or locus, molecular basis unknown\n - 'obsolete' when removed or moved to another entry\n - 'has_affected_feature' when declared as \"Gene and phenotype, combined\"\n :return: a unique list of omim numbers\n " ]
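The per-type branching above is effectively a lookup table. A compact sketch, with plain labels standing in for the resolved globaltt terms:

MIM_TYPE_TERM = {
    'gene': 'gene',
    'phenotype': 'Phenotype',  # molecular basis known
    'predominantly phenotypes': 'heritable_phenotypic_marker',
    'moved/removed': 'obsolete',
    'gene/phenotype': 'has_affected_feature',  # works as both/either
}

def classify_mim(mimtype):
    # None signals an unrecognized entry type (logged as a warning above)
    return MIM_TYPE_TERM.get(mimtype)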
Please provide a description of the function:def process_entries( self, omimids, transform, included_fields=None, graph=None, limit=None, globaltt=None ): omimparams = {} # add the included_fields as parameters if included_fields is not None and included_fields: omimparams['include'] = ','.join(included_fields) processed_entries = list() # scrub any omim prefixes from the omimids before processing # cleanomimids = set() # for omimid in omimids: # scrubbed = str(omimid).split(':')[-1] # if re.match(r'^\d+$', str(scrubbed)): # cleanomimids.update(scrubbed) # omimids = list(cleanomimids) cleanomimids = [o.split(':')[-1] for o in omimids] diff = set(omimids) - set(cleanomimids) if diff: LOG.warning('OMIM has %i dirty bits see"\n %s', len(diff), str(diff)) omimids = cleanomimids else: cleanomimids = list() acc = 0 # for counting # note that you can only do request batches of 20 # see info about "Limits" at http://omim.org/help/api # TODO 2017 May seems a majority of many groups of 20 # are producing python None for RDF triple Objects groupsize = 20 if not self.test_mode and limit is not None: # just in case the limit is larger than the number of records, maxit = limit if limit > len(omimids): maxit = len(omimids) else: maxit = len(omimids) while acc < maxit: end = min((maxit, acc + groupsize)) # iterate through the omim ids list, # and fetch from the OMIM api in batches of 20 if self.test_mode: intersect = list( set([str(i) for i in self.test_ids]) & set(omimids[acc:end])) # some of the test ids are in the omimids if intersect: LOG.info("found test ids: %s", intersect) omimparams.update({'mimNumber': ','.join(intersect)}) else: acc += groupsize continue else: omimparams.update({'mimNumber': ','.join(omimids[acc:end])}) url = OMIMAPI + urllib.parse.urlencode(omimparams) try: req = urllib.request.urlopen(url) except HTTPError as e: # URLError? LOG.warning('fetching: %s', url) error_msg = e.read() if re.search(r'The API key: .* is invalid', str(error_msg)): msg = "API Key not valid" raise HTTPError(url, e.code, msg, e.hdrs, e.fp) LOG.error("Failed with: %s", str(error_msg)) break resp = req.read().decode() acc += groupsize myjson = json.loads(resp) # snag a copy with open('./raw/omim/_' + str(acc) + '.json', 'w') as fp: json.dump(myjson, fp) entries = myjson['omim']['entryList'] for e in entries: # apply the data transformation, and save it to the graph processed_entry = transform(e, graph, globaltt) if processed_entry is not None: processed_entries.append(processed_entry) # ### end iterating over batch of entries return processed_entries
[ "\n Given a list of omim ids,\n this will use the omim API to fetch the entries, according to the\n ```included_fields``` passed as a parameter.\n If a transformation function is supplied,\n this will iterate over each entry,\n and either add the results to the supplied ```graph```\n or will return a set of processed entries that the calling function\n can further iterate.\n\n If no ```included_fields``` are provided, this will simply fetch\n the basic entry from omim,\n which includes an entry's: prefix, mimNumber, status, and titles.\n\n :param omimids: the set of omim entry ids to fetch using their API\n :param transform: Function to transform each omim entry when looping\n :param included_fields: A set of what fields are required to retrieve\n from the API\n :param graph: the graph to add the transformed data into\n :return:\n " ]
Please provide a description of the function:def _process_all(self, limit): omimids = self._get_omim_ids() LOG.info('Have %i omim numbers to fetch records from their API', len(omimids)) LOG.info('Have %i omim types ', len(self.omim_type)) if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) tax_label = 'Homo sapiens' tax_id = self.globaltt[tax_label] # add genome and taxon geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere model.addClassToGraph(tax_id, None) # label added elsewhere includes = set() includes.add('all') self.process_entries( omimids, self._transform_entry, includes, graph, limit, self.globaltt)
[ "\n This takes the list of omim identifiers from the omim.txt.Z file,\n and iteratively queries the omim api for the json-formatted data.\n This will create OMIM classes, with the label,\n definition, and some synonyms.\n If an entry is \"removed\",\n it is added as a deprecated class.\n If an entry is \"moved\",\n it is deprecated and consider annotations are added.\n\n Additionally, we extract:\n *phenotypicSeries ids as superclasses\n *equivalent ids for Orphanet and UMLS\n\n If set to testMode,\n it will write only those items in the test_ids to the testgraph.\n\n :param limit:\n :return:\n " ]
Please provide a description of the function:def _process_morbidmap(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 assoc_count = 0 src_key = 'morbidmap' col = self.files[src_key]['columns'] raw = '/'.join((self.rawdir, self.files[src_key]['file'])) with open(raw) as reader: line = reader.readline() # Copyright line = reader.readline() # Generated: 2016-04-11 line = reader.readline() # EOF for field spec line = reader.readline().strip() # columns header line_counter = 4 row = line.split('\t') # includes funky leading octothorpe if row != col: # assert LOG.error('Expected %s to have columns: %s', raw, col) LOG.error('But Found %s to have columns: %s', raw, row) raise AssertionError('Incomming data headers have changed.') for line in reader: line_counter += 1 line = line.strip() # since there are comments at the end of the file as well, if line[0] == '#': continue row = line.split('\t') if len(row) != len(col): LOG.warning( 'Unexpected input on line: %i got: %s', line_counter, row) continue disorder = row[col.index('# Phenotype')] gene_symbols = row[col.index('Gene Symbols')] gene_num = row[col.index('MIM Number')] # loc = row[col.index('Cyto Location')] # LOG.info("morbidmap disorder: %s", disorder) # too verbose # disorder = disorder label , number (mapping key) # 3-M syndrome 1, 273750 (3)|CUL7, 3M1|609577|6p21.1 # but note that for those diseases where they are genomic loci # (not genes though), the omim id is only listed as the gene # Alopecia areata 1 (2)|AA1|104000|18p11.3-p11.2 # when there's a gene and disease disorder_match = self.disorder_regex.match(disorder) nogene_match = self.nogene_regex.match(disorder) if disorder_match is not None: disorder_parts = disorder_match.groups() (disorder_label, disorder_num, phene_key) = disorder_parts if self.test_mode and ( int(disorder_num) not in self.test_ids or int(gene_num) not in self.test_ids): continue assoc_count += 1 gene_symbols = gene_symbols.split(', ') gene_id = 'OMIM:' + str(gene_num) self._make_pheno_assoc( graph, gene_id, disorder_num, disorder_label, phene_key) elif nogene_match is not None: # this is a case where the disorder # a blended gene/phenotype # we lookup the NCBIGene feature and make the association (disorder_label, phene_key) = nogene_match.groups() disorder_num = gene_num # make what's in the gene column the disease disorder_id = 'OMIM:' + str(disorder_num) if self.test_mode and int(disorder_num) not in self.test_ids: continue if disorder_id in self.omim_ncbigene_idmap: # get the gene ids gene_ids = self.omim_ncbigene_idmap[disorder_id] if gene_ids is None: continue for gene_num in gene_ids: # TODO add gene filter for testMode and NCBIGenes gene_id = 'NCBIGene:' + str(gene_num).strip() assoc_count += 1 self._make_pheno_assoc( graph, gene_id, disorder_num, disorder_label, phene_key) else: # we can create an anonymous feature # to house this thing for example, 158900 feature_id = self._make_anonymous_feature(gene_num) assoc_count += 1 self._make_pheno_assoc( graph, feature_id, disorder_num, disorder_label, phene_key) LOG.info( "We don't have an NCBIGene feature id to link %s with %s", disorder_id, disorder_label) if self.test_mode and gene_num not in self.test_ids: continue else: LOG.warning( "There are misformatted rows %i:%s", line_counter, line) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Added %d G2P associations", assoc_count)
[ "\n This will process the morbidmap file to get the links between\n omim genes and diseases. Here, we create anonymous nodes for some\n variant loci that are variants of the gene that causes the disease.\n Triples created:\n <some_anonymous_variant_locus>\n is_allele_of\n <omim_gene_id>\n <some_anonymous_variant_locus> causes condition <omim_disease_id>\n <assoc> hasSubject <some_anonymous_variant_locus>\n <assoc> hasObject <omim_disease_id>\n <assoc> hasPredicate <causes condition>\n <assoc> DC:evidence <eco_id>\n :param limit:\n :return:\n " ]
Please provide a description of the function:def _make_pheno_assoc( self, graph, gene_id, disorder_num, disorder_label, phene_key ): disorder_id = ':'.join(('OMIM', disorder_num)) rel_label = 'causes condition' rel_id = self.globaltt[rel_label] if disorder_label.startswith('['): rel_id = self.globaltt['is marker for'] # rel_label = 'is a marker for' elif disorder_label.startswith('{'): rel_id = self.globaltt['contributes to'] # rel_label = 'contributes to' elif disorder_label.startswith('?'): # this is a questionable mapping! skip? rel_id = self.globaltt['contributes to'] assoc = G2PAssoc(graph, self.name, gene_id, disorder_id, rel_id) if phene_key is not None: evidence = self.resolve(phene_key, False) if evidence != phene_key: assoc.add_evidence(evidence) # evidence is Found assoc.add_association_to_graph()
[ "\n From the docs:\n Brackets, \"[ ]\", indicate \"nondiseases,\" mainly genetic variations\n that lead to apparently abnormal laboratory test values\n (e.g., dysalbuminemic euthyroidal hyperthyroxinemia).\n\n Braces, \"{ }\", indicate mutations that contribute to susceptibility\n to multifactorial disorders (e.g., diabetes, asthma) or to\n susceptibility to infection (e.g., malaria).\n\n A question mark, \"?\", before the phenotype name indicates that the\n relationship between the phenotype and gene is provisional.\n More details about this relationship are provided in the comment\n field of the map and in the gene and phenotype OMIM entries.\n\n Phene key:\n The number in parentheses after the name of each disorder indicates\n the following:\n (1) the disorder was positioned by mapping of the wildtype gene;\n (2) the disease phenotype itself was mapped;\n (3) the molecular basis of the disorder is known;\n (4) the disorder is a chromosome deletion or duplication syndrome.\n\n reference: https://omim.org/help/faq#1_6\n\n :param graph: graph object of type dipper.graph.Graph\n :param gene_id: str, gene id as curie\n :param gene_symbol: str, symbol\n :param disorder_num: str, disorder id\n :param disorder_label: str, disorder label\n :param phene_key: int or str, 1-4, see docstring\n :return:\n " ]
Please provide a description of the function:def _get_description(entry): description = None if entry is not None and 'textSectionList' in entry: textsectionlist = entry['textSectionList'] for ts in textsectionlist: if ts['textSection']['textSectionName'] == 'description': description = ts['textSection']['textSectionContent'] # there are internal references to OMIM identifiers in # the description, I am formatting them in our style. description = re.sub(r'{(\d+)}', r'OMIM:\1', description) # TODO # reformat the citations in the description with PMIDs break return description
[ "\n Get the description of the omim entity\n from the textSection called 'description'.\n Note that some of these descriptions have linebreaks.\n If printed in turtle syntax, they will appear to be triple-quoted.\n :param entry:\n :return:\n\n " ]
Please provide a description of the function:def _cleanup_label(label): conjunctions = ['and', 'but', 'yet', 'for', 'nor', 'so'] little_preps = [ 'at', 'by', 'in', 'of', 'on', 'to', 'up', 'as', 'it', 'or'] articles = ['a', 'an', 'the'] # remove the abbreviation lbl = label.split(r';')[0] fixedwords = [] i = 0 for wrd in lbl.split(): i += 1 # convert the roman numerals to numbers, # but assume that the first word is not # a roman numeral (this permits things like "X inactivation" if i > 1 and re.match(romanNumeralPattern, wrd): n = fromRoman(wrd) # make the assumption that the number of syndromes are <100 # this allows me to retain "SYNDROME C" # and not convert it to "SYNDROME 100" if 0 < n < 100: # get the non-roman suffix, if present. # for example, IIIB or IVA suffix = wrd.replace(toRoman(n), '', 1) fixed = ''.join((str(n), suffix)) wrd = fixed # capitalize first letter wrd = wrd.title() # replace interior conjunctions, prepositions, # and articles with lowercase if wrd.lower() in (conjunctions+little_preps+articles) and i != 1: wrd = wrd.lower() fixedwords.append(wrd) lbl = ' '.join(fixedwords) # print (label, '-->', lbl) return lbl
[ "\n Reformat the ALL CAPS OMIM labels to something more pleasant to read.\n This will:\n 1. remove the abbreviation suffixes\n 2. convert the roman numerals to integer numbers\n 3. make the text title case,\n except for suplied conjunctions/prepositions/articles\n :param label:\n :return:\n " ]
Please provide a description of the function:def _process_phenotypicseries(self, limit): if self.test_mode: graph = self.testgraph else: graph = self.graph LOG.info("getting phenotypic series titles") model = Model(graph) line_counter = 0 src_key = 'phenotypicSeries' col = self.files[src_key]['columns'] raw = '/'.join((self.rawdir, self.files[src_key]['file'])) with open(raw) as reader: line = reader.readline() # title line = reader.readline() # date downloaded line = reader.readline() # copyright line = reader.readline() # <blank> line = reader.readline().strip() # column headers line_counter = 5 row = line.split('\t') if row != col: # assert LOG.error('Expected %s to have columns: %s', raw, col) LOG.error('But Found %s to have columns: %s', raw, row) raise AssertionError('Incomming data headers have changed.') for line in reader: line_counter += 1 row = line.strip().split('\t') if row and len(row) != len(col): LOG.warning( 'Unexpected input on line: %i got: %s', line_counter, row) continue ps_label = row[col.index('Phenotypic Series Title')].strip() ps_num = row[col.index('Phenotypic Series number')].strip() omimps_curie = 'OMIMPS:' + ps_num model.addClassToGraph(omimps_curie, ps_label) if not self.test_mode and limit is not None and line_counter > limit: break
[ "\n Creates classes from the OMIM phenotypic series list.\n These are grouping classes to hook the more granular OMIM diseases.\n # TEC what does 'hook' mean here?\n\n :param limit:\n :return:\n\n " ]
Please provide a description of the function:def _get_phenotypicseries_parents(entry, graph): model = Model(graph) omim_num = str(entry['mimNumber']) omim_curie = 'OMIM:' + omim_num # the phenotypic series mappings serieslist = [] if 'phenotypicSeriesExists' in entry: if entry['phenotypicSeriesExists'] is True: if 'phenotypeMapList' in entry: phenolist = entry['phenotypeMapList'] for p in phenolist: for q in p['phenotypeMap']['phenotypicSeriesNumber'].split(','): serieslist.append(q) if 'geneMap' in entry and 'phenotypeMapList' in entry['geneMap']: phenolist = entry['geneMap']['phenotypeMapList'] for p in phenolist: if 'phenotypicSeriesNumber' in p['phenotypeMap']: for q in p['phenotypeMap']['phenotypicSeriesNumber'].split( ','): serieslist.append(q) # add this entry as a subclass of the series entry for ser in serieslist: series_id = 'OMIMPS:' + ser model.addClassToGraph(series_id, None) model.addSubClass(omim_curie, series_id)
[ "\n Extract the phenotypic series parent relationship out of the entry\n :param entry:\n :return:\n " ]