Delete plant-multi-species-genomes.py
plant-multi-species-genomes.py  (+0 -173)
DELETED
@@ -1,173 +0,0 @@
"""Script for the plant multi-species genomes dataset. This dataset contains the genomes
from 48 different species."""

from typing import List
import datasets
from Bio import SeqIO
import os


# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@article{o2016reference,
  title={Reference sequence (RefSeq) database at NCBI: current status, taxonomic expansion, and functional annotation},
  author={O'Leary, Nuala A and Wright, Mathew W and Brister, J Rodney and Ciufo, Stacy and Haddad, Diana and McVeigh, Rich and Rajput, Bhanu and Robbertse, Barbara and Smith-White, Brian and Ako-Adjei, Danso and others},
  journal={Nucleic acids research},
  volume={44},
  number={D1},
  pages={D733--D745},
  year={2016},
  publisher={Oxford University Press}
}
"""


# You can copy an official description
_DESCRIPTION = """\
Dataset made of diverse genomes available on NCBI and coming from 48 different species.
Test and validation are made of 2 species each. The rest of the genomes are used for training.
Default configuration "6kbp" yields chunks of 6.2kbp (100bp overlap on each side). The chunks of DNA are cleaned and processed so that
they can only contain the letters A, T, C, G and N.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/"

_LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/"

_CHUNK_LENGTHS = [6000,]


def filter_fn(char: str) -> str:
    """
    Transforms any letter different from a base nucleotide into an 'N'.
    """
    if char in {'A', 'T', 'C', 'G'}:
        return char
    else:
        return 'N'


def clean_sequence(seq: str) -> str:
    """
    Process a chunk of DNA to have all letters in upper case and restricted to
    A, T, C, G and N.
    """
    seq = seq.upper()
    seq = map(filter_fn, seq)
    seq = ''.join(list(seq))
    return seq


class PlantMultiSpeciesGenomesConfig(datasets.BuilderConfig):
    """BuilderConfig for the Plant Multi Species Pre-training Dataset."""

    def __init__(self, *args, chunk_length: int, overlap: int = 100, **kwargs):
        """BuilderConfig for the multi species genomes.
        Args:
            chunk_length (:obj:`int`): Chunk length.
            overlap (:obj:`int`): Overlap in base pairs for two consecutive chunks (defaults to 100).
            **kwargs: keyword arguments forwarded to super.
        """
        num_kbp = int(chunk_length / 1000)
        super().__init__(
            *args,
            name=f'{num_kbp}kbp',
            **kwargs,
        )
        self.chunk_length = chunk_length
        self.overlap = overlap


class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
    """Genomes from 48 species, filtered and split into chunks of consecutive
    nucleotides. 2 genomes are taken for test, 2 for validation and 44
    for training."""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = PlantMultiSpeciesGenomesConfig
    BUILDER_CONFIGS = [PlantMultiSpeciesGenomesConfig(chunk_length=chunk_length) for chunk_length in _CHUNK_LENGTHS]
    DEFAULT_CONFIG_NAME = "6kbp"

    def _info(self):

        features = datasets.Features(
            {
                "sequence": datasets.Value("string"),
                "description": datasets.Value("string"),
                "start_pos": datasets.Value("int32"),
                "end_pos": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:

        filepaths_txt = dl_manager.download_and_extract('plant_genome_file_names.txt')
        with open(filepaths_txt) as f:
            filepaths = [os.path.join("plant_genomes", filepath.rstrip()) for filepath in f]

        test_paths = filepaths[-2:]  # 2 genomes for test set
        validation_paths = filepaths[-4:-2]  # 2 genomes for validation set
        train_paths = filepaths[:-4]  # 44 genomes for training

        train_downloaded_files = dl_manager.download_and_extract(train_paths)
        test_downloaded_files = dl_manager.download_and_extract(test_paths)
        validation_downloaded_files = dl_manager.download_and_extract(validation_paths)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": validation_downloaded_files, "chunk_length": self.config.chunk_length}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": test_downloaded_files, "chunk_length": self.config.chunk_length}),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, files, chunk_length):
        key = 0
        for file in files:
            with open(file, 'rt') as f:
                fasta_sequences = SeqIO.parse(f, 'fasta')

                for record in fasta_sequences:

                    # parse descriptions in the fasta file
                    sequence, description = str(record.seq), record.description

                    # clean chromosome sequence
                    sequence = clean_sequence(sequence)
                    seq_length = len(sequence)

                    # split into chunks
                    num_chunks = (seq_length - 2 * self.config.overlap) // chunk_length

                    if num_chunks < 1:
                        continue

                    sequence = sequence[:(chunk_length * num_chunks + 2 * self.config.overlap)]
                    seq_length = len(sequence)

                    for i in range(num_chunks):
                        # get chunk
                        start_pos = i * chunk_length
                        end_pos = min(seq_length, (i + 1) * chunk_length + 2 * self.config.overlap)
                        chunk_sequence = sequence[start_pos:end_pos]

                        # yield chunk
                        yield key, {
                            'sequence': chunk_sequence,
                            'description': description,
                            'start_pos': start_pos,
                            'end_pos': end_pos,
                        }
                        key += 1
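For context, a minimal sketch of how a loading script like the one deleted above is typically consumed through the datasets library. The repository id below is a placeholder, not taken from this commit, and loading community scripts requires trust_remote_code on recent datasets versions; the "6kbp" config corresponds to 6000 bp chunks plus a 100 bp overlap on each side, i.e. 6.2 kbp per example.

    # Hypothetical usage sketch (not part of the commit).
    # "your-org/plant-multi-species-genomes" is a placeholder repo id.
    from datasets import load_dataset

    dataset = load_dataset(
        "your-org/plant-multi-species-genomes",  # placeholder: substitute the actual dataset repo
        name="6kbp",                             # default config: 6000 bp chunks + 2 x 100 bp overlap
        trust_remote_code=True,                  # needed because the repo defines a custom builder
    )

    # Each example is a cleaned DNA chunk (A/T/C/G/N only) with its FASTA
    # description and start/end positions within the source sequence.
    example = dataset["train"][0]
    print(example["description"])
    print(example["start_pos"], example["end_pos"])
    print(example["sequence"][:80])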