Datasets:

Languages:
English
ArXiv:
License:
File size: 4,496 Bytes
504ec61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import os
import datasets
from datasets import Features, Value
from huggingface_hub import snapshot_download
import glob
import yaml


class PathoBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for PathoBench.

    Extra keyword arguments (popped before delegating to the base class):
        dataset_to_download: name of the dataset to fetch, or '*' for all.
        task_in_dataset: task within that dataset, or '*' for all tasks.
        force_download: passed through to the Hub download (defaults to True).
    """

    def __init__(self, **kwargs):
        # Pull the PathoBench-specific options out of kwargs so the base
        # BuilderConfig never sees them.
        self.task_in_dataset = kwargs.pop("task_in_dataset", None)
        self.dataset_to_download = kwargs.pop("dataset_to_download", None)
        self.force_download = kwargs.pop("force_download", True)

        # Normalize the selection: '*' is the wildcard meaning "everything".
        if self.dataset_to_download is None:
            if self.task_in_dataset is not None:
                # A task without its parent dataset is ambiguous — reject it.
                raise AssertionError("Dataset needs to be defined for the task_in_dataset provided.")
            # Neither provided: default both to the wildcard.
            self.dataset_to_download = '*'
            self.task_in_dataset = '*'
        elif self.task_in_dataset is None:
            # Dataset given but no task: take every task in that dataset.
            self.task_in_dataset = '*'

        super().__init__(**kwargs)


class PathoBenchDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder that mirrors PathoBench split files (.tsv) from the Hub.

    Downloads the requested dataset/task folders from a Hugging Face dataset
    repo into the local cache and exposes every downloaded ``.tsv`` split file
    as one example (its local path) in a single ``full`` split.
    """

    BUILDER_CONFIGS = [
        PathoBenchConfig(name="custom_config", version="1.0.0", description="PathoBench config")
    ]
    BUILDER_CONFIG_CLASS = PathoBenchConfig

    def _info(self):
        """Return static metadata; each example carries only a local file path."""
        return datasets.DatasetInfo(
            description="PathoBench: collection of canonical computational pathology tasks",
            homepage="https://github.com/mahmoodlab/patho-bench",
            license="CC BY-NC-SA 4.0 Deed",
            features=Features({
                'path': Value('string')  # local path to a downloaded .tsv split file
            })
        )

    def _split_generators(self, dl_manager):
        """Download the requested split files and expose them as one 'full' split.

        Raises:
            AssertionError: if the requested dataset or task is not listed in
                the repo's ``available_splits.yaml`` catalog.
        """
        # NOTE(review): self.repo_id is not set anywhere in this file — it is
        # presumably attached to the builder externally before loading; confirm
        # against the caller.
        repo_id = self.repo_id
        dataset_to_download = self.config.dataset_to_download
        task_in_dataset = self.config.task_in_dataset
        force_download = self.config.force_download
        local_dir = self._cache_dir_root

        # Ensure the base local directory exists (once is enough).
        os.makedirs(local_dir, exist_ok=True)

        # Fetch the catalog of available datasets/tasks first.
        snapshot_download(
            repo_id=repo_id,
            allow_patterns=["available_splits.yaml"],
            repo_type="dataset",
            local_dir=local_dir,
            force_download=force_download,
        )

        with open(os.path.join(local_dir, "available_splits.yaml"), 'r') as file:
            available_splits = yaml.safe_load(file)

        # Validate the request against the catalog. Explicit raises (instead of
        # bare `assert`) so validation still runs under `python -O`;
        # AssertionError is kept for backward compatibility with callers.
        if dataset_to_download != "*":
            if dataset_to_download not in available_splits:
                raise AssertionError(
                    f"{dataset_to_download} was not found. Available splits: ({available_splits})"
                )
            if task_in_dataset != "*" and task_in_dataset not in available_splits[dataset_to_download]:
                raise AssertionError(
                    f"{task_in_dataset} was not found in {dataset_to_download}. Available tasks: ({available_splits[dataset_to_download]})"
                )

        # Build the allow-pattern for the actual payload download.
        if dataset_to_download == "*":
            allow_patterns = ["*/*"]  # every dataset / every task
        else:
            task_path = "*" if task_in_dataset == '*' else f"{task_in_dataset}/*"
            allow_patterns = [f"{dataset_to_download}/{task_path}"]

        # Download the required datasets.
        snapshot_download(
            repo_id=repo_id,
            allow_patterns=allow_patterns,
            repo_type="dataset",
            local_dir=local_dir,
            force_download=force_download,
        )

        # Collect every downloaded .tsv split file, recursively.
        search_pattern = os.path.join(local_dir, '**', '*.tsv')
        all_tsv_splits = glob.glob(search_pattern, recursive=True)

        return [
            datasets.SplitGenerator(
                name="full",
                gen_kwargs={"filepath": all_tsv_splits},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, {'path': <tsv path>})`` for each split file path."""
        for idx, file in enumerate(filepath):
            yield idx, {'path': file}