sal4ahm committed
Commit abe92bb · 1 Parent(s): 7c665df

added dataset.py

Files changed (1)
  1. dataset.py +178 -0
dataset.py ADDED
@@ -0,0 +1,178 @@
import os
import json
import time
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms


class RQADataset(Dataset):
    def __init__(self, data_config, transform=None):
        """
        Initializes the dataset.

        Args:
            data_config: Configuration object containing paths and settings.
            transform: Optional transform to be applied on a sample.
        """
        self.img_dir = data_config.img_dir
        self.json_dir = data_config.json_dir
        self.filter_list_file = data_config.filter_list
        self.train = data_config.train
        self.transform = transform or transforms.Compose([
            transforms.Resize((512, 512))
        ])

        self.questions = []

        # Load file names for testing or use all files for training
        self.file_names = self._load_file_names()
        self._create_questions()
        print(f"Total Questions Loaded: {len(self.questions)}")

    def _load_file_names(self):
        """
        Loads the list of file names to be processed.

        Returns:
            A list of file names without extensions.
        """
        if not self.train and self.filter_list_file:
            with open(self.filter_list_file, 'r') as f:
                file_names = [line.strip() for line in f]
            print(f"Loaded {len(file_names)} test files from {self.filter_list_file}")
            return file_names
        else:
            # Use all files for training
            return [os.path.splitext(file)[0] for file in os.listdir(self.json_dir) if file.endswith('.json')]

    def _create_questions(self):
        """
        Creates the list of questions from JSON files.
        """
        start_time = time.time()
        unused_count = 0

        for file_name in self.file_names:
            json_path = os.path.join(self.json_dir, file_name + '.json')
            if not os.path.exists(json_path):
                unused_count += 1
                continue

            with open(json_path, 'r') as f:
                json_data = json.load(f)
                for item in json_data:
                    if 'PMC_ID' not in item or 'qa_id' not in item:
                        continue  # Ensure all necessary fields are present
                    item['image_path'] = os.path.join(self.img_dir, item['PMC_ID'] + '.jpg')
                    if os.path.exists(item['image_path']):
                        self.questions.append(item)
                    else:
                        unused_count += 1

        elapsed_time = time.time() - start_time
        print(f"Elapsed time to create questions: {elapsed_time:.2f} seconds = {elapsed_time/60:.2f} minutes")
        print(f"Total unused/used images: {unused_count} / {len(self.file_names) - unused_count}")

    def __len__(self):
        return len(self.questions)

    def __getitem__(self, idx):
        return self._load_data(idx)

    def _load_data(self, idx):
        """
        Loads a single data point.

        Args:
            idx: Index of the data point.

        Returns:
            A dictionary containing the image, question, and answer data.
        """
        question_block = self.questions[idx]
        image_path = question_block['image_path']
        image = Image.open(image_path).convert("RGB")

        # Apply transformation if available
        if self.transform:
            image = self.transform(image)

        return {
            'image': image,
            'question': question_block['question'],
            'answer': question_block['answer'],
            'qa_id': question_block['qa_id'],
            'PMC_ID': question_block['PMC_ID']
        }

    @staticmethod
    def custom_collate(batch):
        """
        Custom collate function to handle batch processing.

        Args:
            batch: A batch of data points.

        Returns:
            A dictionary containing the collated batch data.
        """
        images = [item['image'] for item in batch]
        questions = [item['question'] for item in batch]
        answers = [item['answer'] for item in batch]
        qa_ids = [item['qa_id'] for item in batch]
        pmc_ids = [item['PMC_ID'] for item in batch]

        return {
            'images': images,
            'questions': questions,
            'answers': answers,
            'qa_ids': qa_ids,
            'PMC_IDs': pmc_ids
        }

if __name__ == "__main__":
    # Define a simple data structure to hold the paths (test split)
    class DataConfig:
        img_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/images'
        json_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/qa'
        filter_list = '/home/jupyter/RealCQA/code/data/RQA_V0/test_filenames.txt'
        train = False  # Set to False to prepare the test files

    # Initialize dataset
    dataset = RQADataset(DataConfig)

    # Test loading a single item
    print(f"Number of samples in dataset: {len(dataset)}")
    sample = dataset[0]
    print("Sample data:", sample)

    # Initialize DataLoader
    dataloader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)

    # Test DataLoader
    for batch in dataloader:
        print("Batch data:", batch)
        break  # Load only one batch for testing

    # Repeat the same checks with the training configuration
    class DataConfig:
        img_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/images'
        json_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/qa'
        filter_list = '/home/jupyter/RealCQA/code/data/RQA_V0/test_filenames.txt'
        train = True  # Set to True to use all training files

    # Initialize dataset
    dataset = RQADataset(DataConfig)

    # Test loading a single item
    print(f"Number of samples in dataset: {len(dataset)}")
    sample = dataset[0]
    print("Sample data:", sample)

    # Initialize DataLoader
    dataloader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)

    # Test DataLoader
    for batch in dataloader:
        print("Batch data:", batch)
        break  # Load only one batch for testing
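
Note that the default transform only resizes the PIL images and custom_collate returns plain Python lists, so batches are not stacked tensors. Below is a minimal consumer-side sketch (not part of this commit) of how one might obtain a stacked image tensor: the ToTensor transform and the explicit torch.stack are illustrative additions, the paths mirror the DataConfig above, and the import assumes dataset.py is importable from the working directory.

import torch
from torch.utils.data import DataLoader
from torchvision import transforms

from dataset import RQADataset  # assumes dataset.py is on the Python path

class DataConfig:
    img_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/images'
    json_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/qa'
    filter_list = '/home/jupyter/RealCQA/code/data/RQA_V0/test_filenames.txt'
    train = False

# Resize as in the dataset default, but also convert PIL images to tensors so they can be stacked
to_tensor = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
])

dataset = RQADataset(DataConfig, transform=to_tensor)
loader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)

batch = next(iter(loader))
images = torch.stack(batch['images'])  # custom_collate returns a list, so stack manually
print(images.shape)  # expected: torch.Size([4, 3, 512, 512])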