Sneriko committed on
Commit 2284802 · verified · 1 Parent(s): e76856a

Upload folder using huggingface_hub

data/images/goteborgs_poliskammare_fore_1900_images_1.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f9ee6581b33a7aa39faceacbdd7b1a05271ca9f80d36e606692b5283c37505f
+ size 4145365222
data/images/goteborgs_poliskammare_fore_1900_images_2.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99469e915e3774342d505a0bba78887823f08aa3f9715ecc5b4208a46524a39e
+ size 3498286828
data/page_xmls/goteborgs_poliskammare_fore_1900_page_xmls_1.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5aca19ee93f2a0b53bf3db9907e7f6094206cff9bbb20ffec99ba1a5311e3b0
+ size 36434847
data/page_xmls/goteborgs_poliskammare_fore_1900_page_xmls_2.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:336cf89a506492efd171bf7cb155462d8a4cd5bba3feff677f9af163c65adda2
+ size 35385978
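
The four archives above are tracked with Git LFS, so the diff only shows pointer files: the LFS spec version, the sha256 oid of the real object, and its size in bytes. As a rough local sanity check (not part of this commit), one could fetch an archive with huggingface_hub and compare it against the recorded oid and size; the repo id, filename, and expected values below are taken from this diff, the rest is illustrative:

import hashlib
import os

from huggingface_hub import hf_hub_download

EXPECTED_SHA256 = "1f9ee6581b33a7aa39faceacbdd7b1a05271ca9f80d36e606692b5283c37505f"
EXPECTED_SIZE = 4145365222  # bytes, as recorded in the pointer file above

# Download (or reuse from cache) the first image archive of this dataset repo
path = hf_hub_download(
    repo_id="Riksarkivet/goteborgs_poliskammare_fore_1900",
    filename="data/images/goteborgs_poliskammare_fore_1900_images_1.tar.gz",
    repo_type="dataset",
)

# Hash the archive in 1 MiB chunks and compare with the pointer's oid and size
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE
assert sha256.hexdigest() == EXPECTED_SHA256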
goteborgs_poliskammare_fore_1900.py ADDED
@@ -0,0 +1,211 @@
+ # upload the dataset as one zip of images and one zip of XMLs, create several archive iterators and use them (but what happens to the ordering?)
+
+ import os
+ import xml.etree.ElementTree as ET
+ from glob import glob
+ from pathlib import Path, PurePath
+
+ import cv2
+ import numpy as np
+ from datasets import (
+     BuilderConfig,
+     DatasetInfo,
+     Features,
+     GeneratorBasedBuilder,
+     Image,
+     Split,
+     SplitGenerator,
+     Value,
+ )
+ from PIL import Image as PILImage
+
+
+ class HTRDatasetConfig(BuilderConfig):
+     """BuilderConfig for HTRDataset"""
+
+     def __init__(self, **kwargs):
+         super(HTRDatasetConfig, self).__init__(**kwargs)
+
+
+ class HTRDataset(GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         HTRDatasetConfig(
+             name="htr_dataset",
+             version="1.0.0",
+             description="Line dataset for text recognition of historical Swedish",
+         )
+     ]
+
+     def _info(self):
+         features = Features({"image": Image(), "transcription": Value("string")})
+         return DatasetInfo(features=features)
+
+     def _split_generators(self, dl_manager):
+         """
+         images = dl_manager.download_and_extract(
+             [
+                 f"https://huggingface.co/datasets/Riksarkivet/alvsborgs_losen/resolve/main/data/images/alvsborgs_losen_imgs_{i}.tar.gz"
+                 for i in range(1, 3)
+             ]
+         )
+         xmls = dl_manager.download_and_extract(
+             [
+                 f"https://huggingface.co/datasets/Riksarkivet/alvsborgs_losen/resolve/main/data/page_xmls/alvsborgs_losen_page_xmls_{i}.tar.gz"
+                 for i in range(1, 3)
+             ]
+         )
+         """
+
+         images = dl_manager.download_and_extract(
+             [
+                 f"https://huggingface.co/datasets/Riksarkivet/goteborgs_poliskammare_fore_1900/resolve/main/data/images/goteborgs_poliskammare_fore_1900_images_{i}.tar.gz"
+                 for i in range(1, 3)
+             ]
+         )
+         xmls = dl_manager.download_and_extract(
+             [
+                 f"https://huggingface.co/datasets/Riksarkivet/goteborgs_poliskammare_fore_1900/resolve/main/data/page_xmls/goteborgs_poliskammare_fore_1900_page_xmls_{i}.tar.gz"
+                 for i in range(1, 3)
+             ]
+         )
+         image_extensions = [
+             "*.jpg",
+             "*.jpeg",
+             "*.png",
+             "*.gif",
+             "*.bmp",
+             "*.tif",
+             "*.tiff",
+             "*.JPG",
+             "*.JPEG",
+             "*.PNG",
+             "*.GIF",
+             "*.BMP",
+             "*.TIF",
+             "*.TIFF",
+         ]
+         imgs_nested = [glob(os.path.join(x, "**", ext), recursive=True) for ext in image_extensions for x in images]
+         imgs_flat = [item for sublist in imgs_nested for item in sublist]
+         sorted_imgs = sorted(imgs_flat, key=lambda x: Path(x).stem)
+         xmls_nested = [glob(os.path.join(x, "**", "*.xml"), recursive=True) for x in xmls]
+         xmls_flat = [item for sublist in xmls_nested for item in sublist]
+         sorted_xmls = sorted(xmls_flat, key=lambda x: Path(x).stem)
+         assert len(sorted_imgs) == len(sorted_xmls)
+         imgs_xmls = []
+         for img, xml in zip(sorted_imgs, sorted_xmls):
+             imgs_xmls.append((img, xml))
+
+         return [
+             SplitGenerator(
+                 name=Split.TRAIN,
+                 gen_kwargs={"imgs_xmls": imgs_xmls},
+             )
+         ]
+
+     def _generate_examples(self, imgs_xmls):
+         for img, xml in imgs_xmls:
+             assert Path(img).stem == Path(xml).stem
+             img_filename = Path(img).stem
+             volume = PurePath(img).parts[-2]
+
+             lines_data = self.parse_pagexml(xml)
+
+             # Read the page image from disk as a NumPy array (BGR)
+             image_array = cv2.imread(img)
+
+             for i, line in enumerate(lines_data):
+                 line_id = str(i).zfill(4)
+                 try:
+                     cropped_image = self.crop_line_image(image_array, line["coords"])
+                 except Exception as e:
+                     print(e)
+                     continue
+
+                 # Cast the crop to uint8 to check dtype/shape (result is not used further)
+                 cropped_image_np = np.array(cropped_image, dtype=np.uint8)
+
+                 # Ensure the transcription is a non-empty string (check the raw value before converting)
+                 transcription = line["transcription"]
+                 if transcription is None or not isinstance(transcription, str) or transcription == "":
+                     print(f"Invalid transcription: {transcription}")
+                     continue
+
+                 # Generate the unique key for this line crop
+                 unique_key = f"{volume}_{img_filename}_{line_id}"
+
+                 try:
+                     yield (
+                         unique_key,
+                         {"image": cropped_image, "transcription": transcription},
+                     )
+                 except Exception as e:
+                     print(f"Error yielding example {unique_key}: {e}")
+
+     def parse_pagexml(self, xml):
+         try:
+             tree = ET.parse(xml)
+             root = tree.getroot()
+         except ET.ParseError as e:
+             print(e)
+             return []
+
+         namespaces = {"ns": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"}
+         page = root.find("ns:Page", namespaces)
+         if page is None:
+             print("no page")
+             return []
+
+         text_regions = page.findall("ns:TextRegion", namespaces)
+         lines_data = []
+         for region in text_regions:
+             lines = region.findall("ns:TextLine", namespaces)
+
+             for line in lines:
+                 try:
+                     line_id = line.get("id")
+                     coords = line.find("ns:Coords", namespaces).get("points")
+                     coords = [tuple(map(int, p.split(","))) for p in coords.split()]
+                     transcription = line.find("ns:TextEquiv/ns:Unicode", namespaces).text
+
+                     lines_data.append({"line_id": line_id, "coords": coords, "transcription": transcription})
+                 except Exception as e:
+                     print(e)
+                     continue
+
+         return lines_data
+
+     def crop_line_image(self, img, coords):
+         coords = np.array(coords)
+         # img = HTRDataset.np_to_cv2(image)
+         mask = np.zeros(img.shape[0:2], dtype=np.uint8)
+
+         try:
+             cv2.drawContours(mask, [coords], -1, (255, 255, 255), -1, cv2.LINE_AA)
+         except Exception as e:
+             print(e)
+         res = cv2.bitwise_and(img, img, mask=mask)
+         rect = cv2.boundingRect(coords)
+
+         wbg = np.ones_like(img, np.uint8) * 255
+         cv2.bitwise_not(wbg, wbg, mask=mask)
+
+         # overlay the masked line crop on the white background
+         dst = wbg + res
+
+         cropped = dst[rect[1] : rect[1] + rect[3], rect[0] : rect[0] + rect[2]]
+
+         cropped = HTRDataset.cv2_to_pil(cropped)
+         return cropped
+
+     def np_to_cv2(image_array):
+         image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
+         image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+         return image_rgb
+
+     # Convert OpenCV image to PIL Image
+     def cv2_to_pil(cv2_image):
+         # Convert BGR to RGB
+         cv2_image_rgb = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)
+         # Convert NumPy array to PIL image
+         pil_image = PILImage.fromarray(cv2_image_rgb)
+         return pil_image
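
With the archives and the loading script in place, the dataset should be consumable through the datasets library. A minimal usage sketch, assuming load_dataset resolves this repo to the script above (script-based datasets require trust_remote_code in recent versions of datasets); nothing below is prescribed by the commit itself:

from datasets import load_dataset

ds = load_dataset(
    "Riksarkivet/goteborgs_poliskammare_fore_1900",
    split="train",            # the script only defines a TRAIN split
    trust_remote_code=True,   # allow running the repo's loading script
)

example = ds[0]
example["image"]          # PIL image of one cropped text line on a white background
example["transcription"]  # the line's transcription as a string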