vkashko committed on
Commit
206b336
·
1 Parent(s): c683d45

feat: load script

Browse files
Files changed (1) hide show
  1. fights-segmentation.py +174 -0
fights-segmentation.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from xml.etree import ElementTree as ET
3
+
4
+ import datasets
5
+
6
+ _CITATION = """\
7
+ @InProceedings{huggingface:dataset,
8
+ title = {fights-segmentation},
9
+ author = {TrainingDataPro},
10
+ year = {2023}
11
+ }
12
+ """
13
+
14
+ _DESCRIPTION = """\
15
+ The dataset consists of a collection of photos extracted from **videos of fights**.
16
+ It includes **segmentation masks** for **fighters, referees, mats, and the background**.
17
+ The dataset offers a resource for *object detection, instance segmentation,
18
+ action recognition, or pose estimation*.
19
+ It could be useful for **sport community** in identification and detection of
20
+ the violations, dispute resolution and general optimisation of referee's work using
21
+ computer vision.
22
+ """
23
+ _NAME = "fights-segmentation"
24
+
25
+ _HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
26
+
27
+ _LICENSE = ""
28
+
29
+ _DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
30
+
31
+ _LABELS = ["referee", "background", "wrestling", "human"]
32
+
33
+
34
+ class FightsSegmentation(datasets.GeneratorBasedBuilder):
35
+ BUILDER_CONFIGS = [
36
+ datasets.BuilderConfig(name="video_01", data_dir=f"{_DATA}video_01.zip"),
37
+ datasets.BuilderConfig(name="video_02", data_dir=f"{_DATA}video_02.zip"),
38
+ datasets.BuilderConfig(name="video_03", data_dir=f"{_DATA}video_03.zip"),
39
+ ]
40
+
41
+ DEFAULT_CONFIG_NAME = "video_01"
42
+
43
+ def _info(self):
44
+ return datasets.DatasetInfo(
45
+ description=_DESCRIPTION,
46
+ features=datasets.Features(
47
+ {
48
+ "id": datasets.Value("int32"),
49
+ "name": datasets.Value("string"),
50
+ "image": datasets.Image(),
51
+ "mask": datasets.Image(),
52
+ "shapes": datasets.Sequence(
53
+ {
54
+ "track_id": datasets.Value("uint32"),
55
+ "label": datasets.ClassLabel(
56
+ num_classes=len(_LABELS),
57
+ names=_LABELS,
58
+ ),
59
+ "type": datasets.Value("string"),
60
+ "points": datasets.Sequence(
61
+ datasets.Sequence(
62
+ datasets.Value("float"),
63
+ ),
64
+ ),
65
+ "rotation": datasets.Value("float"),
66
+ "occluded": datasets.Value("uint8"),
67
+ "z_order": datasets.Value("uint16"),
68
+ "attributes": datasets.Sequence(
69
+ {
70
+ "name": datasets.Value("string"),
71
+ "text": datasets.Value("string"),
72
+ }
73
+ ),
74
+ }
75
+ ),
76
+ }
77
+ ),
78
+ supervised_keys=None,
79
+ homepage=_HOMEPAGE,
80
+ citation=_CITATION,
81
+ )
82
+
83
+ def _split_generators(self, dl_manager):
84
+ data = dl_manager.download_and_extract(self.config.data_dir)
85
+ return [
86
+ datasets.SplitGenerator(
87
+ name=datasets.Split.TRAIN,
88
+ gen_kwargs={
89
+ "data": data,
90
+ },
91
+ ),
92
+ ]
93
+
94
+ @staticmethod
95
+ def extract_shapes_from_tracks(
96
+ root: ET.Element, file: str, index: int
97
+ ) -> ET.Element:
98
+ img = ET.Element("image")
99
+ img.set("name", file)
100
+ img.set("id", str(index))
101
+ for track in root.iter("track"):
102
+ shape = track.find(f".//*[@frame='{index}']")
103
+ if not (shape is None):
104
+ shape.set("label", track.get("label"))
105
+ shape.set("track_id", track.get("id"))
106
+ img.append(shape)
107
+
108
+ return img
109
+
110
+ @staticmethod
111
+ def parse_shape(shape: ET.Element) -> dict:
112
+ label = shape.get("label")
113
+ track_id = shape.get("track_id")
114
+ shape_type = shape.tag
115
+ rotation = shape.get("rotation", 0.0)
116
+ occluded = shape.get("occluded", 0)
117
+ z_order = shape.get("z_order", 0)
118
+
119
+ points = None
120
+
121
+ if shape_type == "points":
122
+ points = tuple(map(float, shape.get("points").split(",")))
123
+
124
+ elif shape_type == "box":
125
+ points = [
126
+ (float(shape.get("xtl")), float(shape.get("ytl"))),
127
+ (float(shape.get("xbr")), float(shape.get("ybr"))),
128
+ ]
129
+
130
+ elif shape_type == "polygon":
131
+ points = [
132
+ tuple(map(float, point.split(",")))
133
+ for point in shape.get("points").split(";")
134
+ ]
135
+
136
+ attributes = []
137
+
138
+ for attr in shape:
139
+ attr_name = attr.get("name")
140
+ attr_text = attr.text
141
+ attributes.append({"name": attr_name, "text": attr_text})
142
+
143
+ shape_data = {
144
+ "label": label,
145
+ "track_id": track_id,
146
+ "type": shape_type,
147
+ "points": points,
148
+ "rotation": rotation,
149
+ "occluded": occluded,
150
+ "z_order": z_order,
151
+ "attributes": attributes,
152
+ }
153
+
154
+ return shape_data
155
+
156
+ def _generate_examples(self, data):
157
+ tree = ET.parse(os.path.join(data, "annotations.xml"))
158
+ root = tree.getroot()
159
+
160
+ for idx, file in enumerate(sorted(os.listdir(os.path.join(data, "images")))):
161
+ img = self.extract_shapes_from_tracks(root, file, idx)
162
+
163
+ image_id = img.get("id")
164
+ name = img.get("name")
165
+ shapes = [self.parse_shape(shape) for shape in img]
166
+ print(shapes)
167
+
168
+ yield idx, {
169
+ "id": image_id,
170
+ "name": name,
171
+ "image": os.path.join(data, "images", file),
172
+ "mask": os.path.join(data, "masks", file),
173
+ "shapes": shapes,
174
+ }