zqiu committed on
Commit
e46319c
·
1 Parent(s): 3053d88

Upload control-celeba-hq.py

Browse files
Files changed (1) hide show
  1. control-celeba-hq.py +117 -0
control-celeba-hq.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
from huggingface_hub import hf_hub_url
import datasets
import os

# Version of this dataset loading script.
_VERSION = datasets.Version("0.0.1")

# Card metadata left unfilled by the author.
_DESCRIPTION = "TODO"
_HOMEPAGE = "TODO"
_LICENSE = "TODO"
_CITATION = "TODO"

# Each example is a target image, a conditioning image, and a text prompt
# (presumably the ControlNet training layout -- confirm against the repo card).
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(),
        "conditioning_image": datasets.Image(),
        "text": datasets.Value("string"),
    },
)

# Resolved Hub download URLs for the per-split metadata files and the two
# image archives hosted in the oftverse/control-celeba-hq dataset repo.
TRAIN_METADATA_URL = hf_hub_url(
    "oftverse/control-celeba-hq",
    filename="train.jsonl",
    repo_type="dataset",
)

TEST_METADATA_URL = hf_hub_url(
    "oftverse/control-celeba-hq",
    filename="test.jsonl",
    repo_type="dataset",
)

IMAGES_URL = hf_hub_url(
    "oftverse/control-celeba-hq",
    filename="images.zip",
    repo_type="dataset",
)

CONDITIONING_IMAGES_URL = hf_hub_url(
    "oftverse/control-celeba-hq",
    filename="conditioning_images.zip",
    repo_type="dataset",
)

# Single default builder configuration.
_DEFAULT_CONFIG = datasets.BuilderConfig(name="default", version=_VERSION)
class CONTROL_CELEBA_HQ_DATASET(datasets.GeneratorBasedBuilder):
    """Builder for the control-celeba-hq dataset.

    Yields (image, conditioning_image, text) examples for the train and
    test splits, reading per-split metadata from JSON-lines files and the
    image bytes from the two extracted archives.
    """

    BUILDER_CONFIGS = [_DEFAULT_CONFIG]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset's metadata (features, license, citation, ...)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the metadata files and image archives, define the splits.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch/extract files.

        Returns:
            A list with the TRAIN and TEST `SplitGenerator`s; both splits
            share the same extracted image directories.
        """
        train_metadata_path = dl_manager.download(TRAIN_METADATA_URL)
        test_metadata_path = dl_manager.download(TEST_METADATA_URL)
        images_dir = dl_manager.download_and_extract(IMAGES_URL)
        conditioning_images_dir = dl_manager.download_and_extract(
            CONDITIONING_IMAGES_URL
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "metadata_path": train_metadata_path,
                    "images_dir": images_dir,
                    "conditioning_images_dir": conditioning_images_dir,
                },
            ),  # BUG FIX: this comma was missing, making the list a syntax error
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "metadata_path": test_metadata_path,
                    "images_dir": images_dir,
                    "conditioning_images_dir": conditioning_images_dir,
                },
            ),
        ]

    def _generate_examples(self, metadata_path, images_dir, conditioning_images_dir):
        """Yield (key, example) pairs for one split.

        Args:
            metadata_path: JSON-lines file; each record has "image",
                "conditioning_image" (relative paths) and "text" keys.
            images_dir: directory containing the extracted target images.
            conditioning_images_dir: directory with the conditioning images.

        Yields:
            (relative image path, example dict matching `_FEATURES`).
        """
        metadata = pd.read_json(metadata_path, lines=True)

        for _, row in metadata.iterrows():
            text = row["text"]

            image_path = os.path.join(images_dir, row["image"])
            # Context managers close the handles deterministically; the
            # original leaked one open file per image.
            with open(image_path, "rb") as f:
                image = f.read()

            conditioning_image_path = os.path.join(
                conditioning_images_dir, row["conditioning_image"]
            )
            with open(conditioning_image_path, "rb") as f:
                conditioning_image = f.read()

            # The relative image path is unique per record, so it serves as key.
            yield row["image"], {
                "text": text,
                "image": {
                    "path": image_path,
                    "bytes": image,
                },
                "conditioning_image": {
                    "path": conditioning_image_path,
                    "bytes": conditioning_image,
                },
            }