Upload Scripts/RedditUnpack.py with huggingface_hub
Scripts/RedditUnpack.py +319 -0
Scripts/RedditUnpack.py
ADDED
@@ -0,0 +1,319 @@
import html
import multiprocessing
import pathlib
from typing import Callable

import msgspec
import typer
import zstandard
from loguru import logger

from RedditModels import (
    RedditAuthor,
    RedditComment,
    RedditFlair,
    RedditSubmit,
    RedditSubreddit,
)
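
# RedditModels is not part of this upload. The script assumes msgspec-style structs
# roughly like the commented sketch below (field names are inferred from the calls in
# this file and may differ from the real definitions). Only RedditFlair is spelled
# out, because the code below relies on its `is_flaired` property:
#
#     class RedditFlair(msgspec.Struct):
#         bg: str | None = None
#         css_cls: str | None = None
#         template: str | None = None
#         richtext: list | None = None
#         text: str | None = None
#         text_color: str | None = None
#         type: str | None = None
#
#         @property
#         def is_flaired(self) -> bool:
#             # A flair counts as "set" if any meaningful field is populated.
#             return any((self.text, self.richtext, self.template, self.css_cls))
#
# RedditAuthor, RedditSubreddit, RedditSubmit and RedditComment are expected to accept
# the keyword arguments used in make_submission()/make_comment() below; RedditSubmit
# additionally needs assignable `removed` and `cross` fields.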

root_app = typer.Typer()


# Read the next chunk from the zstd stream reader and decode it as UTF-8. If the read
# ends mid multi-byte character, keep reading chunks until the buffer decodes or more
# than max_window_size bytes have been consumed.
def read_and_decode(
    reader, chunk_size, max_window_size, previous_chunk=None, bytes_read=0
):
    chunk = reader.read(chunk_size)
    bytes_read += chunk_size
    if previous_chunk is not None:
        chunk = previous_chunk + chunk
    try:
        return chunk.decode()
    except UnicodeDecodeError:
        if bytes_read > max_window_size:
            raise UnicodeError(
                f"Unable to decode frame after reading {bytes_read:,} bytes"
            )
        logger.debug(f"Decoding error with {bytes_read:,} bytes, reading another chunk")
        return read_and_decode(reader, chunk_size, max_window_size, chunk, bytes_read)


GB = 2**30


# Stream (line, byte_offset) pairs from an uncompressed JSONL file.
def read_lines_jsonl(file_name, chunk_size=GB // 2):
    with open(file_name, "rb") as file_handle:
        buffer = b""
        while True:
            chunk = file_handle.read(chunk_size)

            if not chunk:
                break
            lines = (buffer + chunk).split(b"\n")

            for line in lines[:-1]:
                yield line.strip(), file_handle.tell()

            buffer = lines[-1]


# Stream (line, byte_offset) pairs from a zstd-compressed NDJSON dump.
def read_lines_zst(file_name, scale: float = 1):
    with open(file_name, "rb") as file_handle:
        buffer = ""
        reader = zstandard.ZstdDecompressor(
            max_window_size=int((2**31) * scale)
        ).stream_reader(file_handle)
        while True:
            chunk = read_and_decode(
                reader, int((2**27) * scale), int((2**29) * 2 * scale)
            )

            if not chunk:
                break
            lines = (buffer + chunk).split("\n")

            for line in lines[:-1]:
                yield line.strip(), file_handle.tell()

            buffer = lines[-1]

        reader.close()
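

# Usage sketch for the readers above (the file name is illustrative, not part of the
# upload): each yielded item is one raw JSON line plus the current byte offset in the
# compressed file, handy for coarse progress reporting.
#
#     for raw_line, offset in read_lines_zst(pathlib.Path("RS_2023-09.zst")):
#         record = msgspec.json.decode(raw_line)
#         print(record.get("id"), offset)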


def error_cb(err):
    logger.exception(err)


# Encode selected boolean fields of a submission record as a compact flag string,
# one character per set flag.
def get_submission_flags(data: dict):
    flag_map = {
        "!": "spoiler",
        "#": "stickied",
        ">": "pinned",
        "A": "archived",
        "C": "is_crosspostable",
        "c": "is_original_content",
        "E": "edited",
        "e": "is_meta",
        "G": "can_gild",
        "H": "hidden",
        "i": "is_robot_indexable",
        "L": "allow_live_comments",
        "l": "locked",
        "m": "is_reddit_media_domain",
        "M": "over_18",
        "O": "contest_mode",
        "q": "quarantine",
        "s": "is_self",
        "v": "is_video",
    }
    return "".join(flag for flag, key in flag_map.items() if data.get(key))


# Same idea for comment records, with a smaller flag set.
def get_comment_flags(data: dict):
    flag_map = {
        "#": "stickied",
        "A": "archived",
        "E": "edited",
        "G": "can_gild",
        "H": "hidden",
        "l": "locked",
        "=": "score_hidden",
        "P": "author_premium",
        "R": "send_replies",
        "O": "can_mod_post",
        "N": "no_follow",
    }
    return "".join(flag for flag, key in flag_map.items() if data.get(key))
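

# Example of the flag encoding (values are illustrative, not taken from a real dump):
# a stickied, locked, NSFW self-post such as
#     {"stickied": True, "locked": True, "over_18": True, "is_self": True}
# encodes to get_submission_flags(...) == "#lMs".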


# Collect the <prefix>_flair_* fields of a raw record into a RedditFlair.
def get_reddit_flair(data: dict, prefix: str):
    return RedditFlair(
        bg=data.get(f"{prefix}_flair_background_color"),
        css_cls=data.get(f"{prefix}_flair_css_class"),
        template=data.get(f"{prefix}_flair_template_id"),
        richtext=data.get(f"{prefix}_flair_richtext"),
        text=data.get(f"{prefix}_flair_text"),
        text_color=data.get(f"{prefix}_flair_text_color"),
        type=data.get(f"{prefix}_flair_type"),
    )


def make_submission(data: dict, file_id: pathlib.Path | None = None):
    # Create Author
    author = None
    if data.get("author_created_utc"):
        author_flair = get_reddit_flair(data, "author")
        author_fullname = data.get("author_fullname", "")
        afn = (
            author_fullname[3:]
            if author_fullname.startswith("t2_")
            else author_fullname
        )
        author = RedditAuthor(
            name=data.get("author", ""),
            uid=afn,
            create=data.get("author_created_utc", -1),
            flair=author_flair if author_flair.is_flaired else None,
            patreon=data.get("author_patreon_flair", False),
            premium=data.get("author_premium", False),
        )

    # Create Subreddit
    subreddit_id = data.get("subreddit_id", "")
    if subreddit_id is not None:
        subid = subreddit_id[3:] if subreddit_id.startswith("t5_") else subreddit_id
    else:
        return None
    subreddit = RedditSubreddit(
        name=data.get("subreddit", "<?>"),
        id=subid,
        # Number of subs and type can be None.
        subs=data.get("subreddit_subscribers", None),
        type=data.get("subreddit_type", None),
    )
    link_flair = get_reddit_flair(data, "link")

    submission = RedditSubmit(
        sub=subreddit,
        author=author,
        id=data.get("id", None),
        score=data.get("score", 0) if data.get("score", 0) else 0,
        created=float(data.get("created_utc", 0.0))
        if data.get("created_utc", 0.0)
        else -1.0,
        title=data.get("title", None),
        flags=get_submission_flags(data),
        link_flair=link_flair if link_flair.is_flaired else None,
        url=data.get("url"),
        text=data.get("selftext", None),
    )
    if submission.text == "[removed]":
        submission.removed = [
            data.get("removal_reason"),
            data.get("removed_by"),
            data.get("removed_by_category"),
        ]

    if data.get("crosspost_parent_list", []):
        submission.cross = []
        for crosspost in data.get("crosspost_parent_list", []):
            post = make_submission(crosspost, file_id)
            if post is None:
                continue
            submission.cross.append(post)
    return submission


def make_comment(data: dict):
    author = data.get("author", "")
    if author is None or author.lower() == "[deleted]":
        author = None
    else:
        author_flair = get_reddit_flair(data, "author")
        author_fullname = data.get("author_fullname", "")
        afn = (
            author_fullname[3:]
            if author_fullname.startswith("t2_")
            else author_fullname
        )
        author = RedditAuthor(
            name=data.get("author", ""),
            uid=afn,
            create=data.get("author_created_utc", -1),
            flair=author_flair if author_flair.is_flaired else None,
            patreon=data.get("author_patreon_flair", False),
            premium=data.get("author_premium", False),
        )
    subreddit_id = data.get("subreddit_id", "")
    if subreddit_id is not None:
        subid = subreddit_id[3:] if subreddit_id.startswith("t5_") else subreddit_id
    else:
        return None
    subreddit = RedditSubreddit(
        name=data.get("subreddit", ""),
        id=subid,
        subs=data.get("subreddit_subscribers", -1),
        type=data.get("subreddit_type", ""),
    )
    text = html.unescape(data.get("body", "")).replace("\r\n", "\n")
    if author is None and text in ["[deleted]", "[removed]"]:
        text = None
    submission = RedditComment(
        sub=subreddit,
        author=author,
        id=data.get("id", ""),
        score=data.get("score", 0),
        created=data.get("created_utc", 0),
        thread_id=data.get("link_id", ""),
        parent_id=data.get("parent_id", ""),
        text=text,
        flags=get_comment_flags(data),
    )
    # rich.print(submission)
    return submission


# Base processor


# Decode each JSON line of a .zst dump, convert it with processor_fn, and append the
# msgspec-encoded result to a per-input output file.
def process_zst(
    input_file: pathlib.Path, output_file: pathlib.Path, processor_fn: Callable
):
    decoder = msgspec.json.Decoder()
    encoder = msgspec.json.Encoder(decimal_format="number")
    write = output_file.with_stem(f"{output_file.stem}_{input_file.stem}")
    with open(write, "wb") as f:
        for lineidx, line in enumerate(read_lines_zst(input_file)):
            data, idx = line

            try:
                post = processor_fn(decoder.decode(data))
                if post:
                    f.write(encoder.encode(post) + b"\n")
            except msgspec.DecodeError:
                logger.warning("Decode error detected. Continuing...")
            if lineidx % 100_000 == 0:
                logger.info(f"{write} {lineidx} processed")
    logger.info(f"{write} done")
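

# Output naming sketch (paths are illustrative): with output_file = out/unpacked.jsonl
# and input_file = dumps/RS_2023-09.zst, the expression
#     output_file.with_stem(f"{output_file.stem}_{input_file.stem}")
# yields out/unpacked_RS_2023-09.jsonl, i.e. one output file per input archive.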


# Typer Commands


@root_app.command(name="submissions")
def process_submissions(zst_input: pathlib.Path, output_prefix: pathlib.Path):
    s = sorted(list(zst_input.iterdir()))
    # s = list(pathlib.Path("reddit/submissions").iterdir())
    with multiprocessing.Pool(processes=32) as pooled:
        results = []
        for file in s:
            results.append(
                pooled.apply_async(
                    process_zst,
                    args=(file, output_prefix, make_submission),
                    error_callback=error_cb,
                )
            )
        [result.wait() for result in results]


@root_app.command(name="comments")
def process_comments(zst_input: pathlib.Path, output_prefix: pathlib.Path):
    s = sorted(list(zst_input.iterdir()))
    # s = list(pathlib.Path("reddit/submissions").iterdir())
    with multiprocessing.Pool(processes=32) as pooled:
        results = []
        for file in s:
            results.append(
                pooled.apply_async(
                    process_zst,
                    args=(file, output_prefix, make_comment),
                    error_callback=error_cb,
                )
            )
        [result.wait() for result in results]


# Typer Commands for filtering with `sub_selects.jsonl`

if __name__ == "__main__":
    root_app()
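

# CLI usage sketch (directory and prefix paths are illustrative):
#
#     python Scripts/RedditUnpack.py submissions reddit/submissions out/unpacked.jsonl
#     python Scripts/RedditUnpack.py comments reddit/comments out/unpacked.jsonl
#
# Every .zst archive in the input directory is unpacked in parallel into
# out/unpacked_<archive stem>.jsonl next to the given prefix.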