#!/usr/bin/env python3
# License: MIT
# Copyright (C) 2024, Shinon.
# Code inspiration from Ronsor Labs. Licensed as below.
# License: AGPL 3.0
# Copyright (C) 2023, 2024 Ronsor Labs.
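"""Collect page titles from Fandom wikis.

For every wiki listed in the input CSV, crawl its Special:AllPages index,
follow the "Next page" links, and append one JSON line per wiki
({"domain", "path", "pages"}) to the output JSONL file.
"""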
import asyncio
import concurrent.futures as conc
import csv
import pathlib
import random
import urllib.parse

import aiofile
import httpx
import orjson
import tqdm
from bs4 import BeautifulSoup

from proxy_magic_session import get_async_session

# Number of concurrent scraper tasks; HTML parsing and JSON encoding are
# offloaded to a process pool so the event loop stays responsive.
CONCURRENT_WORKERS = 128
executor = conc.ProcessPoolExecutor(max_workers=64)
queue = asyncio.Queue(maxsize=1048576)

WIKI_LIST = pathlib.Path("fandom_wikis_210224.csv")
OUTPUT_JSONL = pathlib.Path("fandom_wikis_pages_210224_v2.jsonl")
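
# Parse one Special:AllPages listing: return the non-redirect page titles found
# on the page plus the relative URL of the "Next page" link (or None).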
def parse_special_index(html_text: str):
    doc = BeautifulSoup(html_text, "lxml")
    page_list_li = doc.select("ul.mw-allpages-chunk li a")
    page_list = set()
    for page_item in page_list_li:
        page_title = page_item.get("title", "")
        if not page_title:
            print("[W] no page title?")
            continue
        if page_title.lower().rstrip().endswith("(redirect)"):
            continue
        page_list.add(page_title)
    page_list = list(page_list)
    next_url = doc.select(".mw-allpages-nav a")
    if not next_url:
        next_url = None
    else:
        candidates = next_url
        next_url = None
        for x in candidates:
            if "next page" in x.text.lower():
                if "index.php" not in x["href"].lower():
                    # Percent-encode the continuation title while keeping the
                    # existing "+" separators intact.
                    next_url = x["href"].split("=", 1)
                    next_url[1] = urllib.parse.quote_plus(
                        next_url[1].replace("+", "__SAFE_PLUS__")
                    ).replace("__SAFE_PLUS__", "+")
                    next_url = "=".join(next_url)
                else:
                    next_url = x["href"]
    return page_list, next_url
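
# Crawl a single wiki: fetch its Special:AllPages index, follow the pagination
# links, and return every collected page title. Returns None when the wiki
# cannot be fetched within the retry budget.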
async def domain_processor(domain: str, path: str):
    session: httpx.AsyncClient = get_async_session()
    loop = asyncio.get_running_loop()
    session.cookies.clear()
    session.headers["user-agent"] = (
        "Mozilla/6.2 (compatible; Microsoft Chrome 137.0; Apple Gecko 47.0 in AOL Firefox 37.6) Google Toolbar/1.3"
    )
    print(f"[I] Processing: https://{domain}{path}Special:AllPages")
    tries = 10
    # pbar = tqdm.tqdm(desc=f"{domain}")
    data = None
    while True:
        try:
            data = await session.get(
                f"https://{domain}{path}Special:AllPages", follow_redirects=True
            )
            if data.status_code != 200:
                if data.status_code == 410:
                    break
                # Count non-200 responses against the retry budget so a wiki
                # that keeps erroring cannot loop forever.
                tries -= 1
                if tries <= 0:
                    print(f"[W] Tries Exceeded https://{domain}{path}")
                    break
                continue
            break
        except httpx.TransportError as e:
            await session.aclose()
            session = get_async_session()
            print(f"[W] Retry TransportError https://{domain}{path} {e}")
            await asyncio.sleep(1)
            tries -= 1
        except httpx.HTTPError as e:
            print(f"[W] Uncaught Exception Retry... https://{domain}{path} | {e}")
            await session.aclose()
            session = get_async_session()
            await asyncio.sleep(1)
            tries -= 1
        except Exception as e:
            print(f"[W] Uncaught Exception https://{domain}{path} | {e}")
            break
        if tries <= 0:
            print(f"[W] Tries Exceeded https://{domain}{path}")
            break
    if tries <= 0 or data is None or data.status_code == 410:
        await session.aclose()
        return
    # Handle redirected domains.
    domain = data.url.host
    page_list, next_url = await loop.run_in_executor(
        executor, parse_special_index, data.text
    )
    # pbar.update(len(page_list))
    while next_url:
        tries = 10
        data = None
        while True:
            try:
                data = await session.get(
                    f"https://{domain}{next_url}", follow_redirects=True
                )
                if data.status_code != 200:
                    if data.status_code == 410:
                        break
                    print(f"https://{domain}{next_url}", data.status_code)
                    tries -= 1
                    if tries <= 0:
                        print(f"[W2] Tries Exceeded https://{domain}{next_url}")
                        break
                    continue
                break
            except httpx.TransportError as e:
                await session.aclose()
                session = get_async_session()
                print(f"[W2] Retry TransportError https://{domain}{next_url} {e}")
                await asyncio.sleep(1)
                tries -= 1
            except httpx.HTTPError as e:
                print(
                    f"[W2] Uncaught Exception Retry... https://{domain}{next_url} | {e}"
                )
                await session.aclose()
                session = get_async_session()
                await asyncio.sleep(1)
                tries -= 1
            except Exception as e:
                print(f"[W2] Uncaught Exception https://{domain}{next_url} | {e}")
                break
            if tries <= 0:
                print(f"[W2] Tries Exceeded https://{domain}{next_url}")
                break
        if tries <= 0 or data is None or data.status_code == 410:
            await session.aclose()
            return
        new_page_list, next_url = await loop.run_in_executor(
            executor, parse_special_index, data.text
        )
        # pbar.update(len(new_page_list))
        page_list.extend(new_page_list)
    # pbar.close()
    print(f"[I] Done: {domain} | {len(page_list)}")
    await session.aclose()
    return page_list
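
# Scraped results are handed from the workers to a single writer task through
# this bounded queue.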
export_queue = asyncio.Queue(CONCURRENT_WORKERS + 1)
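
# Writer task: drain export_queue and append one JSON object per wiki to the
# output file; serialization is offloaded to the process pool.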
async def compiler_worker():
    loop = asyncio.get_running_loop()
    async with aiofile.async_open(OUTPUT_JSONL, "ab") as f:
        while True:
            page_data = await export_queue.get()
            if page_data is None:
                break
            domain, pages, path = page_data
            print(f"[I] Dump: {domain}")
            fi = {"domain": domain, "path": path, "pages": pages}
            bytes_data = await loop.run_in_executor(executor, orjson.dumps, fi)
            await f.write(bytes_data)
            await f.write(b"\n")
            print(f"[I] OKDump: {domain}")
async def worker():
    # Stagger start-up so the proxy sessions do not all connect at once.
    await asyncio.sleep(random.uniform(1, CONCURRENT_WORKERS / 60))
    while True:
        domain_root = await queue.get()
        if domain_root is None:
            break
        domain, _, path, __ = domain_root
        if path == "":
            path = "/wiki/"
        if "vpn-restricted" in domain:
            continue
        pages = await domain_processor(domain, path)
        if pages is None:
            continue
        await export_queue.put((domain, pages, path))
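
# Entry point: queue up every wiki from the CSV that has not been scraped yet,
# then shut the workers and the writer down with sentinel values.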
async def main():
    loop = asyncio.get_running_loop()
    workers = [loop.create_task(worker()) for _ in range(CONCURRENT_WORKERS)]
    writer = loop.create_task(compiler_worker())
    seen_domains = set()
    if OUTPUT_JSONL.exists():
        print("[I] Fetching seen domains...")
        with open(OUTPUT_JSONL, "rb") as f:
            for line in tqdm.tqdm(f, desc="Domains Parsed"):
                seen_domains.add(orjson.loads(line)["domain"])
    with open(WIKI_LIST) as f:
        reader = csv.reader(f)
        for line in reader:
            if len(line) == 0:
                continue
            domain, friendly_name, path, has_scraped = line
            if domain[0] == "#":
                continue
            if domain in seen_domains:
                continue
            await queue.put((domain, friendly_name, path, has_scraped))
    # Push shutdown sentinels so every worker exits its loop.
    for _ in range(CONCURRENT_WORKERS + 1):
        await queue.put(None)
    await asyncio.gather(*workers)
    await export_queue.put(None)
    await asyncio.gather(writer)


if __name__ == "__main__":
    asyncio.run(main())