# enwiki_to_arwiki_categories / fix langlinks.py
# (Hugging Face file-page header kept as a comment so the file parses:
#  author Ibrahemqasim, "Update fix langlinks.py", commit b5fe413 verified,
#  raw / history / blame, 5.52 kB)
import tqdm
import re
import json
import requests
from huggingface_hub import login
from huggingface_hub import upload_file
from datasets import Dataset
from google.colab import userdata
# Log in to Hugging Face (token stored in Colab secrets under "HF_API").
login(userdata.get('HF_API'))

# Download the en->ar category langlinks JSON directly from the hub.
json_url = "https://huggingface.co/Ibrahemqasim/enwiki_to_arwiki_categories/resolve/main/langlinks.json"
response = requests.get(json_url, timeout=60)
# Fail fast with a clear HTTPError instead of an obscure JSON decode error.
response.raise_for_status()
data = response.json()

# Download the English -> Arabic country-name mapping the same way.
json_url2 = "https://huggingface.co/Ibrahemqasim/enwiki_to_arwiki_categories/resolve/main/countries.json"
response2 = requests.get(json_url2, timeout=60)
response2.raise_for_status()
countries = response2.json()
# Buckets to build from the raw langlinks; each maps an English category
# title to {"ar": <arabic title>, "count": <number of duplicate sightings>}.
to_work = [
    "langlinks",
    "filtered_data",
    "cats_2000_contry",  # (sic) "contry" — the key is spelled this way throughout
    "cats_2000",
]
data_lists = {name: {} for name in to_work}

# Sort country names longest-first so the regex alternation prefers the most
# specific alternative. Sorting by space count (the old behavior) is not
# enough: "-" is a \b word boundary, so "Guinea" would match inside
# "Guinea-Bissau" if it appeared earlier in the alternation.
sorted_keys = sorted(countries.keys(), key=len, reverse=True)
# One alternation over every country name, word-boundary anchored.
regex_pattern = r'\b(' + '|'.join(map(re.escape, sorted_keys)) + r')\b'

# Placeholders substituted into templated category titles.
YEAR_PATTERN = "{YEAR}"
COUNTRY_PATTERN = "{COUNTRY}"
# Matches a year range ("1990–1995" / "1990-1995") or a bare 4-digit year.
# Compiled once here instead of being re-built from a string on every
# iteration of the loop below.
YEAR_RE = re.compile(r"(\d+[–-]\d+|\d{4})")

for tab in tqdm.tqdm(data):
    key = tab["en"]
    value = tab["ar"]

    # Strip stray quoting left over from the dump, e.g.
    #   en: "Category:1. FC Köln non-playing staff"  -> surrounding quotes
    #   ar: :"cc",                                   -> leading :" and trailing ",
    if key.startswith('"') and key.endswith('"'):
        key = key[1:-1]
    if value.startswith(':"') and value.endswith('",'):
        value = value[2:-2]

    # "count" records how many *duplicate* occurrences of the key were seen:
    # the first sighting stores 0, each repeat increments it.
    if key in data_lists["langlinks"]:
        data_lists["langlinks"][key]["count"] += 1
    else:
        data_lists["langlinks"][key] = {"ar": value, "count": 0}

    # Keep only pairs whose year (or year range) is identical on both sides.
    key_digits = YEAR_RE.search(key)
    value_digits = YEAR_RE.search(value)

    if key_digits and value_digits and key_digits.group() == value_digits.group():
        if key in data_lists["filtered_data"]:
            data_lists["filtered_data"][key]["count"] += 1
        else:
            data_lists["filtered_data"][key] = {"ar": value, "count": 0}

        # Template the year out: "2000 in X" -> "{YEAR} in X".
        key2 = key.replace(key_digits.group(), YEAR_PATTERN)
        value2 = value.replace(value_digits.group(), YEAR_PATTERN)

        if key2 in data_lists["cats_2000"]:
            data_lists["cats_2000"][key2]["count"] += 1
        else:
            data_lists["cats_2000"][key2] = {"ar": value2, "count": 0}

        # Look for a country name in the year-templated English title.
        match = re.search(regex_pattern, key2)
        if match:
            en_country = match.group(1)
            ar_country = countries.get(en_country)

            # Only template the country when its Arabic name actually
            # appears in the Arabic title.
            if ar_country and ar_country in value2:
                key3 = re.sub(rf'\b{re.escape(en_country)}\b', COUNTRY_PATTERN, key2)
                value3 = re.sub(rf'\b{re.escape(ar_country)}\b', COUNTRY_PATTERN, value2)

                if COUNTRY_PATTERN in key3 and COUNTRY_PATTERN in value3:
                    if key3 in data_lists["cats_2000_contry"]:
                        data_lists["cats_2000_contry"][key3]["count"] += 1
                    else:
                        data_lists["cats_2000_contry"][key3] = {"ar": value3, "count": 0}

print(f"{len(data_lists['cats_2000_contry'])=}")
print(f"{len(data_lists['cats_2000'])=}")
print(f"all data len: {len(data):,}.")
# Hub dataset repo name for each bucket that also gets pushed as a Dataset.
datasets_list = dict(
    langlinks="categories_en2ar",
    filtered_data="categories_en2ar_with_years",
    cats_2000_contry="categories_en2ar-cats_2000_contry",
    cats_2000="categories_en2ar-cats_2000",
)
for x in to_work:
    # Flatten {en: {"ar": ..., "count": ...}} into a list of flat records.
    # Index directly: a missing bucket should raise a clear KeyError here,
    # not an AttributeError on None two lines later.
    data_list = [
        {"en": key, "ar": value["ar"], "count": value["count"]}
        for key, value in data_lists[x].items()
    ]

    # Save the corrected mapping to a local JSON file.
    with open(f"{x}.json", "w", encoding="utf-8") as f:
        json.dump(data_list, f, ensure_ascii=False, indent=4)

    print("______________")
    print(f"len of {x} : {len(data_list)}.")

    # Upload the JSON file to the model repo.
    upload_file(
        path_or_fileobj=f"{x}.json",  # the file saved above
        path_in_repo=f"{x}.json",     # path inside the repository
        repo_id="Ibrahemqasim/enwiki_to_arwiki_categories",
        # repo_type="dataset",  # repo type (use "dataset" for dataset repos)
    )
    # Report success only after upload_file actually returned — the original
    # printed this message before the upload had even started.
    print(f"file: {x} uploaded successfully!")
    print("____________________________")

    # Also push as a Dataset when this bucket has a dataset repo configured.
    set_name = datasets_list.get(x)
    if set_name:
        dataset = Dataset.from_list(data_list)
        dataset.push_to_hub(f"Ibrahemqasim/{set_name}")