# enwiki_to_arwiki_categories / fix langlinks.py
import tqdm
import re
import json
import requests
from huggingface_hub import login
from huggingface_hub import upload_file
from datasets import Dataset
# Log in to Hugging Face (replace "YOUR_ACCESS_TOKEN" with your own token)
login("YOUR_ACCESS_TOKEN")
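# Note: the token needs write access, since this script uploads files with
# upload_file() and pushes datasets with push_to_hub() further below.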
# Load the langlinks JSON file directly from its URL
json_url = "https://huggingface.co/Ibrahemqasim/enwiki_to_arwiki_categories/resolve/main/langlinks.json"
response = requests.get(json_url)
data = response.json()
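# `data` is expected to be a list of {"en": ..., "ar": ...} records that map
# English category titles to their Arabic interwiki titles (see the loop below).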
# Load the countries JSON file directly from its URL
json_url2 = "https://huggingface.co/Ibrahemqasim/enwiki_to_arwiki_categories/resolve/main/countries.json"
response2 = requests.get(json_url2)
countries = response2.json()
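# `countries` is assumed to be a flat {english_country_name: arabic_country_name}
# mapping, as implied by the countries.items() loop below.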
# Convert the dictionary into a list of dictionaries [{ "en": "value", "ar": "value" }, ...]
to_work = [
    # "langlinks",
    # "filtered_data",
    # "cats_2000_contry",
    "cats_2000",
]
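# Only the tables left uncommented in `to_work` are written to JSON and
# uploaded in the final loop of the script.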
data_lists = {
    "langlinks": {},
    "filtered_data": {},
    "cats_2000_contry": {},
    "cats_2000": {},
}
for tab in tqdm.tqdm(data):
    # ---
    key = tab["en"]
    value = tab["ar"]
    # ---
    # e.g. "Category:1. FC Köln non-playing staff"
    # remove " from start and end
    # ---
    if key.startswith('"') and key.endswith('"'):
        key = key[1:-1]
    # ----
    # remove (:") from start and (",) from end
    # e.g. :"cc",
    if value.startswith(':"') and value.endswith('",'):
        value = value[2:-2]
    # ----
    # data_lists["langlinks"].append({"en": key, "ar": value})
    data_lists["langlinks"][key] = value
    # ----
    # Keep the pair only if key and value both contain the same 4-digit year
    key_digits = re.search(r"\d{4}", key)
    value_digits = re.search(r"\d{4}", value)
    # ----
    if key_digits and value_digits and key_digits.group() == value_digits.group():
        # data_lists["filtered_data"].append({"en": key, "ar": value})
        data_lists["filtered_data"][key] = value
        # ---
        # Normalize the year to "2000" in both titles
        key2 = key.replace(key_digits.group(), "2000")
        value2 = value.replace(value_digits.group(), "2000")
        # ---
        # data_lists["cats_2000"].append({"en": key2, "ar": value2})
        data_lists["cats_2000"][key2] = value2
        # ----
        # Also build a country-neutral variant by replacing the country name
        # with the placeholder "country"
        for en_c, ar_c in countries.items():
            if en_c in key2 and ar_c in value2:
                key3 = key2.replace(en_c, "country")
                value3 = value2.replace(ar_c, "country")
                # ---
                data_lists["cats_2000_contry"][key3] = value3
print(f"all data len: {len(data):,}.")
datasets_list = {
    "langlinks": "categories_en2ar",
    "filtered_data": "categories_en2ar_with_years",
    "cats_2000_contry": "categories_en2ar-cats_2000_contry",
    "cats_2000": "categories_en2ar-cats_2000",
}
# for x, data_list in data_lists.items():
for x in to_work:
    data_list = data_lists.get(x)
    # ---
    data_list = [{"en": key, "ar": value} for key, value in data_list.items()]
    # Save the corrected dictionary to a JSON file
    with open(f"{x}.json", "w", encoding="utf-8") as f:
        json.dump(data_list, f, ensure_ascii=False, indent=4)
    print("______________")
    print(f"len of {x}: {len(data_list)}.")
    upload_file(
        path_or_fileobj=f"{x}.json",  # the local file that was just saved
        path_in_repo=f"{x}.json",  # path inside the repository
        repo_id="Ibrahemqasim/enwiki_to_arwiki_categories",  # repository ID
        # repo_type="dataset",  # repository type (we use dataset for the files)
    )
    print(f"file: {x} uploaded successfully!")
    print("____________________________")
    set_name = datasets_list.get(x)
    if set_name:
        # Create a Dataset from the list of records
        dataset = Dataset.from_list(data_list)
        # Push the Dataset to the Hugging Face Hub
        dataset.push_to_hub(f"Ibrahemqasim/{set_name}")