Create nat_datasets.py
nat_datasets.py +136 -0
nat_datasets.py
ADDED
@@ -0,0 +1,136 @@
import tqdm
import re
import requests
from huggingface_hub import login
from datasets import Dataset
from datasets import load_dataset
from google.colab import userdata

login(userdata.get('HF_TOKEN'))

data = load_dataset("Ibrahemqasim/categories_en2ar", split="train")

# nationalities columns: "nat_en", "man", "men", "women", "womens", "country_en", "country_ar"
nationalities = load_dataset("Ibrahemqasim/nationalities", split="train")
# Sort longest-first so multi-word nationalities match before their single-word prefixes
nationalities_pattern = r'\b(' + '|'.join(map(re.escape, [n["nat_en"] for n in sorted(nationalities, key=lambda x: -x["nat_en"].count(' '))])) + r')\b'

countries = load_dataset("Ibrahemqasim/countries", split="train")
countries_pattern = r'\b(' + '|'.join(map(re.escape, [n["en"] for n in sorted(countries, key=lambda x: -x["en"].count(' '))])) + r')\b'

# ---
countries_dict = {cc["en"]: cc for cc in countries}
nationalities_dict = {cc["nat_en"]: cc for cc in nationalities}
# ---
to_work = [
    "categories_with_nationalities",
    # "categories_with_years",
    # "categories_with_YEAR_COUNTRY_pattern",
    # "categories_with_YEAR_pattern",
]

data_lists = {
    "categories_with_nationalities": {},
    "categories_with_years": {},
    "categories_with_YEAR_COUNTRY_pattern": {},
    "categories_with_YEAR_pattern": {},
}

YEAR_PATTERN = "{YEAR}"
NAT = "{NAT}"
AR_NAT_MEN = "{NAT_MEN}"
COUNTRY_PATTERN = "{COUNTRY}"

for tab in tqdm.tqdm(data):
    # ---
    key = tab["en"]
    value = tab["ar"]
    # ---
    # Match a 4-digit year or a year range (e.g. 2010–2015); the pair is kept only if key and value contain the same match
    reg_year = r"(\d+[–-]\d+|\d{4})"
    # ---
    key_digits = re.search(reg_year, key)
    value_digits = re.search(reg_year, value)
    # ----
    match1 = re.search(nationalities_pattern, key)
    # ----
    if match1:
        en_country = match1.group(1)
        ar_country = nationalities_dict.get(en_country, {}).get("men", "")
        # ---
        if ar_country and ar_country in value:
            key1 = re.sub(rf'\b{re.escape(en_country)}\b', COUNTRY_PATTERN, key)
            value1 = re.sub(rf'\b{re.escape(ar_country)}\b', AR_NAT_MEN, value)
            # ---
            if COUNTRY_PATTERN in key1 and AR_NAT_MEN in value1:
                # ---
                if key1 in data_lists["categories_with_nationalities"]:
                    data_lists["categories_with_nationalities"][key1]["count"] += 1
                else:
                    data_lists["categories_with_nationalities"][key1] = {"ar": value1, "count": 1}
                # ---
                continue
    # ---
    if key_digits and value_digits and key_digits.group() == value_digits.group():
        # data_lists["categories_with_years"].append({"en": key, "ar": value, "count": 1})
        if key in data_lists["categories_with_years"]:
            data_lists["categories_with_years"][key]["count"] += 1
        else:
            data_lists["categories_with_years"][key] = {"ar": value, "count": 1}
        # ---
        key2 = key.replace(key_digits.group(), YEAR_PATTERN)
        value2 = value.replace(value_digits.group(), YEAR_PATTERN)
        # ---
        # data_lists["categories_with_YEAR_pattern"].append({"en": key2, "ar": value2, "count": 1})
        # ---
        if key2 in data_lists["categories_with_YEAR_pattern"]:
            data_lists["categories_with_YEAR_pattern"][key2]["count"] += 1
        else:
            data_lists["categories_with_YEAR_pattern"][key2] = {"ar": value2, "count": 1}
        # ----
        # Search for the country name in key2
        match = re.search(countries_pattern, key2)
        # ----
        if match:
            en_country = match.group(1)
            # Look up the Arabic country name via countries_dict (the "ar" column name is assumed)
            ar_country = countries_dict.get(en_country, {}).get("ar", "")
            # ---
            if ar_country and ar_country in value2:
                key3 = re.sub(rf'\b{re.escape(en_country)}\b', COUNTRY_PATTERN, key2)
                value3 = re.sub(rf'\b{re.escape(ar_country)}\b', COUNTRY_PATTERN, value2)
                # ---
                if COUNTRY_PATTERN in key3 and COUNTRY_PATTERN in value3:
                    # ---
                    if key3 in data_lists["categories_with_YEAR_COUNTRY_pattern"]:
                        data_lists["categories_with_YEAR_COUNTRY_pattern"][key3]["count"] += 1
                    else:
                        data_lists["categories_with_YEAR_COUNTRY_pattern"][key3] = {"ar": value3, "count": 1}
# ----
# ----
print(f"{len(data_lists['categories_with_YEAR_COUNTRY_pattern'])=}")
print(f"{len(data_lists['categories_with_YEAR_pattern'])=}")

print(f"all data len: {len(data):,}.")

# for x, data_list in data_lists.items():
for x in to_work:
    data_list = data_lists.get(x)
    # ---
    if x == "countries":
        data_list = [{"en": key, "ar": value} for key, value in data_list.items()]
    else:
        data_list = [{"en": key, "ar": value["ar"], "count": value["count"]} for key, value in data_list.items()]
    # sort data_list by count
    data_list = sorted(data_list, key=lambda x: x["count"], reverse=True)
    # ---
    print("______________")
    print(f"len of {x}: {len(data_list)}.")
    # ---
    print("____________________________")
    # ---
    # Create the Dataset
    dataset = Dataset.from_list(data_list)

    # Push the Dataset to Hugging Face
    dataset.push_to_hub(f"Ibrahemqasim/{x}")
    # ---
    print(f"dataset: Ibrahemqasim/{x} pushed to the Hub successfully!")
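
For review purposes, a minimal standalone sketch of the substitution step the main loop performs on a single (en, ar) pair. The category strings and the Arabic plural form below are hypothetical stand-ins, not rows taken from the real datasets:

import re

COUNTRY_PATTERN = "{COUNTRY}"
AR_NAT_MEN = "{NAT_MEN}"

# Hypothetical example pair (illustrative only)
key = "American poets"
value = "شعراء أمريكيون"
en_nat = "American"        # what nationalities_pattern would capture
ar_nat_men = "أمريكيون"    # assumed value of nationalities_dict["American"]["men"]

key1 = re.sub(rf'\b{re.escape(en_nat)}\b', COUNTRY_PATTERN, key)
value1 = re.sub(rf'\b{re.escape(ar_nat_men)}\b', AR_NAT_MEN, value)

print(key1)    # -> {COUNTRY} poets
print(value1)  # -> شعراء {NAT_MEN}

If both placeholders land in their respective strings, the pair becomes a reusable translation template keyed by key1, and repeated templates only bump the count.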
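
A short sketch, assuming the push succeeded and the repo is readable with the same HF_TOKEN, of how to pull one of the pushed datasets back for a quick sanity check:

from datasets import load_dataset

# Load the dataset the script just pushed (name taken from the script's to_work list)
templates = load_dataset("Ibrahemqasim/categories_with_nationalities", split="train")

print(len(templates))
print(templates[0])  # expected fields: "en", "ar", "count"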