Aananda-giri committed on
Commit df1ecc3 · verified · 1 Parent(s): 9d79118

Upload wikipedia_nepali_data.ipynb

wikipedia_nepali/wikipedia_nepali_data.ipynb ADDED
@@ -0,0 +1,308 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "MChM63gu3iVj",
+    "outputId": "a45849f2-d278-4213-9e54-365b6db4dac2"
+   },
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "/content/drive/MyDrive\n"
+     ]
+    }
+   ],
+   "source": [
+    "%cd /content/drive/MyDrive\n",
+    "# %cd /content/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "M1kqCF2e4rtk",
+    "outputId": "61ce92d3-b844-4543-9a0a-ba6d914e3af9"
+   },
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "0 - Retrieved article: 'e' (गणितीय अचर)\n",
+      "10 - Retrieved article: अँद्रे मारी एम्पियर\n",
+      "500 - Retrieved article: अपाचे वेब सर्भर\n",
+      "1000 - Retrieved article: अली असगर\n",
+      "1500 - Retrieved article: आन्तरिक मामिला तथा कानून मन्त्री (कोशी प्रदेश)\n",
+      "2000 - Retrieved article: इभा मेन्डीज\n",
+      "2500 - Retrieved article: ऋतु वर्मा\n",
+      "3000 - Retrieved article: एशियन लाईफ इन्स्योरेन्स कम्पनी लिमिटेड\n",
+      "3500 - Retrieved article: कब्बलाह\n",
+      "4000 - Retrieved article: कानपुर\n",
+      "4500 - Retrieved article: कुँइकेल\n",
+      "5000 - Retrieved article: केरी मुल्लिगन\n",
+      "5500 - Retrieved article: क्रेस्टन,ओहायो\n",
+      "6000 - Retrieved article: गङ्गालाल तुलाधर\n",
+      "6500 - Retrieved article: गुरूत्वप्रवेग\n",
+      "7000 - Retrieved article: घटाउ\n",
+      "7500 - Retrieved article: चाल्सा\n",
+      "8000 - Retrieved article: छाया\n",
+      "8500 - Retrieved article: जाँतो\n",
+      "9000 - Retrieved article: जेम्स मिल्नर\n",
+      "9500 - Retrieved article: टाइम (पत्रिका)\n",
+      "10000 - Retrieved article: डायलग आजीयाटा\n",
+      "10500 - Retrieved article: ताको\n",
+      "11000 - Retrieved article: त्रिपुरासुन्दरी गाउँपालिका, धादिङ\n",
+      "11500 - Retrieved article: दशावतार (२००८ चलचित्र)\n",
+      "12000 - Retrieved article: देवी-देवता\n",
+      "12500 - Retrieved article: नकुल अभ्यङ्कर\n",
+      "13000 - Retrieved article: नारायणी रङ्गशाला\n",
+      "13500 - Retrieved article: नेपाल राष्ट्रिय अन्डर-२० फुटबल टोली\n",
+      "14000 - Retrieved article: नौवाखोर पर्साही\n",
+      "14500 - Retrieved article: पश्चिम सिक्किम साहित्य प्रकाशन\n",
+      "15000 - Retrieved article: पिडारी\n",
+      "15500 - Retrieved article: पोखरी भञ्ज्याङ\n",
+      "16000 - Retrieved article: फक्सिब\n",
+      "16500 - Retrieved article: बगहा\n",
+      "17000 - Retrieved article: बहुअरवा भाठा\n",
+      "17500 - Retrieved article: बिल गेट्स\n",
+      "18000 - Retrieved article: बोलांगीर\n",
+      "18500 - Retrieved article: भारतको उपप्रधानमन्त्री\n",
+      "19000 - Retrieved article: भेसुभियस पर्वत\n",
+      "19500 - Retrieved article: मनीष पाण्डे\n",
+      "20000 - Retrieved article: माइक्रोम्याक्स\n",
+      "20500 - Retrieved article: माहेला जयवर्दने\n",
+      "21000 - Retrieved article: मेट्रिक्स\n",
+      "21500 - Retrieved article: यश (अभिनेता)\n",
+      "22000 - Retrieved article: रमित ढुङ्गाना\n",
+      "22500 - Retrieved article: राफ्टिङ\n",
+      "23000 - Retrieved article: रुपन्देही २ (निर्वाचन क्षेत्र)\n",
+      "23500 - Retrieved article: लन्जरी\n",
+      "24000 - Retrieved article: लुना २५\n",
+      "24500 - Retrieved article: वायु फ्यालफ्याले\n",
+      "25000 - Retrieved article: विश्वामित्र (स्पष्टता)\n",
+      "25500 - Retrieved article: शिक्षा, विज्ञान तथा प्रविधि मन्त्रालय (नेपाल)\n",
+      "26000 - Retrieved article: संयुक्त अरब इमिरेट्स महिला राष्ट्रिय फुटबल टोली\n",
+      "26500 - Retrieved article: समानान्तर चतुर्भुज\n",
+      "27000 - Retrieved article: साधना शिवदासानी\n",
+      "27500 - Retrieved article: सिन्डी क्रफोर्ड\n",
+      "28000 - Retrieved article: सुपेरियर ताल\n",
+      "28500 - Retrieved article: सोफी टर्नर (मोडल)\n",
+      "29000 - Retrieved article: हबिगन्ज जिल्ला\n",
+      "29500 - Retrieved article: हिमोफोलिया\n",
+      "30000 - Retrieved article: २०१६ फा कम्युनिटी शिल्ड\n",
+      "Saved 30156 articles to nepali_wikipedia_articles.json\n"
+     ]
+    }
+   ],
+   "source": [
+    "import requests\n",
+    "import time\n",
+    "from typing import Dict, List, Optional\n",
+    "import json\n",
+    "import math\n",
+    "\n",
+    "class WikipediaNepaliExtractor:\n",
+    "    def __init__(self):\n",
+    "        self.base_url = \"https://ne.wikipedia.org/w/api.php\"\n",
+    "        self.session = requests.Session()\n",
+    "\n",
+    "    def get_articles(self, limit: float = 500) -> List[Dict]:\n",
+    "        \"\"\"\n",
+    "        Fetch Nepali Wikipedia articles using the MediaWiki API.\n",
+    "\n",
+    "        Args:\n",
+    "            limit (float): Maximum number of articles to retrieve (math.inf fetches all)\n",
+    "\n",
+    "        Returns:\n",
+    "            List[Dict]: List of articles with title and content\n",
+    "        \"\"\"\n",
+    "        articles = []\n",
+    "        continue_param = ''\n",
+    "        count = 0\n",
+    "\n",
+    "        while len(articles) < limit:\n",
+    "            params = {\n",
+    "                'action': 'query',\n",
+    "                'format': 'json',\n",
+    "                'list': 'allpages',\n",
+    "                'aplimit': min(50, limit - len(articles)),\n",
+    "                'apnamespace': 0,  # main article namespace\n",
+    "                'apfilterredir': 'nonredirects'  # skip redirects\n",
+    "            }\n",
+    "\n",
+    "            if continue_param:\n",
+    "                params['apcontinue'] = continue_param\n",
+    "\n",
+    "            try:\n",
+    "                response = self.session.get(self.base_url, params=params)\n",
+    "\n",
+    "                response.raise_for_status()\n",
+    "                data = response.json()\n",
+    "\n",
+    "                # Extract articles from the response\n",
+    "                for page in data['query']['allpages']:\n",
+    "                    article = self._get_article_content(page['pageid'])  # e.g. page['pageid'] = 126154\n",
+    "                    if article:\n",
+    "                        articles.append(article)\n",
+    "\n",
+    "                        if count == 10 or count % 500 == 0:\n",
+    "                            print(f\"{count} - Retrieved article: {article['title']}\")\n",
+    "                        count += 1\n",
+    "\n",
+    "                # Check whether there are more articles to fetch\n",
+    "                if 'continue' in data and 'apcontinue' in data['continue']:\n",
+    "                    continue_param = data['continue']['apcontinue']\n",
+    "                else:\n",
+    "                    break\n",
+    "\n",
+    "                # Delay between requests to avoid hitting API rate limits\n",
+    "                time.sleep(1)\n",
+    "\n",
+    "            except requests.exceptions.RequestException as e:\n",
+    "                print(f\"Error fetching articles: {e}\")\n",
+    "                break\n",
+    "\n",
+    "        return articles\n",
+    "\n",
+    "    def _get_article_content(self, page_id: int) -> Optional[Dict]:\n",
+    "        \"\"\"\n",
+    "        Fetch content for a specific article by page ID.\n",
+    "\n",
+    "        Args:\n",
+    "            page_id (int): Wikipedia page ID,\n",
+    "                e.g. 126154\n",
+    "\n",
+    "        Returns:\n",
+    "            Optional[Dict]: Article information including title and content, or None on error\n",
+    "        \"\"\"\n",
+    "        # # To fetch raw wiki markup instead of a plain-text extract, use:\n",
+    "        # params = {\n",
+    "        #     'action': 'query',\n",
+    "        #     'format': 'json',\n",
+    "        #     'pageids': page_id,\n",
+    "        #     'prop': 'revisions',\n",
+    "        #     'rvprop': 'content',\n",
+    "        #     'rvslots': '*'\n",
+    "        # }\n",
+    "\n",
+    "        # Plain-text extract\n",
+    "        params = {\n",
+    "            'action': 'query',\n",
+    "            'format': 'json',\n",
+    "            'pageids': page_id,\n",
+    "            'prop': 'extracts|info',  # 'info' populates the page length below\n",
+    "            'explaintext': True,\n",
+    "            # 'exlimit': 'max'\n",
+    "            # 'exchars': 2000000\n",
+    "        }\n",
+    "\n",
+    "        try:\n",
+    "            response = self.session.get(self.base_url, params=params)\n",
+    "            response.raise_for_status()\n",
+    "            data = response.json()\n",
+    "\n",
+    "            page_data = data['query']['pages'][str(page_id)]\n",
+    "            # print(f'page_data: {page_data}')\n",
+    "            # print(f'data: {data}')\n",
+    "            return {\n",
+    "                'title': page_data['title'],\n",
+    "                'content': page_data.get('extract', ''),\n",
+    "                'pageid': page_id,\n",
+    "                'length': page_data.get('length', 0)\n",
+    "            }\n",
+    "\n",
+    "        except requests.exceptions.RequestException as e:\n",
+    "            print(f\"Error fetching article content for page {page_id}: {e}\")\n",
+    "            return None\n",
+    "\n",
+    "def save_articles_to_file(articles: List[Dict], filename: str):\n",
+    "    \"\"\"\n",
+    "    Save extracted articles to a JSON file.\n",
+    "\n",
+    "    Args:\n",
+    "        articles (List[Dict]): List of article dictionaries\n",
+    "        filename (str): Output filename\n",
+    "    \"\"\"\n",
+    "    with open(filename, 'w', encoding='utf-8') as f:\n",
+    "        json.dump(articles, f, ensure_ascii=False, indent=2)\n",
+    "\n",
+    "def main():\n",
+    "    extractor = WikipediaNepaliExtractor()\n",
+    "\n",
+    "    # Fetch every article; pass a finite limit (e.g. 100) to fetch fewer\n",
+    "    articles = extractor.get_articles(limit=math.inf)\n",
+    "\n",
+    "    # Save to file\n",
+    "    save_articles_to_file(articles, 'nepali_wikipedia_articles.json')\n",
+    "    print(f\"Saved {len(articles)} articles to nepali_wikipedia_articles.json\")\n",
+    "\n",
+    "if __name__ == \"__main__\":\n",
+    "    main()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "# push to huggingface\n",
+    "\n",
+    "from huggingface_hub import HfApi\n",
+    "api = HfApi()\n",
+    "api.upload_file(\n",
+    "    path_or_fileobj=\"nepali_wikipedia_articles.json\",\n",
+    "    path_in_repo=\"wikipedia_nepali/feb_10_count_30156.json\",\n",
+    "    repo_id=\"Aananda-giri/nepali_llm_datasets\",\n",
+    "    repo_type=\"dataset\"\n",
+    ")"
+   ],
+   "metadata": {
+    "id": "64di2aPVAO9a",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 173
+    },
+    "outputId": "ef7a89b4-08f6-4f65-ea3d-c2205cc3cbe8"
+   },
+   "execution_count": 17,
+   "outputs": [
+    {
+     "output_type": "execute_result",
+     "data": {
+      "text/plain": [
+       "CommitInfo(commit_url='https://huggingface.co/datasets/Aananda-giri/nepali_llm_datasets/commit/c1082d805d5347dbb1d1700448e0e31d2ccda665', commit_message='Upload wikipedia_nepali/feb_10_count_30156.json with huggingface_hub', commit_description='', oid='c1082d805d5347dbb1d1700448e0e31d2ccda665', pr_url=None, repo_url=RepoUrl('https://huggingface.co/datasets/Aananda-giri/nepali_llm_datasets', endpoint='https://huggingface.co', repo_type='dataset', repo_id='Aananda-giri/nepali_llm_datasets'), pr_revision=None, pr_num=None)"
+      ],
+      "application/vnd.google.colaboratory.intrinsic+json": {
+       "type": "string"
+      }
+     },
+     "metadata": {},
+     "execution_count": 17
+    }
+   ]
+  }
+ ],
+ "metadata": {
+  "colab": {
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
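
For quick verification, a minimal sketch of fetching the uploaded file back from the dataset repo and checking its contents. The repo id and file path are copied from the notebook's upload cell, and the expected record count (30156) comes from the notebook's output; hf_hub_download is the standard huggingface_hub helper for retrieving a single file, and this assumes the dataset repo is public (or a token is configured).

# Minimal sketch: re-download the uploaded JSON and inspect it.
import json
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="Aananda-giri/nepali_llm_datasets",
    filename="wikipedia_nepali/feb_10_count_30156.json",
    repo_type="dataset",
)

with open(local_path, encoding="utf-8") as f:
    articles = json.load(f)

print(len(articles))         # expected: 30156, per the notebook output
print(articles[0]['title'])  # each record has title, content, pageid, length

If nepali_wikipedia_articles.json is already on disk from the extraction cell, the download step can be skipped and the file opened directly.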