0x22almostEvil committed on
Commit
89cd787
·
1 Parent(s): 401ac39

Warning: cringe code

Files changed (3) hide show
  1. exceptions.py +6 -0
  2. pywiki_custom.py +279 -0
  3. wiki.py +58 -0
exceptions.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
class ParseError(RuntimeError):
    """Raised when a wikiHow page cannot be parsed into title/intro/steps."""
3
+
4
+
5
class UnsupportedLanguage(ValueError):
    """Raised for a language code with no wikiHow edition.

    See https://www.wikihow.com/wikiHow:Language-Projects for the full list.
    """
pywiki_custom.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import bs4
from exceptions import ParseError, UnsupportedLanguage
from datetime import timedelta
from requests_cache import CachedSession

# Cache HTTP responses in memory for one hour so repeated fetches of the
# same page (e.g. during a scraping run) do not hit wikiHow again.
expire_after = timedelta(hours=1)
session = CachedSession(backend='memory', expire_after=expire_after)

# Module-level output buffer shared by HowTo.print()/HowToStep.print();
# reset by HowTo.__init__ for each new article.
tmp_a = ""
10
+
11
def get_html(url):
    """Download *url* via the cached session and return the body as UTF-8 bytes.

    Responses are served from the in-memory cache (see `session`) when fresh.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (compatible; OpenAssistantCrawler/0.1; +https://open-assistant.io/)"}  # user-like browser agent; name/version; +url
    # Bug fix: a timeout is required — requests waits forever by default,
    # which would hang the whole scraping run on one dead server.
    r = session.get(url, headers=headers, timeout=30)
    html = r.text.encode("utf8")
    return html
17
+
18
+
19
class HowToStep:
    """A single numbered step of a wikiHow article."""

    def __init__(self, number, summary=None, description=None, picture=None):
        self._number = number
        self._summary = summary
        self._description = description
        # Bug fix: _picture was never initialized (the assignment was commented
        # out), so accessing the `picture` property raised AttributeError.
        # Default to None until HowTo._parse_pictures fills it in.
        self._picture = picture

    @property
    def number(self):
        return self._number

    @property
    def summary(self):
        return self._summary

    @property
    def description(self):
        return self._description

    @property
    def picture(self):
        return self._picture

    def as_dict(self):
        """Return the step as a plain dict (picture intentionally omitted)."""
        return {"number": self.number,
                "summary": self.summary,
                "description": self.description,
                }

    def print(self, extended=False):
        """Append this step (number + bold summary, optionally the full
        description) to the module-level tmp_a buffer."""
        global tmp_a
        tmp_a += str("\n" + str(self.number) + ". **" + str(self.summary) + "** ")
        if extended:
            tmp_a += str(self.description)
54
+
55
+
56
class HowTo:
    """One wikiHow article: title, intro and numbered steps, parsed lazily.

    The article HTML is fetched and parsed the first time a content property
    (url/title/intro/steps) is accessed, unless ``lazy=False`` is passed.
    """

    def __init__(self, url="http://www.wikihow.com/Special:Randomizer",
                 lazy=True):
        self._url = url
        # Reset the module-level output buffer shared by the print() methods.
        global tmp_a
        tmp_a = ""
        self._title = None
        self._intro = None
        self._steps = []
        self._parsed = False
        if not lazy:
            self._parse()

    def __repr__(self):
        return "HowTo:" + self.title

    @property
    def url(self):
        # The canonical article URL (resolved by _parse_title, so a
        # Special:Randomizer URL becomes the concrete article it landed on).
        if not self._parsed:
            self._parse()
        return self._url

    @property
    def title(self):
        if not self._parsed:
            self._parse()
        return self._title

    @property
    def intro(self):
        if not self._parsed:
            self._parse()
        return self._intro

    @property
    def steps(self):
        if not self._parsed:
            self._parse()
        return self._steps

    @property
    def summary(self):
        """Title plus a numbered one-line summary of every step."""
        summary = self.title + "\n"
        for step in self.steps:
            summary += "\n{n}. ".format(n=step.number) + step.summary + "\n"
        return summary

    @property
    def n_steps(self):
        # NOTE: intentionally does NOT trigger parsing — returns 0 until the
        # article has been fetched.
        return len(self._steps)

    def print(self, extended=False):
        """Append the article text to the shared tmp_a buffer and return it."""
        global tmp_a
        if not extended:
            tmp_a += str(self.summary)
        else:
            tmp_a += str(self.intro)
            for s in self.steps:
                s.print(extended)
        return tmp_a

    def _parse_title(self, soup):
        """Resolve the canonical URL from the <h1> link and derive the title
        from the URL's last path segment.

        Raises ParseError when the heading carries no link.
        """
        html = soup.findAll("h1",
                            {"class": ["title_lg", "title_md", "title_sm"]})[0]
        link = html.find("a")
        if not link:
            raise ParseError
        self._url = link.get("href")
        if not self._url.startswith("http"):
            self._url = "http://" + self._url
        self._title = self._url.split("/")[-1].replace("-", " ")

    def _parse_intro(self, soup):
        """Extract the article intro, stripping citation <sup> markers.

        Raises ParseError when the intro section is missing.
        """
        intro_html = soup.find("div", {"class": "mf-section-0"})
        if not intro_html:
            raise ParseError
        # Remove citation superscripts ([1], [2], ...) before taking the text.
        # (The old code shadowed the builtin `super` and duplicated the
        # strip() branch; looping over findAll directly is equivalent.)
        for sup_tag in intro_html.findAll("sup"):
            sup_tag.decompose()
        self._intro = intro_html.text.strip()

    def _parse_steps(self, soup):
        """Extract numbered steps: the bold text is the summary, everything
        remaining after removing <b> tags is the description."""
        self._steps = []
        step_html = soup.findAll("div", {"class": "step"})
        count = 0
        for html in step_html:
            # Strip embedded scripts and citation markers from the step markup.
            for script in html.findAll("script"):
                script.decompose()
            for sup_tag in html.findAll("sup"):
                sup_tag.decompose()
            count += 1
            summary = html.find("b").text
            # Drop any nested <div> text that leaked into the bold summary.
            for _extra_div in html.find("b").find_all("div"):
                summary = summary.replace(_extra_div.text, "")
            step = HowToStep(count, summary)
            for b in html.findAll("b"):
                b.decompose()
            step._description = html.text.strip()
            self._steps.append(step)

    def _parse_pictures(self, soup):
        # Currently unused (commented out in _parse): attaches one image URL
        # per step by scraping the data-src attribute out of the raw tag text.
        count = 0
        for html in soup.findAll("a", {"class": "image"}):
            html = html.find("img")
            i = str(html).find("data-src=")
            pic = str(html)[i:].replace('data-src="', "")
            pic = pic[:pic.find('"')]
            self._steps[count]._picture = pic
            count += 1

    def _parse(self):
        """Fetch and parse the article.

        Raises ParseError for a recognizably malformed page so that callers
        such as WikiHow.search can skip it; any other unexpected error is
        printed and swallowed, leaving the article partially populated.
        """
        try:
            html = get_html(self._url)
            soup = bs4.BeautifulSoup(html, 'html.parser')
            self._parse_title(soup)
            self._parse_intro(soup)
            self._parse_steps(soup)
            #self._parse_pictures(soup)
            self._parsed = True
        except ParseError:
            # Bug fix: this used to be swallowed by the blanket handler below,
            # which made WikiHow.search's `except ParseError` unreachable.
            raise
        except Exception as e:
            print(e)

    def as_dict(self):
        """Serialize the whole article (forces parsing via the properties)."""
        return {
            "title": self.title,
            "url": self._url,
            "intro": self._intro,
            "n_steps": len(self.steps),
            "steps": [s.as_dict() for s in self.steps]
        }
207
+
208
+
209
def RandomHowTo(lang="en"):
    """Return a lazy HowTo pointing at a random article in *lang*.

    Raises UnsupportedLanguage if the language code has no wikiHow edition.
    """
    lang = lang.split("-")[0].lower()
    if lang not in WikiHow.lang2url:
        # Include the offending code so the failure is self-explanatory.
        raise UnsupportedLanguage(lang)
    url = WikiHow.lang2url[lang] + "Special:Randomizer"
    return HowTo(url)
215
+
216
+
217
class WikiHow:
    """Static helpers for searching wikiHow across its language editions."""

    # ISO language code -> base URL of that wikiHow edition.
    lang2url = {
        "en": "http://www.wikihow.com/",
        "es": "http://es.wikihow.com/",
        "pt": "http://pt.wikihow.com/",
        "it": "http://www.wikihow.it/",
        "fr": "http://fr.wikihow.com/",
        "ru": "http://ru.wikihow.com/",
        "de": "http://de.wikihow.com/",
        "zh": "http://zh.wikihow.com/",
        "nl": "http://nl.wikihow.com/",
        "cz": "http://www.wikihow.cz/",
        "id": "http://id.wikihow.com/",
        "jp": "http://www.wikihow.jp/",
        "hi": "http://hi.wikihow.com/",
        "th": "http://th.wikihow.com/",
        "ar": "http://ar.wikihow.com/",
        "ko": "http://ko.wikihow.com/",
        "tr": "http://www.wikihow.com.tr/",
        "vn": "http://www.wikihow.vn/",
    } #bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk.

    @staticmethod
    def search(search_term, max_results=-1, lang="en"):
        """Yield HowTo objects matching *search_term* in the given language.

        max_results < 0 means unlimited. Raises UnsupportedLanguage for an
        unknown language code; pages that fail to parse are skipped.
        """
        # Local import keeps this file's top-level imports unchanged.
        from urllib.parse import quote_plus

        lang = lang.split("-")[0].lower()
        if lang not in WikiHow.lang2url:
            raise UnsupportedLanguage(lang)
        # Bug fix: the old replace(" ", "+") left reserved characters
        # (&, ?, #, %, non-ASCII) unescaped; quote_plus encodes them properly
        # while still mapping spaces to '+'.
        search_url = WikiHow.lang2url[lang] + \
            "wikiHowTo?search=" + quote_plus(search_term)
        html = get_html(search_url)
        soup = bs4.BeautifulSoup(html, 'html.parser').findAll('a', attrs={
            'class': "result_link"})
        count = 1
        for link in soup:
            url = link.get('href')
            if not url.startswith("http"):
                url = "http://" + url
            how_to = HowTo(url)
            try:
                how_to._parse()
            except ParseError:
                continue
            yield how_to
            count += 1
            if 0 < max_results < count:
                return
263
+
264
+
265
def search_wikihow(query, max_results=10, lang="en"):
    """Run a wikiHow search and return the matches as a list of HowTo objects."""
    results = WikiHow.search(query, max_results, lang)
    return list(results)
267
+
268
+
269
if __name__ == "__main__":
    # Smoke tests: fetch one random Italian article, then the first search
    # hit in Portuguese and English. All three hit the live site.
    how = RandomHowTo("it")
    how.print()

    for how_to in WikiHow.search("comprar bitcoin", lang="pt"):
        how_to.print()
        break

    for how_to in WikiHow.search("buy bitcoin"):
        how_to.print()
        break
wiki.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pywiki_custom import *
from urllib.parse import unquote

import io
import csv
import random

# Target wikiHow language edition and the number of random articles to scrape.
lang = "de"
n = 1100
10
+
11
def cleandata(language=None):
    """Deduplicate data_<lang>.csv by URL and rewrite column 3 as JSON metadata.

    Reads data_<lang>.csv, drops rows whose URL was already seen, replaces the
    URL column with a {"url": ..., "language": ...} JSON blob, and writes the
    result to <lang>-wikihow-qa-dataset-1k.csv.

    language defaults to the module-level `lang` setting.
    """
    # Local import keeps this file's top-level imports untouched.
    import json

    if language is None:
        language = lang
    input_file = f'data_{language}.csv'
    output_file = f'{language}-wikihow-qa-dataset-1k.csv'

    seen_urls = set()

    # newline='' is required for the csv module; utf-8 avoids locale-dependent
    # encoding of the German article text.
    with open(input_file, 'r', newline='', encoding='utf-8') as f_input, \
            open(output_file, 'w', newline='', encoding='utf-8') as f_output:
        csv_input = csv.reader(f_input)
        csv_output = csv.writer(f_output)

        header = next(csv_input)
        header[3] = 'METADATA'
        csv_output.writerow(header)

        for row in csv_input:
            url = row[3]
            if url in seen_urls:
                print(f"Duplicate row found, url: {url}")
                continue
            # Bug fix: the old f-string template produced invalid JSON whenever
            # the URL contained a double quote or backslash; json.dumps escapes
            # correctly and emits the same format for plain URLs.
            row[3] = json.dumps({"url": url, "language": language})
            csv_output.writerow(row)
            seen_urls.add(url)
    print("Done!")
34
+
35
def getrandom():
    """Fetch one random article and build an (instruction, answer, url) triple.

    The instruction is a randomly chosen German question template filled with
    the article title; the answer is the full rendered article text.
    """
    how_to = RandomHowTo(lang)
    wkhowto_url = how_to.url

    # The title is derived from the URL, so percent-escapes (e.g. %C3%BC for
    # 'ü') must be decoded. Bug fix: unquote takes a str — the old
    # .encode('utf-8') passed bytes, which only works on Python 3.9+ and is
    # redundant there.
    theme = unquote(how_to.title)
    templates = [
        "Wie mache ich {theme}?",
        "Schreibe wie ich folgendes tun kann: {theme}"
    ]

    wkhowto_q = random.choice(templates).format(theme=theme)
    wkhowto_a = how_to.print(extended=True)
    return wkhowto_q, wkhowto_a, wkhowto_url
48
+
49
# Scrape `n` random articles into data_<lang>.csv, then deduplicate and
# convert the URL column to JSON metadata via cleandata(). Each iteration
# hits the live site, so this runs for a long time.
with open(f'data_{lang}.csv', mode='w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['INSTRUCTION', 'RESPONSE', 'SOURCE', 'URL'])
    for i in range(n):
        wkhowto_q, wkhowto_a, wkhowto_url = getrandom()
        data = [wkhowto_q, wkhowto_a, f'{lang}.wikihow.com', wkhowto_url]
        writer.writerow(data)
        print(f"{i+1} out of {n}")  # progress indicator

cleandata()