from pywiki_custom import RandomHowTo
from urllib.parse import unquote
import csv
import json
import random

lang = "de"
n = 1100  # oversample so roughly 1k unique articles remain after deduplication


# removing duplicates and wrapping the URL column into a METADATA JSON object
def cleandata():
    input_file = f'data_{lang}.csv'
    output_file = f'{lang}-wikihow-qa-dataset-1k.csv'
    unique_urls = {}
    with open(input_file, 'r') as f_input, open(output_file, 'w', newline='') as f_output:
        csv_input = csv.reader(f_input)
        csv_output = csv.writer(f_output)
        header = next(csv_input)
        header[3] = 'METADATA'
        csv_output.writerow(header)
        for row in csv_input:
            url = row[3]
            if url not in unique_urls:
                # json.dumps keeps the metadata valid JSON even if the URL
                # contains quotes or other special characters
                row[3] = json.dumps({"url": url, "language": lang})
                csv_output.writerow(row)
                unique_urls[url] = True
            else:
                print(f"Duplicate row found, url: {url}")
    print("Done!")


# getting a random article
def getrandom():
    how_to = RandomHowTo(lang)
    wkhowto_url = how_to.url
    theme = unquote(how_to.title)  # decode percent-escapes in the title
    # German question templates: "How do I {theme}?" /
    # "Write down how I can do the following: {theme}"
    templates = [
        "Wie mache ich {theme}?",
        "Schreibe wie ich folgendes tun kann: {theme}"
    ]
    wkhowto_q = random.choice(templates).format(theme=theme)
    # pywiki_custom's HowTo.print is assumed to return the full article text
    wkhowto_a = how_to.print(extended=True)
    return wkhowto_q, wkhowto_a, wkhowto_url


# saving .csv
with open(f'data_{lang}.csv', mode='w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['INSTRUCTION', 'RESPONSE', 'SOURCE', 'URL'])
    for i in range(n):
        wkhowto_q, wkhowto_a, wkhowto_url = getrandom()
        data = [wkhowto_q, wkhowto_a, f'{lang}.wikihow.com', wkhowto_url]
        writer.writerow(data)
        print(f"{i+1} out of {n}")

cleandata()
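
# Illustrative sketch (not part of the pipeline above): how a consumer could
# read the cleaned dataset back and parse the METADATA column produced by
# cleandata(). The file name and column names match the output written above;
# only stdlib csv/json are used. preview_dataset is a hypothetical helper.
def preview_dataset(path=f'{lang}-wikihow-qa-dataset-1k.csv', limit=3):
    with open(path, newline='') as f:
        reader = csv.DictReader(f)
        for i, row in enumerate(reader):
            if i >= limit:
                break
            # METADATA holds a JSON object like {"url": ..., "language": ...}
            meta = json.loads(row['METADATA'])
            print(meta['url'], '->', row['INSTRUCTION'])

# Example call (uncomment once the dataset has been generated):
# preview_dataset()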