Spaces:
Running
Running
File size: 1,862 Bytes
9df4cc0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
import requests
from lxml import etree
from tqdm import tqdm
import pandas as pd
import json
import time
from finnlp.data_sources.news._base import News_Downloader
# TODO:
# 1. Contents
class Reuters_Streaming(News_Downloader):
    """Downloader for Reuters' streaming article-search API.

    Pages through the ``articles-by-search-v2`` endpoint and accumulates
    the returned articles into ``self.dataframe`` (one row per article).
    """

    def __init__(self, args=None):
        # args=None instead of args={}: a mutable default dict would be
        # shared across every instance of the class.
        super().__init__({} if args is None else args)
        # Accumulator for all articles fetched across download rounds.
        self.dataframe = pd.DataFrame()

    def download_streaming_search(self, keyword="apple", rounds=3, delay=0.5):
        """Fetch up to ``rounds`` pages of search results for ``keyword``.

        Args:
            keyword: Search term passed to the Reuters query API.
            rounds: Maximum number of result pages to request.
            delay: Seconds to sleep between requests (simple rate limit).

        Returns:
            The string ``"Error"`` on an HTTP/connection failure (legacy
            sentinel kept for backward compatibility); ``None`` otherwise.
            Fetched articles are appended to ``self.dataframe``.
        """
        news_per_page = 20
        url = "https://www.reuters.com/pf/api/v3/content/fetch/articles-by-search-v2"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Referer": "https://www.reuters.com/site-search/?query=AAPL&sort=newest&offset=0"
        }
        print("Getting pages: ", end="")
        for i in range(rounds):
            offset = i * news_per_page
            params = {
                # "size" now derives from news_per_page instead of a
                # second hard-coded 20 that could drift out of sync.
                "query": f'{{"keyword":"{keyword}","offset":{offset},"orderby":"display_date:desc","size":{news_per_page},"website":"reuters"}}',
                "d": "144",
                "_website": "reuters",
            }
            response = self._request_get(url, headers=headers, params=params)
            # Connection error: guard against a None response as well as a
            # non-200 status; keep the legacy "Error" sentinel return.
            if response is None or response.status_code != 200:
                return "Error"
            # Parse the JSON payload.
            payload = json.loads(response.text)
            # API-level status: stop paging once no further content exists.
            if payload["statusCode"] != 200:
                print("Early Stopping")
                break
            # Append this page's articles; ignore_index keeps the
            # accumulated DataFrame's index unique across rounds.
            tmp = pd.DataFrame(payload["result"]["articles"])
            self.dataframe = pd.concat([self.dataframe, tmp], ignore_index=True)
            # Progress indicator: 1-based page number.
            print(i + 1, end=" ")
            time.sleep(delay)
|