# NOTE(review): removed page-scrape artifacts that preceded this module
# (hosting-site UI text: "Spaces: Running", file size, commit hash, and a
# line-number gutter). They were not part of the source file.
import warnings
warnings.filterwarnings("ignore")
import requests
from lxml import etree
from tqdm import tqdm
import pandas as pd
import json
import time
from finnlp.data_sources.news._base import News_Downloader
# TODO:
# 1. Contents
# 2. More pages
class GuruFocus_Streaming(News_Downloader):
    """Scrape article-listing metadata for a ticker from gurufocus.com.

    Each call to :meth:`download_streaming_search` appends rows to
    ``self.dataframe`` with columns ``title``, ``view``, ``source``,
    ``datetime``.  Only the first listing page is supported (see the
    module-level TODO).
    """

    def __init__(self, args=None):
        # `args=None` instead of `args={}`: a mutable default dict would be
        # shared across every instance of the class.
        super().__init__(args if args is not None else {})
        # Accumulator for all rows downloaded by this instance.
        self.dataframe = pd.DataFrame()

    def download_streaming_search(self, keyword="AAPL", rounds=3, delay=0.5):
        """Fetch the first article page for *keyword* and append its rows.

        Parameters
        ----------
        keyword : str
            Ticker symbol whose GuruFocus article page is scraped.
        rounds : int
            Reserved for future multi-page support; currently unused.
        delay : float
            Reserved for a future inter-page politeness delay; currently unused.

        Returns
        -------
        ``None`` on success, or the string ``"Connection Error: <code>"``
        when the HTTP request does not return status 200.
        """
        url = f"https://www.gurufocus.com/stock/{keyword}/article"
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
        }
        # timeout prevents the request from hanging indefinitely on a
        # stalled connection (requests has no default timeout).
        res = requests.get(url=url, headers=headers, timeout=30)
        if res.status_code != 200:
            print(f"Connection Error: {res.status_code}")
            return f"Connection Error: {res.status_code}"

        root = etree.HTML(res.text)
        # First child div is the listing header, not an article row.
        divs = root.xpath("/html/body/div[1]/div/section/section/main/div[1]/div[4]/div[1]/div/div")[1:]

        titles, views, sources, datetimes = [], [], [], []
        for div in divs:
            # Article title: join fragments and normalize whitespace.
            title = " ".join(div.xpath("./div[1]/h4/a//text()"))
            title = title.replace("\n", '').strip(" ")
            # Summary line looks like: "<views> \xa0\xa0 <source> \xa0\xa0 <datetime>".
            summary = " ".join(div.xpath("div[5]/text()")).replace('\n', '').strip(' ')
            parts = summary.split(' \xa0\xa0 ')
            if len(parts) != 3:
                # Layout change or non-article row: skip it rather than
                # raising ValueError mid-loop (which previously left the
                # parallel lists misaligned because the title was already
                # appended before the split).
                continue
            view, source, datetime = parts
            titles.append(title)
            views.append(view)
            sources.append(source)
            datetimes.append(datetime)

        tmp = pd.DataFrame(
            {"title": titles, "view": views, "source": sources, "datetime": datetimes}
        )
        # ignore_index avoids duplicated integer indices when the method is
        # called more than once on the same instance.
        self.dataframe = pd.concat([self.dataframe, tmp], ignore_index=True)

        print("Only support first page now!")
# NOTE(review): trailing "|" was a page-scrape artifact, not source code.