# decode-elm / utils.py
import os
import re
import shutil
import time
import urllib.request

import requests
import streamlit as st
from lxml import html
from stqdm import stqdm


def call_arXiv_API(search_query, search_by='all', sort_by='relevance', max_results='10', folder_name='arxiv-dl'):
    '''
    Scrapes the arXiv API's Atom response to get data from each entry in a search. Entries have the following format:
<entry>\n
<id>http://arxiv.org/abs/2008.04584v2</id>\n
<updated>2021-05-11T12:00:24Z</updated>\n
<published>2020-08-11T08:47:06Z</published>\n
<title>Bayesian Selective Inference: Non-informative Priors</title>\n
<summary> We discuss Bayesian inference for parameters selected using the data. First,\nwe provide a critical analysis of the existing positions in the literature\nregarding the correct Bayesian approach under selection. Second, we propose two\ntypes of non-informative priors for selection models. These priors may be\nemployed to produce a posterior distribution in the absence of prior\ninformation as well as to provide well-calibrated frequentist inference for the\nselected parameter. We test the proposed priors empirically in several\nscenarios.\n</summary>\n
<author>\n <name>Daniel G. Rasines</name>\n </author>\n <author>\n <name>G. Alastair Young</name>\n </author>\n
<arxiv:comment xmlns:arxiv="http://arxiv.org/schemas/atom">24 pages, 7 figures</arxiv:comment>\n
<link href="http://arxiv.org/abs/2008.04584v2" rel="alternate" type="text/html"/>\n
<link title="pdf" href="http://arxiv.org/pdf/2008.04584v2" rel="related" type="application/pdf"/>\n
<arxiv:primary_category xmlns:arxiv="http://arxiv.org/schemas/atom" term="math.ST" scheme="http://arxiv.org/schemas/atom"/>\n
<category term="math.ST" scheme="http://arxiv.org/schemas/atom"/>\n
<category term="stat.TH" scheme="http://arxiv.org/schemas/atom"/>\n
</entry>\n
'''
    # Replace spaces in the search query with '+' so it can be used in the URL
    search_query = search_query.strip().replace(" ", "+")
# Call arXiv API
    arXiv_url = f'http://export.arxiv.org/api/query?search_query={search_by}:{search_query}&sortBy={sort_by}&start=0&max_results={max_results}'
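    # Illustrative example (hypothetical query): search_query='deep learning' with the
    # default arguments produces the URL
    # http://export.arxiv.org/api/query?search_query=all:deep+learning&sortBy=relevance&start=0&max_results=10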
with urllib.request.urlopen(arXiv_url) as url:
s = url.read()
# Parse the xml data
root = html.fromstring(s)
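    # Note: lxml's HTML parser does not apply XML namespaces, which is why the Atom
    # elements can be selected below with plain paths like "entry" instead of prefixed ones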
# Fetch relevant pdf information
pdf_entries = root.xpath("entry")
pdf_titles = []
pdf_authors = []
pdf_urls = []
pdf_categories = []
folder_names = []
pdf_citation = []
pdf_years = []
for i, pdf in enumerate(pdf_entries):
        # xpath() returns a list with every occurrence of the path; each entry is
        # processed individually, so take the first element rather than keeping a list
        # Replace non-alphanumeric characters in the title so it can be used as a filename later
        pdf_titles.append(re.sub('[^a-zA-Z0-9]', ' ', pdf.xpath("title/text()")[0]))
pdf_authors.append(pdf.xpath("author/name/text()"))
pdf_urls.append(pdf.xpath("link[@title='pdf']/@href")[0])
pdf_categories.append(pdf.xpath("category/@term"))
folder_names.append(folder_name)
pdf_years.append(pdf.xpath('updated/text()')[0][:4])
pdf_citation.append(f"{', '.join(pdf_authors[i])}, {pdf_titles[i]}. arXiv [{pdf_categories[i][0]}] ({pdf_years[i]}), (available at {pdf_urls[i]}).")
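        # Using the sample entry from the docstring above, the assembled citation reads approximately:
        # "Daniel G. Rasines, G. Alastair Young, Bayesian Selective Inference  Non informative Priors. arXiv [math.ST] (2021), (available at http://arxiv.org/pdf/2008.04584v2)."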
    pdf_info = list(zip(pdf_titles, pdf_urls, pdf_authors, pdf_categories, folder_names, pdf_citation))
    # The API may return fewer entries than requested
    if len(pdf_urls) < int(max_results):
        print(f'Requested {max_results} files, but only {len(pdf_urls)} were available')
    return pdf_info, pdf_citation
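
# Example usage (hypothetical query; values are illustrative):
#   pdf_info, citations = call_arXiv_API('selective inference', search_by='all', max_results='5')
# pdf_info is a list of (title, pdf_url, authors, categories, folder_name, citation)
# tuples, and citations is the list of formatted citation strings.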


def download_pdf(pdf_info):
    # The download folder is wiped and recreated on the first loop iteration below,
    # so papers left over from old runs are not reused
    all_reference_text = []
    for i, p in enumerate(stqdm(pdf_info, desc='Searching and downloading papers')):
        pdf_title, pdf_url, pdf_author, pdf_category, folder_name, pdf_citation = p
r = requests.get(pdf_url, allow_redirects=True)
        if i == 0:
            # Start from a clean folder so files from previous runs are discarded
            if os.path.exists(folder_name):
                shutil.rmtree(folder_name)
            os.makedirs(folder_name)
with open(f'{folder_name}/{pdf_title}.pdf', 'wb') as currP:
currP.write(r.content)
if i == 0:
st.markdown("###### Papers found:")
st.markdown(f"{i+1}. {pdf_citation}")
time.sleep(0.15)
all_reference_text.append(f"{i+1}. {pdf_citation}\n")
    # Store the combined citation list in Streamlit's session state so the rest of the app can reuse it
    st.session_state['all_reference_text'] = ' '.join(all_reference_text)
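

# A minimal sketch for trying the API call when this module is run directly; the query
# string and max_results below are illustrative values, not ones the app uses.
# download_pdf() relies on a live Streamlit session (st.markdown, st.session_state),
# so only call_arXiv_API is exercised here.
if __name__ == '__main__':
    info, citations = call_arXiv_API('selective inference', search_by='all', max_results='3')
    for citation in citations:
        print(citation)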