# FreeBibTec2 / app.py
import os
import re
import time
import logging
import zipfile
import requests
import bibtexparser
from tqdm import tqdm
from urllib.parse import quote, urlencode, urljoin
import gradio as gr
from bs4 import BeautifulSoup
import io
import asyncio
import aiohttp
from concurrent.futures import ThreadPoolExecutor
# Configure logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
class PaperDownloader:
def __init__(self, output_dir='papers'):
self.output_dir = output_dir
os.makedirs(output_dir, exist_ok=True)
# Updated download sources
self.download_sources = [
'https://sci-hub.ee/',
'https://sci-hub.st/',
'https://sci-hub.ru/',
'https://sci-hub.ren/',
'https://sci-hub.mksa.top/',
'https://sci-hub.se/',
'https://libgen.rs/scimag/'
]
# Request headers
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.9',
}
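        # Thread pool used to run blocking file and ZIP operations off the event loop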
self.executor = ThreadPoolExecutor(max_workers=4)
def clean_doi(self, doi):
"""Clean and encode DOI for URL"""
if not isinstance(doi, str):
return None
return quote(doi.strip()) if doi else None
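    # NOTE: create_zip is called later (download_multiple_dois_async, process_bibtex_async)
    # but is not defined elsewhere in this listing. The implementation below is a minimal,
    # assumed sketch that packs the downloaded PDFs into one archive with the stdlib zipfile module.
    def create_zip(self, zip_filename, files):
        """Bundle the given files into a single ZIP archive, storing only their basenames."""
        with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zf:
            for filepath in files:
                if os.path.exists(filepath):
                    zf.write(filepath, arcname=os.path.basename(filepath))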
async def fetch_with_headers(self, session, url, timeout=10):
"""Utility method to fetch an URL with headers and timeout"""
try:
async with session.get(url, headers=self.headers, timeout=timeout, allow_redirects=True) as response:
response.raise_for_status()
return await response.text(), response.headers
except Exception as e:
logger.debug(f"Error fetching {url}: {e}")
return None, None
async def fetch_pdf_content(self, session, url, max_redirects=5, max_retries=2, retry_delay=1):
"""Fetch content and validate if response is PDF, following up to max_redirects redirections with retries."""
current_url = url
redirect_count = 0
retry_count = 0
while redirect_count <= max_redirects:
try:
while retry_count <= max_retries:
try:
async with session.get(current_url, headers=self.headers, timeout=10, allow_redirects=False) as response:
if response.status in [301, 302, 307, 308]:
                                current_url = urljoin(current_url, response.headers['Location'])
redirect_count += 1
logger.debug(f"Following redirect from {url} to {current_url}")
break # Break out of the retry loop for a redirect
response.raise_for_status()
if 'application/pdf' in response.headers.get('Content-Type', ''):
return await response.read()
else:
logger.debug(f"Content type not PDF for {current_url}: {response.headers.get('Content-Type', '')}")
return None
except Exception as e:
logger.debug(f"Error getting PDF, retrying ({retry_count}/{max_retries}) from {current_url}: {e}")
retry_count += 1
await asyncio.sleep(retry_delay)
                if retry_count > max_retries:
                    # All retries failed without hitting a redirect; give up on this URL
                    return None
                retry_count = 0  # Reset the retry count before following the next redirect
except Exception as e:
logger.debug(f"Error getting PDF from {current_url}: {e}")
return None
logger.debug(f"Too many redirects or retries {url}, not following this link further")
return None
async def download_paper_direct_doi_async(self, session, doi):
"""Attempt to download the pdf from the landing page of the doi"""
if not doi:
return None
try:
doi_url = f"https://doi.org/{self.clean_doi(doi)}"
            # First, try the DOI URL directly in case it already resolves to a PDF.
pdf_content = await self.fetch_pdf_content(session, doi_url)
if pdf_content:
logger.debug(f"Direct DOI resolved to PDF from {doi_url}")
return pdf_content
# If direct DOI link was not a pdf, fetch landing page and extract links
text, headers = await self.fetch_with_headers(session, doi_url, timeout=15)
if not text:
return None
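            # URL patterns that commonly point at a PDF: direct .pdf links, /download/ paths, and /pdf/ paths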
pdf_patterns = [
r'(https?://[^\s<>"]+?\.pdf)',
r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
]
pdf_urls = []
for pattern in pdf_patterns:
pdf_urls.extend(re.findall(pattern, text))
            # Try each candidate URL and return as soon as valid PDF content is found.
for pdf_url in pdf_urls:
pdf_content = await self.fetch_pdf_content(session, pdf_url)
if pdf_content:
logger.debug(f"Found PDF from: {pdf_url}")
return pdf_content
except Exception as e:
logger.debug(f"Error trying to get the PDF from {doi}: {e}")
return None
async def download_paper_scihub_async(self, session, doi):
"""Improved method to download paper from Sci-Hub using async requests"""
if not doi:
logger.warning("DOI not provided")
return None
for base_url in self.download_sources:
try:
scihub_url = f"{base_url}{self.clean_doi(doi)}"
text, headers = await self.fetch_with_headers(session, scihub_url, timeout=15)
if not text:
continue
# Search for multiple PDF URL patterns
pdf_patterns = [
r'(https?://[^\s<>"]+?\.pdf)',
r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
]
pdf_urls = []
for pattern in pdf_patterns:
pdf_urls.extend(re.findall(pattern, text))
                # Try each of the found URLs in turn until one yields valid PDF content
                for pdf_url in pdf_urls:
                    pdf_content = await self.fetch_pdf_content(session, pdf_url)
if pdf_content:
logger.debug(f"Found PDF from: {pdf_url}")
return pdf_content
except Exception as e:
logger.debug(f"Error trying to download {doi} from {base_url}: {e}")
return None
async def download_paper_libgen_async(self, session, doi):
"""Download from Libgen, handles the query and the redirection"""
if not doi:
return None
base_url = 'https://libgen.rs/scimag/'
try:
search_url = f"{base_url}?q={self.clean_doi(doi)}"
text, headers = await self.fetch_with_headers(session, search_url, timeout=10)
if not text or "No results" in text:
logger.debug(f"No results for DOI: {doi} on libgen")
return None
soup = BeautifulSoup(text, 'html.parser')
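            # Pick the article link from the first result row (selector assumes Libgen's current table markup)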
links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
if links:
link = links[0]
                pdf_url = urljoin(search_url, link['href'])  # urljoin handles relative hrefs from the results page
pdf_content = await self.fetch_pdf_content(session, pdf_url)
if pdf_content:
logger.debug(f"Found PDF from: {pdf_url}")
return pdf_content
except Exception as e:
logger.debug(f"Error trying to download {doi} from libgen: {e}")
return None
async def download_paper_google_scholar_async(self, session, doi):
"""Search google scholar to find an article with the given doi, try to get the pdf"""
if not doi:
return None
try:
query = f'doi:"{doi}"'
params = {'q': query}
url = f'https://scholar.google.com/scholar?{urlencode(params)}'
text, headers = await self.fetch_with_headers(session, url, timeout=10)
if not text:
return None
soup = BeautifulSoup(text, 'html.parser')
# Find any links with [PDF]
links = soup.find_all('a', string=re.compile(r'\[PDF\]', re.IGNORECASE))
if links:
pdf_url = links[0]['href']
                pdf_content = await self.fetch_pdf_content(session, pdf_url)
if pdf_content:
logger.debug(f"Found PDF from: {pdf_url}")
return pdf_content
except Exception as e:
logger.debug(f"Google Scholar error for {doi}: {e}")
return None
async def download_paper_crossref_async(self, session, doi):
"""Alternative search method using Crossref"""
if not doi:
return None
try:
            # Query the Crossref works API and look for a link flagged as a PDF
            url = f"https://api.crossref.org/works/{doi}"
            async with session.get(url, headers=self.headers, timeout=10) as response:
                if response.status == 200:
                    data = await response.json()
                    work = data.get('message', {})
                    # Search the 'link' entries for an open-access PDF
                    links = work.get('link', [])
                    for link in links:
                        if link.get('content-type') == 'application/pdf':
                            pdf_url = link.get('URL')
                            if pdf_url:
                                pdf_content = await self.fetch_pdf_content(session, pdf_url)
                                if pdf_content:
                                    logger.debug(f"Found PDF from: {pdf_url}")
                                    return pdf_content
except Exception as e:
logger.debug(f"Crossref error for {doi}: {e}")
return None
async def download_with_retry_async(self, doi, max_retries=5, initial_delay=2):
"""Downloads a paper using multiple strategies with exponential backoff and async requests"""
pdf_content = None
retries = 0
delay = initial_delay
# Additional Sci-Hub and alternative sources
additional_sources = [
'https://sci-hub.ren/',
'https://sci-hub.se/',
'https://sci-hub.mksa.top/',
'https://sci-hub.ru/',
'https://sci-hub.st/',
'https://libgen.rs/scimag/'
]
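        # One aiohttp session is shared across every strategy and retry attempt below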
async with aiohttp.ClientSession() as session:
while retries < max_retries and not pdf_content:
try:
logger.info(f"Attempt {retries + 1} to download DOI: {doi}")
# Try primary sources
download_strategies = [
self.download_paper_direct_doi_async,
self.download_paper_scihub_async,
self.download_paper_libgen_async,
self.download_paper_google_scholar_async,
self.download_paper_crossref_async
]
for strategy in download_strategies:
pdf_content = await strategy(session, doi)
if pdf_content:
logger.info(f"Successfully downloaded {doi} using {strategy.__name__}")
return pdf_content
# If not found, try additional sources
if not pdf_content and retries > 1:
for source in additional_sources:
try:
scihub_url = f"{source}{self.clean_doi(doi)}"
logger.info(f"Trying alternative source: {scihub_url}")
text, headers = await self.fetch_with_headers(session, scihub_url, timeout=15)
if text:
# Extract potential PDF links
pdf_patterns = [
r'(https?://[^\s<>"]+?\.pdf)',
r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
]
pdf_urls = []
for pattern in pdf_patterns:
pdf_urls.extend(re.findall(pattern, text))
# Try downloading from found URLs
for pdf_url in pdf_urls:
pdf_content = await self.fetch_pdf_content(session, pdf_url)
if pdf_content:
logger.info(f"Found PDF from alternative source: {pdf_url}")
return pdf_content
except Exception as e:
logger.debug(f"Error with alternative source {source}: {e}")
except Exception as e:
logger.error(f"Unexpected error in download attempt {retries + 1} for DOI {doi}: {e}")
# Prepare for next retry
if not pdf_content:
retries += 1
logger.warning(f"Retry attempt {retries} for DOI: {doi} after {delay} seconds")
await asyncio.sleep(delay)
delay *= 2 # Exponential backoff
# Log final failure
logger.warning(f"FINAL FAILURE: Could not download DOI {doi} after {max_retries} attempts")
return None
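    # Hypothetical usage sketch (not part of the Gradio flow): fetch one DOI programmatically.
    # "10.xxxx/xxxx" below is only a placeholder DOI.
    #
    #     downloader = PaperDownloader()
    #     pdf_bytes = asyncio.run(downloader.download_with_retry_async("10.xxxx/xxxx"))
    #     if pdf_bytes:
    #         with open("paper.pdf", "wb") as f:
    #             f.write(pdf_bytes)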
async def download_single_doi_async(self, doi, progress_callback):
"""Downloads a single paper using a DOI, and updates the given progress_callback"""
if not doi:
return None, "Error: DOI not provided", "Error: DOI not provided"
try:
pdf_content = await self.download_with_retry_async(doi)
            if pdf_content:
                filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
                filepath = os.path.join(self.output_dir, filename)

                def write_pdf():
                    # Write the PDF in a worker thread so the event loop is not blocked,
                    # closing the file handle via the context manager.
                    with open(filepath, 'wb') as f:
                        f.write(pdf_content)

                loop = asyncio.get_running_loop()
                await loop.run_in_executor(self.executor, write_pdf)
logger.info(f"Successfully downloaded: {filename}")
progress_callback(f"Successfully downloaded: <a href='https://doi.org/{doi}'>{doi}</a>")
return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""
else:
logger.warning(f"Could not download: {doi}")
progress_callback(f"Could not download: <a href='https://doi.org/{doi}'>{doi}</a>")
return None, f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'
except Exception as e:
logger.error(f"Error processing {doi}: {e}")
progress_callback(f"Error processing {doi}: <a href='https://doi.org/{doi}'>{doi}</a> {e}")
return None, f"Error processing {doi}: {e}", f"Error processing {doi}: {e}"
async def download_multiple_dois_async(self, dois_text, progress_callback):
if not dois_text:
return None, "Error: No DOIs provided", "Error: No DOIs provided"
# Sanitize and filter DOIs
dois = [doi.strip() for doi in dois_text.split('\n') if doi.strip()]
if not dois:
return None, "Error: No valid DOIs provided", "Error: No valid DOIs provided"
downloaded_files = []
failed_dois = []
downloaded_links = []
# Use asyncio.gather to process all DOIs concurrently
download_tasks = []
for doi in dois:
task = self.download_single_doi_async(doi, progress_callback)
download_tasks.append(task)
# Wait for all downloads to complete
results = await asyncio.gather(*download_tasks, return_exceptions=True)
for i, result in enumerate(results):
doi = dois[i]
# Handle different result types
if isinstance(result, Exception):
# Unexpected error
failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a> - Unexpected error: {str(result)}')
elif result[0] is None:
# Download failed
failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a> - {result[1]}')
else:
# Successful download
filepath = result[0]
                # Build a filename that is unique within the batch (index suffix avoids collisions)
                filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
                filepath_unique = os.path.join(self.output_dir, filename)
                # Rename the downloaded file and record it for the ZIP
                os.rename(filepath, filepath_unique)
downloaded_files.append(filepath_unique)
downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
# Create zip if any files were downloaded
if downloaded_files:
zip_filename = 'papers.zip'
loop = asyncio.get_running_loop()
await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename, downloaded_files))
logger.info(f"ZIP file created: {zip_filename}")
return (zip_filename if downloaded_files else None,
"\n".join(downloaded_links),
"\n".join(failed_dois))
async def process_bibtex_async(self, bib_file, progress_callback):
"""Process BibTeX file and download papers with multiple strategies and reports UI updates using a callback"""
# Read BibTeX file content from the uploaded object
try:
with open(bib_file.name, 'r', encoding='utf-8') as f:
bib_content = f.read()
except Exception as e:
logger.error(f"Error reading uploaded file {bib_file.name}: {e}")
return None, f"Error reading uploaded file {bib_file.name}: {e}", f"Error reading uploaded file {bib_file.name}: {e}"
# Parse BibTeX data
try:
bib_database = bibtexparser.loads(bib_content)
except Exception as e:
logger.error(f"Error parsing BibTeX data: {e}")
return None, f"Error parsing BibTeX data: {e}", f"Error parsing BibTeX data: {e}"
# Extract DOIs
dois = [entry.get('doi') for entry in bib_database.entries if entry.get('doi')]
logger.info(f"Found {len(dois)} DOIs to download")
# Result lists
downloaded_files = []
failed_dois = []
downloaded_links = []
tasks = [self.download_single_doi_async(doi, progress_callback) for doi in dois]
results = await asyncio.gather(*tasks)
for i, (filepath, success_message, fail_message) in enumerate(results):
if filepath:
# Unique filename for zip
filename = f"{str(dois[i]).replace('/', '_').replace('.', '_')}_{i}.pdf"
filepath_unique = os.path.join(self.output_dir, filename)
os.rename(filepath, filepath_unique)
downloaded_files.append(filepath_unique)
downloaded_links.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')
else:
failed_dois.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')
        if downloaded_files:
            zip_filename = 'papers.zip'
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename, downloaded_files))
            logger.info(f"ZIP file created: {zip_filename}")
            return zip_filename, "\n".join(downloaded_links), "\n".join(failed_dois)
        # Nothing was downloaded: return no ZIP file, but still report every failed DOI
        return None, "", "\n".join(failed_dois)
def create_gradio_interface():
"""Create Gradio interface for Paper Downloader"""
downloader = PaperDownloader()
async def download_papers(bib_file, doi_input, dois_input, progress=gr.Progress()):
if bib_file:
# Check file type
if not bib_file.name.lower().endswith('.bib'):
return None, "Error: Please upload a .bib file", "Error: Please upload a .bib file", None
zip_path, downloaded_dois, failed_dois = await downloader.process_bibtex_async(bib_file, progress.update)
return zip_path, downloaded_dois, failed_dois, None
elif doi_input:
            filepath, message, failed_doi = await downloader.download_single_doi_async(doi_input, progress.update)
return None, message, failed_doi, filepath
elif dois_input:
zip_path, downloaded_dois, failed_dois = await downloader.download_multiple_dois_async(dois_input, progress.update)
return zip_path, downloaded_dois, failed_dois, None
else:
return None, "Please provide a .bib file, a single DOI, or a list of DOIs", "Please provide a .bib file, a single DOI, or a list of DOIs", None
# Gradio Interface
interface = gr.Interface(
fn=download_papers,
inputs=[
gr.File(file_types=['.bib'], label="Upload BibTeX File"),
gr.Textbox(label="Enter Single DOI", placeholder="10.xxxx/xxxx"),
gr.Textbox(label="Enter Multiple DOIs (one per line)", placeholder="10.xxxx/xxxx\n10.yyyy/yyyy\n...")
],
outputs=[
gr.File(label="Download Papers (ZIP) or Single PDF"),
gr.HTML(label="""
<div style='padding-bottom: 5px; font-weight: bold;'>
Found DOIs
</div>
<div style='border: 1px solid #ddd; padding: 5px; border-radius: 5px;'>
<div id="downloaded-dois"></div>
</div>
"""),
gr.HTML(label="""
<div style='padding-bottom: 5px; font-weight: bold;'>
Missed DOIs
</div>
<div style='border: 1px solid #ddd; padding: 5px; border-radius: 5px;'>
<div id="failed-dois"></div>
</div>
"""),
gr.File(label="Downloaded Single PDF")
],
title="🔬 Academic Paper Batch Downloader",
description="Upload a BibTeX file or enter DOIs to download PDFs. We'll attempt to fetch PDFs from multiple sources like Sci-Hub, Libgen, Google Scholar and Crossref. You can use any of the three inputs at any moment.",
theme="Hev832/Applio",
examples=[
["example.bib", None, None], # Bibtex File
[None, "10.1038/nature12373", None], # Single DOI
[None, None, "10.1109/5.771073\n10.3390/horticulturae8080677"], # Multiple DOIs
],
css="""
.gradio-container {
background-color: black;
}
.gr-interface {
max-width: 800px;
margin: 0 auto;
}
.gr-box {
background-color: black;
border-radius: 10px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.output-text a {
color: #007bff; /* Blue color for hyperlinks */
}
""",
cache_examples=False,
)
# Add Javascript to update HTML
interface.load = """
function(downloaded_dois, failed_dois){
let downloaded_html = '';
downloaded_dois.split('\\n').filter(Boolean).forEach(doi => {
downloaded_html += '[' + doi + ']<br>';
});
document.querySelector("#downloaded-dois").innerHTML = downloaded_html;
let failed_html = '';
failed_dois.split('\\n').filter(Boolean).forEach(doi => {
failed_html += '[' + doi + ']<br>';
});
document.querySelector("#failed-dois").innerHTML = failed_html;
return [downloaded_html, failed_html];
}
"""
return interface
def main():
interface = create_gradio_interface()
interface.launch()
if __name__ == "__main__":
main()