import os
import re
import time
import logging
import zipfile
import threading
import requests
import bibtexparser
from tqdm import tqdm
from urllib.parse import quote, urlencode
import gradio as gr
from bs4 import BeautifulSoup
import io
import asyncio
import aiohttp
from concurrent.futures import ThreadPoolExecutor, CancelledError

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)


class PaperDownloader:
    def __init__(self, output_dir='papers'):
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)

        # Download sources, tried in this order by the Sci-Hub strategy
        self.download_sources = [
            'https://sci-hub.ee/',
            'https://sci-hub.st/',
            'https://sci-hub.ru/',
            'https://sci-hub.ren/',
            'https://sci-hub.mksa.top/',
            'https://sci-hub.se/',
            'https://libgen.rs/scimag/'
        ]

        # Request headers
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.9',
        }
        self.executor = ThreadPoolExecutor(max_workers=4)
        self.download_task = None  # Future of the currently submitted download job

    def clean_doi(self, doi):
        """Clean and URL-encode a DOI."""
        if not isinstance(doi, str):
            return None
        return quote(doi.strip()) if doi else None

    async def fetch_with_headers(self, session, url, timeout=10):
        """Fetch a URL with the default headers and timeout; return (text, headers) or (None, None)."""
        try:
            async with session.get(url, headers=self.headers, timeout=timeout, allow_redirects=True) as response:
                response.raise_for_status()
                return await response.text(), response.headers
        except Exception as e:
            logger.debug(f"Error fetching {url}: {e}")
            return None, None

    async def fetch_pdf_content(self, session, url, max_redirects=5, max_retries=2, retry_delay=1):
        """Fetch a URL and return its body only if the response is a PDF,
        following up to max_redirects redirects and retrying each hop up to max_retries times."""
        current_url = url
        redirect_count = 0
        retry_count = 0

        while redirect_count <= max_redirects:
            try:
                while retry_count <= max_retries:
                    try:
                        logger.debug(f"Fetching PDF from {current_url} - attempt {retry_count + 1}")
                        async with session.get(current_url, headers=self.headers, timeout=10, allow_redirects=False) as response:
                            if response.status in [301, 302, 307, 308]:
                                current_url = response.headers['Location']
                                redirect_count += 1
                                logger.debug(f"Following redirect from {url} to {current_url}")
                                break  # Leave the retry loop and follow the redirect

                            response.raise_for_status()
                            if 'application/pdf' in response.headers.get('Content-Type', ''):
                                logger.debug(f"Successfully fetched PDF from {current_url}")
                                return await response.read()
                            else:
                                logger.debug(f"Content type not PDF for {current_url}: {response.headers.get('Content-Type', '')}")
                                return None
                    except Exception as e:
                        logger.debug(f"Error getting PDF, retrying ({retry_count}/{max_retries}) from {current_url}: {e}")
                        retry_count += 1
                        await asyncio.sleep(retry_delay)

                if retry_count > max_retries:
                    # All retries for this URL failed without reaching a redirect or a PDF
                    break
                retry_count = 0  # Reset the retry count for the next redirect hop
            except CancelledError:
                logger.info(f"Fetch PDF cancelled from: {url}")
                return None
            except Exception as e:
                logger.debug(f"Error getting PDF from {current_url}: {e}")
                return None

        logger.debug(f"Too many redirects or retries for {url}, not following this link further")
        return None
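    # ------------------------------------------------------------------
    # Download strategies. Each coroutine below takes an aiohttp session
    # and a DOI and returns raw PDF bytes on success or None on failure,
    # so download_with_retry_async can try them in order of preference.
    # ------------------------------------------------------------------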
    async def download_paper_direct_doi_async(self, session, doi):
        """Attempt to download the PDF from the DOI landing page."""
        if not doi:
            return None

        try:
            doi_url = f"https://doi.org/{self.clean_doi(doi)}"

            # First, try the DOI URL itself in case it already resolves to a PDF.
            pdf_content = await self.fetch_pdf_content(session, doi_url)
            if pdf_content:
                logger.debug(f"Direct DOI resolved to PDF from {doi_url}")
                return pdf_content

            # If the direct DOI link was not a PDF, fetch the landing page and extract candidate links.
            text, headers = await self.fetch_with_headers(session, doi_url, timeout=15)
            if not text:
                return None

            pdf_patterns = [
                r'(https?://[^\s<>"]+?\.pdf)',
                r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
                r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
            ]

            pdf_urls = []
            for pattern in pdf_patterns:
                pdf_urls.extend(re.findall(pattern, text))

            # Try each candidate URL and stop at the first one that returns PDF content.
            for pdf_url in pdf_urls:
                pdf_content = await self.fetch_pdf_content(session, pdf_url)
                if pdf_content:
                    logger.debug(f"Found PDF from: {pdf_url}")
                    return pdf_content

        except Exception as e:
            logger.debug(f"Error trying to get the PDF from {doi}: {e}")

        return None

    async def download_paper_scihub_async(self, session, doi):
        """Download a paper from Sci-Hub mirrors using async requests."""
        if not doi:
            logger.warning("DOI not provided")
            return None

        for base_url in self.download_sources:
            try:
                scihub_url = f"{base_url}{self.clean_doi(doi)}"
                text, headers = await self.fetch_with_headers(session, scihub_url, timeout=15)
                if not text:
                    continue

                # Search for multiple PDF URL patterns
                pdf_patterns = [
                    r'(https?://[^\s<>"]+?\.pdf)',
                    r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
                    r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
                ]

                pdf_urls = []
                for pattern in pdf_patterns:
                    pdf_urls.extend(re.findall(pattern, text))

                # Try every URL found until one yields PDF content
                for pdf_url in pdf_urls:
                    pdf_content = await self.fetch_pdf_content(session, pdf_url)
                    if pdf_content:
                        logger.debug(f"Found PDF from: {pdf_url}")
                        return pdf_content

            except Exception as e:
                logger.debug(f"Error trying to download {doi} from {base_url}: {e}")

        return None

    async def download_paper_libgen_async(self, session, doi):
        """Download from Libgen, handling the search query and the result link."""
        if not doi:
            return None

        base_url = 'https://libgen.rs/scimag/'
        try:
            search_url = f"{base_url}?q={self.clean_doi(doi)}"
            text, headers = await self.fetch_with_headers(session, search_url, timeout=10)

            if not text or "No results" in text:
                logger.debug(f"No results for DOI: {doi} on Libgen")
                return None

            soup = BeautifulSoup(text, 'html.parser')
            links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
            if links:
                link = links[0]
                pdf_url = link['href']
                pdf_content = await self.fetch_pdf_content(session, pdf_url)
                if pdf_content:
                    logger.debug(f"Found PDF from: {pdf_url}")
                    return pdf_content
        except Exception as e:
            logger.debug(f"Error trying to download {doi} from Libgen: {e}")

        return None

    async def download_paper_google_scholar_async(self, session, doi):
        """Search Google Scholar for an article with the given DOI and try to get the PDF."""
        if not doi:
            return None

        try:
            query = f'doi:"{doi}"'
            params = {'q': query}
            url = f'https://scholar.google.com/scholar?{urlencode(params)}'

            text, headers = await self.fetch_with_headers(session, url, timeout=10)
            if not text:
                return None

            soup = BeautifulSoup(text, 'html.parser')

            # Find any links labelled [PDF]
            links = soup.find_all('a', string=re.compile(r'\[PDF\]', re.IGNORECASE))
            if links:
                pdf_url = links[0]['href']
                pdf_content = await self.fetch_pdf_content(session, pdf_url)
                if pdf_content:
                    logger.debug(f"Found PDF from: {pdf_url}")
                    return pdf_content
        except Exception as e:
            logger.debug(f"Google Scholar error for {doi}: {e}")

        return None
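    # The Crossref strategy below queries the public REST API at
    # https://api.crossref.org/works/{doi} and follows any link entry whose
    # content-type is 'application/pdf', which typically points to an
    # open-access copy.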
    async def download_paper_crossref_async(self, session, doi):
        """Alternative search method using the Crossref API."""
        if not doi:
            return None

        try:
            # Look for an open access link in the Crossref metadata
            url = f"https://api.crossref.org/works/{doi}"
            async with session.get(url, headers=self.headers, timeout=10) as response:
                if response.status == 200:
                    data = await response.json()
                    work = data.get('message', {})

                    # Search for open access links
                    links = work.get('link', [])
                    for link in links:
                        if link.get('content-type') == 'application/pdf':
                            pdf_url = link.get('URL')
                            if pdf_url:
                                pdf_content = await self.fetch_pdf_content(session, pdf_url)
                                if pdf_content:
                                    logger.debug(f"Found PDF from: {pdf_url}")
                                    return pdf_content
        except Exception as e:
            logger.debug(f"Crossref error for {doi}: {e}")

        return None

    async def download_with_retry_async(self, doi, max_retries=3):
        """
        Try to download a paper with multiple strategies and a limited number of retries.

        Args:
            doi (str): DOI of the paper to download
            max_retries (int): Maximum number of retries

        Returns:
            bytes or None: PDF content, or None if the paper could not be downloaded
        """
        if not doi:
            logger.warning("DOI not provided")
            return None

        # Download strategies in order of preference
        download_strategies = [
            self.download_paper_direct_doi_async,
            self.download_paper_scihub_async,
            self.download_paper_libgen_async,
            self.download_paper_google_scholar_async,
            self.download_paper_crossref_async
        ]

        async with aiohttp.ClientSession() as session:
            for retry in range(max_retries):
                logger.info(f"Download attempt {retry + 1} for DOI: {doi}")

                # Try each download strategy in turn
                for strategy in download_strategies:
                    try:
                        logger.info(f"Trying strategy {strategy.__name__} for DOI {doi}")
                        pdf_content = await strategy(session, doi)
                        if pdf_content:
                            logger.info(f"Successfully downloaded {doi} using {strategy.__name__}")
                            return pdf_content
                    except CancelledError:
                        logger.info(f"Download cancelled on strategy: {strategy.__name__} with DOI {doi}")
                        return None  # Return here so the retry loop stops as well
                    except Exception as e:
                        logger.debug(f"Error in strategy {strategy.__name__} for {doi}: {e}")

                # If no strategy worked, pause briefly before retrying
                await asyncio.sleep(1)
logger.warning(f"FALLO FINAL: No se pudo descargar DOI {doi} después de {max_retries} intentos") # Si se agotan todos los reintentos return None def _download_single_doi(self, doi, progress_callback, cancel_event): # removed async keyword """Descargar un único DOI con retroalimentación de progreso""" if not doi: progress_callback(None, "Error: DOI no proporcionado", "Error: DOI no proporcionado") return None logger.info(f"Starting download process for DOI: {doi}") try: async def call_async():# Added this in order to execute correctly on executor pdf_content = await self.download_with_retry_async(doi) if pdf_content: logger.info(f"Downloaded PDF for DOI: {doi}") filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf" filepath = os.path.join(self.output_dir, filename) # Escribir contenido del PDF open(filepath, 'wb').write(pdf_content) logger.info(f"Saved PDF to file: {filepath}") logger.info(f"Descarga exitosa: {filename}") progress_callback(filepath, f"Descargado exitosamente: {doi}", "") else: logger.warning(f"No se pudo descargar: {doi}") progress_callback(None, f"No se pudo descargar {doi}", f'{doi}') asyncio.run(call_async()) #added the loop event here except CancelledError: logger.info(f"Download Cancelled DOI: {doi}") progress_callback(None, f"Download cancelled {doi}","Download Cancelled" ) except Exception as e: logger.error(f"Error processing {doi}: {e}") progress_callback(None, f"Error processing {doi}: {e}", f"Error processing {doi}: {e}" ) def download_multiple_dois(self, dois_text, progress_callback, cancel_event): #removed async here """Download multiple DOIs""" # Validar entrada if not dois_text: progress_callback(None, "Error: No DOIs provided", "Error: No DOIs provided",) return None # Sanitizar y filtrar DOIs # Eliminar líneas vacías, espacios en blanco, y DOIs duplicados dois = list(set([doi.strip() for doi in dois_text.split('\n') if doi.strip()])) # Validar lista de DOIs if not dois: progress_callback(None, "Error: No valid DOIs provided", "Error: No valid DOIs provided") return None # Listas para rastrear resultados downloaded_files = [] # Rutas de archivos descargados failed_dois = [] # DOIs que no se pudieron descargar downloaded_links = [] # Links de DOIs descargados for doi in dois: self._download_single_doi(doi, lambda a,b,c: progress_callback(a,b,c, doi) , cancel_event ) if cancel_event.is_set(): logger.info("Downloads cancelled on multiple dois download") progress_callback(None, "Downloads cancelled","Downloads cancelled") # early return on cancelled return None #break here when is cancelled result = self.results_dict.get(doi, (None,None,"")) # obtain from self.results # Manejar diferentes tipos de resultados if isinstance(result, Exception): # Excepción inesperada error_msg = f"Unexpected error: {str(result)}" logger.error(f"Error downloading {doi}: {error_msg}") failed_dois.append(f'{doi} - {error_msg}') elif result[0] is None: # Descarga fallida (resultado de download_single_doi_async) error_msg = result[1] logger.warning(f"Failed to download {doi}: {error_msg}") failed_dois.append(f'{doi} - {error_msg}') else: # Descarga exitosa filepath = result[0] # Generar nombre de archivo único filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf" filepath_unique = os.path.join(self.output_dir, filename) try: # Renombrar archivo os.rename(filepath, filepath_unique) # Añadir a lista de archivos descargados downloaded_files.append(filepath_unique) downloaded_links.append(f'{doi}') except Exception as rename_error: logger.error(f"Error renaming file 
for {doi}: {rename_error}") failed_dois.append(f'{doi} - Error saving file') # Crear archivo ZIP si hay archivos descargados zip_filename = None if downloaded_files: zip_filename = 'papers.zip' loop = asyncio.get_running_loop() # Ejecutar creación de ZIP en un executor para no bloquear loop.run_in_executor( self.executor, lambda: self.create_zip(zip_filename, downloaded_files) ) logger.info(f"ZIP file created: {zip_filename}") # Devolver resultados progress_callback( zip_filename if downloaded_files else None, "\n".join(downloaded_links),"\n".join(failed_dois)) return def process_bibtex(self, bib_file, progress_callback, cancel_event):# removed async here """Process BibTeX file and download papers with multiple strategies and reports UI updates using a callback""" # Read BibTeX file content from the uploaded object try: with open(bib_file.name, 'r', encoding='utf-8') as f: bib_content = f.read() except Exception as e: logger.error(f"Error reading uploaded file {bib_file.name}: {e}") progress_callback(None, f"Error reading uploaded file {bib_file.name}: {e}", f"Error reading uploaded file {bib_file.name}: {e}" ) return None # Parse BibTeX data try: bib_database = bibtexparser.loads(bib_content) except Exception as e: logger.error(f"Error parsing BibTeX data: {e}") progress_callback(None,f"Error parsing BibTeX data: {e}", f"Error parsing BibTeX data: {e}") return None # Extract DOIs dois = [entry.get('doi') for entry in bib_database.entries if entry.get('doi')] logger.info(f"Found {len(dois)} DOIs to download") # Result lists downloaded_files = [] failed_dois = [] downloaded_links = [] for doi in dois: self._download_single_doi(doi, lambda a,b,c: progress_callback(a,b,c, doi), cancel_event ) if cancel_event.is_set(): logger.info("Download Cancelled in bibtex mode") progress_callback(None, "Download Cancelled", "Download Cancelled") return None #cancel if requested result = self.results_dict.get(doi, (None,None,"")) # obtain from self.results if isinstance(result, Exception): # Excepción inesperada error_msg = f"Unexpected error: {str(result)}" logger.error(f"Error downloading {doi}: {error_msg}") failed_dois.append(f'{doi} - {error_msg}') elif result[0] is None: # Descarga fallida (resultado de download_single_doi_async) error_msg = result[1] logger.warning(f"Failed to download {doi}: {error_msg}") failed_dois.append(f'{doi} - {error_msg}') else: # Descarga exitosa filepath = result[0] # Unique filename for zip filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf" filepath_unique = os.path.join(self.output_dir, filename) os.rename(filepath, filepath_unique) downloaded_files.append(filepath_unique) downloaded_links.append(f'{doi}') if downloaded_files: zip_filename = 'papers.zip' loop = asyncio.get_running_loop() loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename,downloaded_files)) logger.info(f"ZIP file created: {zip_filename}") progress_callback(zip_filename, "\n".join(downloaded_links), "\n".join(failed_dois)) #after process finishes return def create_zip(self, zip_filename, files): """Crea un archivo zip con los pdfs descargados""" with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zf: for file in files: zf.write(file, os.path.basename(file)) def cancel_download(self): if self.download_task: self.cancel_event.set() # Cancel the download task if it exists and it is cancelable self.download_task.cancel() def create_gradio_interface(): """Create Gradio interface for Paper Downloader""" downloader = PaperDownloader() downloader.results_dict = 
def create_gradio_interface():
    """Create Gradio interface for the Paper Downloader."""
    downloader = PaperDownloader()
    downloader.results_dict = {}  # Shared results dict, since work runs on different threads

    def update_progress(message="", logs=""):
        return gr.Textbox.update(value=f"{message}"), gr.Textbox.update(value=f"{logs}")
{logs}") def download_papers(bib_file, doi_input, dois_input): cancel_event = asyncio.Event() # Create cancellation event for every submission. downloader.cancel_event = cancel_event # store the event so that it is available to stop the process def custom_progress_callback(filepath, message, fail_message, doi=None): #new callback to send to the execution function logger.info(f"Callback message: {message}") # log each callback msg #store data for single or multiple mode on download_papers execution. if doi: downloader.results_dict[doi] = (filepath, message,fail_message) updates = update_progress(message) return updates if bib_file: # Check file type if not bib_file.name.lower().endswith('.bib'): return None, "Error: Please upload a .bib file", "Error: Please upload a .bib file", None downloader.download_task = downloader.executor.submit( downloader.process_bibtex, bib_file, lambda a,b,c: update_progress(a,f"{b}