Update app.py

app.py (CHANGED)
@@ -12,6 +12,8 @@ from bs4 import BeautifulSoup
 import io
 import asyncio
 import aiohttp
+from concurrent.futures import ThreadPoolExecutor
+

 # Configure logging
 logging.basicConfig(level=logging.INFO,
@@ -41,6 +43,7 @@ class PaperDownloader:
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
             'Accept-Language': 'en-US,en;q=0.9',
         }
+        self.executor = ThreadPoolExecutor(max_workers=4)

     def clean_doi(self, doi):
         """Clean and encode DOI for URL"""
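Why the executor matters: all the download coroutines share one event loop, so synchronous work such as writing PDFs or building the ZIP would stall every in-flight request. The commit pushes that work onto this pool via loop.run_in_executor. A minimal sketch of the pattern, standard library only (the helper name is illustrative):

import asyncio
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=4)

def blocking_write(path, data):
    # Plain synchronous I/O; safe inside a worker thread.
    with open(path, 'wb') as f:
        return f.write(data)

async def save(path, data):
    loop = asyncio.get_running_loop()
    # The coroutine suspends here while a pool thread does the write.
    return await loop.run_in_executor(executor, blocking_write, path, data)

# asyncio.run(save('paper.pdf', b'%PDF-1.4 ...'))

Unlike the one-line lambda used later in the diff, the helper closes the file through a context manager.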
@@ -59,47 +62,44 @@ class PaperDownloader:
         return None, None

     async def fetch_pdf_content(self, session, url, max_redirects=5, max_retries=2, retry_delay=1):
+        """Fetch content and validate that the response is a PDF, following up to max_redirects redirections with retries."""
+
+        current_url = url
+        redirect_count = 0
+        retry_count = 0
+
+        while redirect_count <= max_redirects:
+            try:
+                while retry_count <= max_retries:
+                    try:
+                        async with session.get(current_url, headers=self.headers, timeout=10, allow_redirects=False) as response:
+                            if response.status in [301, 302, 307, 308]:
+                                current_url = response.headers['Location']
+                                redirect_count += 1
+                                logger.debug(f"Following redirect from {url} to {current_url}")
+                                break  # Leave the retry loop and follow the redirect
+
+                            response.raise_for_status()
+
+                            if 'application/pdf' in response.headers.get('Content-Type', ''):
+                                return await response.read()
+                            else:
+                                logger.debug(f"Content type not PDF for {current_url}: {response.headers.get('Content-Type', '')}")
+                                return None
+                    except Exception as e:
+                        logger.debug(f"Error getting PDF, retrying ({retry_count}/{max_retries}) from {current_url}: {e}")
+                        retry_count += 1
+                        await asyncio.sleep(retry_delay)
+
+                if retry_count > max_retries:
+                    break  # Retries exhausted without a redirect; stop instead of refetching the same URL forever
+                retry_count = 0  # Reset the retry count for the next redirect hop
+
+            except Exception as e:
+                logger.debug(f"Error getting PDF from {current_url}: {e}")
+                return None
+
+        logger.debug(f"Too many redirects or retries {url}, not following this link further")
+        return None
+
     async def download_paper_direct_doi_async(self, session, doi):
         """Attempt to download the pdf from the landing page of the doi"""
         if not doi:
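The helper accepts a response as a PDF purely on its Content-Type header. Servers sometimes mislabel binary downloads, so checking the file's magic bytes is a common extra guard; a small sketch of that alternative (not something this commit does):

def looks_like_pdf(data):
    # Well-formed PDFs begin with the magic bytes %PDF-.
    return data[:5] == b'%PDF-'

# body = await response.read()
# if looks_like_pdf(body):
#     return body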
@@ -124,7 +124,7 @@ class PaperDownloader:
             r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
             r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
         ]
+
         pdf_urls = []
         for pattern in pdf_patterns:
             pdf_urls.extend(re.findall(pattern, text))
@@ -177,34 +177,34 @@ class PaperDownloader:
         return None

     async def download_paper_libgen_async(self, session, doi):
+        """Download from Libgen, handling the query and the redirection"""
+        if not doi:
+            return None
+
+        base_url = 'https://libgen.rs/scimag/'
+        try:
+            search_url = f"{base_url}?q={self.clean_doi(doi)}"
+            text, headers = await self.fetch_with_headers(session, search_url, timeout=10)
+
+            if not text or "No results" in text:
+                logger.debug(f"No results for DOI: {doi} on libgen")
+                return None
+
+            soup = BeautifulSoup(text, 'html.parser')
+
+            links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
+
+            if links:
+                link = links[0]
+                pdf_url = link['href']
+                pdf_content = await self.fetch_pdf_content(session, pdf_url)
+                if pdf_content:
+                    logger.debug(f"Found PDF from: {pdf_url}")
+                    return pdf_content
+        except Exception as e:
+            logger.debug(f"Error trying to download {doi} from libgen: {e}")
+        return None
+
     async def download_paper_google_scholar_async(self, session, doi):
         """Search google scholar to find an article with the given doi, try to get the pdf"""
         if not doi:
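The CSS selector hard-codes the position of the result link (second row, first cell of the table with class c), which will break silently if Libgen changes its markup. A self-contained sketch of how the selector resolves, on stand-in HTML (the real page structure is an assumption here, not verified):

from bs4 import BeautifulSoup

html = '''
<table class="c">
  <tbody>
    <tr><th>header row</th></tr>
    <tr><td><a href="http://example.org/paper.pdf">10.1000/xyz</a></td></tr>
  </tbody>
</table>
'''

soup = BeautifulSoup(html, 'html.parser')
links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
print(links[0]['href'])  # http://example.org/paper.pdf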
@@ -230,39 +230,40 @@ class PaperDownloader:
                 if pdf_content:
                     logger.debug(f"Found PDF from: {pdf_url}")
                     return pdf_content
+
         except Exception as e:
             logger.debug(f"Google Scholar error for {doi}: {e}")

         return None

     async def download_paper_crossref_async(self, session, doi):
+        """Alternative search method using Crossref"""
+        if not doi:
+            return None

+        try:
+            # Search for open access link
+            url = f"https://api.crossref.org/works/{doi}"
+            response = await session.get(url, headers=self.headers, timeout=10)
+
+            if response.status == 200:
+                data = await response.json()
+                work = data.get('message', {})
+
+                # Search for open access links
+                links = work.get('link', [])
+                for link in links:
+                    if link.get('content-type') == 'application/pdf':
+                        pdf_url = link.get('URL')
+                        if pdf_url:
+                            pdf_content = await self.fetch_pdf_content(session, pdf_url)
+                            if pdf_content:
+                                logger.debug(f"Found PDF from: {pdf_url}")
+                                return pdf_content
+        except Exception as e:
+            logger.debug(f"Crossref error for {doi}: {e}")
+        return None
+
     async def download_with_retry_async(self, doi, max_retries=3, initial_delay=2):
         """Downloads a paper using multiple strategies with exponential backoff and async requests"""
         pdf_content = None
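For reference, the Crossref works endpoint wraps the record in a message object, and its optional link array is what the loop above walks; each entry pairs a URL with a content-type. A sketch of the same extraction with the synchronous requests library (error handling omitted for brevity):

import requests

def crossref_pdf_links(doi):
    # GET https://api.crossref.org/works/<doi> -> {"status": ..., "message": {...}}
    record = requests.get(f'https://api.crossref.org/works/{doi}', timeout=10).json()
    links = record.get('message', {}).get('link', [])
    return [l['URL'] for l in links if l.get('content-type') == 'application/pdf']

# crossref_pdf_links('10.5555/12345678')  # hypothetical DOI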
@@ -283,7 +284,7 @@ class PaperDownloader:
                 return pdf_content
         except Exception as e:
             logger.error(f"Error in download attempt {retries + 1} for DOI {doi}: {e}")
+
         if not pdf_content:
             retries += 1
             logger.warning(f"Retry attempt {retries} for DOI: {doi} after {delay} seconds")
@@ -292,36 +293,38 @@ class PaperDownloader:

         return None

+    async def download_single_doi_async(self, doi, progress_callback):
+        """Downloads a single paper using a DOI, and updates the given progress_callback"""
+        if not doi:
+            return None, "Error: DOI not provided", "Error: DOI not provided"
+
+        try:
+            pdf_content = await self.download_with_retry_async(doi)
+
+            if pdf_content:
+                filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
+                filepath = os.path.join(self.output_dir, filename)

+                loop = asyncio.get_running_loop()
+                await loop.run_in_executor(self.executor, lambda: open(filepath, 'wb').write(pdf_content))
+
+                logger.info(f"Successfully downloaded: {filename}")
+                progress_callback(f"Successfully downloaded: <a href='https://doi.org/{doi}'>{doi}</a>")
+                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""
+            else:
+                logger.warning(f"Could not download: {doi}")
+                progress_callback(f"Could not download: <a href='https://doi.org/{doi}'>{doi}</a>")
+                return None, f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'
+
+        except Exception as e:
+            logger.error(f"Error processing {doi}: {e}")
+            progress_callback(f"Error processing {doi}: <a href='https://doi.org/{doi}'>{doi}</a> {e}")
+            return None, f"Error processing {doi}: {e}", f"Error processing {doi}: {e}"
+
+    async def download_multiple_dois_async(self, dois_text, progress_callback):
+        """Downloads multiple papers from a list of DOIs and uses a callback for UI"""
         if not dois_text:
             return None, "Error: No DOIs provided", "Error: No DOIs provided"
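Note the decoupling: progress_callback is any callable that accepts a status string, so the downloader can be exercised without Gradio. A hypothetical smoke test using print as the callback (assumes the PaperDownloader class above and network access):

import asyncio

async def main():
    downloader = PaperDownloader()
    filepath, ok_msg, fail_msg = await downloader.download_single_doi_async(
        '10.5555/12345678',  # hypothetical DOI
        print,
    )
    print(filepath, ok_msg, fail_msg)

# asyncio.run(main())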
@@ -332,34 +335,34 @@ class PaperDownloader:
         downloaded_files = []
         failed_dois = []
         downloaded_links = []
+
+        for i, doi in enumerate(dois):
+            filepath, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
+            if filepath:
                 # Unique filename for zip
                 filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
                 filepath_unique = os.path.join(self.output_dir, filename)
                 os.rename(filepath, filepath_unique)
                 downloaded_files.append(filepath_unique)
                 downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
+            else:
+                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
+
         if downloaded_files:
+            zip_filename = 'papers.zip'
+            loop = asyncio.get_running_loop()
+            await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename, downloaded_files))
+            logger.info(f"ZIP file created: {zip_filename}")
+
         return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)

     def create_zip(self, zip_filename, downloaded_files):
+        with zipfile.ZipFile(zip_filename, 'w') as zipf:
             for file_path in downloaded_files:
                 zipf.write(file_path, arcname=os.path.basename(file_path))

+    async def process_bibtex_async(self, bib_file, progress_callback):
+        """Process BibTeX file, download papers with multiple strategies, and report UI updates using a callback"""
         # Read BibTeX file content from the uploaded object
         try:
             with open(bib_file.name, 'r', encoding='utf-8') as f:
@@ -383,50 +386,51 @@ class PaperDownloader:
         downloaded_files = []
         failed_dois = []
         downloaded_links = []
+
+        tasks = [self.download_single_doi_async(doi, progress_callback) for doi in dois]
         results = await asyncio.gather(*tasks)

         for i, (filepath, success_message, fail_message) in enumerate(results):
+            if filepath:
+                # Unique filename for zip
+                filename = f"{str(dois[i]).replace('/', '_').replace('.', '_')}_{i}.pdf"
+                filepath_unique = os.path.join(self.output_dir, filename)
+                os.rename(filepath, filepath_unique)
+                downloaded_files.append(filepath_unique)
+                downloaded_links.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')
+            else:
+                failed_dois.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')

         if downloaded_files:
+            zip_filename = 'papers.zip'
+            loop = asyncio.get_running_loop()
+            await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename, downloaded_files))
+            logger.info(f"ZIP file created: {zip_filename}")
+
         return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)

 def create_gradio_interface():
     """Create Gradio interface for Paper Downloader"""
     downloader = PaperDownloader()

+    async def download_papers(bib_file, doi_input, dois_input, progress=gr.Progress()):
+        if bib_file:
+            # Check file type
+            if not bib_file.name.lower().endswith('.bib'):
+                return None, "Error: Please upload a .bib file", "Error: Please upload a .bib file", None
+
+            zip_path, downloaded_dois, failed_dois = await downloader.process_bibtex_async(bib_file, progress.update)
+            return zip_path, downloaded_dois, failed_dois, None
+        elif doi_input:
+            filepath, message, failed_doi = await downloader.download_single_doi_async(doi_input, progress.update)
+            return None, message, failed_doi, filepath
+        elif dois_input:
+            zip_path, downloaded_dois, failed_dois = await downloader.download_multiple_dois_async(dois_input, progress.update)
+            return zip_path, downloaded_dois, failed_dois, None
+        else:
+            return None, "Please provide a .bib file, a single DOI, or a list of DOIs", "Please provide a .bib file, a single DOI, or a list of DOIs", None


 # Gradio Interface
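Gradio can call async handlers directly, and a gr.Progress() default argument is injected per request; calling the object as progress(fraction, desc=...) drives the bar shown in the UI. A minimal, self-contained sketch of that wiring (component names here are illustrative, not the app's):

import asyncio
import gradio as gr

async def echo_slowly(text, progress=gr.Progress()):
    for i in range(5):
        progress((i + 1) / 5, desc=f'step {i + 1}/5')  # update the visible progress bar
        await asyncio.sleep(0.2)
    return text.upper()

demo = gr.Interface(fn=echo_slowly, inputs=gr.Textbox(), outputs=gr.Textbox())
# demo.launch()

Whether progress.update, as passed around in this commit, behaves like a plain status-string callback depends on the Gradio version; the documented interface is the call shown above.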
@@ -436,6 +440,7 @@ def create_gradio_interface():
         gr.File(file_types=['.bib'], label="Upload BibTeX File"),
         gr.Textbox(label="Enter Single DOI", placeholder="10.xxxx/xxxx"),
         gr.Textbox(label="Enter Multiple DOIs (one per line)", placeholder="10.xxxx/xxxx\n10.yyyy/yyyy\n...")
+
     ],
     outputs=[
         gr.File(label="Download Papers (ZIP) or Single PDF"),