C2MV committed
Commit 5d5b6d2 · verified · 1 Parent(s): 9898cdf

Update app.py

Files changed (1)
  1. app.py +157 -169
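
The substantive change in this commit is in process_bibtex_async: instead of downloading papers one DOI at a time inside a loop, it now builds one task per DOI and runs them all concurrently with asyncio.gather, while blocking work (writing each PDF, building the ZIP) keeps being handed off to a worker thread with loop.run_in_executor. The snippet below is a minimal, self-contained sketch of that pattern only, not code from app.py; fake_download, write_file, and download_all are hypothetical names used for illustration.

import asyncio

async def fake_download(doi: str) -> bytes:
    # Hypothetical stand-in for the app's download_with_retry_async.
    await asyncio.sleep(0.1)  # simulate network latency
    return f"%PDF-1.4 fake content for {doi}".encode()

def write_file(path: str, data: bytes) -> None:
    # Blocking file write, kept off the event loop.
    with open(path, "wb") as fh:
        fh.write(data)

async def download_all(dois):
    loop = asyncio.get_running_loop()
    # Launch every download at once instead of awaiting them one by one.
    results = await asyncio.gather(*(fake_download(d) for d in dois))
    paths = []
    for doi, pdf in zip(dois, results):
        path = f"{doi.replace('/', '_')}.pdf"
        # Off-load the blocking write to the default thread pool.
        await loop.run_in_executor(None, write_file, path, pdf)
        paths.append(path)
    return paths

print(asyncio.run(download_all(["10.1000/demo1", "10.1000/demo2"])))

In app.py the same idea is applied to download_single_doi_async: one coroutine per DOI is gathered, and the resulting file paths are then renamed and zipped.
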
app.py CHANGED
@@ -59,47 +59,47 @@ class PaperDownloader:
         return None, None
 
     async def fetch_pdf_content(self, session, url, max_redirects=5, max_retries=2, retry_delay=1):
-        """Fetch content and validate if response is PDF, following up to max_redirects redirections with retries."""
-
-        current_url = url
-        redirect_count = 0
-        retry_count = 0
-
-        while redirect_count <= max_redirects:
-            try:
-                while retry_count <= max_retries:
-                    try:
-                        async with session.get(current_url, headers=self.headers, timeout=10, allow_redirects=False) as response:
-
-                            if response.status in [301, 302, 307, 308]:
-                                current_url = response.headers['Location']
-                                redirect_count += 1
-                                logger.debug(f"Following redirect from {url} to {current_url}")
-                                break # Break out of the retry loop for a redirect
-
-                            response.raise_for_status()
-
-                            if 'application/pdf' in response.headers.get('Content-Type', ''):
-                                return await response.read()
-                            else:
-                                logger.debug(
-                                    f"Content type not PDF for {current_url}: {response.headers.get('Content-Type', '')}")
-                                return None
-                    except Exception as e:
-                        logger.debug(
-                            f"Error getting PDF, retrying ({retry_count}/{max_retries}) from {current_url}: {e}")
-                        retry_count += 1
-                        await asyncio.sleep(retry_delay)
-
-                retry_count = 0 # Reset the retry count, in case there's a next redirect attempt
-
-            except Exception as e:
-                logger.debug(f"Error getting PDF from {current_url}: {e}")
-                return None
-
-        logger.debug(f"Too many redirects or retries {url}, not following this link further")
-        return None
-
+        """Fetch content and validate if response is PDF, following up to max_redirects redirections with retries."""
+
+        current_url = url
+        redirect_count = 0
+        retry_count = 0
+
+        while redirect_count <= max_redirects:
+            try:
+                while retry_count <= max_retries:
+                    try:
+                        async with session.get(current_url, headers=self.headers, timeout=10, allow_redirects=False) as response:
+
+                            if response.status in [301, 302, 307, 308]:
+                                current_url = response.headers['Location']
+                                redirect_count += 1
+                                logger.debug(f"Following redirect from {url} to {current_url}")
+                                break # Break out of the retry loop for a redirect
+
+                            response.raise_for_status()
+
+                            if 'application/pdf' in response.headers.get('Content-Type', ''):
+                                return await response.read()
+                            else:
+                                logger.debug(
+                                    f"Content type not PDF for {current_url}: {response.headers.get('Content-Type', '')}")
+                                return None
+                    except Exception as e:
+                        logger.debug(
+                            f"Error getting PDF, retrying ({retry_count}/{max_retries}) from {current_url}: {e}")
+                        retry_count += 1
+                        await asyncio.sleep(retry_delay)
+
+                retry_count = 0 # Reset the retry count, in case there's a next redirect attempt
+
+            except Exception as e:
+                logger.debug(f"Error getting PDF from {current_url}: {e}")
+                return None
+
+        logger.debug(f"Too many redirects or retries {url}, not following this link further")
+        return None
 
     async def download_paper_direct_doi_async(self, session, doi):
         """Attempt to download the pdf from the landing page of the doi"""
         if not doi:
@@ -107,18 +107,18 @@ class PaperDownloader:
 
         try:
             doi_url = f"https://doi.org/{self.clean_doi(doi)}"
-
+
             # First, let's try to download the URL directly in case it is already the pdf.
             pdf_content = await self.fetch_pdf_content(session, doi_url)
             if pdf_content:
                 logger.debug(f"Direct DOI resolved to PDF from {doi_url}")
                 return pdf_content
-
+
             # If direct DOI link was not a pdf, fetch landing page and extract links
             text, headers = await self.fetch_with_headers(session, doi_url, timeout=15)
             if not text:
                 return None
-
+
             pdf_patterns = [
                 r'(https?://[^\s<>"]+?\.pdf)',
                 r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
@@ -128,72 +128,72 @@ class PaperDownloader:
             pdf_urls = []
             for pattern in pdf_patterns:
                 pdf_urls.extend(re.findall(pattern, text))
-
+
             # Attempt each pdf url and break when you find a PDF content.
             for pdf_url in pdf_urls:
                 pdf_content = await self.fetch_pdf_content(session, pdf_url)
                 if pdf_content:
                     logger.debug(f"Found PDF from: {pdf_url}")
                     return pdf_content
-
+
         except Exception as e:
             logger.debug(f"Error trying to get the PDF from {doi}: {e}")
         return None
-
+
     async def download_paper_scihub_async(self, session, doi):
         """Improved method to download paper from Sci-Hub using async requests"""
         if not doi:
             logger.warning("DOI not provided")
             return None
-
+
         for base_url in self.download_sources:
             try:
                 scihub_url = f"{base_url}{self.clean_doi(doi)}"
                 text, headers = await self.fetch_with_headers(session, scihub_url, timeout=15)
                 if not text:
                     continue
-
+
                 # Search for multiple PDF URL patterns
                 pdf_patterns = [
                     r'(https?://[^\s<>"]+?\.pdf)',
                     r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
                     r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
                 ]
-
+
                 pdf_urls = []
                 for pattern in pdf_patterns:
                     pdf_urls.extend(re.findall(pattern, text))
-
+
                 # Try downloading from found URLs, but iterate over ALL
                 for pdf_url in pdf_urls:
                     pdf_content = await self.fetch_pdf_content(session,pdf_url)
                     if pdf_content:
                         logger.debug(f"Found PDF from: {pdf_url}")
                         return pdf_content
-
+
             except Exception as e:
                 logger.debug(f"Error trying to download {doi} from {base_url}: {e}")
-
+
         return None
-
+
     async def download_paper_libgen_async(self, session, doi):
         """Download from Libgen, handles the query and the redirection"""
         if not doi:
             return None
-
+
        base_url = 'https://libgen.rs/scimag/'
        try:
            search_url = f"{base_url}?q={self.clean_doi(doi)}"
            text, headers = await self.fetch_with_headers(session, search_url, timeout=10)
-
+
            if not text or "No results" in text:
                logger.debug(f"No results for DOI: {doi} on libgen")
                return None
-
+
            soup = BeautifulSoup(text, 'html.parser')
-
+
            links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
-
+
            if links:
                link = links[0]
                pdf_url = link['href']
@@ -235,7 +235,7 @@ class PaperDownloader:
             logger.debug(f"Google Scholar error for {doi}: {e}")
 
         return None
-
+
     async def download_paper_crossref_async(self, session, doi):
         """Alternative search method using Crossref"""
         if not doi:
@@ -263,7 +263,7 @@ class PaperDownloader:
         except Exception as e:
             logger.debug(f"Crossref error for {doi}: {e}")
         return None
-
+
     async def download_with_retry_async(self, doi, max_retries=3, initial_delay=2):
         """Downloads a paper using multiple strategies with exponential backoff and async requests"""
         pdf_content = None
@@ -279,7 +279,6 @@ class PaperDownloader:
                     await self.download_paper_libgen_async(session, doi) or
                     await self.download_paper_google_scholar_async(session, doi) or
                     await self.download_paper_crossref_async(session, doi)
-
                 )
                 if pdf_content:
                     return pdf_content
@@ -291,136 +290,126 @@ class PaperDownloader:
             logger.warning(f"Retry attempt {retries} for DOI: {doi} after {delay} seconds")
             await asyncio.sleep(delay)
             delay *= 2 # Exponential backoff
-
         return None
 
     async def download_single_doi_async(self, doi):
-        """Downloads a single paper using a DOI"""
-        if not doi:
-            return None, "Error: DOI not provided", "Error: DOI not provided"
-
-        try:
-            pdf_content = await self.download_with_retry_async(doi)
-
-            if pdf_content:
-                if doi is None:
-                    return None, "Error: DOI not provided", "Error: DOI not provided"
-                filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
-                filepath = os.path.join(self.output_dir, filename)
-                #write the file asynchronously here so it doesn't block
-                loop = asyncio.get_running_loop()
-                await loop.run_in_executor(None, lambda: open(filepath, 'wb').write(pdf_content))
-                logger.info(f"Successfully downloaded: {filename}")
-                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""
-            else:
-                logger.warning(f"Could not download: {doi}")
-                return None, f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'
-
-        except Exception as e:
-            logger.error(f"Error processing {doi}: {e}")
-            return None, f"Error processing {doi}: {e}", f"Error processing {doi}: {e}"
-
+        """Downloads a single paper using a DOI"""
+        if not doi:
+            return None, "Error: DOI not provided", "Error: DOI not provided"
+
+        try:
+            pdf_content = await self.download_with_retry_async(doi)
+
+            if pdf_content:
+                if doi is None:
+                    return None, "Error: DOI not provided", "Error: DOI not provided"
+                filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
+                filepath = os.path.join(self.output_dir, filename)
+
+                # Write file asynchronously
+                loop = asyncio.get_running_loop()
+                await loop.run_in_executor(None, lambda: open(filepath, 'wb').write(pdf_content))
+
+
+                logger.info(f"Successfully downloaded: {filename}")
+                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""
+            else:
+                logger.warning(f"Could not download: {doi}")
+                return None, f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'
+
+        except Exception as e:
+            logger.error(f"Error processing {doi}: {e}")
+            return None, f"Error processing {doi}: {e}", f"Error processing {doi}: {e}"
+
     async def download_multiple_dois_async(self, dois_text):
-        """Downloads multiple papers from a list of DOIs"""
-        if not dois_text:
-            return None, "Error: No DOIs provided", "Error: No DOIs provided"
-
-        dois = [doi.strip() for doi in dois_text.split('\n') if doi.strip()]
-        if not dois:
-            return None, "Error: No valid DOIs provided", "Error: No valid DOIs provided"
-
-        downloaded_files = []
-        failed_dois = []
-        downloaded_links = []
-        for i, doi in enumerate(tqdm(dois, desc="Downloading papers")):
-            filepath, success_message, fail_message = await self.download_single_doi_async(doi)
-            if filepath:
+        """Downloads multiple papers from a list of DOIs"""
+        if not dois_text:
+            return None, "Error: No DOIs provided", "Error: No DOIs provided"
+
+        dois = [doi.strip() for doi in dois_text.split('\n') if doi.strip()]
+        if not dois:
+            return None, "Error: No valid DOIs provided", "Error: No valid DOIs provided"
+
+        downloaded_files = []
+        failed_dois = []
+        downloaded_links = []
+
+
+        for i, doi in enumerate(tqdm(dois, desc="Downloading papers")):
+            filepath, success_message, fail_message = await self.download_single_doi_async(doi)
+            if filepath:
                 # Unique filename for zip
                 filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
                 filepath_unique = os.path.join(self.output_dir, filename)
                 os.rename(filepath, filepath_unique)
                 downloaded_files.append(filepath_unique)
                 downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
-            else:
+            else:
                 failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
-
-        if downloaded_files:
-            zip_filename = 'papers.zip'
-            # Zip asynchronously
-            loop = asyncio.get_running_loop()
-            await loop.run_in_executor(None, lambda: self.create_zip(zip_filename,downloaded_files) )
-            logger.info(f"ZIP file created: {zip_filename}")
-
-        return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)
+
+        if downloaded_files:
+            zip_filename = 'papers.zip'
+            loop = asyncio.get_running_loop()
+            await loop.run_in_executor(None, lambda: self.create_zip(zip_filename,downloaded_files) )
+            logger.info(f"ZIP file created: {zip_filename}")
+
+        return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)
+
 
     def create_zip(self, zip_filename, downloaded_files):
-        with zipfile.ZipFile(zip_filename, 'w') as zipf:
-            for file_path in downloaded_files:
-                zipf.write(file_path, arcname=os.path.basename(file_path))
+        with zipfile.ZipFile(zip_filename, 'w') as zipf:
+            for file_path in downloaded_files:
+                zipf.write(file_path, arcname=os.path.basename(file_path))
 
     async def process_bibtex_async(self, bib_file):
-        """Process BibTeX file and download papers with multiple strategies"""
-        # Read BibTeX file content from the uploaded object
-        try:
-            with open(bib_file.name, 'r', encoding='utf-8') as f:
-                bib_content = f.read()
-        except Exception as e:
-            logger.error(f"Error reading uploaded file {bib_file.name}: {e}")
-            return None, f"Error reading uploaded file {bib_file.name}: {e}", f"Error reading uploaded file {bib_file.name}: {e}"
-
-        # Parse BibTeX data
-        try:
-            bib_database = bibtexparser.loads(bib_content)
-        except Exception as e:
-            logger.error(f"Error parsing BibTeX data: {e}")
-            return None, f"Error parsing BibTeX data: {e}", f"Error parsing BibTeX data: {e}"
-
-        # Extract DOIs
-        dois = [entry.get('doi') for entry in bib_database.entries if entry.get('doi')]
-        logger.info(f"Found {len(dois)} DOIs to download")
-
-        # Result lists
-        downloaded_files = []
-        failed_dois = []
-        downloaded_links = []
-
-        # Download PDFs
-        for i, doi in enumerate(tqdm(dois, desc="Downloading papers")):
-            try:
-                # Try to download with multiple methods with retries
-                pdf_content = await self.download_with_retry_async(doi)
-
-                # Save PDF
-                if pdf_content:
-                    if doi is None:
-                        return None, "Error: DOI not provided", "Error: DOI not provided"
-                    filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
-                    filepath = os.path.join(self.output_dir, filename)
-
-                    #Write the file asynchronously so it doesn't block the ui.
-                    loop = asyncio.get_running_loop()
-                    await loop.run_in_executor(None, lambda: open(filepath, 'wb').write(pdf_content))
-
-                    downloaded_files.append(filepath)
-                    downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
-                    logger.info(f"Successfully downloaded: {filename}")
-                else:
-                    failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
-
-            except Exception as e:
-                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
-                logger.error(f"Error processing {doi}: {e}")
-
-        # Create ZIP of downloaded papers
-        if downloaded_files:
+        """Process BibTeX file and download papers with multiple strategies"""
+        # Read BibTeX file content from the uploaded object
+        try:
+            with open(bib_file.name, 'r', encoding='utf-8') as f:
+                bib_content = f.read()
+        except Exception as e:
+            logger.error(f"Error reading uploaded file {bib_file.name}: {e}")
+            return None, f"Error reading uploaded file {bib_file.name}: {e}", f"Error reading uploaded file {bib_file.name}: {e}"
+
+        # Parse BibTeX data
+        try:
+            bib_database = bibtexparser.loads(bib_content)
+        except Exception as e:
+            logger.error(f"Error parsing BibTeX data: {e}")
+            return None, f"Error parsing BibTeX data: {e}", f"Error parsing BibTeX data: {e}"
+
+        # Extract DOIs
+        dois = [entry.get('doi') for entry in bib_database.entries if entry.get('doi')]
+        logger.info(f"Found {len(dois)} DOIs to download")
+
+        # Result lists
+        downloaded_files = []
+        failed_dois = []
+        downloaded_links = []
+
+        # Download PDFs
+        for i, doi in enumerate(tqdm(dois, desc="Downloading papers")):
+            try:
+                # Try to download with multiple methods with retries
+                pdf_content = await self.download_with_retry_async(doi)
+
+                # Save PDF
+                if pdf_content:
+                    if doi is None:
+                        return None, "Error: DOI not provided", "Error: DOI not provided"
+                    filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
+                    filepath = os.path.join(self.output_dir, filename)
+
+                    #Write the file asynchronously so it doesn't block the ui.
+                    loop = asyncio.get_running_loop()
+                    await loop.run_in_executor(None, lambda: open(filepath, 'wb').write(pdf_content))
+
+                    downloaded_files.append(filepath)
+                    downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
+                    logger.info(f"Successfully downloaded: {filename}")
+                else:
+                    failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
+
+            except Exception as e:
+                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
+                logger.error(f"Error processing {doi}: {e}")
+
+        # Create ZIP of downloaded papers
+        if downloaded_files:
+        """Process BibTeX file and download papers with multiple strategies"""
+        # Read BibTeX file content from the uploaded object
+        try:
+            with open(bib_file.name, 'r', encoding='utf-8') as f:
+                bib_content = f.read()
+        except Exception as e:
+            logger.error(f"Error reading uploaded file {bib_file.name}: {e}")
+            return None, f"Error reading uploaded file {bib_file.name}: {e}", f"Error reading uploaded file {bib_file.name}: {e}"
+
+        # Parse BibTeX data
+        try:
+            bib_database = bibtexparser.loads(bib_content)
+        except Exception as e:
+            logger.error(f"Error parsing BibTeX data: {e}")
+            return None, f"Error parsing BibTeX data: {e}", f"Error parsing BibTeX data: {e}"
+
+        # Extract DOIs
+        dois = [entry.get('doi') for entry in bib_database.entries if entry.get('doi')]
+        logger.info(f"Found {len(dois)} DOIs to download")
+
+        # Result lists
+        downloaded_files = []
+        failed_dois = []
+        downloaded_links = []
+
+        # Use asyncio.gather to run all downloads concurrently and show propert progress
+        tasks = [self.download_single_doi_async(doi) for doi in dois]
+        results = await asyncio.gather(*tasks)
+
+        for i, (filepath, success_message, fail_message) in enumerate(results):
+            if filepath:
+                # Unique filename for zip
+                filename = f"{str(dois[i]).replace('/', '_').replace('.', '_')}_{i}.pdf"
+                filepath_unique = os.path.join(self.output_dir, filename)
+                os.rename(filepath, filepath_unique)
+                downloaded_files.append(filepath_unique)
+                downloaded_links.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')
+            else:
+                failed_dois.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')
+
+
+        if downloaded_files:
             zip_filename = 'papers.zip'
-            # Zip asynchronously so the main loop is not blocked.
             loop = asyncio.get_running_loop()
             await loop.run_in_executor(None, lambda: self.create_zip(zip_filename,downloaded_files) )
             logger.info(f"ZIP file created: {zip_filename}")
 
-        return zip_filename, "\n".join(downloaded_links), "\n".join(failed_dois)
-
+        return zip_filename, "\n".join(downloaded_links), "\n".join(failed_dois)
 
 def create_gradio_interface():
     """Create Gradio interface for Paper Downloader"""
@@ -431,7 +420,7 @@ def create_gradio_interface():
             # Check file type
             if not bib_file.name.lower().endswith('.bib'):
                 return None, "Error: Please upload a .bib file", "Error: Please upload a .bib file", None
-
+
             zip_path, downloaded_dois, failed_dois = await downloader.process_bibtex_async(bib_file)
             return zip_path, downloaded_dois, failed_dois, None
         elif doi_input:
@@ -498,7 +487,6 @@ def create_gradio_interface():
         """,
         cache_examples=False,
     )
-
     # Add Javascript to update HTML
     interface.load = """
     function(downloaded_dois, failed_dois){