C2MV committed
Commit 4e2e145 · verified · 1 Parent(s): ff022ac

Update app.py

Files changed (1):
  1. app.py  +140 -147
app.py CHANGED
@@ -141,100 +141,100 @@ class PaperDownloader:
         return None
 
     async def download_paper_scihub_async(self, session, doi):
-        """Improved method to download paper from Sci-Hub using async requests"""
-        if not doi:
-            logger.warning("DOI not provided")
-            return None
-
-        for base_url in self.download_sources:
-            try:
-                scihub_url = f"{base_url}{self.clean_doi(doi)}"
-                text, headers = await self.fetch_with_headers(session, scihub_url, timeout=15)
-                if not text:
-                    continue
-
-                # Search for multiple PDF URL patterns
-                pdf_patterns = [
-                    r'(https?://[^\s<>"]+?\.pdf)',
-                    r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
-                    r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
-                ]
-
-                pdf_urls = []
-                for pattern in pdf_patterns:
-                    pdf_urls.extend(re.findall(pattern, text))
-
-                # Try downloading from found URLs, but iterate over ALL
-                for pdf_url in pdf_urls:
-                    pdf_content = await self.fetch_pdf_content(session,pdf_url)
-                    if pdf_content:
-                        logger.debug(f"Found PDF from: {pdf_url}")
-                        return pdf_content
-
-            except Exception as e:
-                logger.debug(f"Error trying to download {doi} from {base_url}: {e}")
-
-        return None
-
-    async def download_paper_libgen_async(self, session, doi):
-        """Download from Libgen, handles the query and the redirection"""
-        if not doi:
-            return None
-
-        base_url = 'https://libgen.rs/scimag/'
-        try:
-            search_url = f"{base_url}?q={self.clean_doi(doi)}"
-            text, headers = await self.fetch_with_headers(session, search_url, timeout=10)
-
-            if not text or "No results" in text:
-                logger.debug(f"No results for DOI: {doi} on libgen")
-                return None
-
-            soup = BeautifulSoup(text, 'html.parser')
-
-            links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
-
-            if links:
-                link = links[0]
-                pdf_url = link['href']
-                pdf_content = await self.fetch_pdf_content(session, pdf_url)
-                if pdf_content:
-                    logger.debug(f"Found PDF from: {pdf_url}")
-                    return pdf_content
-        except Exception as e:
-            logger.debug(f"Error trying to download {doi} from libgen: {e}")
-        return None
-
-    async def download_paper_google_scholar_async(self, session, doi):
-        """Search google scholar to find an article with the given doi, try to get the pdf"""
-        if not doi:
-            return None
-
-        try:
-            query = f'doi:"{doi}"'
-            params = {'q': query}
-            url = f'https://scholar.google.com/scholar?{urlencode(params)}'
-
-            text, headers = await self.fetch_with_headers(session, url, timeout=10)
-            if not text:
-                return None
-
-            soup = BeautifulSoup(text, 'html.parser')
-
-            # Find any links with [PDF]
-            links = soup.find_all('a', string=re.compile(r'\[PDF\]', re.IGNORECASE))
-
-            if links:
-                pdf_url = links[0]['href']
-                pdf_content = await self.fetch_pdf_content(session,pdf_url)
-                if pdf_content:
-                    logger.debug(f"Found PDF from: {pdf_url}")
-                    return pdf_content
-
-        except Exception as e:
-            logger.debug(f"Google Scholar error for {doi}: {e}")
-
-        return None
+        """Improved method to download paper from Sci-Hub using async requests"""
+        if not doi:
+            logger.warning("DOI not provided")
+            return None
+
+        for base_url in self.download_sources:
+            try:
+                scihub_url = f"{base_url}{self.clean_doi(doi)}"
+                text, headers = await self.fetch_with_headers(session, scihub_url, timeout=15)
+                if not text:
+                    continue
+
+                # Search for multiple PDF URL patterns
+                pdf_patterns = [
+                    r'(https?://[^\s<>"]+?\.pdf)',
+                    r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
+                    r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
+                ]
+
+                pdf_urls = []
+                for pattern in pdf_patterns:
+                    pdf_urls.extend(re.findall(pattern, text))
+
+                # Try downloading from found URLs, but iterate over ALL
+                for pdf_url in pdf_urls:
+                    pdf_content = await self.fetch_pdf_content(session,pdf_url)
+                    if pdf_content:
+                        logger.debug(f"Found PDF from: {pdf_url}")
+                        return pdf_content
+
+            except Exception as e:
+                logger.debug(f"Error trying to download {doi} from {base_url}: {e}")
+
+        return None
+
+    async def download_paper_libgen_async(self, session, doi):
+        """Download from Libgen, handles the query and the redirection"""
+        if not doi:
+            return None
+
+        base_url = 'https://libgen.rs/scimag/'
+        try:
+            search_url = f"{base_url}?q={self.clean_doi(doi)}"
+            text, headers = await self.fetch_with_headers(session, search_url, timeout=10)
+
+            if not text or "No results" in text:
+                logger.debug(f"No results for DOI: {doi} on libgen")
+                return None
+
+            soup = BeautifulSoup(text, 'html.parser')
+
+            links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
+
+            if links:
+                link = links[0]
+                pdf_url = link['href']
+                pdf_content = await self.fetch_pdf_content(session, pdf_url)
+                if pdf_content:
+                    logger.debug(f"Found PDF from: {pdf_url}")
+                    return pdf_content
+        except Exception as e:
+            logger.debug(f"Error trying to download {doi} from libgen: {e}")
+        return None
+
+    async def download_paper_google_scholar_async(self, session, doi):
+        """Search google scholar to find an article with the given doi, try to get the pdf"""
+        if not doi:
+            return None
+
+        try:
+            query = f'doi:"{doi}"'
+            params = {'q': query}
+            url = f'https://scholar.google.com/scholar?{urlencode(params)}'
+
+            text, headers = await self.fetch_with_headers(session, url, timeout=10)
+            if not text:
+                return None
+
+            soup = BeautifulSoup(text, 'html.parser')
+
+            # Find any links with [PDF]
+            links = soup.find_all('a', string=re.compile(r'\[PDF\]', re.IGNORECASE))
+
+            if links:
+                pdf_url = links[0]['href']
+                pdf_content = await self.fetch_pdf_content(session,pdf_url)
+                if pdf_content:
+                    logger.debug(f"Found PDF from: {pdf_url}")
+                    return pdf_content
+
+        except Exception as e:
+            logger.debug(f"Google Scholar error for {doi}: {e}")
+
+        return None
 
     async def download_paper_crossref_async(self, session, doi):
         """Alternative search method using Crossref"""
@@ -258,8 +258,8 @@ class PaperDownloader:
             if pdf_url:
                 pdf_content = await self.fetch_pdf_content(session, pdf_url)
                 if pdf_content:
-                    logger.debug(f"Found PDF from: {pdf_url}")
-                    return pdf_content
+                    logger.debug(f"Found PDF from: {pdf_url}")
+                    return pdf_content
         except Exception as e:
             logger.debug(f"Crossref error for {doi}: {e}")
         return None
@@ -294,35 +294,33 @@ class PaperDownloader:
         return None
 
     async def download_single_doi_async(self, doi, progress_callback):
-        """Downloads a single paper using a DOI, and updates the given progress_callback"""
-        if not doi:
-            return None, "Error: DOI not provided", "Error: DOI not provided"
-
-        try:
-            pdf_content = await self.download_with_retry_async(doi)
-
-            if pdf_content:
-                if doi is None:
-                    return None, "Error: DOI not provided", "Error: DOI not provided"
-                filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
-                filepath = os.path.join(self.output_dir, filename)
-
-                loop = asyncio.get_running_loop()
-                await loop.run_in_executor(self.executor, lambda: open(filepath, 'wb').write(pdf_content))
-
-                logger.info(f"Successfully downloaded: {filename}")
-                progress_callback(f"Successfully downloaded: <a href='https://doi.org/{doi}'>{doi}</a>")
-                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""
-            else:
-                logger.warning(f"Could not download: {doi}")
-                progress_callback(f"Could not download: <a href='https://doi.org/{doi}'>{doi}</a>")
-                return None, f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'
-
-        except Exception as e:
-            logger.error(f"Error processing {doi}: {e}")
-            progress_callback(f"Error processing {doi}: <a href='https://doi.org/{doi}'>{doi}</a> {e}")
-            return None, f"Error processing {doi}: {e}", f"Error processing {doi}: {e}"
+        """Downloads a single paper using a DOI and updates the UI via progress_callback; the first return value is either a filepath or the sentinel string "Could not download", which is the state reported on the UI"""
+        if not doi:
+            return None, "Error: DOI not provided", "Error: DOI not provided"
+
+        try:
+            pdf_content = await self.download_with_retry_async(doi)
+
+            if pdf_content:
+                if doi is None:
+                    return None, "Error: DOI not provided", "Error: DOI not provided"
+                filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
+                filepath = os.path.join(self.output_dir, filename)
+                loop = asyncio.get_running_loop()
+                await loop.run_in_executor(self.executor, lambda: open(filepath, 'wb').write(pdf_content))
+                logger.info(f"Successfully downloaded: {filename}")
+                progress_callback(f"Successfully downloaded: <a href='https://doi.org/{doi}'>{doi}</a>")
+
+                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""  # return with success status
+            else:
+                logger.warning(f"Could not download: {doi}")
+                progress_callback(f"Could not download: <a href='https://doi.org/{doi}'>{doi}</a>")
+                return "Could not download", f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'  # return a "could not download" state
+            except Exception as e:
+                logger.error(f"Error processing {doi}: {e}")
+                progress_callback(f"Error processing {doi}: <a href='https://doi.org/{doi}'>{doi}</a> {e}")
+                return "Could not download", f"Error processing {doi}: {e}", f"Error processing {doi}: {e}"  # return a "could not download" state
 
     async def download_multiple_dois_async(self, dois_text, progress_callback):
         """Downloads multiple papers from a list of DOIs and uses a callback for UI"""
         if not dois_text:
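The rewritten download_single_doi_async (the + side above) encodes three outcomes in its first return value: None when no DOI was given, a filepath on success, and the literal string "Could not download" on failure or exception. Callers therefore test the sentinel before testing truthiness. A hedged sketch of that contract, with a stub coroutine standing in for the real method:

import asyncio

async def download_single_doi_async(doi, progress_callback):
    # Stub for the real method: always fails, to exercise the sentinel path.
    if not doi:
        return None, "Error: DOI not provided", "Error: DOI not provided"
    progress_callback(f"Could not download: {doi}")
    return "Could not download", f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'

async def main():
    result, ok_msg, fail_msg = await download_single_doi_async("10.1000/xyz123", print)
    if result == "Could not download":   # explicit failure sentinel
        print("failed:", fail_msg)
    elif result:                         # any other truthy result is a filepath
        print("saved at:", result)

asyncio.run(main())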
@@ -335,34 +333,35 @@ class PaperDownloader:
         downloaded_files = []
         failed_dois = []
         downloaded_links = []
-
+
         for i, doi in enumerate(dois):
-            filepath, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
-            if filepath:
-                # Unique filename for zip
+            result, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
+            if result == "Could not download":
+                failed_dois.append(fail_message)  # reports the error message
+
+            elif result:  # if there was a downloaded pdf, a valid filepath
+                # Unique filename for zip
                 filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
                 filepath_unique = os.path.join(self.output_dir, filename)
-                os.rename(filepath, filepath_unique)
+                os.rename(result, filepath_unique)
                 downloaded_files.append(filepath_unique)
-                downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
-            else:
-                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
-
+                downloaded_links.append(success_message)
+
         if downloaded_files:
             zip_filename = 'papers.zip'
             loop = asyncio.get_running_loop()
             await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename,downloaded_files))
             logger.info(f"ZIP file created: {zip_filename}")
-
-        return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)
 
+        return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)
+
     def create_zip(self, zip_filename, downloaded_files):
-        with zipfile.ZipFile(zip_filename, 'w') as zipf:
-            for file_path in downloaded_files:
-                zipf.write(file_path, arcname=os.path.basename(file_path))
+        with zipfile.ZipFile(zip_filename, 'w') as zipf:
+            for file_path in downloaded_files:
+                zipf.write(file_path, arcname=os.path.basename(file_path))
 
     async def process_bibtex_async(self, bib_file, progress_callback):
-        """Process BibTeX file and download papers with multiple strategies and reports UI updates using a callback"""
+        """Process BibTeX file and download papers with multiple strategies"""
         # Read BibTeX file content from the uploaded object
         try:
             with open(bib_file.name, 'r', encoding='utf-8') as f:
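create_zip stays synchronous, so both bulk paths hand it to a worker thread via run_in_executor instead of blocking the event loop during file I/O. A self-contained sketch of that offloading pattern, assuming a ThreadPoolExecutor like the one app.py keeps in self.executor (the empty file list is purely illustrative):

import asyncio
import os
import zipfile
from concurrent.futures import ThreadPoolExecutor

def create_zip(zip_filename, downloaded_files):
    # Blocking zipfile I/O: safe to run inside a worker thread.
    with zipfile.ZipFile(zip_filename, 'w') as zipf:
        for file_path in downloaded_files:
            zipf.write(file_path, arcname=os.path.basename(file_path))

async def main():
    executor = ThreadPoolExecutor()
    downloaded_files = []  # the PDFs gathered by the download loop
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(executor, lambda: create_zip('papers.zip', downloaded_files))

asyncio.run(main())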
@@ -387,24 +386,22 @@ class PaperDownloader:
         failed_dois = []
         downloaded_links = []
 
-        tasks = [self.download_single_doi_async(doi, progress_callback) for doi in dois]
-        results = await asyncio.gather(*tasks)
-
-        for i, (filepath, success_message, fail_message) in enumerate(results):
-            if filepath:
-                # Unique filename for zip
-                filename = f"{str(dois[i]).replace('/', '_').replace('.', '_')}_{i}.pdf"
-                filepath_unique = os.path.join(self.output_dir, filename)
-                os.rename(filepath, filepath_unique)
-                downloaded_files.append(filepath_unique)
-                downloaded_links.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')
-            else:
-                failed_dois.append(f'<a href="https://doi.org/{dois[i]}">{dois[i]}</a>')
-
+        for i, doi in enumerate(dois):
+            result, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
+            if result == "Could not download":
+                failed_dois.append(fail_message)  # report failure to download in UI
+            elif result:  # if there is a filepath as result, means success
+                # Unique filename for zip
+                filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
+                filepath_unique = os.path.join(self.output_dir, filename)
+                os.rename(result, filepath_unique)
+                downloaded_files.append(filepath_unique)
+                downloaded_links.append(success_message)  # report to the list of successfully downloaded links
+
         if downloaded_files:
             zip_filename = 'papers.zip'
             loop = asyncio.get_running_loop()
-            await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename,downloaded_files))
+            await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename, downloaded_files))
             logger.info(f"ZIP file created: {zip_filename}")
 
         return zip_filename, "\n".join(downloaded_links), "\n".join(failed_dois)
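This hunk swaps asyncio.gather for a plain sequential loop: the old code launched every download_single_doi_async concurrently and inspected the results afterwards, while the new code awaits one DOI at a time and applies the sentinel check as each result arrives. A small sketch contrasting the two shapes, with a sleep-based coroutine as a placeholder for the real download:

import asyncio

async def download(doi):
    await asyncio.sleep(0.1)  # placeholder for the real network work
    return f"{doi.replace('/', '_')}.pdf"

async def main():
    dois = ["10.1/a", "10.1/b", "10.1/c"]

    # Old shape: all coroutines in flight at once, results collected at the end.
    results = await asyncio.gather(*[download(d) for d in dois])
    print(results)

    # New shape: strictly one at a time, each result handled immediately.
    for doi in dois:
        print("done:", await download(doi))

asyncio.run(main())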
@@ -418,8 +415,7 @@ def create_gradio_interface():
        # Check file type
        if not bib_file.name.lower().endswith('.bib'):
            return None, "Error: Please upload a .bib file", "Error: Please upload a .bib file", None
-
-
+
        zip_path, downloaded_dois, failed_dois = await downloader.process_bibtex_async(bib_file, progress.update)
 
        return zip_path, downloaded_dois, failed_dois, None
@@ -432,7 +428,6 @@ def create_gradio_interface():
        else:
            return None, "Please provide a .bib file, a single DOI, or a list of DOIs", "Please provide a .bib file, a single DOI, or a list of DOIs", None
 
-
    # Gradio Interface
    interface = gr.Interface(
        fn=download_papers,
@@ -489,7 +484,6 @@ def create_gradio_interface():
        """,
        cache_examples=False,
    )
-
    # Add Javascript to update HTML
    interface.load = """
    function(downloaded_dois, failed_dois){
@@ -509,7 +503,6 @@ def create_gradio_interface():
    """
    return interface
 
-
def main():
    interface = create_gradio_interface()
    interface.launch(share=True)