C2MV committed on
Commit bc25c79 · verified · 1 Parent(s): 4e2e145

Update app.py

Files changed (1)
  1. app.py +149 -148
app.py CHANGED
@@ -141,100 +141,100 @@ class PaperDownloader:
         return None
 
     async def download_paper_scihub_async(self, session, doi):
         """Improved method to download paper from Sci-Hub using async requests"""
         if not doi:
             logger.warning("DOI not provided")
             return None
 
         for base_url in self.download_sources:
             try:
                 scihub_url = f"{base_url}{self.clean_doi(doi)}"
                 text, headers = await self.fetch_with_headers(session, scihub_url, timeout=15)
                 if not text:
                     continue
 
                 # Search for multiple PDF URL patterns
                 pdf_patterns = [
                     r'(https?://[^\s<>"]+?\.pdf)',
                     r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
                     r'(https?://[^\s<>"]+?\/pdf\/[^\s<>"]+)',
                 ]
 
                 pdf_urls = []
                 for pattern in pdf_patterns:
                     pdf_urls.extend(re.findall(pattern, text))
 
                 # Try downloading from found URLs, but iterate over ALL
                 for pdf_url in pdf_urls:
                     pdf_content = await self.fetch_pdf_content(session,pdf_url)
                     if pdf_content:
                         logger.debug(f"Found PDF from: {pdf_url}")
                         return pdf_content
 
             except Exception as e:
                 logger.debug(f"Error trying to download {doi} from {base_url}: {e}")
 
         return None
 
     async def download_paper_libgen_async(self, session, doi):
         """Download from Libgen, handles the query and the redirection"""
         if not doi:
             return None
 
         base_url = 'https://libgen.rs/scimag/'
         try:
             search_url = f"{base_url}?q={self.clean_doi(doi)}"
             text, headers = await self.fetch_with_headers(session, search_url, timeout=10)
 
             if not text or "No results" in text:
                 logger.debug(f"No results for DOI: {doi} on libgen")
                 return None
 
             soup = BeautifulSoup(text, 'html.parser')
 
             links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')
 
             if links:
                 link = links[0]
                 pdf_url = link['href']
                 pdf_content = await self.fetch_pdf_content(session, pdf_url)
                 if pdf_content:
                     logger.debug(f"Found PDF from: {pdf_url}")
                     return pdf_content
         except Exception as e:
             logger.debug(f"Error trying to download {doi} from libgen: {e}")
         return None
 
     async def download_paper_google_scholar_async(self, session, doi):
         """Search google scholar to find an article with the given doi, try to get the pdf"""
         if not doi:
             return None
 
         try:
             query = f'doi:"{doi}"'
             params = {'q': query}
             url = f'https://scholar.google.com/scholar?{urlencode(params)}'
 
             text, headers = await self.fetch_with_headers(session, url, timeout=10)
             if not text:
                 return None
 
             soup = BeautifulSoup(text, 'html.parser')
 
             # Find any links with [PDF]
             links = soup.find_all('a', string=re.compile(r'\[PDF\]', re.IGNORECASE))
 
             if links:
                 pdf_url = links[0]['href']
                 pdf_content = await self.fetch_pdf_content(session,pdf_url)
                 if pdf_content:
                     logger.debug(f"Found PDF from: {pdf_url}")
                     return pdf_content
 
         except Exception as e:
             logger.debug(f"Google Scholar error for {doi}: {e}")
 
         return None
 
     async def download_paper_crossref_async(self, session, doi):
         """Alternative search method using Crossref"""
@@ -258,12 +258,12 @@ class PaperDownloader:
             if pdf_url:
                 pdf_content = await self.fetch_pdf_content(session, pdf_url)
                 if pdf_content:
                     logger.debug(f"Found PDF from: {pdf_url}")
                     return pdf_content
         except Exception as e:
             logger.debug(f"Crossref error for {doi}: {e}")
         return None
 
     async def download_with_retry_async(self, doi, max_retries=3, initial_delay=2):
         """Downloads a paper using multiple strategies with exponential backoff and async requests"""
         pdf_content = None
@@ -294,35 +294,36 @@ class PaperDownloader:
         return None
 
     async def download_single_doi_async(self, doi, progress_callback):
-        """Downloads a single paper using a DOI and updates the UI callback with messages, can be a file or the string "could not download" and that's the message reported on the UI"""
+        """Downloads a single paper using a DOI"""
         if not doi:
             return None, "Error: DOI not provided", "Error: DOI not provided"
 
         try:
             pdf_content = await self.download_with_retry_async(doi)
 
             if pdf_content:
                 if doi is None:
                     return None, "Error: DOI not provided", "Error: DOI not provided"
                 filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
                 filepath = os.path.join(self.output_dir, filename)
                 loop = asyncio.get_running_loop()
                 await loop.run_in_executor(self.executor, lambda: open(filepath, 'wb').write(pdf_content))
+
                 logger.info(f"Successfully downloaded: {filename}")
                 progress_callback(f"Successfully downloaded: <a href='https://doi.org/{doi}'>{doi}</a>")
-
-                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', "" #return with success status
+                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""
             else:
                 logger.warning(f"Could not download: {doi}")
                 progress_callback(f"Could not download: <a href='https://doi.org/{doi}'>{doi}</a>")
-                return "Could not download", f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>' #return a "could not download" state
+                return None, f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'
+
         except Exception as e:
             logger.error(f"Error processing {doi}: {e}")
             progress_callback(f"Error processing {doi}: <a href='https://doi.org/{doi}'>{doi}</a> {e}")
-            return "Could not download", f"Error processing {doi}: {e}", f"Error processing {doi}: {e}" #return a "could not download" state
+            return None, f"Error processing {doi}: {e}", f"Error processing {doi}: {e}"
 
     async def download_multiple_dois_async(self, dois_text, progress_callback):
-        """Downloads multiple papers from a list of DOIs and uses a callback for UI"""
+        """Downloads multiple papers from a list of DOIs and updates the UI using the progress_callback"""
         if not dois_text:
             return None, "Error: No DOIs provided", "Error: No DOIs provided"
 
@@ -333,35 +334,34 @@ class PaperDownloader:
         downloaded_files = []
         failed_dois = []
         downloaded_links = []
 
         for i, doi in enumerate(dois):
-            result, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
-            if result == "Could not download":
-                failed_dois.append(fail_message) #reports the error message
-
-            elif result: # if there was a downloaded pdf, a valid filepath
-                # Unique filename for zip
-                filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
-                filepath_unique = os.path.join(self.output_dir, filename)
-                os.rename(result, filepath_unique)
-                downloaded_files.append(filepath_unique)
-                downloaded_links.append(success_message)
+            filepath, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
+            if filepath:
+                # Unique filename for zip
+                filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
+                filepath_unique = os.path.join(self.output_dir, filename)
+                os.rename(filepath, filepath_unique)
+                downloaded_files.append(filepath_unique)
+                downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
+            else:
+                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
 
         if downloaded_files:
             zip_filename = 'papers.zip'
             loop = asyncio.get_running_loop()
             await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename,downloaded_files))
             logger.info(f"ZIP file created: {zip_filename}")
 
         return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)
 
     def create_zip(self, zip_filename, downloaded_files):
         with zipfile.ZipFile(zip_filename, 'w') as zipf:
             for file_path in downloaded_files:
                 zipf.write(file_path, arcname=os.path.basename(file_path))
 
     async def process_bibtex_async(self, bib_file, progress_callback):
-        """Process BibTeX file and download papers with multiple strategies"""
+        """Process BibTeX file and download papers with multiple strategies and reports UI updates using a callback"""
         # Read BibTeX file content from the uploaded object
         try:
             with open(bib_file.name, 'r', encoding='utf-8') as f:
@@ -385,25 +385,25 @@ class PaperDownloader:
         downloaded_files = []
         failed_dois = []
         downloaded_links = []
 
         for i, doi in enumerate(dois):
-            result, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
-            if result == "Could not download":
-                failed_dois.append(fail_message) #report failure to download in UI
-            elif result: #if there is a filepath as result, means success
-                # Unique filename for zip
-                filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
-                filepath_unique = os.path.join(self.output_dir, filename)
-                os.rename(result, filepath_unique)
-                downloaded_files.append(filepath_unique)
-                downloaded_links.append(success_message) # report to the list of sucessfully donwloaded links
+            filepath, success_message, fail_message = await self.download_single_doi_async(doi, progress_callback)
+            if filepath:
+                # Unique filename for zip
+                filename = f"{str(doi).replace('/', '_').replace('.', '_')}_{i}.pdf"
+                filepath_unique = os.path.join(self.output_dir, filename)
+                os.rename(filepath, filepath_unique)
+                downloaded_files.append(filepath_unique)
+                downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
+            else:
+                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
 
         if downloaded_files:
             zip_filename = 'papers.zip'
             loop = asyncio.get_running_loop()
             await loop.run_in_executor(self.executor, lambda: self.create_zip(zip_filename, downloaded_files))
             logger.info(f"ZIP file created: {zip_filename}")
 
         return zip_filename, "\n".join(downloaded_links), "\n".join(failed_dois)
 
 def create_gradio_interface():
@@ -417,16 +417,16 @@ def create_gradio_interface():
                 return None, "Error: Please upload a .bib file", "Error: Please upload a .bib file", None
 
             zip_path, downloaded_dois, failed_dois = await downloader.process_bibtex_async(bib_file, progress.update)
-
             return zip_path, downloaded_dois, failed_dois, None
         elif doi_input:
             filepath, message, failed_doi = await downloader.download_single_doi_async(doi_input,progress.update)
             return None, message, failed_doi, filepath
         elif dois_input:
             zip_path, downloaded_dois, failed_dois = await downloader.download_multiple_dois_async(dois_input, progress.update)
             return zip_path, downloaded_dois, failed_dois, None
         else:
             return None, "Please provide a .bib file, a single DOI, or a list of DOIs", "Please provide a .bib file, a single DOI, or a list of DOIs", None
+
 
     # Gradio Interface
     interface = gr.Interface(
@@ -435,7 +435,6 @@ def create_gradio_interface():
             gr.File(file_types=['.bib'], label="Upload BibTeX File"),
             gr.Textbox(label="Enter Single DOI", placeholder="10.xxxx/xxxx"),
             gr.Textbox(label="Enter Multiple DOIs (one per line)", placeholder="10.xxxx/xxxx\n10.yyyy/yyyy\n...")
-
         ],
         outputs=[
             gr.File(label="Download Papers (ZIP) or Single PDF"),
@@ -484,6 +483,7 @@ def create_gradio_interface():
         """,
         cache_examples=False,
     )
+
     # Add Javascript to update HTML
     interface.load = """
     function(downloaded_dois, failed_dois){
@@ -503,6 +503,7 @@ def create_gradio_interface():
     """
     return interface
 
+
 def main():
     interface = create_gradio_interface()
     interface.launch(share=True)
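The substantive change in this commit is the return contract of `download_single_doi_async`: the `"Could not download"` sentinel string is replaced by `None`, so the revised loops in `download_multiple_dois_async` and `process_bibtex_async` test the first returned element for truthiness instead of comparing against a magic string. A minimal sketch of calling the updated method under that contract — assuming the module is importable as `app` and that `PaperDownloader` can be constructed without arguments, neither of which appears in this diff:

```python
# Hypothetical driver; the module path, the no-argument constructor, and the
# placeholder DOI are assumptions -- only the method calls appear in the diff.
import asyncio

from app import PaperDownloader  # assumed module name


async def demo():
    downloader = PaperDownloader()  # assumed no-arg construction

    # Any callable taking a string works as the progress callback; the diff
    # invokes it as progress_callback(f"Successfully downloaded: ...").
    def progress(message):
        print(message)

    # New contract: a real filepath on success, None on failure --
    # no "Could not download" sentinel string to compare against.
    filepath, success_message, fail_message = await downloader.download_single_doi_async(
        "10.1000/xyz123", progress  # placeholder DOI for illustration
    )
    if filepath:
        print(f"Saved to {filepath}")
    else:
        print(f"Failed: {fail_message}")


asyncio.run(demo())
```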