Update app.py
app.py CHANGED
@@ -228,10 +228,10 @@ class PaperDownloader:
                 with open(filepath, 'wb') as f:
                     f.write(pdf_content)
                 logger.info(f"Successfully downloaded: {filename}")
-                return filepath,
+                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', ""
             else:
                 logger.warning(f"Could not download: {doi}")
-                return None, f"Could not download {doi}", f"
+                return None, f"Could not download {doi}", f'<a href="https://doi.org/{doi}">{doi}</a>'

         except Exception as e:
             logger.error(f"Error processing {doi}: {e}")
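Review note: `download_single_doi` now returns a fixed three-field tuple of (file path, success HTML, failure HTML), with the DOI rendered as a clickable anchor. A minimal sketch of the link format, using a hypothetical helper name that is not part of the diff:

    # Hypothetical helper (illustration only): the anchor format the new
    # return values feed into the gr.HTML outputs.
    def doi_link(doi: str) -> str:
        return f'<a href="https://doi.org/{doi}">{doi}</a>'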
@@ -248,6 +248,7 @@ class PaperDownloader:

         downloaded_files = []
         failed_dois = []
+        downloaded_links = []
         for i, doi in enumerate(tqdm(dois, desc="Downloading papers")):
             filepath, success_message, fail_message = self.download_single_doi(doi)
             if filepath:
@@ -256,9 +257,10 @@ class PaperDownloader:
                 filepath_unique = os.path.join(self.output_dir, filename)
                 os.rename(filepath,filepath_unique)
                 downloaded_files.append(filepath_unique)
-
-                failed_dois.append(doi)
+                downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')

+            else:
+                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')

         if downloaded_files:
             zip_filename = 'papers.zip'
@@ -267,8 +269,7 @@ class PaperDownloader:
                 zipf.write(file_path, arcname=os.path.basename(file_path))
             logger.info(f"ZIP file created: {zip_filename}")

-        return zip_filename if downloaded_files else None, "\n".join(
-
+        return zip_filename if downloaded_files else None, "\n".join(downloaded_links), "\n".join(failed_dois)


     def process_bibtex(self, bib_file):
@@ -279,14 +280,14 @@ class PaperDownloader:
                 bib_content = f.read()
         except Exception as e:
             logger.error(f"Error reading uploaded file {bib_file.name}: {e}")
-            return None, f"Error reading uploaded file {bib_file.name}: {e}", f"Error reading uploaded file {bib_file.name}: {e}"
+            return None, f"Error reading uploaded file {bib_file.name}: {e}", f"Error reading uploaded file {bib_file.name}: {e}", None

         # Parse BibTeX data
         try:
             bib_database = bibtexparser.loads(bib_content)
         except Exception as e:
             logger.error(f"Error parsing BibTeX data: {e}")
-            return None, f"Error parsing BibTeX data: {e}", f"Error parsing BibTeX data: {e}"
+            return None, f"Error parsing BibTeX data: {e}", f"Error parsing BibTeX data: {e}", None

         # Extract DOIs
         dois = [entry.get('doi') for entry in bib_database.entries if entry.get('doi')]
@@ -295,6 +296,7 @@ class PaperDownloader:
         # Result lists
         downloaded_files = []
         failed_dois = []
+        downloaded_links = []

         # Download PDFs
         for doi in tqdm(dois, desc="Downloading papers"):
@@ -313,13 +315,13 @@ class PaperDownloader:
                         f.write(pdf_content)

                     downloaded_files.append(filepath)
+                    downloaded_links.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
                     logger.info(f"Successfully downloaded: {filename}")
                 else:
-                    failed_dois.append(doi)
-                    logger.warning(f"Could not download: {doi}")
+                    failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')

             except Exception as e:
-                failed_dois.append(doi)
+                failed_dois.append(f'<a href="https://doi.org/{doi}">{doi}</a>')
                 logger.error(f"Error processing {doi}: {e}")

         # Create ZIP of downloaded papers
@@ -330,8 +332,7 @@ class PaperDownloader:
                 zipf.write(file_path, arcname=os.path.basename(file_path))
             logger.info(f"ZIP file created: {zip_filename}")

-        return zip_filename, "\n".join(
-
+        return zip_filename, "\n".join(downloaded_links), "\n".join(failed_dois), None

 def create_gradio_interface():
     """Create Gradio interface for Paper Downloader"""
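Review note: every return path now hands back one value per Gradio output component, matched positionally. A minimal, self-contained sketch of that contract under standard `gr.Interface` semantics (the fake function and labels here are illustrative, not the app's):

    import gradio as gr

    def fake_download(doi):
        # One returned value per entry in `outputs`, in order.
        link = f'<a href="https://doi.org/{doi}">{doi}</a>'
        return None, link, ""  # no ZIP file, one downloaded link, no failures

    demo = gr.Interface(
        fn=fake_download,
        inputs=gr.Textbox(label="DOI"),
        outputs=[
            gr.File(label="ZIP"),
            gr.HTML(label="Downloaded DOIs"),
            gr.HTML(label="Failed DOIs"),
        ],
    )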
@@ -365,8 +366,25 @@ def create_gradio_interface():
         ],
         outputs=[
             gr.File(label="Download Papers (ZIP) or Single PDF"),
-
-
+            gr.HTML(label="""
+                <div style='padding-bottom: 5px; font-weight: bold;'>
+                    Enter Single DOI
+                </div>
+                <div style='border: 1px solid #ddd; padding: 5px; border-radius: 5px;'>
+                    <div style='padding-bottom: 5px; font-weight: bold;'>
+                        Downloaded DOIs
+                    </div>
+                    <div id="downloaded-dois"></div>
+                </div>
+            """),
+            gr.HTML(label="""
+                <div style='border: 1px solid #ddd; padding: 5px; border-radius: 5px;'>
+                    <div style='padding-bottom: 5px; font-weight: bold;'>
+                        Failed DOIs
+                    </div>
+                    <div id="failed-dois"></div>
+                </div>
+            """),
             gr.File(label="Downloaded Single PDF")
         ],
         title="🔬 Academic Paper Batch Downloader",
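Review note: `gr.HTML(label=...)` treats the string as the component's label, so the markup added above will likely show up as literal text rather than rendered HTML. If the intent is a styled container with a placeholder div, the usual route is to pass the markup as the initial `value` (a sketch under that assumption, not the author's code):

    import gradio as gr

    # Label stays plain text; HTML markup goes in `value`, which the
    # string returned by the wired function later replaces.
    downloaded_box = gr.HTML(
        label="Downloaded DOIs",
        value='<div id="downloaded-dois"></div>',
    )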
@@ -390,10 +408,33 @@ def create_gradio_interface():
             border-radius: 10px;
             box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
         }
+        .output-text a {
+            color: #007bff; /* Blue color for hyperlinks */
+        }
         """,
-        cache_examples = False
+        cache_examples = False,
     )
+
+    # Add Javascript to update HTML
+    interface.load = """
+    function(downloaded_dois, failed_dois){
+        let downloaded_html = '<ul>';
+        downloaded_dois.split('\\n').filter(Boolean).forEach(doi => {
+            downloaded_html += '<li>' + doi + '</li>';
+        });
+        downloaded_html += '</ul>';
+        document.querySelector("#downloaded-dois").innerHTML = downloaded_html;
+
+        let failed_html = '<ul>';
+        failed_dois.split('\\n').filter(Boolean).forEach(doi => {
+            failed_html += '<li>' + doi + '</li>';
+        });
+        failed_html += '</ul>';
+        document.querySelector("#failed-dois").innerHTML = failed_html;
+        return [downloaded_html, failed_html];

+    }
+    """
     return interface

 def main():
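Review note: assigning a string to `interface.load` almost certainly has no effect; `load` is a method on Gradio interfaces, not a hook that accepts raw JavaScript. Because the outputs are already `gr.HTML` components, the same result needs no JavaScript at all: build the `<ul>` in Python and return it (a sketch, assuming the newline-joined link strings produced by the returns above):

    def as_html_list(joined_links: str) -> str:
        # Convert the "\n".join(...) strings returned by the download
        # methods into a <ul> that a gr.HTML output renders directly.
        items = "".join(f"<li>{link}</li>" for link in joined_links.split("\n") if link)
        return f"<ul>{items}</ul>"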
|