# polymer_semantic_pdfs / 1_hf_up_and_download.py

import argparse
import math
import os
import subprocess
import time
from pathlib import Path

from huggingface_hub import create_repo, hf_hub_download, list_repo_files, upload_file


def split_large_file(file_path, chunk_size_mb=1000):
    """Split a large file into smaller chunks."""
    file_path = Path(file_path)
    file_size = os.path.getsize(file_path) / (1024 * 1024)  # Size in MB

    if file_size <= chunk_size_mb:
        print(f"File {file_path.name} is {file_size:.2f}MB, no need to split.")
        return [file_path]

    # Create a directory for the chunks if it doesn't exist
    chunks_dir = file_path.parent / f"{file_path.stem}_chunks"
    os.makedirs(chunks_dir, exist_ok=True)

    # Calculate the number of chunks needed
    num_chunks = math.ceil(file_size / chunk_size_mb)
    print(f"Splitting {file_path.name} ({file_size:.2f}MB) into {num_chunks} chunks...")

    # Use the Unix `split` command for efficient splitting. Note that this
    # requires a Unix-like environment; see combine_chunks below for a
    # portable way to reassemble the pieces.
    chunk_prefix = chunks_dir / file_path.stem
    subprocess.run(
        ["split", "-b", f"{chunk_size_mb}m", str(file_path), f"{chunk_prefix}_part_"],
        check=True,  # Fail loudly here rather than uploading an incomplete set
    )

    # Collect the chunk files in order (`split` names them _part_aa, _part_ab, ...)
    chunk_files = sorted(chunks_dir.glob(f"{file_path.stem}_part_*"))
    print(f"Created {len(chunk_files)} chunk files in {chunks_dir}")
    return chunk_files
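

# Editor's sketch: the inverse of split_large_file, reassembling the chunks in
# pure Python so downloads also work where the Unix `cat` command is not
# available (e.g. on Windows). The name combine_chunks and its defaults are
# assumptions, not part of the original script; the chunk naming
# ("<stem>_part_*", here "pdfs.tar_part_*") matches what split_large_file
# produces above.
def combine_chunks(chunks_dir="chunks", stem="pdfs.tar", output_path="pdfs.tar.gz"):
    """Concatenate downloaded chunk files back into the original archive."""
    chunks_dir = Path(chunks_dir)
    chunk_files = sorted(chunks_dir.glob(f"{stem}_part_*"))
    if not chunk_files:
        raise FileNotFoundError(f"No files matching {stem}_part_* in {chunks_dir}")
    with open(output_path, "wb") as out:
        for chunk_file in chunk_files:
            with open(chunk_file, "rb") as src:
                # Copy in 16MB blocks to keep memory usage flat for large archives
                while block := src.read(16 * 1024 * 1024):
                    out.write(block)
    print(f"Combined {len(chunk_files)} chunks into {output_path}")
    return output_path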
def upload_files(api_token, repo_id):
    # Create the repository first; exist_ok avoids an error if it already exists
    try:
        create_repo(
            repo_id=repo_id,
            token=api_token,
            repo_type="dataset",
            private=False,  # Set to True for a private dataset
            exist_ok=True,
        )
        print(f"Created (or found existing) repository: {repo_id}")
    except Exception as e:
        print(f"Repository creation failed: {e}")

    # Add a short delay to ensure repository creation has propagated
    time.sleep(5)

    # Upload the script itself
    script_path = "1_hf_up_and_download.py"
    try:
        print(f"Uploading script: {script_path}")
        upload_file(
            repo_id=repo_id,
            path_or_fileobj=script_path,
            path_in_repo=script_path,
            token=api_token,
            repo_type="dataset",
        )
        print(f"Uploaded {script_path} to {repo_id}/{script_path}")
    except Exception as e:
        print(f"Upload failed for script: {e}")

    # Split the large archive into chunks if needed
    local_file = "pdfs.tar.gz"
    chunk_files = split_large_file(local_file)

    # Upload each chunk
    for i, chunk_file in enumerate(chunk_files):
        repo_file = chunk_file.name
        try:
            print(f"Uploading chunk {i + 1}/{len(chunk_files)}: {repo_file}")
            upload_file(
                repo_id=repo_id,
                path_or_fileobj=str(chunk_file),
                path_in_repo=repo_file,
                token=api_token,
                repo_type="dataset",
            )
            print(f"Uploaded {chunk_file} to {repo_id}/{repo_file}")
        except Exception as e:
            print(f"Upload failed for {chunk_file}: {e}")
def download_files(api_token, repo_id):
    # Check whether the archive was uploaded as split chunks
    try:
        files = list_repo_files(repo_id=repo_id, repo_type="dataset", token=api_token)
        # Chunks are named "<stem>_part_*" by split_large_file, e.g. "pdfs.tar_part_aa"
        chunk_files = [f for f in files if "_part_" in f]
        if chunk_files:
            print(f"Found {len(chunk_files)} chunk files. Downloading...")
            os.makedirs("chunks", exist_ok=True)
            for file in chunk_files:
                # local_dir_use_symlinks is deprecated in recent huggingface_hub
                # releases; local_dir now receives real files by default.
                downloaded_path = hf_hub_download(
                    repo_id=repo_id,
                    filename=file,
                    token=api_token,
                    repo_type="dataset",
                    local_dir="chunks",
                )
                print(f"Downloaded {file} to {downloaded_path}")
            print("To combine chunks, use: cat chunks/pdfs.tar_part_* > pdfs.tar.gz")
            return
    except Exception as e:
        print(f"Error checking for chunk files: {e}")

    # Fall back to downloading the single file if no chunks were found
    try:
        downloaded_path = hf_hub_download(
            repo_id=repo_id,
            filename="pdfs.tar.gz",
            token=api_token,
            repo_type="dataset",
            local_dir=".",
        )
        print(f"Downloaded pdfs.tar.gz to {downloaded_path}")
    except Exception as e:
        print(f"Download failed: {e}")
def main():
    parser = argparse.ArgumentParser(
        description="Upload or download files to/from a remote Hugging Face dataset."
    )
    parser.add_argument(
        "operation",
        choices=["upload", "download"],
        help="Specify the operation: upload or download.",
    )
    args = parser.parse_args()

    # Try to get an API token from environment variables or the HF token cache
    API_TOKEN = os.environ.get("HUGGINGFACE_API_TOKEN") or os.environ.get(
        "HUGGINGFACEHUB_API_TOKEN"
    )
    if not API_TOKEN:
        try:
            from huggingface_hub.constants import HF_TOKEN_PATH

            if os.path.exists(HF_TOKEN_PATH):
                with open(HF_TOKEN_PATH, "r") as f:
                    API_TOKEN = f.read().strip()
        except ImportError:
            pass
    if not API_TOKEN:
        raise ValueError(
            "No Hugging Face API token found. Set the HUGGINGFACE_API_TOKEN "
            "environment variable or log in with `huggingface-cli login`."
        )

    # Include your username in the repo_id
    username = "liuganghuggingface"  # Replace with your actual Hugging Face username
    repo_id = f"{username}/polymer_semantic_pdfs"

    if args.operation == "upload":
        upload_files(API_TOKEN, repo_id)
    elif args.operation == "download":
        download_files(API_TOKEN, repo_id)


if __name__ == "__main__":
    main()
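
# Usage (based on the argparse setup above):
#   python 1_hf_up_and_download.py upload     # split pdfs.tar.gz and push the chunks
#   python 1_hf_up_and_download.py download   # fetch the chunks into ./chunks/
#   cat chunks/pdfs.tar_part_* > pdfs.tar.gz  # or call combine_chunks() instead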