import argparse
import os
from huggingface_hub import upload_file, hf_hub_download, create_repo, list_repo_files
import time
import math
from pathlib import Path
import subprocess

def split_large_file(file_path, chunk_size_mb=1000):
    """Split a large file into smaller chunks."""
    file_path = Path(file_path)
    file_size = os.path.getsize(file_path) / (1024 * 1024)  # Size in MB
    
    if file_size <= chunk_size_mb:
        print(f"File {file_path.name} is {file_size:.2f}MB, no need to split.")
        return [file_path]
    
    # Create a directory for chunks if it doesn't exist
    chunks_dir = file_path.parent / f"{file_path.stem}_chunks"
    os.makedirs(chunks_dir, exist_ok=True)
    
    # Calculate number of chunks needed
    num_chunks = math.ceil(file_size / chunk_size_mb)
    print(f"Splitting {file_path.name} ({file_size:.2f}MB) into {num_chunks} chunks...")
    
    # Use the coreutils `split` command for efficient splitting
    # (requires a Unix-like system; see the pure-Python fallback below)
    chunk_prefix = chunks_dir / file_path.stem
    subprocess.run([
        "split",
        "-b", f"{chunk_size_mb}m",
        str(file_path),
        f"{chunk_prefix}_part_"
    ], check=True)  # Fail loudly if split is missing or errors out
    
    # Get all chunk files
    chunk_files = sorted(chunks_dir.glob(f"{file_path.stem}_part_*"))
    print(f"Created {len(chunk_files)} chunk files in {chunks_dir}")
    return chunk_files
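
# The coreutils `split` dependency above is absent on Windows. As a portable
# alternative, here is a minimal pure-Python splitter sketch; the function
# name and the 64 MiB streaming block size are our choices, not part of the
# original script. It mirrors the `_part_` naming so the chunks sort and
# recombine the same way.
def split_file_pure_python(file_path, chunk_size_mb=1000):
    """Split file_path into numbered chunks using only the standard library."""
    file_path = Path(file_path)
    chunks_dir = file_path.parent / f"{file_path.stem}_chunks"
    os.makedirs(chunks_dir, exist_ok=True)
    chunk_bytes = chunk_size_mb * 1024 * 1024
    block_bytes = 64 * 1024 * 1024  # Stream in 64 MiB blocks to bound memory use
    chunk_files = []
    with open(file_path, "rb") as src:
        index = 0
        while True:
            remaining = chunk_bytes
            chunk_path = chunks_dir / f"{file_path.stem}_part_{index:04d}"
            with open(chunk_path, "wb") as dst:
                while remaining > 0:
                    block = src.read(min(block_bytes, remaining))
                    if not block:
                        break
                    dst.write(block)
                    remaining -= len(block)
            if remaining == chunk_bytes:  # Nothing written: end of input reached
                chunk_path.unlink()
                break
            chunk_files.append(chunk_path)
            index += 1
    return chunk_files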

def upload_files(api_token, repo_id):
    # Create the repository first if it doesn't exist
    try:
        create_repo(
            repo_id=repo_id,
            token=api_token,
            repo_type="dataset",
            private=False,  # Public dataset
            exist_ok=True   # Succeed silently if the repository already exists
        )
        print(f"Repository ready: {repo_id}")
    except Exception as e:
        print(f"Repository creation failed: {e}")
    
    # Add a delay to ensure repository creation is complete
    time.sleep(5)

    # Upload the script itself
    try:
        script_path = "1_hf_up_and_download.py"
        print(f"Uploading script: {script_path}")
        upload_file(
            repo_id=repo_id,
            path_or_fileobj=script_path,
            path_in_repo=script_path,
            token=api_token,
            repo_type="dataset",
        )
        print(f"Uploaded {script_path} to {repo_id}/{script_path}")
    except Exception as e:
        print(f"Upload failed for script: {e}")

    # Split the large file into chunks if needed
    local_file = "pdfs.tar.gz"
    if not os.path.exists(local_file):
        raise FileNotFoundError(f"{local_file} not found in the current directory")
    chunk_files = split_large_file(local_file)
    
    # Upload each chunk
    for i, chunk_file in enumerate(chunk_files):
        try:
            repo_file = chunk_file.name
            print(f"Uploading chunk {i+1}/{len(chunk_files)}: {repo_file}")
            
            upload_file(
                repo_id=repo_id,
                path_or_fileobj=str(chunk_file),
                path_in_repo=repo_file,
                token=api_token,
                repo_type="dataset",
            )
            print(f"Uploaded {chunk_file} to {repo_id}/{repo_file}")
        except Exception as e:
            print(f"Upload failed for {chunk_file}: {e}")

def download_files(api_token, repo_id):
    # Check if we have split files
    try:
        # List files in the repository
        files = list_repo_files(repo_id=repo_id, repo_type="dataset", token=api_token)
        
        # Filter for our chunk files. Note: Path("pdfs.tar.gz").stem is
        # "pdfs.tar", so split_large_file names the chunks "pdfs.tar_part_*".
        chunk_files = [f for f in files if "_part_" in f]
        
        if chunk_files:
            print(f"Found {len(chunk_files)} chunk files. Downloading...")
            os.makedirs("chunks", exist_ok=True)
            
            for file in chunk_files:
                # Note: local_dir_use_symlinks is deprecated in recent
                # huggingface_hub releases, which copy into local_dir by default
                downloaded_path = hf_hub_download(
                    repo_id=repo_id,
                    filename=file,
                    token=api_token,
                    repo_type="dataset",
                    local_dir="chunks",
                    local_dir_use_symlinks=False
                )
                print(f"Downloaded {file} to {downloaded_path}")
            
            print("To combine chunks, use: cat chunks/pdfs_part_* > pdfs.tar.gz")
            return
    except Exception as e:
        print(f"Error checking for chunk files: {e}")
    
    # Fall back to downloading the single file if no chunks found
    try:
        downloaded_path = hf_hub_download(
            repo_id=repo_id,
            filename="pdfs.tar.gz",
            token=api_token,
            repo_type="dataset",
            local_dir=".",
            local_dir_use_symlinks=False
        )
        print(f"Downloaded pdfs.tar.gz file to {downloaded_path}")
    except Exception as e:
        print(f"Download failed: {e}")

def main():
    parser = argparse.ArgumentParser(
        description="Upload or download files to/from a remote Hugging Face dataset."
    )
    parser.add_argument(
        "operation",
        choices=["upload", "download"],
        help="Specify the operation: upload or download."
    )
    args = parser.parse_args()

    # Try the common token environment variables (HF_TOKEN is the one the
    # huggingface_hub library itself reads), then fall back to the token
    # cached by `huggingface-cli login`
    API_TOKEN = (
        os.environ.get("HF_TOKEN")
        or os.environ.get("HUGGINGFACE_API_TOKEN")
        or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
    )
    if not API_TOKEN:
        try:
            from huggingface_hub.constants import HF_TOKEN_PATH
            if os.path.exists(HF_TOKEN_PATH):
                with open(HF_TOKEN_PATH, "r") as f:
                    API_TOKEN = f.read().strip()
        except ImportError:
            pass
    if not API_TOKEN:
        raise ValueError(
            "No Hugging Face API token found. Set the HF_TOKEN environment "
            "variable or log in with `huggingface-cli login`."
        )
    
    # Include your username in the repo_id
    username = "liuganghuggingface"  # Replace with your actual Hugging Face username
    repo_id = f"{username}/polymer_semantic_pdfs"

    if args.operation == "upload":
        upload_files(API_TOKEN, repo_id)
    elif args.operation == "download":
        download_files(API_TOKEN, repo_id)

if __name__ == "__main__":
    main()
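
# Example usage (assuming the script is saved as 1_hf_up_and_download.py,
# the name it uploads itself under):
#   export HF_TOKEN=hf_...                    # or: huggingface-cli login
#   python 1_hf_up_and_download.py upload     # splits and uploads pdfs.tar.gz
#   python 1_hf_up_and_download.py download   # fetches chunks or the archive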