Update app.py
app.py
CHANGED
@@ -6,8 +6,6 @@ import re
 from huggingface_hub import HfApi
 from huggingface_hub.utils import HfHubHTTPError
 import time
-import hashlib
-import requests
 
 def extract_full_paper_with_labels(pdf_path, progress=None):
     print(f"📄 Starting PDF Processing: {os.path.basename(pdf_path)}")
@@ -100,7 +98,7 @@ def extract_full_paper_with_labels(pdf_path, progress=None):
         elif re.search(r"=|∑|√|±|×|σ|μ|π", text):
             content += f"<EQUATION>{text}</EQUATION>\n"
 
-        # Code
+        # ✅ Improved Code Block Detection
         elif re.search(code_pattern, text) and len(text.split()) <= 50:
             content += f"<CODE>{text}</CODE>\n"
 
@@ -121,49 +119,28 @@ def extract_full_paper_with_labels(pdf_path, progress=None):
     print(f"✅ Finished Processing PDF: {os.path.basename(pdf_path)}")
     return {
         "filename": os.path.basename(pdf_path),
-        "title": title if title else "Untitled_Paper",
         "content": content
     }
 
-
 def upload_with_progress(file_path, repo_id, token, progress):
     """
-    Upload file to Hugging Face Dataset
+    Upload file to Hugging Face Dataset using upload_file() API method.
    """
-
     print(f"📤 Starting upload of Parquet: {file_path}")
     file_size = os.path.getsize(file_path)
-    api = HfApi()
 
-
-    upload_url = f"https://huggingface.co/api/datasets/{repo_id}/upload"
+    api = HfApi()
 
     try:
-        …
+        # Use upload_file() method from huggingface_hub
+        api.upload_file(
+            path_or_fileobj=file_path,
+            path_in_repo=os.path.basename(file_path),
+            repo_id=repo_id,
+            repo_type="dataset",
+            token=token
+        )
 
-        while True:
-            chunk = f.read(chunk_size)
-            if not chunk:
-                break  # Finished reading file
-
-            response = requests.put(upload_url, headers=headers, data=chunk)
-
-            if response.status_code != 200:
-                raise Exception(f"Upload failed: {response.text}")
-
-            # Update progress after each chunk
-            uploaded += len(chunk)
-            if progress is not None:
-                progress(uploaded / file_size, desc=f"Uploading... {uploaded // (1024 * 1024)}MB/{file_size // (1024 * 1024)}MB")
-
-        # Final progress update
         if progress is not None:
             progress(1, desc="✅ Upload Complete")
 
@@ -177,27 +154,8 @@ def upload_with_progress(file_path, repo_id, token, progress):
         print(f"❌ Unexpected error: {e}")
         return f"❌ Unexpected error: {str(e)}"
 
-
-
 def pdf_to_parquet_and_upload(pdf_files, hf_token, dataset_repo_id, action_choice, progress=gr.Progress()):
-
-
-    # ✅ Helper function inside this block to avoid external edits
-    def sanitize_filename(title, max_length=100):
-        """
-        Sanitize and truncate the filename to avoid OS limits.
-        """
-        # Remove invalid characters
-        sanitized = re.sub(r'[\\/*?:"<>|]', "", title)
-        sanitized = sanitized.replace(" ", "_")
-
-        # Truncate to max_length if necessary
-        if len(sanitized) > max_length:
-            # Append an 8-character hash for uniqueness
-            hash_suffix = hashlib.md5(sanitized.encode()).hexdigest()[:8]
-            sanitized = sanitized[:max_length] + "_" + hash_suffix
-
-        return sanitized
+    all_data = []
 
     total_files = len(pdf_files)
     print("🚀 Starting PDF to Parquet Conversion Process")
@@ -208,28 +166,29 @@ def pdf_to_parquet_and_upload(pdf_files, hf_token, dataset_repo_id, action_choic
 
         # ✅ Step 1: Process PDF with Full Labels
        extracted_data = extract_full_paper_with_labels(pdf_file.name, progress=progress)
+        all_data.append(extracted_data)
 
-        …
+    print("📡 Converting Processed Data to Parquet")
+    # ✅ Step 2: Convert to Parquet
+    df = pd.DataFrame(all_data)
+    parquet_file = 'fully_labeled_papers.parquet'
 
-        …
-        df = …
+    try:
+        df.to_parquet(parquet_file, engine='pyarrow', index=False)
+        print("✅ Parquet Conversion Completed")
+    except Exception as e:
+        print(f"❌ Parquet Conversion Failed: {str(e)}")
+        return None, f"❌ Parquet Conversion Failed: {str(e)}"
+
+    upload_message = "Skipped Upload"
 
+    # ✅ Step 3: Upload Parquet (if selected)
+    if action_choice in ["Upload to Hugging Face", "Both"]:
        try:
-            …
-            print(f"✅ Parquet saved as: {parquet_file}")
+            upload_message = upload_with_progress(parquet_file, dataset_repo_id, hf_token, progress)
        except Exception as e:
-            print(f"❌ …
-            …
-        # ✅ Step 3: Upload Parquet (if selected)
-        if action_choice in ["Upload to Hugging Face", "Both"]:
-            try:
-                upload_message = upload_with_progress(parquet_file, dataset_repo_id, hf_token, progress)
-            except Exception as e:
-                print(f"❌ Upload Failed: {str(e)}")
-                upload_message = f"❌ Upload failed: {str(e)}"
+            print(f"❌ Upload Failed: {str(e)}")
+            upload_message = f"❌ Upload failed: {str(e)}"
 
     print("🎉 Process Completed")
     return parquet_file, upload_message
@@ -247,10 +206,11 @@ iface = gr.Interface(
         gr.File(label="Download Parquet File"),
         gr.Textbox(label="Status")
     ],
-    title="PDF to Parquet Converter with …
-    description="Upload your PDFs, convert them to Parquet …
+    title="PDF to Parquet Converter with Full Labeling",
+    description="Upload your PDFs, convert them to Parquet with full section labeling, and upload to your Hugging Face Dataset."
 )
 
 iface.launch()
 
 
+
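For reference, a minimal sketch of the new Step 2 path outside the app: collect one dict per processed PDF and write them as a single Parquet file with pyarrow, as the commit does. The sample records and the round-trip check below are illustrative, not taken from the Space.

import pandas as pd

# Hypothetical records shaped like extract_full_paper_with_labels() output
records = [
    {"filename": "paper_1.pdf", "content": "<EQUATION>E = mc^2</EQUATION>\n"},
    {"filename": "paper_2.pdf", "content": "<CODE>print('hello')</CODE>\n"},
]

df = pd.DataFrame(records)  # one row per PDF, like all_data in the commit
df.to_parquet("fully_labeled_papers.parquet", engine="pyarrow", index=False)

# Round-trip check: read the file back and confirm the row count
print(pd.read_parquet("fully_labeled_papers.parquet").shape)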
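The commit also swaps the hand-rolled chunked requests.put() loop for huggingface_hub's HfApi.upload_file(), which handles authentication and commits the file to the dataset repo itself; that is why the manual progress arithmetic disappears and upload_with_progress() now only reports completion. A minimal standalone sketch of that call, with a placeholder repo id and token:

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="fully_labeled_papers.parquet",  # local Parquet produced by the app
    path_in_repo="fully_labeled_papers.parquet",     # filename inside the dataset repo
    repo_id="your-username/your-dataset",            # placeholder dataset repo id
    repo_type="dataset",
    token="hf_xxx",                                  # placeholder user access token
)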