ProCreations committed
Commit 4ebf6f2 · verified · Parent: af2e046

Update README.md

Files changed (1): README.md (+312, -3)
README.md CHANGED
@@ -106,8 +106,317 @@ pip install torch transformers datasets tqdm numpy
  ### Full Processing Script
 
  ```python
- # TODO: Add the complete processing code here
- # (ProCreations will insert the Ultra FineWeb EDU creator script)
+ #!/usr/bin/env python3
+ """
+ Ultra FineWeb EDU Dataset Creator
+ Creates a high-quality educational dataset by filtering Ultra-FineWeb
+ with the FineWeb-Edu classifier.
+ """
+
+ import json
+ import logging
+ import pickle
+ from datetime import datetime, timedelta
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ from datasets import load_dataset, Dataset, DatasetDict
+ from tqdm.auto import tqdm
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+
+ class UltraFineWebEDUCreator:
+     def __init__(self,
+                  output_dir="",
+                  checkpoint_interval_minutes=30,
+                  batch_size=512,
+                  max_length=512,
+                  edu_threshold=3.5,
+                  device=None):
+
+         if output_dir:
+             self.output_dir = Path(output_dir)
+             self.output_dir.mkdir(exist_ok=True)
+         else:
+             self.output_dir = Path(".")
+         self.checkpoint_interval = timedelta(minutes=checkpoint_interval_minutes)
+         self.batch_size = batch_size
+         self.max_length = max_length
+         self.edu_threshold = edu_threshold
+
+         # Set up device - prefer CUDA for maximum speed! 🚀
+         if device is None:
+             self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         else:
+             self.device = torch.device(device)
+
+         logger.info(f"🔥 Using device: {self.device}")
+         if torch.cuda.is_available():
+             logger.info(f"⚡ CUDA device: {torch.cuda.get_device_name()}")
+
+         # Initialize classifier
+         self._load_classifier()
+
+         # Tracking variables
+         self.processed_count = 0
+         self.filtered_count = 0
+         self.last_checkpoint_time = datetime.now()
+         self.start_time = datetime.now()
+
+     def _load_classifier(self):
+         """Load the educational classifier model"""
+         logger.info("🧠 Loading FineWeb-Edu classifier...")
+         logger.info("⚡ TURBO MODE: FP16 + large batches for maximum speed!")
+
+         model_name = "HuggingFaceFW/fineweb-edu-classifier"
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+         self.model = AutoModelForSequenceClassification.from_pretrained(
+             model_name,
+             torch_dtype=torch.float16  # FP16 halves memory and speeds up inference
+         ).to(self.device)
+
+         # Set to eval mode for inference
+         self.model.eval()
+
+         logger.info("✅ Classifier loaded successfully!")
+
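+     # Per the FineWeb-Edu model card, the classifier regresses an
+     # educational-quality score on a roughly 0-5 scale; the upstream
+     # FineWeb-Edu dataset keeps pages scoring >= 3, so the 3.5 default
+     # used here is a deliberately stricter cut.
+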
+     def _classify_batch(self, texts):
+         """Classify a batch of texts and return edu scores - optimized for speed"""
+         # Autocast on the active device type; hardcoding 'cuda' here would
+         # break CPU-only runs
+         with torch.no_grad(), torch.amp.autocast(
+                 device_type=self.device.type,
+                 dtype=torch.float16,
+                 enabled=self.device.type == "cuda"):
+             # Tokenize batch
+             inputs = self.tokenizer(
+                 texts,
+                 return_tensors="pt",
+                 padding=True,
+                 truncation=True,
+                 max_length=self.max_length
+             ).to(self.device, non_blocking=True)  # Async transfer for speed
+
+             # Get predictions
+             outputs = self.model(**inputs)
+             scores = outputs.logits.squeeze(-1).float().detach().cpu().numpy()
+
+             # Handle single-sample case
+             if scores.ndim == 0:
+                 scores = np.array([scores])
+
+             return scores
+
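+     # Hypothetical example: _classify_batch(["Photosynthesis converts light
+     # energy into chemical energy..."]) might return array([3.8], dtype=float32);
+     # only scores at or above edu_threshold survive the filter below.
+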
+     def _save_checkpoint(self, filtered_data, split_name, resume_info):
+         """Save checkpoint data"""
+         checkpoint_path = self.output_dir / f"checkpoint_{split_name}_{self.processed_count}.pkl"
+
+         checkpoint_data = {
+             'filtered_data': filtered_data,
+             'processed_count': self.processed_count,
+             'filtered_count': self.filtered_count,
+             'resume_info': resume_info,
+             'timestamp': datetime.now().isoformat()
+         }
+
+         with open(checkpoint_path, 'wb') as f:
+             pickle.dump(checkpoint_data, f)
+
+         logger.info(f"💾 Checkpoint saved: {checkpoint_path}")
+         return checkpoint_path
+
+     def _should_checkpoint(self):
+         """Check if it's time to save a checkpoint"""
+         return datetime.now() - self.last_checkpoint_time >= self.checkpoint_interval
+
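+     # To resume an interrupted run, pass a saved checkpoint path to
+     # process_split() or create_dataset(), e.g. (hypothetical filename):
+     #   creator.create_dataset(splits=['en'],
+     #                          resume_from_checkpoint='checkpoint_en_512000.pkl')
+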
+     def process_split(self, split_name, resume_from_checkpoint=None):
+         """Process a single split of the dataset"""
+         logger.info(f"🚀 Processing {split_name} split...")
+
+         # Load dataset in streaming mode for memory efficiency
+         dataset = load_dataset(
+             "openbmb/Ultra-FineWeb",
+             split=split_name,
+             streaming=True
+         )
+
+         filtered_data = []
+
+         # Resume from checkpoint if provided
+         start_idx = 0
+         if resume_from_checkpoint:
+             logger.info(f"📂 Resuming from checkpoint: {resume_from_checkpoint}")
+             with open(resume_from_checkpoint, 'rb') as f:
+                 checkpoint_data = pickle.load(f)
+             filtered_data = checkpoint_data['filtered_data']
+             self.processed_count = checkpoint_data['processed_count']
+             self.filtered_count = checkpoint_data['filtered_count']
+             start_idx = checkpoint_data['resume_info']['start_idx']
+
+         # Create progress bar
+         pbar = tqdm(
+             desc=f"Processing {split_name}",
+             unit="samples",
+             dynamic_ncols=True,
+             initial=self.processed_count
+         )
+
+         # Process in batches for efficiency
+         batch_texts = []
+         batch_data = []
+
+         for idx, example in enumerate(dataset):
+             # Fast-forward past already-processed samples (a streaming
+             # dataset cannot seek, so skipped rows are still iterated)
+             if idx < start_idx:
+                 continue
+
+             # Extract content only (no metadata)
+             content = example['content']
+             batch_texts.append(content)
+             batch_data.append(example)
+
+             # Process batch when full
+             if len(batch_texts) >= self.batch_size:
+                 scores = self._classify_batch(batch_texts)
+
+                 # Filter by edu threshold
+                 for score, data in zip(scores, batch_data):
+                     if score >= self.edu_threshold:
+                         # Only keep the content field as requested
+                         filtered_data.append({'content': data['content']})
+                         self.filtered_count += 1
+
+                     self.processed_count += 1
+
+                     # Update progress bar with stats
+                     filter_rate = (self.filtered_count / self.processed_count) * 100
+                     pbar.set_postfix({
+                         'filtered': self.filtered_count,
+                         'rate': f'{filter_rate:.1f}%',
+                         'avg_score': f'{np.mean(scores):.2f}'
+                     })
+                     pbar.update(1)
+
+                 # Clear batch
+                 batch_texts = []
+                 batch_data = []
+
+                 # Checkpoint if needed
+                 if self._should_checkpoint():
+                     self._save_checkpoint(
+                         filtered_data,
+                         split_name,
+                         {'start_idx': idx + 1}
+                     )
+                     self.last_checkpoint_time = datetime.now()
+
+                     # Clean GPU memory
+                     if torch.cuda.is_available():
+                         torch.cuda.empty_cache()
+
+         # Process the remaining partial batch
+         if batch_texts:
+             scores = self._classify_batch(batch_texts)
+             for score, data in zip(scores, batch_data):
+                 if score >= self.edu_threshold:
+                     filtered_data.append({'content': data['content']})
+                     self.filtered_count += 1
+                 self.processed_count += 1
+                 pbar.update(1)
+
+         pbar.close()
+
+         logger.info(f"✅ {split_name} complete! Filtered {self.filtered_count}/{self.processed_count} samples")
+         return filtered_data
+
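+     # Note: filtered rows accumulate in memory until the split finishes.
+     # Checkpoints persist progress, but a very permissive threshold on a
+     # large split may warrant flushing filtered_data to disk periodically.
+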
+     def create_dataset(self, splits=('en',), resume_from_checkpoint=None):
+         """Create the Ultra FineWeb EDU dataset"""
+         logger.info("🎓 Starting Ultra FineWeb EDU creation!")
+         logger.info(f"📊 Using edu threshold: {self.edu_threshold} (PREMIUM QUALITY!)")
+         logger.info(f"🔄 Checkpoint interval: {self.checkpoint_interval}")
+         logger.info(f"⚡ Batch size: {self.batch_size} - TURBO SPEED ENGAGED!")
+
+         all_filtered_data = {}
+
+         for split in splits:
+             logger.info(f"\n📚 Processing {split} split...")
+
+             # Reset counters for each split
+             self.processed_count = 0
+             self.filtered_count = 0
+
+             filtered_data = self.process_split(split, resume_from_checkpoint)
+             resume_from_checkpoint = None  # a checkpoint only applies to the split it was saved from
+             all_filtered_data[split] = filtered_data
+
+             # Save split results
+             split_path = self.output_dir / f"ultra_fineweb_edu_{split}.json"
+             with open(split_path, 'w', encoding='utf-8') as f:
+                 json.dump(filtered_data, f, ensure_ascii=False, indent=2)
+             logger.info(f"💾 Saved {split} split to {split_path}")
+
+         # Create HuggingFace dataset
+         logger.info("🤗 Creating HuggingFace dataset...")
+
+         hf_datasets = {}
+         for split, data in all_filtered_data.items():
+             if data:  # Only create a dataset if we have data
+                 hf_datasets[split] = Dataset.from_list(data)
+
+         if hf_datasets:
+             dataset_dict = DatasetDict(hf_datasets)
+
+             # Save as HuggingFace dataset
+             dataset_path = self.output_dir / "dataset"
+             dataset_dict.save_to_disk(str(dataset_path))
+             logger.info(f"💾 Saved HuggingFace dataset to {dataset_path}")
+
+             # Print final stats
+             total_samples = sum(len(data) for data in all_filtered_data.values())
+             elapsed_time = datetime.now() - self.start_time
+
+             logger.info("\n🎉 ULTRA FINEWEB EDU CREATION COMPLETE! 🎉")
+             logger.info(f"📊 Total filtered samples: {total_samples:,}")
+             logger.info(f"⏱️ Total time: {elapsed_time}")
+             logger.info(f"⚡ Average speed: {total_samples / elapsed_time.total_seconds():.1f} samples/sec")
+
+             return dataset_dict
+         else:
+             logger.warning("⚠️ No data passed the filter!")
+             return None
+
+ def main():
+     """Main execution function"""
+     # Configuration - adjust these as needed!
+     config = {
+         'output_dir': '',  # Save in the current directory
+         'checkpoint_interval_minutes': 30,
+         'batch_size': 512,  # Large batch size, sized for a 24GB GPU
+         'max_length': 512,
+         'edu_threshold': 3.5,  # Ultra high quality only!
+         'splits': ['en'],  # Add 'zh' for Chinese if needed
+     }
+
+     print("🚀 ULTRA FINEWEB EDU DATASET CREATOR 🚀")
+     print("=" * 50)
+
+     # Create the dataset creator
+     creator = UltraFineWebEDUCreator(**{k: v for k, v in config.items() if k != 'splits'})
+
+     # Create the dataset
+     dataset = creator.create_dataset(splits=config['splits'])
+
+     if dataset:
+         print("\n✨ Success! Your Ultra FineWeb EDU dataset is ready!")
+         print(f"📁 Location: {creator.output_dir}")
+         print("🔍 Preview:")
+         for split_name, split_data in dataset.items():
+             print(f"  {split_name}: {len(split_data):,} samples")
+             if len(split_data) > 0:
+                 print(f"  Sample: {split_data[0]['content'][:100]}...")
+     else:
+         print("😞 Dataset creation failed or no samples passed the filter.")
+
+
+ if __name__ == "__main__":
+     main()
  ```
 
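+ A minimal sketch of inspecting the script's output (it assumes the default
+ config above, which writes into the current directory; the `dataset` folder
+ name matches what `create_dataset()` saves):
+
+ ```python
+ from datasets import load_from_disk
+
+ # create_dataset() writes a HuggingFace dataset under <output_dir>/dataset
+ ds = load_from_disk("dataset")
+ print(ds)                            # splits and row counts
+ print(ds["en"][0]["content"][:200])  # peek at one filtered sample
+ ```
+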
  ## πŸ“ˆ Quality Analysis
 
@@ -145,7 +454,7 @@ If you use Ultra FineWeb EDU in your research or applications, please cite:
  title={Ultra FineWeb EDU: High-Quality Educational Content from Ultra-FineWeb},
  author={ProCreations},
  year={2025},
- url={https://huggingface.co/datasets/[dataset-url]},
+ url={https://huggingface.co/datasets/ProCreations/Ultra-FineWeb-EDU},
  note={Filtered from Ultra-FineWeb using educational quality threshold 3.5+}
  }
  ```