#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "vllm>=0.6.6",
# "transformers",
# "torch",
# "datasets",
# "huggingface-hub[hf_transfer]",
# ]
# ///
"""
Classify text columns in Hugging Face datasets using vLLM with structured outputs.
This script provides efficient GPU-based classification with guaranteed valid outputs,
optimized for running on HF Jobs.
Example:
uv run classify-dataset.py \\
        --input-dataset stanfordnlp/imdb \\
--column text \\
--labels "positive,negative" \\
--output-dataset user/imdb-classified
HF Jobs example:
    hf jobs uv run --flavor a10 classify-dataset.py \\
        --input-dataset user/emails \\
        --column content \\
        --labels "spam,ham" \\
        --output-dataset user/emails-classified \\
        --prompt-style reasoning
"""
import argparse
import logging
import os
import sys
from typing import List
import torch
from datasets import load_dataset, Dataset
from huggingface_hub import HfApi, get_token
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams
# Default model - SmolLM3 for good balance of speed and quality
DEFAULT_MODEL = "HuggingFaceTB/SmolLM3-3B"
# Prompt styles for classification
PROMPT_STYLES = {
"simple": """Classify this text as one of: {labels}
Text: {text}
Label:""",
"detailed": """Task: Classify the following text into EXACTLY ONE of these categories.
Available categories: {labels}
Text to classify:
{text}
Category:""",
"reasoning": """Analyze the following text and determine which category it belongs to.
Available categories: {labels}
Text to analyze:
{text}
Brief analysis: Let me examine the key aspects of this text.
Category:""",
}
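# Illustrative sketch (comments only, not executed): with hypothetical labels
# "positive, negative" and the text "A wonderful little film.", the "simple"
# template renders as:
#
#   Classify this text as one of: positive, negative
#   Text: A wonderful little film.
#   Label:
#
# Guided decoding (set up in main()) then constrains the completion to one of
# the listed labels.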
# Minimum text length for valid classification
MIN_TEXT_LENGTH = 3
# Maximum text length (in characters) to avoid context overflow
MAX_TEXT_LENGTH = 4000
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser(
description="Classify text in HuggingFace datasets using vLLM with structured outputs",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=__doc__
)
# Required arguments
parser.add_argument(
"--input-dataset",
type=str,
required=True,
help="Input dataset ID on Hugging Face Hub"
)
parser.add_argument(
"--column",
type=str,
required=True,
help="Name of the text column to classify"
)
parser.add_argument(
"--labels",
type=str,
required=True,
help="Comma-separated list of classification labels (e.g., 'positive,negative')"
)
parser.add_argument(
"--output-dataset",
type=str,
required=True,
help="Output dataset ID on Hugging Face Hub"
)
# Optional arguments
parser.add_argument(
"--model",
type=str,
default=DEFAULT_MODEL,
help=f"Model to use for classification (default: {DEFAULT_MODEL})"
)
    # Note: no --batch-size argument is exposed; vLLM batches requests internally
parser.add_argument(
"--prompt-style",
type=str,
choices=list(PROMPT_STYLES.keys()),
default="simple",
help="Prompt style to use (default: simple)"
)
parser.add_argument(
"--max-samples",
type=int,
default=None,
help="Maximum number of samples to process (for testing)"
)
parser.add_argument(
"--hf-token",
type=str,
default=None,
help="Hugging Face API token (default: auto-detect from HF_TOKEN env var or huggingface-cli login)"
)
parser.add_argument(
"--split",
type=str,
default="train",
help="Dataset split to process (default: train)"
)
parser.add_argument(
"--temperature",
type=float,
default=0.1,
help="Temperature for generation (default: 0.1)"
)
parser.add_argument(
"--max-tokens",
type=int,
default=50,
help="Maximum tokens to generate (default: 50)"
)
parser.add_argument(
"--guided-backend",
type=str,
default="outlines",
help="Guided decoding backend (default: outlines)"
)
return parser.parse_args()
def preprocess_text(text: str) -> str:
"""Preprocess text for classification."""
if not text or not isinstance(text, str):
return ""
# Strip whitespace
text = text.strip()
# Truncate if too long
if len(text) > MAX_TEXT_LENGTH:
text = text[:MAX_TEXT_LENGTH] + "..."
return text
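# Example (hypothetical inputs): preprocess_text("  hi  ") returns "hi";
# a string longer than MAX_TEXT_LENGTH is cut to 4000 characters and
# suffixed with "...".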
def validate_text(text: str) -> bool:
"""Check if text is valid for classification."""
if not text or len(text) < MIN_TEXT_LENGTH:
return False
return True
def prepare_prompts(
texts: List[str],
labels: List[str],
prompt_template: str
) -> tuple[List[str], List[int]]:
"""Prepare prompts for classification, filtering invalid texts."""
prompts = []
valid_indices = []
for i, text in enumerate(texts):
processed_text = preprocess_text(text)
if validate_text(processed_text):
prompt = prompt_template.format(
labels=", ".join(labels),
text=processed_text
)
prompts.append(prompt)
valid_indices.append(i)
return prompts, valid_indices
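# Usage sketch (hypothetical values, not executed):
#   prompts, idx = prepare_prompts(
#       ["Great movie!", "", "Terrible."],
#       ["positive", "negative"],
#       PROMPT_STYLES["simple"],
#   )
#   # -> 2 prompts; idx == [0, 2], since the empty string fails validate_text.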
def main():
args = parse_args()
# Check authentication early
logger.info("Checking authentication...")
token = args.hf_token
if not token:
# Try to get token from environment or huggingface-cli login
token = os.environ.get("HF_TOKEN") or get_token()
if not token:
logger.error("No authentication token found. Please either:")
logger.error("1. Run 'huggingface-cli login'")
logger.error("2. Set HF_TOKEN environment variable")
logger.error("3. Pass --hf-token argument")
sys.exit(1)
# Validate token by checking who we are
try:
api = HfApi(token=token)
user_info = api.whoami()
logger.info(f"Authenticated as: {user_info['name']}")
except Exception as e:
logger.error(f"Authentication failed: {e}")
logger.error("Please check your token is valid")
sys.exit(1)
# Check CUDA availability
if not torch.cuda.is_available():
logger.error("CUDA is not available. This script requires a GPU.")
logger.error("Please run on a machine with GPU support or use HF Jobs.")
sys.exit(1)
logger.info(f"CUDA available. Using device: {torch.cuda.get_device_name(0)}")
# Parse and validate labels
    labels = [label.strip() for label in args.labels.split(",") if label.strip()]
if len(labels) < 2:
logger.error("At least two labels are required for classification.")
sys.exit(1)
logger.info(f"Classification labels: {labels}")
# Load dataset
logger.info(f"Loading dataset: {args.input_dataset}")
try:
dataset = load_dataset(args.input_dataset, split=args.split)
# Limit samples if specified
if args.max_samples:
dataset = dataset.select(range(min(args.max_samples, len(dataset))))
logger.info(f"Limited dataset to {len(dataset)} samples")
logger.info(f"Loaded {len(dataset)} samples from split '{args.split}'")
except Exception as e:
logger.error(f"Failed to load dataset: {e}")
sys.exit(1)
# Verify column exists
if args.column not in dataset.column_names:
logger.error(f"Column '{args.column}' not found in dataset.")
logger.error(f"Available columns: {dataset.column_names}")
sys.exit(1)
# Extract texts
texts = dataset[args.column]
# Initialize vLLM
logger.info(f"Initializing vLLM with model: {args.model}")
logger.info(f"Using guided decoding backend: {args.guided_backend}")
try:
llm = LLM(
model=args.model,
trust_remote_code=True,
dtype="auto",
gpu_memory_utilization=0.95,
guided_decoding_backend=args.guided_backend,
)
except Exception as e:
logger.error(f"Failed to initialize vLLM: {e}")
sys.exit(1)
# Set up guided decoding parameters
guided_params = GuidedDecodingParams(choice=labels)
# Set up sampling parameters with structured output
sampling_params = SamplingParams(
guided_decoding=guided_params,
temperature=args.temperature,
max_tokens=args.max_tokens,
)
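    # GuidedDecodingParams(choice=labels) constrains decoding so the generated
    # text is always exactly one of the label strings, which is why no output
    # parsing or retry logic is needed below.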
# Get prompt template
prompt_template = PROMPT_STYLES[args.prompt_style]
logger.info(f"Using prompt style '{args.prompt_style}'")
logger.info("Using structured output with guided_choice - outputs guaranteed to be valid labels")
# Prepare all prompts
logger.info("Preparing prompts for classification...")
all_prompts, valid_indices = prepare_prompts(texts, labels, prompt_template)
if not all_prompts:
logger.error("No valid texts found for classification.")
sys.exit(1)
logger.info(f"Prepared {len(all_prompts)} valid prompts out of {len(texts)} texts")
# Let vLLM handle batching internally
logger.info("Starting classification (vLLM will handle batching internally)...")
try:
# Generate all classifications at once - vLLM handles batching
outputs = llm.generate(all_prompts, sampling_params)
# Map results back to original indices
all_classifications = [None] * len(texts)
for idx, output in enumerate(outputs):
original_idx = valid_indices[idx]
generated_text = output.outputs[0].text.strip()
all_classifications[original_idx] = generated_text
# Count statistics
valid_texts = len(valid_indices)
total_texts = len(texts)
except Exception as e:
logger.error(f"Classification failed: {e}")
sys.exit(1)
# Add classifications to dataset
dataset = dataset.add_column("classification", all_classifications)
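    # Rows filtered out during preprocessing keep a None classification, so the
    # new column stays aligned with the original rows.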
# Calculate statistics
none_count = total_texts - valid_texts
if none_count > 0:
logger.warning(f"{none_count} texts were too short or invalid for classification")
# Show classification distribution
label_counts = {label: all_classifications.count(label) for label in labels}
logger.info("Classification distribution:")
for label, count in label_counts.items():
percentage = count / total_texts * 100 if total_texts > 0 else 0
logger.info(f" {label}: {count} ({percentage:.1f}%)")
if none_count > 0:
none_percentage = none_count / total_texts * 100
logger.info(f" Invalid/Skipped: {none_count} ({none_percentage:.1f}%)")
# Log success rate
success_rate = (valid_texts / total_texts * 100) if total_texts > 0 else 0
logger.info(f"Classification success rate: {success_rate:.1f}%")
# Save to Hub (token already validated at start)
logger.info(f"Pushing dataset to Hub: {args.output_dataset}")
try:
dataset.push_to_hub(
args.output_dataset,
token=token,
commit_message=f"Add classifications using {args.model} with structured outputs"
)
logger.info(f"Successfully pushed to: https://huggingface.co/datasets/{args.output_dataset}")
except Exception as e:
logger.error(f"Failed to push to Hub: {e}")
sys.exit(1)
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Example HF Jobs command:")
print("hf jobs uv run \\")
print(" --flavor l4x1 \\")
print(" --image vllm/vllm-openai:latest \\")
print(" classify-dataset.py \\")
print(" --input-dataset stanfordnlp/imdb \\")
print(" --column text \\")
print(" --labels 'positive,negative' \\")
print(" --output-dataset user/imdb-classified")
sys.exit(0)
main()