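# NOTE: Inline script metadata (PEP 723) so `uv run` can install the packages
# this script imports; the Python floor and dependency list are inferred from
# the imports below and are assumptions, not exact pins.
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "accelerate",
#     "datasets",
#     "hf-transfer",
#     "huggingface_hub",
#     "torch",
#     "transformers",
# ]
# ///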
""" |
|
Minimal GPT OSS generation script for HF Jobs. |
|
|
|
Based on official HuggingFace blog recommendations. |
|
Works on regular GPUs (L4, A10G, A100) without Flash Attention 3. |
|
|
|
Usage: |
|
# Quick local test |
|
uv run gpt_oss_minimal.py \ |
|
--input-dataset davanstrien/haiku_dpo \ |
|
--output-dataset username/haiku-raw \ |
|
--prompt-column question \ |
|
--max-samples 2 |
|
|
|
# HF Jobs execution (A10G for $1.50/hr) |
|
hf jobs uv run --flavor a10g-small \ |
|
https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_minimal.py \ |
|
--input-dataset davanstrien/haiku_dpo \ |
|
--output-dataset username/haiku-raw \ |
|
--prompt-column question |
|
""" |
|
|
|
import argparse
import os
import sys

import torch
from datasets import Dataset, load_dataset
from huggingface_hub import get_token, login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Speed up Hub downloads/uploads (requires the hf_transfer package).
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"


def main():
    parser = argparse.ArgumentParser(description="Minimal GPT OSS generation for HF Jobs")
    parser.add_argument("--input-dataset", required=True, help="Input dataset on HF Hub")
    parser.add_argument("--output-dataset", required=True, help="Output dataset on HF Hub")
    parser.add_argument("--prompt-column", default="prompt", help="Column containing prompts")
    parser.add_argument("--model-id", default="openai/gpt-oss-20b", help="Model to use")
    parser.add_argument("--max-samples", type=int, help="Limit number of samples")
    parser.add_argument("--max-new-tokens", type=int, default=1024, help="Max tokens to generate")
    args = parser.parse_args()

    if not torch.cuda.is_available():
        print("ERROR: GPU required. Use HF Jobs with --flavor a10g-small or run on a GPU machine")
        sys.exit(1)

    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")

    token = os.environ.get("HF_TOKEN") or get_token()
    if not token:
        print("ERROR: HF_TOKEN required. Set HF_TOKEN env var or run: huggingface-cli login")
        sys.exit(1)
    login(token=token, add_to_git_credential=False)

print(f"Loading tokenizer: {args.model_id}") |
|
tokenizer = AutoTokenizer.from_pretrained(args.model_id) |
|
|
|
|
|
print(f"Loading model: {args.model_id}") |
|
print("Note: MXFP4 will auto-dequantize to bf16 on non-Hopper GPUs") |
|
|
|
model = AutoModelForCausalLM.from_pretrained( |
|
args.model_id, |
|
device_map="auto", |
|
torch_dtype="auto", |
|
|
|
|
|
) |
|
print("Model loaded successfully") |
|
|
|
|
|
print(f"Loading dataset: {args.input_dataset}") |
|
dataset = load_dataset(args.input_dataset, split="train") |
|
|
|
if args.prompt_column not in dataset.column_names: |
|
print(f"ERROR: Column '{args.prompt_column}' not found") |
|
print(f"Available columns: {dataset.column_names}") |
|
sys.exit(1) |
|
|
|
|
|
if args.max_samples: |
|
dataset = dataset.select(range(min(args.max_samples, len(dataset)))) |
|
|
|
print(f"Processing {len(dataset)} examples") |
|
|
|
|
|
    results = []
    for i, example in enumerate(dataset):
        print(f"[{i+1}/{len(dataset)}] Processing...")

        prompt_text = example[args.prompt_column]
        messages = [
            {"role": "user", "content": prompt_text}
        ]

        # Build the prompt with the model's chat template (harmony format for GPT OSS).
        inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=True,
        ).to(model.device)

        with torch.no_grad():
            generated = model.generate(
                **inputs,
                max_new_tokens=args.max_new_tokens,
                do_sample=True,
                temperature=0.7,
            )

        # Decode only the newly generated tokens; keep special tokens so the
        # channel markers survive in the raw output.
        response = tokenizer.decode(
            generated[0][inputs["input_ids"].shape[-1]:],
            skip_special_tokens=False,
        )

        results.append({
            "prompt": prompt_text,
            "raw_output": response,
            "model": args.model_id,
        })

        if i == 0:
            print("Sample output preview (first 200 chars):")
            print(response[:200])
            print("...")

print("\nCreating output dataset...") |
|
output_dataset = Dataset.from_list(results) |
|
|
|
print(f"Pushing to {args.output_dataset}...") |
|
output_dataset.push_to_hub(args.output_dataset, token=token) |
|
|
|
print(f"\n✅ Complete!") |
|
print(f"Dataset: https://huggingface.co/datasets/{args.output_dataset}") |
|
print(f"\nOutput format:") |
|
print("- prompt: Original prompt") |
|
print("- raw_output: Full model response with channel markers") |
|
print("- model: Model ID used") |
|
print("\nTo extract final response, look for text after '<|channel|>final<|message|>'") |


if __name__ == "__main__":
    main()
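# Illustrative follow-up (not executed here): load the pushed dataset to inspect
# the raw outputs; replace "username/haiku-raw" with your --output-dataset value.
#     from datasets import load_dataset
#     ds = load_dataset("username/haiku-raw", split="train")
#     print(ds[0]["raw_output"])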