davanstrien (HF Staff) committed
Commit bdbcbee · 1 Parent(s): b09f138
Files changed (1)
  1. generate-responses.py +442 -0
generate-responses.py ADDED
@@ -0,0 +1,442 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "flashinfer-python",
#     "huggingface-hub[hf_transfer]",
#     "torch",
#     "tqdm",
#     "transformers",
#     "vllm",
# ]
#
# [[tool.uv.index]]
# url = "https://flashinfer.ai/whl/cu126/torch2.6"
#
# [[tool.uv.index]]
# url = "https://wheels.vllm.ai/nightly"
# ///
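# The block above is PEP 723 inline script metadata: `uv run` reads it to resolve
# the dependencies (using the extra package indexes listed) before executing the
# script, so no separate environment setup is needed.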
"""
Generate responses for prompts in a dataset using vLLM for efficient GPU inference.

This script loads a dataset from the Hugging Face Hub containing chat-formatted
messages, applies the model's chat template, generates responses using vLLM, and
saves the results back to the Hub with a comprehensive dataset card.

Example usage:
    # Local execution with auto GPU detection
    uv run generate-responses.py \\
        username/input-dataset \\
        username/output-dataset \\
        --messages-column messages

    # With custom model and sampling parameters
    uv run generate-responses.py \\
        username/input-dataset \\
        username/output-dataset \\
        --model-id meta-llama/Llama-3.1-8B-Instruct \\
        --temperature 0.9 \\
        --top-p 0.95 \\
        --max-tokens 2048

    # HF Jobs execution (see script output for full command)
    hf jobs uv run --flavor a100x4 ...
"""
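
# Expected input: each row's messages column holds an OpenAI-style chat list, e.g.
#   [{"role": "user", "content": "Explain tensor parallelism in one paragraph."}]
# (the example row above is illustrative, not taken from any particular dataset).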

import argparse
import logging
import os
import sys
from datetime import datetime
from typing import Optional

# Enable HF Transfer for faster downloads. The flag must be set before importing
# huggingface_hub (pulled in by the imports below), which reads it at import time.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from datasets import load_dataset
from huggingface_hub import DatasetCard, get_token, login
from torch import cuda
from tqdm.auto import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def check_gpu_availability() -> int:
    """Check if CUDA is available and return the number of GPUs."""
    if not cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("Please run on a machine with an NVIDIA GPU or use HF Jobs with a GPU flavor.")
        sys.exit(1)

    num_gpus = cuda.device_count()
    for i in range(num_gpus):
        gpu_name = cuda.get_device_name(i)
        gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
        logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")

    return num_gpus
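
# Tip: to limit which GPUs are visible (and therefore auto-detected), set
# CUDA_VISIBLE_DEVICES before launching, e.g.:
#   CUDA_VISIBLE_DEVICES=0,1 uv run generate-responses.py ...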


def create_dataset_card(
    source_dataset: str,
    model_id: str,
    messages_column: str,
    sampling_params: SamplingParams,
    tensor_parallel_size: int,
    num_examples: int,
    generation_time: str,
) -> str:
    """Create a comprehensive dataset card documenting the generation process."""
    return f"""---
viewer: false
tags:
- generated
- vllm
- uv-script
---

# Generated Responses Dataset

This dataset contains generated responses for prompts from [{source_dataset}](https://huggingface.co/datasets/{source_dataset}).

## Generation Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Messages Column**: `{messages_column}`
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Number of Examples**: {num_examples:,}
- **Generation Date**: {generation_time}

### Sampling Parameters

- **Temperature**: {sampling_params.temperature}
- **Top P**: {sampling_params.top_p}
- **Top K**: {sampling_params.top_k}
- **Min P**: {sampling_params.min_p}
- **Max Tokens**: {sampling_params.max_tokens}
- **Repetition Penalty**: {sampling_params.repetition_penalty}

### Hardware Configuration

- **Tensor Parallel Size**: {tensor_parallel_size}
- **GPU Configuration**: {tensor_parallel_size} GPU(s)

## Dataset Structure

The dataset contains all columns from the source dataset plus:
- `response`: The generated response from the model

## Generation Script

Generated using the vLLM inference script from [uv-scripts/vllm](https://huggingface.co/datasets/uv-scripts/vllm).

To reproduce this generation:

```bash
uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
    {source_dataset} \\
    <output-dataset> \\
    --model-id {model_id} \\
    --messages-column {messages_column} \\
    --temperature {sampling_params.temperature} \\
    --top-p {sampling_params.top_p} \\
    --top-k {sampling_params.top_k} \\
    --max-tokens {sampling_params.max_tokens}
```
"""


def main(
    src_dataset_hub_id: str,
    output_dataset_hub_id: str,
    model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
    messages_column: str = "messages",
    output_column: str = "response",
    temperature: float = 0.7,
    top_p: float = 0.8,
    top_k: int = 20,
    min_p: float = 0.0,
    max_tokens: int = 16384,
    repetition_penalty: float = 1.0,
    gpu_memory_utilization: float = 0.90,
    tensor_parallel_size: Optional[int] = None,
    hf_token: Optional[str] = None,
):
    """
    Main generation pipeline.

    Args:
        src_dataset_hub_id: Input dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        model_id: Hugging Face model ID for generation
        messages_column: Column name containing chat messages
        output_column: Column name for generated responses
        temperature: Sampling temperature
        top_p: Top-p sampling parameter
        top_k: Top-k sampling parameter
        min_p: Minimum probability threshold
        max_tokens: Maximum tokens to generate
        repetition_penalty: Repetition penalty parameter
        gpu_memory_utilization: GPU memory utilization factor
        tensor_parallel_size: Number of GPUs to use (auto-detect if None)
        hf_token: Hugging Face authentication token
    """
    generation_start_time = datetime.now().isoformat()

    # GPU check and configuration
    num_gpus = check_gpu_availability()
    if tensor_parallel_size is None:
        tensor_parallel_size = num_gpus
        logger.info(f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}")
    else:
        logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
        if tensor_parallel_size > num_gpus:
            logger.warning(f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available")
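    # Note: besides the GPU count, vLLM requires the model's attention-head count
    # to be divisible by tensor_parallel_size; engine startup fails otherwise.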

    # Authentication - try multiple methods
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()

    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login' or use login() in Python")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    # Initialize vLLM
    logger.info(f"Loading model: {model_id}")
    llm = LLM(
        model=model_id,
        tensor_parallel_size=tensor_parallel_size,
        gpu_memory_utilization=gpu_memory_utilization,
    )

    # Load tokenizer for chat template
    logger.info("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Create sampling parameters
    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
    )
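    # The defaults passed through main() (temperature=0.7, top_p=0.8, top_k=20,
    # min_p=0.0) track the sampling settings recommended in the Qwen3 model
    # documentation; adjust them when generating with other model families.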

    # Load dataset
    logger.info(f"Loading dataset: {src_dataset_hub_id}")
    dataset = load_dataset(src_dataset_hub_id, split="train")
    total_examples = len(dataset)
    logger.info(f"Dataset loaded with {total_examples:,} examples")

    # Validate messages column
    if messages_column not in dataset.column_names:
        logger.error(f"Column '{messages_column}' not found. Available columns: {dataset.column_names}")
        sys.exit(1)

    # Process messages and apply chat template
    logger.info("Applying chat template to messages...")
    prompts = []
    for example in tqdm(dataset, desc="Processing messages"):
        messages = example[messages_column]
        # Apply chat template; add_generation_prompt=True appends the assistant
        # turn header so the model continues as the assistant
        prompt = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        prompts.append(prompt)
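    # For ChatML-style models such as Qwen, the rendered prompt looks roughly like
    # "<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n"; the exact markup
    # depends on each model's chat template.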

    # Generate responses - vLLM handles batching internally
    logger.info(f"Starting generation for {len(prompts):,} prompts...")
    logger.info("vLLM will handle batching and scheduling automatically")

    outputs = llm.generate(prompts, sampling_params)
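    # llm.generate returns one RequestOutput per prompt, in input order; each holds
    # a list of completions (a single entry here, since SamplingParams defaults to n=1).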

    # Extract generated text
    logger.info("Extracting generated responses...")
    responses = []
    for output in outputs:
        response = output.outputs[0].text.strip()
        responses.append(response)

    # Add responses to dataset
    logger.info("Adding responses to dataset...")
    dataset = dataset.add_column(output_column, responses)

    # Create dataset card
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=src_dataset_hub_id,
        model_id=model_id,
        messages_column=messages_column,
        sampling_params=sampling_params,
        tensor_parallel_size=tensor_parallel_size,
        num_examples=total_examples,
        generation_time=generation_start_time,
    )

    # Push dataset to hub
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    # Push dataset card
    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Generation complete!")
    logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}")


if __name__ == "__main__":
    if len(sys.argv) > 1:
        parser = argparse.ArgumentParser(
            description="Generate responses for dataset prompts using vLLM",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  # Basic usage with default Qwen model
  uv run generate-responses.py input-dataset output-dataset

  # With custom model and parameters
  uv run generate-responses.py input-dataset output-dataset \\
    --model-id meta-llama/Llama-3.1-8B-Instruct \\
    --temperature 0.9 \\
    --max-tokens 2048

  # Force specific GPU configuration
  uv run generate-responses.py input-dataset output-dataset \\
    --tensor-parallel-size 2 \\
    --gpu-memory-utilization 0.95

  # Using environment variable for token
  HF_TOKEN=hf_xxx uv run generate-responses.py input-dataset output-dataset
"""
        )

        parser.add_argument(
            "src_dataset_hub_id",
            help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)"
        )
        parser.add_argument(
            "output_dataset_hub_id",
            help="Output dataset name on Hugging Face Hub"
        )
        parser.add_argument(
            "--model-id",
            type=str,
            default="Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
            help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507-FP8)"
        )
        parser.add_argument(
            "--messages-column",
            type=str,
            default="messages",
            help="Column containing chat messages (default: messages)"
        )
        parser.add_argument(
            "--output-column",
            type=str,
            default="response",
            help="Column name for generated responses (default: response)"
        )
        parser.add_argument(
            "--temperature",
            type=float,
            default=0.7,
            help="Sampling temperature (default: 0.7)"
        )
        parser.add_argument(
            "--top-p",
            type=float,
            default=0.8,
            help="Top-p sampling parameter (default: 0.8)"
        )
        parser.add_argument(
            "--top-k",
            type=int,
            default=20,
            help="Top-k sampling parameter (default: 20)"
        )
        parser.add_argument(
            "--min-p",
            type=float,
            default=0.0,
            help="Minimum probability threshold (default: 0.0)"
        )
        parser.add_argument(
            "--max-tokens",
            type=int,
            default=16384,
            help="Maximum tokens to generate (default: 16384)"
        )
        parser.add_argument(
            "--repetition-penalty",
            type=float,
            default=1.0,
            help="Repetition penalty (default: 1.0)"
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            type=float,
            default=0.90,
            help="GPU memory utilization factor (default: 0.90)"
        )
        parser.add_argument(
            "--tensor-parallel-size",
            type=int,
            help="Number of GPUs to use (default: auto-detect)"
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)"
        )

        args = parser.parse_args()

        main(
            src_dataset_hub_id=args.src_dataset_hub_id,
            output_dataset_hub_id=args.output_dataset_hub_id,
            model_id=args.model_id,
            messages_column=args.messages_column,
            output_column=args.output_column,
            temperature=args.temperature,
            top_p=args.top_p,
            top_k=args.top_k,
            min_p=args.min_p,
            max_tokens=args.max_tokens,
            repetition_penalty=args.repetition_penalty,
            gpu_memory_utilization=args.gpu_memory_utilization,
            tensor_parallel_size=args.tensor_parallel_size,
            hf_token=args.hf_token,
        )
    else:
        # Show HF Jobs example when run without arguments
        print("""
vLLM Response Generation Script
===============================

This script requires arguments. For usage information:
  uv run generate-responses.py --help

Example HF Jobs command with multi-GPU:
  # If you're logged in with huggingface-cli, the token will be auto-detected
  hf jobs uv run \\
    --flavor l4x4 \\
    https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
    username/input-dataset \\
    username/output-dataset \\
    --messages-column messages \\
    --model-id Qwen/Qwen3-30B-A3B-Instruct-2507-FP8 \\
    --temperature 0.7 \\
    --max-tokens 16384
""")