davanstrien (HF Staff) and Claude committed
Commit d034c0d · 1 Parent(s): 52dc8a2

Add plain text prompt support and sample limiting to generate-responses.py


- Add --prompt-column option to accept plain text prompts, automatically converting them to chat format (see the sketch after this list)
- Add --max-samples option to limit dataset processing for testing and development
- Update README.md with examples showing both chat message and plain text prompt usage
- Enhance dataset card generation to reflect input column type (chat vs plain text)
- Improve validation logic to handle both input modes
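
Both additions are small at their core, as the diff below shows: a plain text prompt is wrapped into a single user message before the chat template is applied, and `--max-samples` simply truncates the dataset before generation. A minimal standalone sketch of that behavior (the dataset ID and the `question` column are placeholders; the model ID is the script's default):

```python
from datasets import load_dataset
from transformers import AutoTokenizer

# Placeholder dataset/column names for illustration only
dataset = load_dataset("username/input-dataset", split="train")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-30B-A3B-Instruct-2507")

# --max-samples: limit how many rows are processed (handy for test runs)
max_samples = 100
if max_samples is not None and max_samples < len(dataset):
    dataset = dataset.select(range(max_samples))

# --prompt-column: plain text is converted to chat format automatically
prompt_column = "question"
prompts = []
for example in dataset:
    messages = [{"role": "user", "content": example[prompt_column]}]
    prompts.append(
        tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
    )
```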

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>

Files changed (2)
  1. README.md +13 -2
  2. generate-responses.py +63 -15
README.md CHANGED
@@ -52,32 +52,43 @@ hf jobs uv run \
 
 ### generate-responses.py
 
-Generate responses for chat-formatted prompts using generative LLMs (e.g., Llama, Qwen, Mistral) with vLLM's high-performance inference engine.
+Generate responses for prompts using generative LLMs (e.g., Llama, Qwen, Mistral) with vLLM's high-performance inference engine.
 
 **Features:**
 
 - 💬 Automatic chat template application
+- 📝 Support for both chat messages and plain text prompts
 - 🔀 Multi-GPU tensor parallelism support
 - 📏 Smart filtering for prompts exceeding context length
 - 📊 Comprehensive dataset cards with generation metadata
 - ⚡ HF Transfer enabled for fast model downloads
 - 🎛️ Full control over sampling parameters
+- 🎯 Sample limiting with `--max-samples` for testing
 
 **Usage:**
 
 ```bash
-# Local execution with default Qwen model
+# With chat-formatted messages (default)
 uv run generate-responses.py \
   username/input-dataset \
   username/output-dataset \
   --messages-column messages \
   --max-tokens 1024
 
+# With plain text prompts (NEW!)
+uv run generate-responses.py \
+  username/input-dataset \
+  username/output-dataset \
+  --prompt-column question \
+  --max-tokens 1024 \
+  --max-samples 100
+
 # With custom model and parameters
 uv run generate-responses.py \
   username/input-dataset \
   username/output-dataset \
   --model-id meta-llama/Llama-3.1-8B-Instruct \
+  --prompt-column text \
   --temperature 0.9 \
   --top-p 0.95 \
   --max-model-len 8192
generate-responses.py CHANGED
@@ -82,6 +82,7 @@ def create_dataset_card(
     source_dataset: str,
     model_id: str,
     messages_column: str,
+    prompt_column: Optional[str],
     sampling_params: SamplingParams,
     tensor_parallel_size: int,
     num_examples: int,
@@ -119,7 +120,7 @@ This dataset contains generated responses for prompts from [{source_dataset}](ht
 ## Generation Details
 
 - **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
-- **Messages Column**: `{messages_column}`
+- **Input Column**: `{prompt_column if prompt_column else messages_column}` ({'plain text prompts' if prompt_column else 'chat messages'})
 - **Model**: [{model_id}](https://huggingface.co/{model_id})
 - **Number of Examples**: {num_examples:,}
 - **Generation Date**: {generation_time}{filtering_section}
@@ -154,7 +155,7 @@ uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-respons
     {source_dataset} \\
     <output-dataset> \\
     --model-id {model_id} \\
-    --messages-column {messages_column} \\
+    {'--prompt-column ' + prompt_column if prompt_column else '--messages-column ' + messages_column} \\
     --temperature {sampling_params.temperature} \\
     --top-p {sampling_params.top_p} \\
     --top-k {sampling_params.top_k} \\
@@ -168,6 +169,7 @@ def main(
     output_dataset_hub_id: str,
     model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507",
     messages_column: str = "messages",
+    prompt_column: Optional[str] = None,
     output_column: str = "response",
     temperature: float = 0.7,
     top_p: float = 0.8,
@@ -179,6 +181,7 @@
     max_model_len: Optional[int] = None,
     tensor_parallel_size: Optional[int] = None,
     skip_long_prompts: bool = True,
+    max_samples: Optional[int] = None,
     hf_token: Optional[str] = None,
 ):
     """
@@ -189,6 +192,7 @@
         output_dataset_hub_id: Where to save results on Hugging Face Hub
         model_id: Hugging Face model ID for generation
         messages_column: Column name containing chat messages
+        prompt_column: Column name containing plain text prompts (alternative to messages_column)
        output_column: Column name for generated responses
         temperature: Sampling temperature
         top_p: Top-p sampling parameter
@@ -200,6 +204,7 @@
         max_model_len: Maximum model context length (None uses model default)
         tensor_parallel_size: Number of GPUs to use (auto-detect if None)
         skip_long_prompts: Skip prompts exceeding max_model_len instead of failing
+        max_samples: Maximum number of samples to process (None for all)
         hf_token: Hugging Face authentication token
     """
     generation_start_time = datetime.now().isoformat()
@@ -261,15 +266,34 @@ def main(
     # Load dataset
     logger.info(f"Loading dataset: {src_dataset_hub_id}")
     dataset = load_dataset(src_dataset_hub_id, split="train")
+
+    # Apply max_samples if specified
+    if max_samples is not None and max_samples < len(dataset):
+        logger.info(f"Limiting dataset to {max_samples} samples")
+        dataset = dataset.select(range(max_samples))
+
     total_examples = len(dataset)
     logger.info(f"Dataset loaded with {total_examples:,} examples")
 
-    # Validate messages column
-    if messages_column not in dataset.column_names:
-        logger.error(
-            f"Column '{messages_column}' not found. Available columns: {dataset.column_names}"
-        )
-        sys.exit(1)
+    # Determine which column to use and validate
+    if prompt_column:
+        # Use prompt column mode
+        if prompt_column not in dataset.column_names:
+            logger.error(
+                f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
+            )
+            sys.exit(1)
+        logger.info(f"Using prompt column mode with column: '{prompt_column}'")
+        use_messages = False
+    else:
+        # Use messages column mode
+        if messages_column not in dataset.column_names:
+            logger.error(
+                f"Column '{messages_column}' not found. Available columns: {dataset.column_names}"
+            )
+            sys.exit(1)
+        logger.info(f"Using messages column mode with column: '{messages_column}'")
+        use_messages = True
 
     # Get effective max length for filtering
     if max_model_len is not None:
@@ -280,18 +304,29 @@ def main(
     logger.info(f"Using effective max model length: {effective_max_len}")
 
     # Process messages and apply chat template
-    logger.info("Applying chat template to messages...")
+    logger.info("Preparing prompts...")
     all_prompts = []
     valid_prompts = []
     valid_indices = []
     skipped_info = []
 
-    for i, example in enumerate(tqdm(dataset, desc="Processing messages")):
-        messages = example[messages_column]
-        # Apply chat template
-        prompt = tokenizer.apply_chat_template(
-            messages, tokenize=False, add_generation_prompt=True
-        )
+    for i, example in enumerate(tqdm(dataset, desc="Processing prompts")):
+        if use_messages:
+            # Messages mode: use existing chat messages
+            messages = example[messages_column]
+            # Apply chat template
+            prompt = tokenizer.apply_chat_template(
+                messages, tokenize=False, add_generation_prompt=True
+            )
+        else:
+            # Prompt mode: convert plain text to messages format
+            user_prompt = example[prompt_column]
+            messages = [{"role": "user", "content": user_prompt}]
+            # Apply chat template
+            prompt = tokenizer.apply_chat_template(
+                messages, tokenize=False, add_generation_prompt=True
+            )
+
         all_prompts.append(prompt)
 
         # Count tokens if filtering is enabled
@@ -352,6 +387,7 @@ def main(
         source_dataset=src_dataset_hub_id,
         model_id=model_id,
         messages_column=messages_column,
+        prompt_column=prompt_column,
         sampling_params=sampling_params,
         tensor_parallel_size=tensor_parallel_size,
         num_examples=total_examples,
@@ -419,12 +455,22 @@ Examples:
             default="messages",
             help="Column containing chat messages (default: messages)",
         )
+        parser.add_argument(
+            "--prompt-column",
+            type=str,
+            help="Column containing plain text prompts (alternative to --messages-column)",
+        )
         parser.add_argument(
             "--output-column",
             type=str,
             default="response",
             help="Column name for generated responses (default: response)",
         )
+        parser.add_argument(
+            "--max-samples",
+            type=int,
+            help="Maximum number of samples to process (default: all)",
+        )
         parser.add_argument(
             "--temperature",
             type=float,
@@ -502,6 +548,7 @@ Examples:
             output_dataset_hub_id=args.output_dataset_hub_id,
             model_id=args.model_id,
             messages_column=args.messages_column,
+            prompt_column=args.prompt_column,
             output_column=args.output_column,
             temperature=args.temperature,
             top_p=args.top_p,
@@ -513,6 +560,7 @@ Examples:
             max_model_len=args.max_model_len,
             tensor_parallel_size=args.tensor_parallel_size,
             skip_long_prompts=args.skip_long_prompts,
+            max_samples=args.max_samples,
             hf_token=args.hf_token,
         )
     else: