Commit
·
fa7d3b9
1
Parent(s):
d034c0d
xet
Browse files
- generate-responses.py +6 -5
generate-responses.py
CHANGED
@@ -4,6 +4,7 @@
|
|
4 |
# "datasets",
|
5 |
# "flashinfer-python",
|
6 |
# "huggingface-hub[hf_transfer]",
|
|
|
7 |
# "torch",
|
8 |
# "transformers",
|
9 |
# "vllm>=0.8.5",
|
@@ -120,7 +121,7 @@ This dataset contains generated responses for prompts from [{source_dataset}](ht
|
|
120 |
## Generation Details
|
121 |
|
122 |
- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
|
123 |
-
- **Input Column**: `{prompt_column if prompt_column else messages_column}` ({"plain text prompts" if prompt_column else "chat messages"})
|
124 |
- **Model**: [{model_id}](https://huggingface.co/{model_id})
|
125 |
- **Number of Examples**: {num_examples:,}
|
126 |
- **Generation Date**: {generation_time}{filtering_section}
|
@@ -155,7 +156,7 @@ uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-respons
|
|
155 |
{source_dataset} \\
|
156 |
<output-dataset> \\
|
157 |
--model-id {model_id} \\
|
158 |
-
{"--prompt-column " + prompt_column if prompt_column else "--messages-column " + messages_column} \\
|
159 |
--temperature {sampling_params.temperature} \\
|
160 |
--top-p {sampling_params.top_p} \\
|
161 |
--top-k {sampling_params.top_k} \\
|
@@ -266,12 +267,12 @@ def main(
|
|
266 |
# Load dataset
|
267 |
logger.info(f"Loading dataset: {src_dataset_hub_id}")
|
268 |
dataset = load_dataset(src_dataset_hub_id, split="train")
|
269 |
-
|
270 |
# Apply max_samples if specified
|
271 |
if max_samples is not None and max_samples < len(dataset):
|
272 |
logger.info(f"Limiting dataset to {max_samples} samples")
|
273 |
dataset = dataset.select(range(max_samples))
|
274 |
-
|
275 |
total_examples = len(dataset)
|
276 |
logger.info(f"Dataset loaded with {total_examples:,} examples")
|
277 |
|
@@ -326,7 +327,7 @@ def main(
|
|
326 |
prompt = tokenizer.apply_chat_template(
|
327 |
messages, tokenize=False, add_generation_prompt=True
|
328 |
)
|
329 |
-
|
330 |
all_prompts.append(prompt)
|
331 |
|
332 |
# Count tokens if filtering is enabled
|
|
|
4 |
# "datasets",
|
5 |
# "flashinfer-python",
|
6 |
# "huggingface-hub[hf_transfer]",
|
7 |
+
# "hf-xet",
|
8 |
# "torch",
|
9 |
# "transformers",
|
10 |
# "vllm>=0.8.5",
|
|
|
121 |
## Generation Details
|
122 |
|
123 |
- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
|
124 |
+
- **Input Column**: `{prompt_column if prompt_column else messages_column}` ({"plain text prompts" if prompt_column else "chat messages"})
|
125 |
- **Model**: [{model_id}](https://huggingface.co/{model_id})
|
126 |
- **Number of Examples**: {num_examples:,}
|
127 |
- **Generation Date**: {generation_time}{filtering_section}
|
|
|
156 |
{source_dataset} \\
|
157 |
<output-dataset> \\
|
158 |
--model-id {model_id} \\
|
159 |
+
{"--prompt-column " + prompt_column if prompt_column else "--messages-column " + messages_column} \\
|
160 |
--temperature {sampling_params.temperature} \\
|
161 |
--top-p {sampling_params.top_p} \\
|
162 |
--top-k {sampling_params.top_k} \\
|
|
|
267 |
# Load dataset
|
268 |
logger.info(f"Loading dataset: {src_dataset_hub_id}")
|
269 |
dataset = load_dataset(src_dataset_hub_id, split="train")
|
270 |
+
|
271 |
# Apply max_samples if specified
|
272 |
if max_samples is not None and max_samples < len(dataset):
|
273 |
logger.info(f"Limiting dataset to {max_samples} samples")
|
274 |
dataset = dataset.select(range(max_samples))
|
275 |
+
|
276 |
total_examples = len(dataset)
|
277 |
logger.info(f"Dataset loaded with {total_examples:,} examples")
|
278 |
|
|
|
327 |
prompt = tokenizer.apply_chat_template(
|
328 |
messages, tokenize=False, add_generation_prompt=True
|
329 |
)
|
330 |
+
|
331 |
all_prompts.append(prompt)
|
332 |
|
333 |
# Count tokens if filtering is enabled
|