# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import TrainingArguments


@dataclass
class OnlineDPOConfig(TrainingArguments):
    r"""
    Configuration class for the [`OnlineDPOTrainer`].

    This class includes only the parameters that are specific to Online DPO training. For a full list of training
    arguments, please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this
    class may differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
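
    Example (a minimal sketch; the script name `train.py` and the flag values shown are illustrative, not defaults of
    this class):

    ```python
    from transformers import HfArgumentParser

    from trl import OnlineDPOConfig

    parser = HfArgumentParser(OnlineDPOConfig)
    # e.g. `python train.py --output_dir online-dpo --max_new_tokens 128 --temperature 0.7`
    (training_args,) = parser.parse_args_into_dataclasses()
    print(training_args.max_new_tokens)
    ```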

    Parameters:
        reward_model_path (`str` or `None`, *optional*, defaults to `None`):
            Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both.
        judge (`str` or `None`, *optional*, defaults to `None`):
            Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both.
        max_new_tokens (`int`, *optional*, defaults to `64`):
            Maximum number of tokens to generate per completion.
        max_length (`int`, *optional*, defaults to `512`):
            Maximum total length of the sequence (prompt + completion) used to compute log probabilities. If the
            sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the completion
            as possible.
        temperature (`float`, *optional*, defaults to `0.9`):
            Temperature for sampling. The higher the temperature, the more random the completions.
        missing_eos_penalty (`float` or `None`, *optional*, defaults to `None`):
            Penalty applied to the score when the model fails to generate an EOS token. This is useful to encourage
            the model to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be
            a positive value.
        beta (`float` or `list[float]`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in
            the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided then the β is
            selected for each new epoch and the last β is used for the rest of the epochs.
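            For example, `beta=[0.1, 0.05]` uses β = 0.1 for the first epoch, β = 0.05 for the second epoch, and
            keeps β = 0.05 for any remaining epochs.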
        loss_type (`str`, *optional*, defaults to `"sigmoid"`):
            Type of loss to use. Possible values are:

            - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
            - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.

        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model and reference model.
        use_vllm (`bool`, *optional*, defaults to `False`):
            Whether to use vLLM for generating completions. Requires vLLM to be installed (`pip install vllm`).
        vllm_model_impl (`str`, *optional*, defaults to `"vllm"`):
            Model implementation to use for vLLM. Must be one of `"transformers"` or `"vllm"`. `"transformers"`: Use
            the `transformers` backend for model implementation. `"vllm"`: Use the `vllm` library for model
            implementation.
        gpu_memory_utilization (`float`, *optional*, defaults to `0.55`):
            The vLLM GPU memory utilization, i.e. the fraction of GPU memory that vLLM is allowed to reserve.
        ds3_gather_for_generation (`bool`, *optional*, defaults to `True`):
            This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for
            generation, improving generation speed. However, disabling this option allows training models that exceed
            the VRAM capacity of a single GPU, albeit at the cost of slower generation.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
    """

    # Parameters whose default values are overridden from TrainingArguments
    learning_rate: float = field(
        default=5e-7,
        metadata={"help": "The initial learning rate for AdamW."},
    )
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X update steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
            "will be interpreted as ratio of total training steps."
        },
    )
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )
    reward_model_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both."
        },
    )
    judge: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both."
        },
    )
    max_new_tokens: int = field(
        default=64,
        metadata={"help": "Maximum number of tokens to generate per completion."},
    )
    max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum total length of the sequence (prompt + completion) used to compute log probabilities. "
            "If the sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the "
            "completion as possible."
        },
    )
    temperature: float = field(
        default=0.9,
        metadata={"help": "Temperature for sampling. The higher the temperature, the more random the completions."},
    )
    missing_eos_penalty: Optional[float] = field(
        default=None,
        metadata={
            "help": "Penalty applied to the score when the model fails to generate an EOS token. This is useful to "
            "encourage the model to generate completions shorter than the maximum length (`max_new_tokens`). The "
            "penalty must be a positive value."
        },
    )
    beta: list[float] = field(
        default_factory=lambda: [0.1],
        metadata={
            "help": "Parameter controlling the deviation from the reference model. Higher β means less deviation "
            "from the reference model. For the IPO loss (`loss_type='ipo'`), β is the regularization parameter "
            "denoted by τ in the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided "
            "then the β is selected for each new epoch and the last β is used for the rest of the epochs."
        },
    )
    loss_type: str = field(
        default="sigmoid",
        metadata={
            "help": "Type of loss to use.",
            "choices": ["sigmoid", "ipo"],
        },
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model and reference model."},
    )
    use_vllm: bool = field(
        default=False,
        metadata={
            "help": "Whether to use vLLM for generating completions. Requires vLLM to be installed "
            "(`pip install vllm`)."
        },
    )
    vllm_model_impl: str = field(
        default="vllm",
        metadata={
            "help": "Model implementation to use for vLLM. Must be one of `transformers` or `vllm`. `transformers`: "
            "Use the `transformers` backend for model implementation. `vllm`: Use the `vllm` library for model "
            "implementation."
        },
    )
    gpu_memory_utilization: Optional[float] = field(
        default=0.55,
        metadata={
            "help": "The vLLM GPU memory utilization, i.e. the fraction of GPU memory that vLLM is allowed to "
            "reserve."
        },
    )
    ds3_gather_for_generation: bool = field(
        default=True,
        metadata={
            "help": "This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered "
            "for generation, improving generation speed. However, disabling this option allows training models that "
            "exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation."
        },
    )
    model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
            "model from a string."
        },
    )

    def __post_init__(self):
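        # If the user did not set bf16 explicitly, default to bf16 unless fp16 was requested.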
        self.bf16 = not self.fp16 if self.bf16 is None else self.bf16
        super().__post_init__()
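        # A single-element beta list is collapsed to a scalar; longer lists are kept as a per-epoch schedule.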
        if hasattr(self.beta, "__len__") and len(self.beta) == 1:
            self.beta = self.beta[0]