Train the model
By now, you have most of the pieces to start training the model and all that's left is putting everything together.
First, you'll need an optimizer and a learning rate scheduler:
>>> import torch
>>> from diffusers.optimization import get_cosine_schedule_with_warmup

>>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
>>> lr_scheduler = get_cosine_schedule_with_warmup(
...     optimizer=optimizer,
...     num_warmup_steps=config.lr_warmup_steps,
...     num_training_steps=(len(train_dataloader) * config.num_epochs),
... )
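The cosine schedule ramps the learning rate up over config.lr_warmup_steps optimizer steps and then decays it toward zero over the rest of training. If you'd like to sanity-check the warmup ramp, you can step a throwaway optimizer/scheduler pair and read back the learning rate (a minimal sketch; it uses a separate optimizer so the real training state is untouched):

>>> # Sketch: inspect the warmup ramp with a disposable optimizer/scheduler pair
>>> tmp_optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
>>> tmp_scheduler = get_cosine_schedule_with_warmup(
...     optimizer=tmp_optimizer,
...     num_warmup_steps=config.lr_warmup_steps,
...     num_training_steps=(len(train_dataloader) * config.num_epochs),
... )
>>> for _ in range(5):
...     tmp_optimizer.step()  # parameters have no gradients, so this doesn't change the model
...     tmp_scheduler.step()
>>> tmp_scheduler.get_last_lr()  # learning rate after 5 warmup steps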
Then, you'll need a way to evaluate the model. For evaluation, you can use the DDPMPipeline to generate a batch of sample images and save them as a grid:
>>> from diffusers import DDPMPipeline
>>> from PIL import Image
>>> import os

>>> def make_grid(images, rows, cols):
...     w, h = images[0].size
...     grid = Image.new("RGB", size=(cols * w, rows * h))
...     for i, image in enumerate(images):
...         grid.paste(image, box=(i % cols * w, i // cols * h))
...     return grid
>>> def evaluate(config, epoch, pipeline):
...     # Sample some images from random noise (this is the backward diffusion process).
...     # The default pipeline output type is `List[PIL.Image]`
...     images = pipeline(
...         batch_size=config.eval_batch_size,
...         generator=torch.manual_seed(config.seed),
...     ).images
...     # Make a grid out of the images
...     image_grid = make_grid(images, rows=4, cols=4)
...     # Save the images
...     test_dir = os.path.join(config.output_dir, "samples")
...     os.makedirs(test_dir, exist_ok=True)
...     image_grid.save(f"{test_dir}/{epoch:04d}.png")
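The 4×4 grid assumes config.eval_batch_size is 16; adjust rows and cols if you change the batch size. You can also try the function on its own before training; a sketch, assuming model and noise_scheduler are the UNet and DDPMScheduler created earlier (an untrained model will just produce noise):

>>> # Sketch: wrap the current model in a pipeline and run one evaluation pass
>>> pipeline = DDPMPipeline(unet=model, scheduler=noise_scheduler)
>>> evaluate(config, epoch=0, pipeline=pipeline)  # writes a 0000.png grid to the samples folder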
Now you can wrap all these components together in a training loop with 🤗 Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function that builds your full repository name, and then push the model to the Hub from inside the training loop.
💡 The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images, feel free to copy and run the code below. You can always come back and examine the training loop more closely later, like when you're waiting for your model to finish training. 🤗
>>> from accelerate import Accelerator
>>> from huggingface_hub import HfFolder, Repository, whoami
>>> from tqdm.auto import tqdm
>>> from pathlib import Path
>>> import torch.nn.functional as F
>>> import os
>>> def get_full_repo_name(model_id: str, organization: str = None, token: str = None):
...     if token is None:
...         token = HfFolder.get_token()
...     if organization is None:
...         username = whoami(token)["name"]
...         return f"{username}/{model_id}"
...     else:
...         return f"{organization}/{model_id}"
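For example, with a placeholder model ID (in the training loop below, the actual name comes from config.output_dir):

>>> # "ddpm-butterflies-128" is a hypothetical model ID used only for illustration
>>> get_full_repo_name("ddpm-butterflies-128")
'your-username/ddpm-butterflies-128'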
>>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
...     # Initialize accelerator and tensorboard logging
...     accelerator = Accelerator(
...         mixed_precision=config.mixed_precision,
...         gradient_accumulation_steps=config.gradient_accumulation_steps,
...         log_with="tensorboard",
...         logging_dir=os.path.join(config.output_dir, "logs"),
...     )
...     if accelerator.is_main_process:
...         if config.push_to_hub:
...             repo_name = get_full_repo_name(Path(config.output_dir).name)
...             repo = Repository(config.output_dir, clone_from=repo_name)
...         elif config.output_dir is not None:
...             os.makedirs(config.output_dir, exist_ok=True)
...         accelerator.init_trackers("train_example")
...     # Prepare everything
...     # There is no specific order to remember, you just need to unpack the
...     # objects in the same order you gave them to the prepare method.
...     model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
...         model, optimizer, train_dataloader, lr_scheduler
...     )
...     global_step = 0
...     # Now you train the model
...     for epoch in range(config.num_epochs):
...         progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
...         progress_bar.set_description(f"Epoch {epoch}")
...         for step, batch in enumerate(train_dataloader):
...             clean_images = batch["images"]
...             # Sample noise to add to the images
...             noise = torch.randn(clean_images.shape).to(clean_images.device)
...             bs = clean_images.shape[0]
...             # Sample a random timestep for each image
...             timesteps = torch.randint(
...                 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device
...             ).long()
...             # Add noise to the clean images according to the noise magnitude at each timestep
...             # (this is the forward diffusion process)
...             noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
...             with accelerator.accumulate(model):
...                 # Predict the noise residual
...                 noise_pred = model(noisy_images, timesteps, return_dict=False)[0]
...                 loss = F.mse_loss(noise_pred, noise)
...                 accelerator.backward(loss)
...                 accelerator.clip_grad_norm_(model.parameters(), 1.0)
...                 optimizer.step()
...                 lr_scheduler.step()
...                 optimizer.zero_grad()
...             progress_bar.update(1)
...             logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
...             progress_bar.set_postfix(**logs)
...             accelerator.log(logs, step=global_step)
...             global_step += 1
...         # After each epoch, optionally sample some demo images with evaluate() and save the model
...         if accelerator.is_main_process:
...             pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)
...             if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
...                 evaluate(config, epoch, pipeline)
...             if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
...                 if config.push_to_hub:
...                     repo.push_to_hub(commit_message=f"Epoch {epoch}", blocking=True)
...                 else:
...                     pipeline.save_pretrained(config.output_dir)
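With the training function complete, all that's left is to launch it. One way, following 🤗 Accelerate's notebook workflow, is notebook_launcher, which starts training in a single line (a sketch, assuming the config, model, scheduler, optimizer, and dataloader objects created earlier in the tutorial):

>>> from accelerate import notebook_launcher

>>> # Bundle the arguments in the order train_loop expects and launch on one process
>>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)
>>> notebook_launcher(train_loop, args, num_processes=1)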