diffusers-benchmarking-bot committed · verified · Commit ced7329 · 1 Parent(s): c5a1962

Upload folder using huggingface_hub

Files changed (1)
  1. main/rerender_a_video.py +3 -3
main/rerender_a_video.py CHANGED
@@ -632,7 +632,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
                 The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                 instead.
             frames (`List[np.ndarray]` or `torch.Tensor`): The input images to be used as the starting point for the image generation process.
-            control_frames (`List[np.ndarray]` or `torch.Tensor`): The ControlNet input images condition to provide guidance to the `unet` for generation.
+            control_frames (`List[np.ndarray]` or `torch.Tensor` or `Callable`): The ControlNet input images condition to provide guidance to the `unet` for generation, or any callable object that converts a frame to a control frame.
             strength (`float`): SDEdit strength.
             num_inference_steps (`int`, *optional*, defaults to 50):
                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
@@ -789,7 +789,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
         # Currently we only support single control
         if isinstance(controlnet, ControlNetModel):
             control_image = self.prepare_control_image(
-                image=control_frames[0],
+                image=control_frames(frames[0]) if callable(control_frames) else control_frames[0],
                 width=width,
                 height=height,
                 batch_size=batch_size,
@@ -924,7 +924,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
             for idx in range(1, len(frames)):
                 image = frames[idx]
                 prev_image = frames[idx - 1]
-                control_image = control_frames[idx]
+                control_image = control_frames(image) if callable(control_frames) else control_frames[idx]
                 # 5.1 prepare frames
                 image = self.image_processor.preprocess(image).to(dtype=self.dtype)
                 prev_image = self.image_processor.preprocess(prev_image).to(dtype=self.dtype)
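
For callers, the practical effect of this diff is that `control_frames` may now be a function applied to each source frame instead of a precomputed list of control images. Below is a minimal sketch of such a callable, assuming frames arrive as `uint8` RGB `np.ndarray`s and a Canny-edge ControlNet; the helper name `canny_control`, the threshold values, and the `pipe(...)` calls in the comments are illustrative, not part of this commit.

import cv2
import numpy as np


def canny_control(frame: np.ndarray) -> np.ndarray:
    """Convert one RGB frame into a 3-channel Canny edge map for ControlNet."""
    edges = cv2.Canny(frame, 100, 200)     # uint8 edge mask, shape (H, W)
    return np.stack([edges] * 3, axis=-1)  # replicate to (H, W, 3)


# Old style: materialize every control frame up front.
#   output = pipe(prompt=prompt, frames=frames,
#                 control_frames=[canny_control(f) for f in frames], ...)
# New style: pass the callable itself; the pipeline invokes it per frame
# (control_frames(frames[0]) for the first frame, control_frames(image)
# inside the loop for the rest).
#   output = pipe(prompt=prompt, frames=frames,
#                 control_frames=canny_control, ...)

Dispatching on `callable(control_frames)` keeps existing list and tensor inputs working unchanged while sparing callers the memory cost of holding every control image at once.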