Barak1 committed
Commit 32cf151 · Parent: bad6b75

fix artifacts from trying to use f16

Files changed (2)
  1. src/editor.py +1 -1
  2. src/euler_scheduler.py +1 -169
src/editor.py CHANGED
@@ -44,7 +44,7 @@ class ImageEditorDemo:
         img_size = (512,512)
         VQAE_SCALE = 8
         latents_size = (1, 4, img_size[0] // VQAE_SCALE, img_size[1] // VQAE_SCALE)
-        noise = [randn_tensor(latents_size, dtype=torch.float32, device=torch.device(device), generator=g_cpu) for i
+        noise = [randn_tensor(latents_size, dtype=torch.float16, device=torch.device(device), generator=g_cpu) for i
                  in range(cfg.num_inversion_steps)]
         pipe_inversion.scheduler.set_noise_list(noise)
         pipe_inversion.scheduler_inference.set_noise_list(noise)
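
For reference, `randn_tensor` is the diffusers helper (`diffusers.utils.torch_utils.randn_tensor`), and the changed line pre-draws one latent-shaped noise tensor per inversion step so the inversion and inference schedulers share the same noise list. Below is a minimal sketch of that pattern, with hypothetical stand-ins for the demo's `cfg`, `device`, and `g_cpu`; it is an illustration of the call, not the repository's code.

```python
import torch
from diffusers.utils.torch_utils import randn_tensor

# Hypothetical stand-ins for values the demo gets from its config and setup.
num_inversion_steps = 50
device = "cuda" if torch.cuda.is_available() else "cpu"
g_cpu = torch.Generator()  # CPU generator, as the variable name suggests

img_size = (512, 512)
VQAE_SCALE = 8
latents_size = (1, 4, img_size[0] // VQAE_SCALE, img_size[1] // VQAE_SCALE)

# One latent-shaped noise tensor per inversion step, drawn in the dtype the
# committed line uses (float16); the same list is then handed to both schedulers.
noise = [
    randn_tensor(latents_size, dtype=torch.float16,
                 device=torch.device(device), generator=g_cpu)
    for _ in range(num_inversion_steps)
]
```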
src/euler_scheduler.py CHANGED
@@ -419,172 +419,4 @@ class MyEulerAncestralDiscreteScheduler(EulerAncestralDiscreteScheduler):
         sigma = sigma.unsqueeze(-1)

         noisy_samples = original_samples + noise * sigma
-        return noisy_samples
-
-    # def update_noise_for_friendly_inversion(
-    #     self,
-    #     model_output: torch.FloatTensor,
-    #     timestep: Union[float, torch.FloatTensor],
-    #     z_t: torch.FloatTensor,
-    #     z_tp1: torch.FloatTensor,
-    #     return_dict: bool = True,
-    # ) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]:
-    #     if (
-    #         isinstance(timestep, int)
-    #         or isinstance(timestep, torch.IntTensor)
-    #         or isinstance(timestep, torch.LongTensor)
-    #     ):
-    #         raise ValueError(
-    #             (
-    #                 "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
-    #                 " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
-    #                 " one of the `scheduler.timesteps` as a timestep."
-    #             ),
-    #         )
-
-    #     if not self.is_scale_input_called:
-    #         logger.warning(
-    #             "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
-    #             "See `StableDiffusionPipeline` for a usage example."
-    #         )
-
-    #     self._init_step_index(timestep.view((1)))
-
-    #     sigma = self.sigmas[self.step_index]
-
-    #     sigma_from = self.sigmas[self.step_index]
-    #     sigma_to = self.sigmas[self.step_index+1]
-    #     # sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5
-    #     sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2).abs() / sigma_from**2) ** 0.5
-    #     # sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
-    #     sigma_down = sigma_to**2 / sigma_from
-
-    #     # 2. Conv = (sample - pred_original_sample) / sigma
-    #     derivative = model_output
-
-    #     dt = sigma_down - sigma
-    #     # dt = sigma_down - sigma_from
-
-    #     prev_sample = z_t - derivative * dt
-
-    #     if sigma_up > 0:
-    #         self.noise_list[self.step_index] = (prev_sample - z_tp1) / sigma_up
-
-    #     prev_sample = prev_sample - self.noise_list[self.step_index] * sigma_up
-
-
-    #     if not return_dict:
-    #         return (prev_sample,)
-
-    #     return EulerAncestralDiscreteSchedulerOutput(
-    #         prev_sample=prev_sample, pred_original_sample=None
-    #     )
-
-
-    # def step_friendly_inversion(
-    #     self,
-    #     model_output: torch.FloatTensor,
-    #     timestep: Union[float, torch.FloatTensor],
-    #     sample: torch.FloatTensor,
-    #     generator: Optional[torch.Generator] = None,
-    #     return_dict: bool = True,
-    #     expected_next_sample: torch.FloatTensor = None,
-    # ) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]:
-    #     """
-    #     Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
-    #     process from the learned model outputs (most often the predicted noise).
-
-    #     Args:
-    #         model_output (`torch.FloatTensor`):
-    #             The direct output from learned diffusion model.
-    #         timestep (`float`):
-    #             The current discrete timestep in the diffusion chain.
-    #         sample (`torch.FloatTensor`):
-    #             A current instance of a sample created by the diffusion process.
-    #         generator (`torch.Generator`, *optional*):
-    #             A random number generator.
-    #         return_dict (`bool`):
-    #             Whether or not to return a
-    #             [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or tuple.
-
-    #     Returns:
-    #         [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or `tuple`:
-    #             If return_dict is `True`,
-    #             [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] is returned,
-    #             otherwise a tuple is returned where the first element is the sample tensor.
-
-    #     """
-
-    #     if (
-    #         isinstance(timestep, int)
-    #         or isinstance(timestep, torch.IntTensor)
-    #         or isinstance(timestep, torch.LongTensor)
-    #     ):
-    #         raise ValueError(
-    #             (
-    #                 "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
-    #                 " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
-    #                 " one of the `scheduler.timesteps` as a timestep."
-    #             ),
-    #         )
-
-    #     if not self.is_scale_input_called:
-    #         logger.warning(
-    #             "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
-    #             "See `StableDiffusionPipeline` for a usage example."
-    #         )
-
-    #     self._init_step_index(timestep.view((1)))
-
-    #     sigma = self.sigmas[self.step_index]
-
-    #     # Upcast to avoid precision issues when computing prev_sample
-    #     sample = sample.to(torch.float32)
-
-    #     # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
-    #     if self.config.prediction_type == "epsilon":
-    #         pred_original_sample = sample - sigma * model_output
-    #     elif self.config.prediction_type == "v_prediction":
-    #         # * c_out + input * c_skip
-    #         pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
-    #     elif self.config.prediction_type == "sample":
-    #         raise NotImplementedError("prediction_type not implemented yet: sample")
-    #     else:
-    #         raise ValueError(
-    #             f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
-    #         )
-
-    #     sigma_from = self.sigmas[self.step_index]
-    #     sigma_to = self.sigmas[self.step_index + 1]
-    #     sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5
-    #     sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
-
-    #     # 2. Convert to an ODE derivative
-    #     # derivative = (sample - pred_original_sample) / sigma
-    #     derivative = model_output
-
-    #     dt = sigma_down - sigma
-
-    #     prev_sample = sample + derivative * dt
-
-    #     device = model_output.device
-    #     # noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator)
-    #     # prev_sample = prev_sample + noise * sigma_up
-
-    #     if sigma_up > 0:
-    #         self.noise_list[self.step_index] = (expected_next_sample - prev_sample) / sigma_up
-
-    #     prev_sample = prev_sample + self.noise_list[self.step_index] * sigma_up
-
-    #     # Cast sample back to model compatible dtype
-    #     prev_sample = prev_sample.to(model_output.dtype)
-
-    #     # upon completion increase step index by one
-    #     self._step_index += 1
-
-    #     if not return_dict:
-    #         return (prev_sample,)
-
-    #     return EulerAncestralDiscreteSchedulerOutput(
-    #         prev_sample=prev_sample, pred_original_sample=pred_original_sample
-    #     )
+        return noisy_samples
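
The lines that survive this cleanup are the tail of what looks like the scheduler's `add_noise` path: `sigma` is broadcast over the latent dimensions and the sample is perturbed as `original + noise * sigma`. A small self-contained sketch of that computation follows; the shapes and sigma values are illustrative, not taken from the repository.

```python
import torch

# Illustrative latent batch and one sigma per batch element.
original_samples = torch.randn(2, 4, 64, 64)
noise = torch.randn_like(original_samples)
sigma = torch.tensor([14.6, 0.5])

# Broadcast sigma to (batch, 1, 1, 1); the unsqueeze(-1) in the diff plays this role.
while sigma.dim() < original_samples.dim():
    sigma = sigma.unsqueeze(-1)

noisy_samples = original_samples + noise * sigma
print(noisy_samples.shape)  # torch.Size([2, 4, 64, 64])
```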