Update app.py
Browse files
app.py
CHANGED
@@ -105,7 +105,7 @@ class ModelWrapper:
|
|
105 |
eval_images = get_x0_from_noise(noise, eval_images, alphas_cumprod, current_timesteps).to(self.DTYPE)
|
106 |
print(eval_images.dtype)
|
107 |
next_timestep = current_timesteps - step_interval
|
108 |
-
noise = self.scheduler.add_noise(eval_images, torch.randn_like(eval_images), next_timestep).to(
|
109 |
print(noise.dtype)
|
110 |
if fast_vae_decode:
|
111 |
eval_images = self.tiny_vae.decode(eval_images.to(self.tiny_vae_dtype) / self.tiny_vae.config.scaling_factor, return_dict=False)[0]
|
@@ -210,6 +210,8 @@ def create_demo():
|
|
210 |
conditioning_timestep = 999
|
211 |
num_step = 4
|
212 |
revision = None
|
|
|
|
|
213 |
|
214 |
accelerator = Accelerator()
|
215 |
|
|
|
105 |
eval_images = get_x0_from_noise(noise, eval_images, alphas_cumprod, current_timesteps).to(self.DTYPE)
|
106 |
print(eval_images.dtype)
|
107 |
next_timestep = current_timesteps - step_interval
|
108 |
+
noise = self.scheduler.add_noise(eval_images, torch.randn_like(eval_images), next_timestep).to(self.DTYPE)
|
109 |
print(noise.dtype)
|
110 |
if fast_vae_decode:
|
111 |
eval_images = self.tiny_vae.decode(eval_images.to(self.tiny_vae_dtype) / self.tiny_vae.config.scaling_factor, return_dict=False)[0]
|
|
|
210 |
conditioning_timestep = 999
|
211 |
num_step = 4
|
212 |
revision = None
|
213 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
214 |
+
torch.backends.cudnn.allow_tf32 = True
|
215 |
|
216 |
accelerator = Accelerator()
|
217 |
|