Update app.py
app.py CHANGED
@@ -12,9 +12,7 @@ model_random = UNet2DModel(**model.config)
 model_random.save_pretrained("my_model")
 model_random = UNet2DModel.from_pretrained("my_model")
 import torch
-
 torch.manual_seed(0)
-
 noisy_sample = torch.randn(
     1, model.config.in_channels, model.config.sample_size, model.config.sample_size
 )
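The hunk above already assumes model, model.config, and (later in the file) repo_id, which are defined in the first lines of app.py that this diff does not touch. A minimal sketch of what that preamble presumably looks like, following the diffusers UNet2DModel API; the checkpoint id is an assumption and app.py may point at a different one:

# Assumed preamble (not part of this diff); the repo_id value is a guess.
from diffusers import UNet2DModel

repo_id = "google/ddpm-cat-256"               # hypothetical checkpoint id
model = UNet2DModel.from_pretrained(repo_id)  # pretrained UNet
model_random = UNet2DModel(**model.config)    # random weights, same architecture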
@@ -23,7 +21,6 @@ with torch.no_grad():
     noisy_residual = model(sample=noisy_sample, timestep=2).sample
 noisy_residual.shape
 from diffusers import DDPMScheduler
-
 scheduler = DDPMScheduler.from_config(repo_id)
 scheduler.config
 scheduler.save_config("my_scheduler")
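The next hunk's header shows the opening of a scheduler.step() call that produces less_noisy_sample; the call itself sits in the lines between these two hunks and is not shown. A sketch of what that elided call presumably looks like, using the DDPMScheduler.step(model_output, timestep, sample) signature:

# Sketch of the elided call between the hunks; the argument values reuse the
# noisy_residual computed above and are otherwise an assumption.
less_noisy_sample = scheduler.step(
    model_output=noisy_residual, timestep=2, sample=noisy_sample
).prev_sample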
@@ -34,48 +31,31 @@ less_noisy_sample = scheduler.step(
 less_noisy_sample.shape
 import PIL.Image
 import numpy as np
-
 def display_sample(sample, i):
     image_processed = sample.cpu().permute(0, 2, 3, 1)
     image_processed = (image_processed + 1.0) * 127.5
     image_processed = image_processed.numpy().astype(np.uint8)
-
     image_pil = PIL.Image.fromarray(image_processed[0])
     display(f"Image at step {i}")
     display(image_pil)
 model.to("cuda")
 noisy_sample = noisy_sample.to("cuda")
 import tqdm
-
 sample = noisy_sample
-
 for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)):
-    # 1. predict noise residual
     with torch.no_grad():
         residual = model(sample, t).sample
-
-    # 2. compute less noisy image and set x_t -> x_t-1
     sample = scheduler.step(residual, t, sample).prev_sample
-
-    # 3. optionally look at image
     if (i + 1) % 50 == 0:
         display_sample(sample, i + 1)
 from diffusers import DDIMScheduler
-
 scheduler = DDIMScheduler.from_config(repo_id)
 scheduler.set_timesteps(num_inference_steps=50)
 import tqdm
-
 sample = noisy_sample
-
 for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)):
-    # 1. predict noise residual
     with torch.no_grad():
         residual = model(sample, t).sample
-
-    # 2. compute previous image and set x_t -> x_t-1
     sample = scheduler.step(residual, t, sample).prev_sample
-
-    # 3. optionally look at image
     if (i + 1) % 10 == 0:
         display_sample(sample, i + 1)
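display() inside display_sample is the IPython/Jupyter helper and is not defined in a plain Python script, so running app.py outside a notebook would fail at that point. A script-friendly variant, sketched here with an illustrative output path, writes the intermediate samples to disk instead:

# Hypothetical replacement for display_sample when no notebook display() exists;
# the steps/ output directory and file naming are illustrative only.
import os
import numpy as np
import PIL.Image

def save_sample(sample, i, out_dir="steps"):
    os.makedirs(out_dir, exist_ok=True)
    image_processed = sample.cpu().permute(0, 2, 3, 1)     # NCHW -> NHWC
    image_processed = (image_processed + 1.0) * 127.5      # [-1, 1] -> [0, 255]
    image_processed = image_processed.numpy().astype(np.uint8)
    PIL.Image.fromarray(image_processed[0]).save(f"{out_dir}/step_{i:04d}.png")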