Update app.py

app.py CHANGED
@@ -291,12 +291,12 @@ def generate_images_30(prompt, neg_prompt_1, neg_prompt_2, neg_prompt_3, width,
 upscale_2 = upscaler_2(upscale_1, tiling=True, tile_width=256, tile_height=256)
 print('-- got 4K 16-bit upscaled PIL image --')
 
-downscaled_upscale = upscale_2.resize((
+#downscaled_upscale = upscale_2.resize((upscale_2.width // 8, upscale_2.height // 8), Image.LANCZOS)
 
 torch.cuda.empty_cache()
 
 # 6. Convert the 4K 16-bit PIL back to a float32 tensor
-upscaled_16bit_numpy = np.array(
+upscaled_16bit_numpy = np.array(upscale_2)
 upscaled_srgb_tensor = torch.from_numpy(upscaled_16bit_numpy).permute(2, 0, 1).unsqueeze(0).to(device, dtype=torch.float32) / 65535.0
 
 # 7. Create 10-bit HDR AVIF bytes from the 4K tensor (for GCS)
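The functional change in this hunk is in step 6: the full-resolution upscale_2 is now converted directly with np.array(upscale_2), and the eighth-resolution LANCZOS copy is left commented out. Below is a minimal sketch of that conversion step, using a random uint16 array as a stand-in for np.array(upscale_2); the device string and the int32 cast are assumptions of the sketch, since app.py feeds the uint16 array straight into torch.from_numpy.

import numpy as np
import torch

# Stand-in for np.array(upscale_2): a 16-bit H x W x 3 sRGB array.
upscaled_16bit_numpy = np.random.randint(0, 65536, size=(64, 64, 3), dtype=np.uint16)

device = "cuda" if torch.cuda.is_available() else "cpu"  # assumption: app.py defines its own device

# HWC uint16 -> NCHW float32 in [0, 1]; 65535 is the maximum 16-bit channel value.
# The int32 cast is only so the sketch runs on torch builds without uint16 support.
upscaled_srgb_tensor = (
    torch.from_numpy(upscaled_16bit_numpy.astype(np.int32))
    .permute(2, 0, 1)   # HWC -> CHW
    .unsqueeze(0)       # CHW -> NCHW
    .to(device, dtype=torch.float32)
    / 65535.0
)
print(upscaled_srgb_tensor.shape, float(upscaled_srgb_tensor.max()))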
@@ -353,12 +353,12 @@ def generate_images_60(prompt, neg_prompt_1, neg_prompt_2, neg_prompt_3, width,
 upscale_2 = upscaler_2(upscale_1, tiling=True, tile_width=256, tile_height=256)
 print('-- got 4K 16-bit upscaled PIL image --')
 
-downscaled_upscale = upscale_2.resize((
+#downscaled_upscale = upscale_2.resize((upscale_2.width // 8, upscale_2.height // 8), Image.LANCZOS)
 
 torch.cuda.empty_cache()
 
 # 6. Convert the 4K 16-bit PIL back to a float32 tensor
-upscaled_16bit_numpy = np.array(
+upscaled_16bit_numpy = np.array(upscale_2)
 upscaled_srgb_tensor = torch.from_numpy(upscaled_16bit_numpy).permute(2, 0, 1).unsqueeze(0).to(device, dtype=torch.float32) / 65535.0
 
 # 7. Create 10-bit HDR AVIF bytes from the 4K tensor (for GCS)
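The same change is applied in generate_images_60. For reference, the line that both hunks disable built an eighth-resolution preview of the 4K result with Pillow's LANCZOS filter, and step 6 previously converted that smaller copy. Here is a short sketch of what the disabled line does, assuming upscale_2 is an ordinary 4096 x 4096 PIL image (the real one is the 16-bit output of the second upscaler pass).

from PIL import Image

# Stand-in for upscale_2, the 4K output of the second upscaler pass.
upscale_2 = Image.new("RGB", (4096, 4096))

# The commented-out line: integer-divide each side by 8 and resample with LANCZOS,
# producing a 512 x 512 preview instead of keeping the full 4K frame.
downscaled_upscale = upscale_2.resize(
    (upscale_2.width // 8, upscale_2.height // 8), Image.LANCZOS
)
print(downscaled_upscale.size)  # (512, 512)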
@@ -408,19 +408,19 @@ def generate_images_110(prompt, neg_prompt_1, neg_prompt_2, neg_prompt_3, width,
 sd_image_pil_16bit = Image.fromarray(srgb_numpy_16bit, mode='RGB')
 print('-- got 16-bit PIL image for upscaling --')
 
-
+# 5. Run the 16-bit upscaling (4x)
 # We feed the high-precision 16-bit PIL image to the upscaler
 with torch.no_grad():
 upscale_1 = upscaler_2(sd_image_pil_16bit, tiling=True, tile_width=256, tile_height=256)
 upscale_2 = upscaler_2(upscale_1, tiling=True, tile_width=256, tile_height=256)
 print('-- got 4K 16-bit upscaled PIL image --')
 
-downscaled_upscale = upscale_2.resize((
+#downscaled_upscale = upscale_2.resize((upscale_2.width // 8, upscale_2.height // 8), Image.LANCZOS)
 
 torch.cuda.empty_cache()
 
 # 6. Convert the 4K 16-bit PIL back to a float32 tensor
-upscaled_16bit_numpy = np.array(
+upscaled_16bit_numpy = np.array(upscale_2)
 upscaled_srgb_tensor = torch.from_numpy(upscaled_16bit_numpy).permute(2, 0, 1).unsqueeze(0).to(device, dtype=torch.float32) / 65535.0
 
 # 7. Create 10-bit HDR AVIF bytes from the 4K tensor (for GCS)
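This hunk also adds the step-5 comment and applies the same two edits to generate_images_110. The step-7 context line mentions building 10-bit HDR AVIF bytes from the 4K tensor; that encode itself is not part of this diff, so the sketch below covers only the 10-bit quantization such an encode needs, with a random tensor standing in for upscaled_srgb_tensor.

import numpy as np
import torch

# Stand-in for upscaled_srgb_tensor: NCHW float32 in [0, 1].
upscaled_srgb_tensor = torch.rand(1, 3, 64, 64)

# Scale [0, 1] floats to the 10-bit range 0..1023 and round to integers.
# uint16 is used as the container because numpy has no 10-bit dtype.
ten_bit = (
    (upscaled_srgb_tensor.squeeze(0).permute(1, 2, 0).clamp(0.0, 1.0) * 1023.0)  # NCHW -> HWC
    .round()
    .to(torch.int32)
    .cpu()
    .numpy()
    .astype(np.uint16)
)
print(ten_bit.shape, int(ten_bit.max()))  # all values fit in 10 bits (<= 1023)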
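Both upscaler passes run with tiling=True and 256 x 256 tiles, which keeps memory bounded while producing the 4K output. The snippet below is only a generic illustration of that tiling idea, not the upscaler library's implementation: upscale_tile_4x is a hypothetical stand-in (a plain LANCZOS resize) for the per-tile model pass, and real tiled upscalers also overlap and blend tile borders, which is omitted here.

from PIL import Image

def upscale_tile_4x(tile: Image.Image) -> Image.Image:
    # Hypothetical stand-in for the model pass on a single tile.
    return tile.resize((tile.width * 4, tile.height * 4), Image.LANCZOS)

def tiled_upscale_4x(image: Image.Image, tile: int = 256) -> Image.Image:
    # Upscale one 256 x 256 crop at a time so only a small region is processed at once.
    out = Image.new(image.mode, (image.width * 4, image.height * 4))
    for top in range(0, image.height, tile):
        for left in range(0, image.width, tile):
            box = (left, top, min(left + tile, image.width), min(top + tile, image.height))
            out.paste(upscale_tile_4x(image.crop(box)), (left * 4, top * 4))
    return out

print(tiled_upscale_4x(Image.new("RGB", (1024, 1024))).size)  # (4096, 4096)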