Muhammad Taqi Raza committed on
Commit ad3dcc2 · 1 Parent(s): 2f615c0
inference/cli_demo_camera_i2v_pcd.py CHANGED
@@ -498,7 +498,6 @@ if __name__ == "__main__":
     # "--upscale", str(upscale),
     # "--upscale_factor", str(upscale_factor),
     # "--refine", str(refine),
-
 
     args = parser.parse_args()
     dtype = torch.float16 if args.dtype == "float16" else torch.bfloat16
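For context, the commented-out strings above ("--upscale", "--upscale_factor", "--refine") suggest these were intended to become CLI flags. A minimal sketch of how the script could expose the factor that inference/utils.upscale() now takes explicitly; the flag name, default, and help text are assumptions, not the script's confirmed arguments (only --dtype is visible in the diff context):

import argparse

import torch

parser = argparse.ArgumentParser()
# --dtype is read by the script below; the default and choices here are assumed.
parser.add_argument("--dtype", type=str, default="bfloat16", choices=["float16", "bfloat16"])
# Hypothetical flag: would be forwarded to inference/utils.upscale() as upscale_factor.
parser.add_argument(
    "--upscale_factor", type=int, default=4,
    help="Spatial scale forwarded to the tiled upscaler (assumed default)",
)

args = parser.parse_args()
dtype = torch.float16 if args.dtype == "float16" else torch.bfloat16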
inference/utils.py CHANGED
@@ -168,7 +168,8 @@ def load_sd_upscale(ckpt, inf_device):
     return out
 
 
-def upscale(upscale_model, tensor: torch.Tensor, inf_device, output_device="cpu") -> torch.Tensor:
+def upscale(upscale_model, tensor: torch.Tensor, inf_device, upscale_factor, output_device="cpu") -> torch.Tensor:
+
     memory_required = module_size(upscale_model.model)
     memory_required += (
         (512 * 512 * 3) * tensor.element_size() * max(upscale_model.scale, 1.0) * 384.0
@@ -185,14 +186,14 @@ def upscale(upscale_model, tensor: torch.Tensor, inf_device, output_device="cpu"
     )
 
     pbar = ProgressBar(steps, desc="Tiling and Upscaling")
-
+    # upscale_amount previously came from upscale_model.scale
     s = tiled_scale(
         samples=tensor.to(torch.float16),
         function=lambda a: upscale_model(a),
         tile_x=tile,
         tile_y=tile,
         overlap=overlap,
-        upscale_amount=upscale_model.scale,
+        upscale_amount=upscale_factor,
         pbar=pbar,
     )
 
@@ -204,7 +205,7 @@ def upscale_batch_and_concatenate(upscale_model, latents, inf_device, output_dev
     upscaled_latents = []
     for i in range(latents.size(0)):
         latent = latents[i]
-        upscaled_latent = upscale(upscale_model, latent, inf_device, output_device)
+        upscaled_latent = upscale(upscale_model, latent, inf_device, upscale_factor, output_device)
         upscaled_latents.append(upscaled_latent)
     return torch.stack(upscaled_latents)
 
211