Muhammad Taqi Raza committed
Commit 137b829 · 1 Parent(s): ad3dcc2

change output

Files changed (1):
  inference/utils.py  +3 -2
inference/utils.py CHANGED
@@ -168,7 +168,7 @@ def load_sd_upscale(ckpt, inf_device):
     return out
 
 
-def upscale(upscale_model, tensor: torch.Tensor, inf_device, output_device="cpu", upscale_factor) -> torch.Tensor:
+def upscale(upscale_model, tensor: torch.Tensor, inf_device, upscale_factor, output_device="cpu") -> torch.Tensor:
 
     memory_required = module_size(upscale_model.model)
     memory_required += (
@@ -201,11 +201,12 @@ def upscale(upscale_model, tensor: torch.Tensor, inf_device, output_device="cpu"
     return s
 
 
+
 def upscale_batch_and_concatenate(upscale_model, latents, inf_device, output_device="cpu", upscale_factor = 4) -> torch.Tensor:
     upscaled_latents = []
     for i in range(latents.size(0)):
         latent = latents[i]
-        upscaled_latent = upscale(upscale_model, latent, inf_device, output_device, upscale_factor)
+        upscaled_latent = upscale(upscale_model, latent, inf_device, upscale_factor, output_device)
         upscaled_latents.append(upscaled_latent)
     return torch.stack(upscaled_latents)
 
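The substantive change is the parameter order of upscale: Python rejects a signature in which a parameter without a default (upscale_factor) follows one with a default (output_device="cpu"), so the old definition would not even parse. Moving upscale_factor ahead of output_device, and reordering the positional arguments in the upscale_batch_and_concatenate call to match, resolves that. The sketch below is a minimal, self-contained illustration of the corrected ordering; only the two signatures and the batch loop mirror the diff, while the body of upscale is a placeholder (the repository's version estimates memory and runs upscale_model, which is not reproduced here, so upscale_model goes unused in this stub).

import torch
import torch.nn.functional as F

def upscale(upscale_model, tensor: torch.Tensor, inf_device, upscale_factor, output_device="cpu") -> torch.Tensor:
    # Corrected order: the non-default upscale_factor now precedes output_device="cpu".
    # Placeholder body: nearest-neighbour interpolation stands in for the real model pass,
    # so upscale_model is intentionally unused here.
    out = F.interpolate(tensor.to(inf_device).unsqueeze(0), scale_factor=upscale_factor)
    return out.squeeze(0).to(output_device)

def upscale_batch_and_concatenate(upscale_model, latents, inf_device, output_device="cpu", upscale_factor=4) -> torch.Tensor:
    upscaled_latents = []
    for i in range(latents.size(0)):
        latent = latents[i]
        # Call site from the diff: upscale_factor is passed before output_device,
        # matching the new parameter order.
        upscaled_latent = upscale(upscale_model, latent, inf_device, upscale_factor, output_device)
        upscaled_latents.append(upscaled_latent)
    return torch.stack(upscaled_latents)

latents = torch.randn(2, 3, 8, 8)            # toy batch of latent frames
out = upscale_batch_and_concatenate(None, latents, inf_device="cpu")
print(out.shape)                             # torch.Size([2, 3, 32, 32])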