jiuface committed
Commit ea8efbb · 1 Parent(s): d731134
Files changed (1): app.py (+11 -8)
app.py CHANGED
@@ -27,15 +27,20 @@ vae_model_id = "madebyollin/sdxl-vae-fp16-fix"
 if torch.cuda.is_available():
 
     # load pipe
-    controlnet = ControlNetModel.from_pretrained(controlnet_model_id, variant="fp16", use_safetensors=True, torch_dtype=torch.float16)
-    vae = AutoencoderKL.from_pretrained(vae_model_id, torch_dtype=torch.float16)
+    controlnet = ControlNetModel.from_pretrained(
+        controlnet_model_id,
+        # variant="fp16",
+        use_safetensors=True,
+        torch_dtype=torch.float32
+    )
+    # vae = AutoencoderKL.from_pretrained(vae_model_id, torch_dtype=torch.float16)
     pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
         base_model_id,
         controlnet=controlnet,
-        vae=vae,
-        variant="fp16",
+        # vae=vae,
+        # variant="fp16",
         use_safetensors=True,
-        torch_dtype=torch.float16,
+        torch_dtype=torch.float32,
     )
     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
     pipe.to(device)
@@ -62,7 +67,6 @@ def get_depth_map(image):
     image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda")
     with torch.no_grad(), torch.autocast("cuda"):
         depth_map = depth_estimator(image).predicted_depth
-    print("get_depth_map", original_size)
     depth_map = torch.nn.functional.interpolate(
         depth_map.unsqueeze(1),
         size=original_size,
@@ -93,7 +97,6 @@ def process(image, image_url, prompt, n_prompt, num_steps, guidance_scale, contr
     generator = torch.Generator().manual_seed(seed)
     generated_image = pipe(
         prompt=prompt,
-        image=orginal_image,
         negative_prompt=n_prompt,
         width=size[0],
         height=size[1],
@@ -101,7 +104,7 @@ def process(image, image_url, prompt, n_prompt, num_steps, guidance_scale, contr
         num_inference_steps=num_steps,
         strength=control_strength,
         generator=generator,
-        control_image=depth_image
+        image=depth_image
     ).images[0]
     return [[depth_image, generated_image], "ok"]
 
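For context on the last two hunks, here is a minimal standalone sketch (not the actual app.py) of how the updated pipeline call would look. In diffusers' StableDiffusionXLControlNetPipeline the ControlNet conditioning image (the depth map here) is passed as image; control_image is the keyword used by the img2img/inpaint ControlNet variants, which is what the final hunk corrects. The model IDs, prompts, and the depth.png path below are illustrative assumptions; only vae_model_id is named in the diff.

import torch
from PIL import Image
from diffusers import (
    ControlNetModel,
    EulerDiscreteScheduler,
    StableDiffusionXLControlNetPipeline,
)

# Model IDs are assumptions for the sketch; the diff does not show their values.
base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
controlnet_model_id = "diffusers/controlnet-depth-sdxl-1.0"

# Load everything in float32, as this commit does (the fp16 variant and the
# fp16-fix VAE are commented out in the diff).
controlnet = ControlNetModel.from_pretrained(
    controlnet_model_id, use_safetensors=True, torch_dtype=torch.float32
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    base_model_id,
    controlnet=controlnet,
    use_safetensors=True,
    torch_dtype=torch.float32,
)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# Stand-in for the depth map produced by get_depth_map(); the path is hypothetical.
depth_image = Image.open("depth.png").convert("RGB").resize((1024, 1024))

# The conditioning image goes in as `image=` for this text-to-image pipeline.
generated_image = pipe(
    prompt="a cozy living room, photorealistic",
    negative_prompt="blurry, low quality",
    image=depth_image,
    width=1024,
    height=1024,
    num_inference_steps=30,
    generator=torch.Generator().manual_seed(0),
).images[0]
generated_image.save("out.png")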