Spaces:
Running
on
Zero
Running
on
Zero
kxhit
committed on
Commit
·
bec6532
1
Parent(s):
8309413
device GPU
Browse files
- app.py +3 -1
- gradio_demo/gradio_demo.py +4 -2
app.py
CHANGED
@@ -110,7 +110,7 @@ pipeline.set_progress_bar_config(disable=False)
|
|
110 |
pipeline.enable_xformers_memory_efficient_attention()
|
111 |
# enable vae slicing
|
112 |
pipeline.enable_vae_slicing()
|
113 |
-
pipeline = pipeline.to(device)
|
114 |
|
115 |
|
116 |
|
@@ -180,6 +180,8 @@ def run_eschernet(tmpdirname, eschernet_input_dict, sample_steps, sample_seed, n
|
|
180 |
assert T_out == pose_out.shape[1]
|
181 |
|
182 |
# run inference
|
|
|
|
|
183 |
if CaPE_TYPE == "6DoF":
|
184 |
with torch.autocast("cuda"):
|
185 |
image = pipeline(input_imgs=input_image, prompt_imgs=input_image,
|
|
|
110 |
pipeline.enable_xformers_memory_efficient_attention()
|
111 |
# enable vae slicing
|
112 |
pipeline.enable_vae_slicing()
|
113 |
+
# pipeline = pipeline.to(device)
|
114 |
|
115 |
|
116 |
|
|
|
180 |
assert T_out == pose_out.shape[1]
|
181 |
|
182 |
# run inference
|
183 |
+
pipeline.to(device)
|
184 |
+
pipeline.enable_xformers_memory_efficient_attention()
|
185 |
if CaPE_TYPE == "6DoF":
|
186 |
with torch.autocast("cuda"):
|
187 |
image = pipeline(input_imgs=input_image, prompt_imgs=input_image,
|
gradio_demo/gradio_demo.py
CHANGED
@@ -106,10 +106,10 @@ pipeline = Zero1to3StableDiffusionPipeline.from_pretrained(
|
|
106 |
pipeline.image_encoder = image_encoder.to(weight_dtype)
|
107 |
pipeline.set_progress_bar_config(disable=False)
|
108 |
|
109 |
-
pipeline.enable_xformers_memory_efficient_attention()
|
110 |
# enable vae slicing
|
111 |
pipeline.enable_vae_slicing()
|
112 |
-
pipeline = pipeline.to(device)
|
113 |
|
114 |
|
115 |
|
@@ -178,6 +178,8 @@ def run_eschernet(tmpdirname, eschernet_input_dict, sample_steps, sample_seed, n
|
|
178 |
assert T_out == pose_out.shape[1]
|
179 |
|
180 |
# run inference
|
|
|
|
|
181 |
if CaPE_TYPE == "6DoF":
|
182 |
with torch.autocast("cuda"):
|
183 |
image = pipeline(input_imgs=input_image, prompt_imgs=input_image,
|
|
|
106 |
pipeline.image_encoder = image_encoder.to(weight_dtype)
|
107 |
pipeline.set_progress_bar_config(disable=False)
|
108 |
|
109 |
+
# pipeline.enable_xformers_memory_efficient_attention()
|
110 |
# enable vae slicing
|
111 |
pipeline.enable_vae_slicing()
|
112 |
+
# pipeline = pipeline.to(device)
|
113 |
|
114 |
|
115 |
|
|
|
178 |
assert T_out == pose_out.shape[1]
|
179 |
|
180 |
# run inference
|
181 |
+
pipeline.to(device)
|
182 |
+
pipeline.enable_xformers_memory_efficient_attention()
|
183 |
if CaPE_TYPE == "6DoF":
|
184 |
with torch.autocast("cuda"):
|
185 |
image = pipeline(input_imgs=input_image, prompt_imgs=input_image,
|