nick_93 committed on
Commit
bff5c84
·
1 Parent(s): ac6bf8a
Files changed (1) hide show
  1. app.py +1 -14
app.py CHANGED
@@ -126,7 +126,7 @@ def get_depth_image(
126
  feature_extractor: AutoImageProcessor,
127
  depth_estimator: AutoModelForDepthEstimation
128
  ) -> Image:
129
- image_to_depth = feature_extractor(images=image, return_tensors="pt").to(device)
130
  with torch.no_grad():
131
  depth_map = depth_estimator(**image_to_depth).predicted_depth
132
 
@@ -180,7 +180,6 @@ class ControlNetDepthDesignModelMulti:
180
  """ Produces random noise images """
181
  def __init__(self):
182
  """ Initialize your model(s) here """
183
- device = torch.device("cuda")
184
  #os.environ['HF_HUB_OFFLINE'] = "True"
185
  controlnet_depth= ControlNetModel.from_pretrained(
186
  "controlnet_depth", torch_dtype=dtype, use_safetensors=True)
@@ -199,9 +198,7 @@ class ControlNetDepthDesignModelMulti:
199
  weight_name="ip-adapter_sd15.bin")
200
  self.pipe.set_ip_adapter_scale(0.4)
201
  self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
202
- print(self.pipe.device)
203
  self.pipe = self.pipe.to(device)
204
- print(self.pipe.device)
205
  self.guide_pipe = StableDiffusionXLPipeline.from_pretrained("segmind/SSD-1B",
206
  torch_dtype=dtype, use_safetensors=True, variant="fp16")
207
  self.guide_pipe = self.guide_pipe.to(device)
@@ -215,16 +212,6 @@ class ControlNetDepthDesignModelMulti:
215
  self.depth_feature_extractor, self.depth_estimator = get_depth_pipeline()
216
  self.depth_estimator = self.depth_estimator.to(device)
217
 
218
- if torch.cuda.is_available():
219
- # Print the number of available GPUs
220
- print("Available GPU devices:")
221
- for i in range(torch.cuda.device_count()):
222
- print(f"Device {i}: {torch.cuda.get_device_name(i)}")
223
- else:
224
- print("No GPU devices available. Using CPU.")
225
-
226
- print(self.depth_estimator.device)
227
- print(self.pipe.device)
228
 
229
  @spaces.GPU
230
  def generate_design(self, empty_room_image: Image, prompt: str, guidance_scale: int = 10, num_steps: int = 50, strength: float =0.9, img_size: int = 640) -> Image:
 
126
  feature_extractor: AutoImageProcessor,
127
  depth_estimator: AutoModelForDepthEstimation
128
  ) -> Image:
129
+ image_to_depth = feature_extractor(images=image, return_tensors="pt")#.to(device)
130
  with torch.no_grad():
131
  depth_map = depth_estimator(**image_to_depth).predicted_depth
132
 
 
180
  """ Produces random noise images """
181
  def __init__(self):
182
  """ Initialize your model(s) here """
 
183
  #os.environ['HF_HUB_OFFLINE'] = "True"
184
  controlnet_depth= ControlNetModel.from_pretrained(
185
  "controlnet_depth", torch_dtype=dtype, use_safetensors=True)
 
198
  weight_name="ip-adapter_sd15.bin")
199
  self.pipe.set_ip_adapter_scale(0.4)
200
  self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
 
201
  self.pipe = self.pipe.to(device)
 
202
  self.guide_pipe = StableDiffusionXLPipeline.from_pretrained("segmind/SSD-1B",
203
  torch_dtype=dtype, use_safetensors=True, variant="fp16")
204
  self.guide_pipe = self.guide_pipe.to(device)
 
212
  self.depth_feature_extractor, self.depth_estimator = get_depth_pipeline()
213
  self.depth_estimator = self.depth_estimator.to(device)
214
 
 
 
 
 
 
 
 
 
 
 
215
 
216
  @spaces.GPU
217
  def generate_design(self, empty_room_image: Image, prompt: str, guidance_scale: int = 10, num_steps: int = 50, strength: float =0.9, img_size: int = 640) -> Image: