ML-Motivators committed on
Commit 6b77e73 · verified · 1 Parent(s): 107b85f

Update app.py

Files changed (1)
  1. app.py +8 -10
app.py CHANGED
@@ -1,7 +1,9 @@
+import os
+import torch
 import gradio as gr
-
-
+import numpy as np
 from PIL import Image
+import spaces # Move this to the top to avoid CUDA initialization errors
 from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
 from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
 from src.unet_hacked_tryon import UNet2DConditionModel
@@ -10,24 +12,20 @@ from transformers import (
     CLIPVisionModelWithProjection,
     CLIPTextModel,
     CLIPTextModelWithProjection,
+    AutoTokenizer
 )
-from diffusers import DDPMScheduler,AutoencoderKL
+from diffusers import DDPMScheduler, AutoencoderKL
 from typing import List
-
-import torch
-import os
-from transformers import AutoTokenizer
-import spaces
-import numpy as np
 from utils_mask import get_mask_location
 from torchvision import transforms
 import apply_net
 from preprocess.humanparsing.run_parsing import Parsing
 from preprocess.openpose.run_openpose import OpenPose
-from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
+from detectron2.data.detection_utils import convert_PIL_to_numpy, _apply_exif_orientation
 from torchvision.transforms.functional import to_pil_image


+
 def pil_to_binary_mask(pil_image, threshold=0):
     np_image = np.array(pil_image)
     grayscale_image = Image.fromarray(np_image).convert("L")
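
The comment added next to `import spaces` refers to a constraint of the Hugging Face `spaces` package on ZeroGPU hardware: it generally has to be imported before anything that might initialize CUDA, and GPU work is then done inside a function decorated with `spaces.GPU`. A minimal, self-contained sketch of that pattern, assuming the standard ZeroGPU decorator API (the demo function below is illustrative only, not part of this app):

import spaces      # imported first, before torch or anything else touches CUDA
import torch
import gradio as gr

@spaces.GPU        # a GPU is attached only while this function runs
def describe_device(prompt):
    # CUDA calls are safe inside the decorated function
    return f"{prompt} -> running on {torch.cuda.get_device_name(0)}"

gr.Interface(fn=describe_device, inputs="text", outputs="text").launch()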