wildoctopus committed
Commit c396ac7 · verified · 1 Parent(s): 6984480

Update app.py

Files changed (1)
  1. app.py +38 -80
app.py CHANGED
@@ -6,101 +6,59 @@ from process import load_seg_model, get_palette, generate_mask
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
-def read_content(file_path: str) -> str:
-    """Read file content with error handling"""
-    try:
-        with open(file_path, 'r', encoding='utf-8') as f:
-            return f.read()
-    except FileNotFoundError:
-        print(f"Warning: File {file_path} not found")
-        return ""
-    except Exception as e:
-        print(f"Error reading file {file_path}: {str(e)}")
-        return ""
-
-def initialize_and_load_models():
-    """Initialize and load models with error handling"""
-    try:
-        checkpoint_path = 'model/cloth_segm.pth'
-        if not os.path.exists(checkpoint_path):
-            raise FileNotFoundError(f"Model checkpoint not found at {checkpoint_path}")
-        return load_seg_model(checkpoint_path, device=device)
-    except Exception as e:
-        print(f"Error loading model: {str(e)}")
-        return None
-
-net = initialize_and_load_models()
-if net is None:
-    raise RuntimeError("Failed to load model - check logs for details")
-
-palette = get_palette(4)
+# Initialize models
+try:
+    checkpoint_path = 'model/cloth_segm.pth'
+    if not os.path.exists(checkpoint_path):
+        raise FileNotFoundError(f"Model checkpoint not found at {checkpoint_path}")
+    net = load_seg_model(checkpoint_path, device=device)
+    palette = get_palette(4)
+except Exception as e:
+    raise RuntimeError(f"Failed to initialize models: {str(e)}")
 
 def run(img):
-    """Process image with error handling"""
     if img is None:
-        raise gr.Error("No image uploaded")
+        raise gr.Error("Please upload an image first")
     try:
-        cloth_seg = generate_mask(img, net=net, palette=palette, device=device)
-        if cloth_seg is None:
-            raise gr.Error("Failed to generate mask")
-        return cloth_seg
+        return generate_mask(img, net=net, palette=palette, device=device)
     except Exception as e:
         raise gr.Error(f"Error processing image: {str(e)}")
 
-# CSS styling
-css = '''
-.container {max-width: 1150px;margin: auto;padding-top: 1.5rem}
-#image_upload{min-height:400px}
-#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 400px}
-.footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5}
-.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white}
-.dark .footer {border-color: #303030}
-.dark .footer>p {background: #0b0f19}
-.acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%}
-#image_upload .touch-none{display: flex}
-'''
-
-# Collect example images
+# Handle examples
 image_dir = 'input'
-image_list = []
+examples = []
 if os.path.exists(image_dir):
-    image_list = [os.path.join(image_dir, file) for file in os.listdir(image_dir) if file.lower().endswith(('.png', '.jpg', '.jpeg'))]
-    image_list.sort()
-examples = [[img] for img in image_list]
-
-with gr.Blocks(css=css) as demo:
-    gr.HTML(read_content("header.html"))
+    examples = [
+        [os.path.join(image_dir, f)]
+        for f in sorted(os.listdir(image_dir))
+        if f.lower().endswith(('.png', '.jpg', '.jpeg'))
+    ]
 
+# Create interface
+with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
-            image = gr.Image(elem_id="image_upload", type="pil", label="Input Image")
-
+            input_image = gr.Image(label="Input Image", type="pil")
         with gr.Column():
-            image_out = gr.Image(label="Output", elem_id="output-img")
-
+            output_image = gr.Image(label="Segmentation Result")
+
     with gr.Row():
         gr.Examples(
             examples=examples,
-            inputs=[image],
-            label="Examples - Input Images",
-            examples_per_page=12
+            inputs=[input_image],
+            outputs=[output_image],
+            fn=run,
+            cache_examples=True,
+            label="Example Images"
         )
-    btn = gr.Button("Run!", variant="primary")
-
-    btn.click(fn=run, inputs=[image], outputs=[image_out])
-
-    gr.HTML(
-        """
-        <div class="footer">
-            <p>Model by <a href="" style="text-decoration: underline;" target="_blank">WildOctopus</a> - Gradio Demo by 🤗 Hugging Face</p>
-        </div>
-        <div class="acknowledgments">
-            <p><h4>ACKNOWLEDGEMENTS</h4></p>
-            <p>U2net model is from original u2net repo. Thanks to <a href="https://github.com/xuebinqin/U-2-Net" target="_blank">Xuebin Qin</a>.</p>
-            <p>Codes modified from <a href="https://github.com/levindabhi/cloth-segmentation" target="_blank">levindabhi/cloth-segmentation</a></p>
-        </div>
-        """
-    )
+
+    submit_btn = gr.Button("Segment", variant="primary")
+    submit_btn.click(fn=run, inputs=input_image, outputs=output_image)
 
-# For Hugging Face Spaces, use launch() without share=True
-demo.launch()
+# Launch with appropriate settings
+try:
+    demo.launch(server_name="0.0.0.0", server_port=7860)
+except Exception as e:
+    print(f"Error launching app: {str(e)}")
+    # Fallback with sharing enabled
+    demo.launch(share=True)
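
For reference, a minimal sketch of how the updated segmentation pipeline can be exercised outside the Gradio UI, using the same load_seg_model / get_palette / generate_mask calls as the new app.py. It assumes process.py and the model/cloth_segm.pth checkpoint from this repo are available locally; the input path and the save step are illustrative only.

import torch
from PIL import Image
from process import load_seg_model, get_palette, generate_mask

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Same loading sequence as the updated app.py
net = load_seg_model('model/cloth_segm.pth', device=device)
palette = get_palette(4)

# Hypothetical input path; any clothing photo should work
img = Image.open('input/example.jpg').convert('RGB')
cloth_seg = generate_mask(img, net=net, palette=palette, device=device)

# The app displays the result via gr.Image; saving here assumes a PIL image is returned
cloth_seg.save('cloth_seg.png')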