import os

import gradio as gr

from process import load_seg_model, get_palette, generate_mask

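# Run inference on CPU so the demo works on GPU-less hosts (e.g. free Spaces hardware).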
device = 'cpu'

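# Helper: read an HTML fragment (the page header) from disk.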
def read_content(file_path: str) -> str:
    with open(file_path, 'r', encoding='utf-8') as f:
        return f.read()

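# Build the segmentation network from the bundled checkpoint.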
def initialize_and_load_models():
    checkpoint_path = 'model/cloth_segm.pth'
    return load_seg_model(checkpoint_path, device=device)

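# Load the model once at startup so every request reuses the same network.
# get_palette(4) presumably returns display colors for the four classes
# (background, upper-, lower- and full-body garments), as in
# levindabhi/cloth-segmentation.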
net = initialize_and_load_models()
palette = get_palette(4)

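# Inference entry point: returns the colorized segmentation mask for one image.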
def run(img):
    cloth_seg = generate_mask(img, net=net, palette=palette, device=device)
    return cloth_seg

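# Custom CSS: enlarge the image-upload widget and style the footer for light/dark themes.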
css = '''
.container {max-width: 1150px;margin: auto;padding-top: 1.5rem}
#image_upload{min-height:400px}
#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 400px}
.footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5}
.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white}
.dark .footer {border-color: #303030}
.dark .footer>p {background: #0b0f19}
.acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%}
#image_upload .touch-none{display: flex}
'''

image_dir = 'input'
# Collect the sample images bundled in `input/` for the Examples gallery,
# skipping any non-image files that may sit alongside them.
image_list = sorted(
    os.path.join(image_dir, file)
    for file in os.listdir(image_dir)
    if file.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))
)

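# Assemble the UI: input-method toggle, two image inputs, an output image, and a run button.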
with gr.Blocks(css=css) as demo:
    gr.HTML(read_content("header.html"))

    input_method = gr.Radio(
        choices=["Upload Image", "Use Webcam"],
        label="Choose Input Method",
        value="Upload Image"
    )

    upload_image = gr.Image(label="Upload Image", type="pil", sources=["upload"], visible=True)
    # gr.Camera is not a Gradio component; a webcam-sourced gr.Image is the
    # supported way to capture camera input (Gradio 4.x `sources` API).
    webcam_input = gr.Image(label="Webcam Image", type="pil", sources=["webcam"], visible=False)

    image_out = gr.Image(label="Output", elem_id="output-img")
    btn = gr.Button("Run!")

    # Examples only apply to uploaded images
    gr.Examples(
        examples=image_list,
        inputs=[upload_image],
        label="Examples - Input Images",
        examples_per_page=12
    )

    # Logic to toggle input components based on radio selection
    def toggle_inputs(choice):
        return (
            gr.update(visible=choice == "Upload Image"),
            gr.update(visible=choice == "Use Webcam")
        )

    input_method.change(
        fn=toggle_inputs,
        inputs=input_method,
        outputs=[upload_image, webcam_input]
    )

    # Button logic: run on whichever input component is active.
    def conditional_run(uploaded, webcam, method):
        img = uploaded if method == "Upload Image" else webcam
        if img is None:
            raise gr.Error("Please provide an image first.")
        return run(img)

    btn.click(
        fn=conditional_run,
        inputs=[upload_image, webcam_input, input_method],
        outputs=[image_out]
    )

    gr.HTML(
        """
        <div class="footer">
            <p>Model by <a href="" style="text-decoration: underline;" target="_blank">WildOctopus</a> - Gradio Demo by 🤗 Hugging Face</p>
        </div>
        <div class="acknowledgments">
            <h4>ACKNOWLEDGEMENTS</h4>
            <p>The U2-Net model comes from the original U-2-Net repository. Thanks to <a href="https://github.com/xuebinqin/U-2-Net" target="_blank">Xuebin Qin</a>.</p>
            <p>Code adapted from <a href="https://github.com/levindabhi/cloth-segmentation" target="_blank">levindabhi/cloth-segmentation</a>.</p>
        </div>
        """
    )