import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_url, cached_download
import PIL.Image  # "import PIL" alone does not guarantee the Image submodule is loaded
import onnx
import onnxruntime

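# Resolve the model file on the Hugging Face Hub and download it into the local cache.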
config_file_url = hf_hub_url("Jacopo/ToonClip", filename="model.onnx")
model_file = cached_download(config_file_url)

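# Load the ONNX graph and validate it before building a runtime session.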
onnx_model = onnx.load(model_file)
onnx.checker.check_model(onnx_model)

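# Create the ONNX Runtime session; intra_op_num_threads sets operator-level CPU parallelism.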
opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 16
ort_session = onnxruntime.InferenceSession(model_file, sess_options=opts)

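# Look up the graph's input/output tensor names once, for use in session.run().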
input_name = ort_session.get_inputs()[0].name
output_name = ort_session.get_outputs()[0].name

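# Per-channel normalization for CHW or batched NCHW float arrays.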
def normalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # x = (x - mean) / std
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] - mean[dim]) / std[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] - mean[dim]) / std[dim]

    return x 

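# Per-channel inverse of normalize, mapping model output back to image statistics.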
def denormalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # x = (x * std) + mean
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] * std[dim]) + mean[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] * std[dim]) + mean[dim]

    return x 

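# Full inference pass: PIL image in, stylized PIL image out.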
def nogan(input_img):
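    # Preprocess: HWC uint8 image -> batched NCHW float32 tensor scaled to [0, 1].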
    i = np.asarray(input_img)
    i = i.astype("float32")
    i = np.transpose(i, (2, 0, 1))
    i = np.expand_dims(i, 0)
    i = i / 255.0
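    # Normalize with the standard ImageNet channel statistics.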
    i = normalize(i, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))

    ort_outs = ort_session.run([output_name], {input_name: i})
    output = ort_outs[0][0]  # first (and only) output tensor, first batch element

    output = denormalize(output, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    output = output * 255.0
    output = np.clip(output, 0, 255)  # avoid uint8 wrap-around on out-of-range values
    output = output.astype('uint8')
    output = np.transpose(output, (1, 2, 0))
    output_image = PIL.Image.fromarray(output, 'RGB')

    return output_image

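# UI copy: title, short description, and an HTML article with credits and sample images.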
title = "ToonClip Comics Hero Demo"
description = """
Gradio demo for ToonClip, a UNet++ network with a MobileNet v3 backbone, optimized for mobile frameworks and trained in PyTorch Lightning with a VGG perceptual feature loss.
To use it, simply upload an image with a face or choose an example from the list below.
"""
article = """
<style> 
	.boxes{ 
	    width:50%; 
	    float:left; 
    } 
	#mainDiv{ 
		width:50%; 
        margin:auto; 
	} 
	img{ 
		max-width:100%; 
	}
</style> 
<p style='text-align: center'>The "ToonClip" model was trained by <a href='https://twitter.com/JacopoMangia' target='_blank'>Jacopo Mangiavacchi</a> and is available in the <a href='https://github.com/jacopomangiavacchi/ComicsHeroMobileUNet' target='_blank'>ComicsHeroMobileUNet GitHub repo</a></p>
<p style='text-align: center'>The "Comics Hero" dataset used to train this model was produced by <a href='https://linktr.ee/Norod78' target='_blank'>Doron Adler</a> and is available in the <a href='https://github.com/Norod/U-2-Net-StyleTransfer' target='_blank'>Comics hero U2Net GitHub repo</a></p>
<p style='text-align: center'>The "ToonClip" iOS app, which uses a CoreML version of this model, is available on the Apple App Store: <a href='https://apps.apple.com/us/app/toonclip/id1536285338' target='_blank'>ToonClip</a></p>
<p style='text-align: center'>Samples:</p>
<p>
  <div id='mainDiv'> 
    <div id='divOne' class='boxes'> 
	<img src='https://hf.space/gradioiframe/Jacopo/ComicsHeroMobileUNet/file/Example01.jpeg' alt='Example01'/>
    </div> 
    <div id='divTwo' class='boxes'> 
	<img src='https://hf.space/gradioiframe/Jacopo/ComicsHeroMobileUNet/file/Output01.png' alt='Output01'/>
    </div> 
  </div>
</p>
"""
examples = [['Example01.jpeg']]

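# Assemble the Gradio interface; shape=(1024, 1024) resizes uploads to the model's expected input size.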
iface = gr.Interface(
    nogan, 
    gr.inputs.Image(type="pil", shape=(1024, 1024)),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
    live=True)

iface.launch()