import os

# Environment setup: install mmcv-full via openmim at startup.
# The remaining installs are kept commented out for reference.
# os.system('pip3 install openmim')
os.system('mim install mmcv-full==1.7.0')
# os.system('pip3 install mmpose')
# os.system('pip3 install mmdet')
# os.system('pip3 install gradio==3.19.1')
# os.system('pip3 install psutil')

from demo.model import Model_all
import gradio as gr
from demo.demos import (
    create_demo_keypose, create_demo_sketch, create_demo_draw,
    create_demo_seg, create_demo_depth, create_demo_depth_keypose,
    create_demo_color, create_demo_color_sketch, create_demo_openpose,
    create_demo_style_sketch, create_demo_canny,
)
import torch
import subprocess
import shlex
from huggingface_hub import hf_hub_url

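# Checkpoints to fetch from the Hugging Face Hub: T2I-Adapter weights,
# the Stable Diffusion v1.4 base model, and the Anything v4.0 model.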
urls = {
    'TencentARC/T2I-Adapter': [
        'models/t2iadapter_keypose_sd14v1.pth',
        'models/t2iadapter_color_sd14v1.pth',
        'models/t2iadapter_openpose_sd14v1.pth',
        'models/t2iadapter_seg_sd14v1.pth',
        'models/t2iadapter_sketch_sd14v1.pth',
        'models/t2iadapter_depth_sd14v1.pth',
        'third-party-models/body_pose_model.pth',
        'models/t2iadapter_style_sd14v1.pth',
        'models/t2iadapter_canny_sd14v1.pth',
    ],
    'CompVis/stable-diffusion-v-1-4-original': ['sd-v1-4.ckpt'],
    'andite/anything-v4.0': ['anything-v4.0-pruned.ckpt', 'anything-v4.0.vae.pt'],
}
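
# Third-party weights: a Faster R-CNN detector and an HRNet pose model from
# OpenMMLab, plus a DeepLab v2 model trained on COCO-Stuff for segmentation.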
urls_mmpose = [
    'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
    'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth',
    'https://github.com/kazuto1011/deeplab-pytorch/releases/download/v1.0/deeplabv2_resnet101_msc-cocostuff164k-100000.pth'
]
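
# Download any checkpoint that is not already present in the local 'models' directory.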
os.makedirs('models', exist_ok=True)
for repo, files in urls.items():
    for file in files:
        url = hf_hub_url(repo, file)
        name_ckp = url.split('/')[-1]
        save_path = os.path.join('models', name_ckp)
        if not os.path.exists(save_path):
            subprocess.run(shlex.split(f'wget {url} -O {save_path}'))

for url in urls_mmpose:
    name_ckp = url.split('/')[-1]
    save_path = os.path.join('models', name_ckp)
    if not os.path.exists(save_path):
        subprocess.run(shlex.split(f'wget {url} -O {save_path}'))

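# Run on GPU when available; Model_all wraps all adapter pipelines.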
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model_all(device)

DESCRIPTION = '''# T2I-Adapter

Gradio demo for **T2I-Adapter**: [[GitHub]](https://github.com/TencentARC/T2I-Adapter), [[Paper]](https://arxiv.org/abs/2302.08453).

It also supports **multiple adapters**; see the tabs below labeled **"A adapter + B adapter"**.

If T2I-Adapter is helpful, please ⭐ the [GitHub repo](https://github.com/TencentARC/T2I-Adapter) and recommend it to your friends 😊
'''

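# Build the Gradio UI: one tab per adapter, plus combined-adapter tabs.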
with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    
    gr.HTML("""<p>For faster inference without waiting in the queue, you may duplicate the Space and upgrade to a GPU in the settings.
    <br/>
    <a href="https://huggingface.co/spaces/Adapter/T2I-Adapter?duplicate=true">
    <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
    </p>""")

    with gr.Tabs():
        with gr.TabItem('Openpose'):
            create_demo_openpose(model.process_openpose)
        with gr.TabItem('Keypose'):
            create_demo_keypose(model.process_keypose)
        with gr.TabItem('Canny'):
            create_demo_canny(model.process_canny)
        with gr.TabItem('Sketch'):
            create_demo_sketch(model.process_sketch)
        with gr.TabItem('Draw'):
            create_demo_draw(model.process_draw)
        with gr.TabItem('Depth'):
            create_demo_depth(model.process_depth)
        with gr.TabItem('Depth + Keypose'):
            create_demo_depth_keypose(model.process_depth_keypose)
        with gr.TabItem('Color'):
            create_demo_color(model.process_color)
        with gr.TabItem('Color + Sketch'):
            create_demo_color_sketch(model.process_color_sketch)
        with gr.TabItem('Style + Sketch'):
            create_demo_style_sketch(model.process_style_sketch)
        with gr.TabItem('Segmentation'):
            create_demo_seg(model.process_seg)
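# Queue incoming requests and serve on all network interfaces.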
demo.queue().launch(debug=True, server_name='0.0.0.0')