import torch
import requests
import rembg
import random
import gradio as gr
import numpy

from PIL import Image
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

# Load the pipeline
pipeline = DiffusionPipeline.from_pretrained(
    "sudo-ai/zero123plus-v1.1", custom_pipeline="sudo-ai/zero123plus-pipeline",
    torch_dtype=torch.float16
)

# Feel free to tune the scheduler!
# The `timestep_spacing` parameter is not supported in older versions of
# `diffusers`, so there may be performance degradation.
# We recommend using `diffusers==0.20.2`.
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipeline.scheduler.config, timestep_spacing='trailing'
)
pipeline.to('cuda:0')
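
# A minimal, hedged check (not part of the original script): warn when the
# installed `diffusers` is older than the recommended 0.20.2, in which case
# `timestep_spacing` may be ignored. Assumes `packaging` is importable
# (it ships as a dependency of `diffusers`).
import diffusers
from packaging import version

if version.parse(diffusers.__version__) < version.parse("0.20.2"):
    print("Warning: diffusers < 0.20.2 may ignore `timestep_spacing='trailing'`.")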


def inference(input_img, num_inference_steps, guidance_scale, seed):
    # Load the condition image.
    cond = Image.open(input_img)
    if seed == 0:
        seed = random.randint(1, 1000000)

    # Run the pipeline!
    # For general real and synthetic images of everyday objects, around 28
    # inference steps are usually enough; for images with delicate details
    # such as faces (real or anime), 75-100 steps may be needed for the
    # details to form.
    result = pipeline(
        cond,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=torch.Generator(pipeline.device).manual_seed(int(seed)),
    ).images[0]

    return result
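
# Hedged usage sketch (commented out; "photo.png" is a hypothetical path,
# not part of the original app): calling `inference` directly with the
# ~28 steps suggested above for general objects.
# views = inference("photo.png", num_inference_steps=28, guidance_scale=4.0, seed=42)
# views.save("views.png")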

def remove_background(result):
    # The Gradio callback may hand us a PIL Image, a filepath, or a NumPy array.
    if isinstance(result, Image.Image):
        result = rembg.remove(result)
    elif isinstance(result, str):
        result = rembg.remove(Image.open(result))
    elif isinstance(result, numpy.ndarray):
        # Convert the NumPy array to a PIL Image first.
        result = rembg.remove(Image.fromarray(result))
    return result
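
# Hedged standalone sketch (commented out; hypothetical path): the helper
# accepts a PIL image, a filepath, or a NumPy array, and returns a PIL
# image whose background rembg has made transparent.
# cutout = remove_background("photo.png")
# cutout.save("photo_rgba.png")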


# Create a Gradio interface for the Zero123++ model
with gr.Blocks() as demo:
    # Display a title
    gr.HTML("<h1><center> Interactive WebUI : Zero123++ </center></h1>")
    gr.HTML("<h3><center> A Single Image to Consistent Multi-view Diffusion Base Model</center></h1>")
    gr.HTML('''<center> <a href='https://arxiv.org/abs/2310.15110' target='_blank'>ArXiv</a> - <a href='https://github.com/SUDO-AI-3D/zero123plus/tree/main' target='_blank'>Code</a> </center>''')
    with gr.Row():
        # Input section: allow users to upload an image
        with gr.Column():
            input_img = gr.Image(label='Input Image', type='filepath')

        # Output section: display the Zero123++ output image
        with gr.Column():
            output_img = gr.Image(label='Zero123++ Output')
    
    # Submit button to initiate the inference
    btn = gr.Button('Submit')

    # Advanced options section with accordion for hiding/showing
    with gr.Accordion("Advanced options:", open=False):
        rm_in_bkg = gr.Checkbox(label='Remove Input Background')
        rm_out_bkg = gr.Checkbox(label='Remove Output Background')
        num_inference_steps = gr.Slider(label="Number of Inference Steps", minimum=15, maximum=100, step=1, value=75, interactive=True)
        guidance_scale = gr.Slider(label="Classifier Free Guidance Scale", minimum=1.00, maximum=10.00, step=0.1, value=4.0, interactive=True)
        seed = gr.Number(0, label='Seed')  # 0 picks a random seed in inference()
    

    btn.click(inference, [input_img, num_inference_steps, guidance_scale, seed], output_img)
    rm_in_bkg.input(remove_background, input_img, input_img)
    rm_out_bkg.input(remove_background, output_img, output_img)
    


demo.launch(debug=True)