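# Gradio demo for the Imagic Stable Diffusion community pipeline:
# text-based editing of a real input image with a single prompt.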
import gradio as gr
from PIL import Image
from io import BytesIO
import torch
import os
from diffusers import DiffusionPipeline, DDIMScheduler
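# The Hugging Face auth token is read from the environment (this Space assumes an
# HF_TOKEN_SD secret is set) so the gated SD v1.4 weights can be downloaded.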
MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')
has_cuda = torch.cuda.is_available()
device = torch.device('cuda' if has_cuda else 'cpu')
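# Load Stable Diffusion v1.4 with the "imagic_stable_diffusion" community pipeline.
# Imagic needs a DDIM scheduler; these beta settings follow the diffusers community example.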
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    safety_checker=None,
    use_auth_token=MY_SECRET_TOKEN,
    custom_pipeline="imagic_stable_diffusion",
    scheduler=DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False),
).to(device)
#generator = torch.Generator("cuda").manual_seed(0)
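# Imagic runs in two stages: pipe.train() optimizes a text embedding and fine-tunes the
# diffusion model on the single input image, then calling the pipeline with `alpha`
# interpolates between the optimized and target text embeddings to produce the edit.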
def infer(prompt, init_image):
    # gr.Image(type="filepath") passes a file path, but the pipeline expects a PIL image;
    # resize to 512x512, the resolution Stable Diffusion v1.4 was trained at.
    init_image = Image.open(init_image).convert("RGB").resize((512, 512))
    pipe.train(
        prompt,
        init_image,
        guidance_scale=7.5,
        num_inference_steps=50)
    # alpha=1 follows the target prompt embedding fully.
    res = pipe(alpha=1)
    return res.images[0]
title = """
<div style="text-align: center; max-width: 650px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
"
>
<h1 style="font-weight: 900; margin-bottom: 7px;">
Imagic Stable Diffusion • Community Pipeline
</h1>
</div>
<br /><img src="https://user-images.githubusercontent.com/788417/196388568-4ee45edd-e990-452c-899f-c25af32939be.png" />
<p style="margin-bottom: 10px; font-size: 94%">
Text-Based Real Image Editing with Diffusion Models
<br />This pipeline aims to implement <a href="https://arxiv.org/abs/2210.09276" target="_blank">this paper</a> for Stable Diffusion, allowing real images to be edited with a text prompt.
</p>
</div>
"""
article = """
<div class="footer">
<p><a href="https://github.com/huggingface/diffusers/tree/main/examples/community#imagic-stable-diffusion" target="_blank">Community pipeline</a> baked by <a href="https://github.com/apolinario" style="text-decoration: underline;" target="_blank">@apolinario</a> - Gradio Demo by 🤗 Sylvain Filoni
</p>
</div>
"""
css = '''
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
'''
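# Minimal Gradio UI: a prompt box and an image upload wired to infer(), framed by the HTML header and footer.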
with gr.Blocks(css=css) as block:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        prompt_input = gr.Textbox(label="Prompt", placeholder="Describe the image, including what you want to change about the subject")
        image_init = gr.Image(source="upload", type="filepath", label="Input Image")
        submit_btn = gr.Button("Submit")
        image_output = gr.Image(label="Result")
        gr.HTML(article)
        submit_btn.click(fn=infer, inputs=[prompt_input, image_init], outputs=[image_output])
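# Queue requests: each Imagic run fine-tunes the model, so a single call is long-running.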
block.queue(max_size=32, concurrency_count=20).launch(show_api=False)