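"""Gradio demo: image generation with Stable Diffusion guided by a Canny-edge ControlNet.

The app extracts Canny edges from an input image and uses them as the conditioning
image for the lllyasviel/control_v11p_sd15_canny ControlNet attached to a
Stable Diffusion 1.5 checkpoint (runwayml or Realistic Vision V5.1 noVAE).
"""
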
import random

import cv2
import gradio as gr
import numpy as np
import torch
from PIL import Image

from diffusers import (
    ControlNetModel,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    KDPM2AncestralDiscreteScheduler,
    KDPM2DiscreteScheduler,
    PNDMScheduler,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)

def canny_image(image, th1=100, th2=200):
    """Extract Canny edges from a PIL image and return a 3-channel PIL edge map."""
    image = np.array(image)
    image = cv2.Canny(image, th1, th2)  # th1 = lower threshold, th2 = upper threshold
    image = image[:, :, None]
    image = np.concatenate([image, image, image], axis=2)  # replicate to 3 channels
    return Image.fromarray(image)


def set_pipeline(model_id_repo, scheduler):
    """Build a StableDiffusionControlNetPipeline for the selected base model and sampler."""
    model_ids_dict = {
        "runwayml": "runwayml/stable-diffusion-v1-5",
        "Realistic_Vision_V5_1_noVAE": "SG161222/Realistic_Vision_V5.1_noVAE",
    }
    model_repo = model_ids_dict.get(model_id_repo)
    print("model_repo:", model_repo)

    # Load the Canny ControlNet and the selected Stable Diffusion 1.5 base model.
    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_canny",
        # torch_dtype=torch.float16,  # enable for half-precision inference on GPU
    )
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        model_repo,
        controlnet=controlnet,
        # torch_dtype=torch.float16,  # enable for half-precision inference on GPU
        use_safetensors=True,
    ).to("cpu")

    # Map UI sampler names to diffusers scheduler classes.
    scheduler_classes = {
        "DDIM": DDIMScheduler,
        "Euler": EulerDiscreteScheduler,
        "Euler a": EulerAncestralDiscreteScheduler,
        "UniPC": UniPCMultistepScheduler,
        "DPM2 Karras": KDPM2DiscreteScheduler,
        "DPM2 a Karras": KDPM2AncestralDiscreteScheduler,
        "PNDM": PNDMScheduler,
        "DPM++ 2M Karras": DPMSolverMultistepScheduler,
        "DPM++ 2M SDE Karras": DPMSolverMultistepScheduler,
    }
    scheduler_class = scheduler_classes.get(scheduler)
    if scheduler_class is not None:
        print("sampler_name:", scheduler)
        pipe.scheduler = scheduler_class.from_config(pipe.scheduler.config)
    return pipe

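# GPU sketch (an assumption about the deployment target; the app itself stays on CPU):
#   pipe = set_pipeline("Realistic_Vision_V5_1_noVAE", "Euler a").to("cuda")

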
def img_args(
    prompt,
    negative_prompt,
    image_canny,
    controlnet_conditioning_scale=1.0,
    control_guidance_start=0.0,
    control_guidance_end=1.0,
    clip_skip=0,
    model_id_repo="Realistic_Vision_V5_1_noVAE",
    scheduler="Euler a",
    num_inference_steps=30,
    guidance_scale=7.5,
    num_images_per_prompt=1,
    seed=0,
):
    """Run the ControlNet pipeline on the Canny edge map and return the generated images."""
    controlnet_conditioning_scale = float(controlnet_conditioning_scale)
    if image_canny is None:
        return None

    pipe = set_pipeline(model_id_repo, scheduler)

    # A seed of -1 means "pick a random seed"; otherwise the given seed is used.
    if seed == -1:
        seed = random.randint(0, 2564798154)
        print(f"random seed: {seed}")
    else:
        print(f"manual seed: {seed}")
    generator = torch.manual_seed(seed)

    print("Prompt:", prompt)
    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=image_canny,
        control_guidance_start=control_guidance_start,
        control_guidance_end=control_guidance_end,
        clip_skip=clip_skip,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=num_images_per_prompt,  # default 1
        generator=generator,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
    ).images
    return images

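# Hypothetical programmatic use (not wired into the UI below); "input.jpg" and the
# prompts are placeholder values:
#
#   edges = canny_image(Image.open("input.jpg"))
#   images = img_args(
#       prompt="a photorealistic portrait, best quality",
#       negative_prompt="blurry, low quality",
#       image_canny=edges,
#       seed=-1,
#   )
#   images[0].save("output.png")

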
block = gr.Blocks().queue()
block.title = "Image Generation With Canny ControlNet"
with block as image_gen:
    with gr.Column():
        with gr.Row():
            gr.Markdown("## Image Generation With Canny ControlNet")
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(type="pil", label="Input")
                prompt = gr.Textbox(placeholder="what you want to generate", label="Positive Prompt")
                negative_prompt = gr.Textbox(placeholder="what you don't want to generate", label="Negative Prompt")
            with gr.Column():
                canny_output = gr.Image(type="pil", label="Canny Input")
                canny_btn = gr.Button("Canny Image", elem_id="select_btn", variant="primary")
                with gr.Accordion(label="ControlNet Advanced Options", open=False):
                    controlnet_conditioning_scale_slider = gr.Slider(label="Control Condition Scale", minimum=0.0, maximum=2.0, value=1.0, step=0.05)
                    control_guidance_start_slider = gr.Slider(label="Control Guidance Start", minimum=0.0, maximum=1.0, value=0, step=0.1)
                    control_guidance_end_slider = gr.Slider(label="Control Guidance End", minimum=0.0, maximum=1.0, value=1, step=0.1)
                    canny_th1 = gr.Slider(label="Canny Low Threshold", minimum=0, maximum=300, value=100, step=1)
                    canny_th2 = gr.Slider(label="Canny High Threshold", minimum=0, maximum=300, value=200, step=1)
            with gr.Column():
                out_img = gr.Gallery(label="Output", show_label=True, elem_id="gallery", preview=True)
                run_btn = gr.Button("Generate", elem_id="select_btn", variant="primary")
                with gr.Accordion(label="Generation Advanced Options", open=False):
                    with gr.Row():
                        model_selection = gr.Dropdown(
                            choices=["runwayml", "Realistic_Vision_V5_1_noVAE"],
                            value="Realistic_Vision_V5_1_noVAE",
                            label="Models",
                        )
                        scheduler_selection = gr.Dropdown(
                            choices=["DDIM", "Euler", "Euler a", "UniPC", "DPM2 Karras", "DPM2 a Karras", "PNDM", "DPM++ 2M Karras", "DPM++ 2M SDE Karras"],
                            value="Euler a",
                            label="Scheduler",
                        )
                        guidance_scale_slider = gr.Slider(label="Guidance Scale", minimum=0, maximum=15, value=7.5, step=0.5)
                        num_images_per_prompt_slider = gr.Slider(label="Images per Prompt", minimum=1, maximum=5, value=1, step=1)
                        num_inference_steps_slider = gr.Slider(label="Inference Steps", minimum=1, maximum=150, value=30, step=1)
                        seed_slider = gr.Slider(label="Seed (-1 = random)", minimum=-1, maximum=256479815, value=-1, step=1)
                        clip_skip_slider = gr.Slider(label="Clip Skip", minimum=0, maximum=3, value=0, step=1)
    # Wire up the buttons: Canny extraction feeds the edge map into the generator.
    canny_btn.click(fn=canny_image, inputs=[input_image, canny_th1, canny_th2], outputs=[canny_output])
    run_btn.click(
        fn=img_args,
        inputs=[
            prompt,
            negative_prompt,
            canny_output,
            controlnet_conditioning_scale_slider,
            control_guidance_start_slider,
            control_guidance_end_slider,
            clip_skip_slider,
            model_selection,
            scheduler_selection,
            num_inference_steps_slider,
            guidance_scale_slider,
            num_images_per_prompt_slider,
            seed_slider,
        ],
        outputs=[out_img],
    )

image_gen.launch()