import gradio as gr
import torch
from diffusers import SemanticStableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to(device)
gen = torch.Generator(device=device)

# The NSFW checker can be overly sensitive and flag benign generations; you can
# disable it at your own risk here.
disable_safety = False

if disable_safety:
    def null_safety(images, **kwargs):
        return images, False
    pipe.safety_checker = null_safety

# Precomputed concept embeddings for the style presets offered in the UI.
style_embeddings = {
    'Concept Art': torch.load('embeddings/concept_art.pt'),
    'Animation': torch.load('embeddings/animation.pt'),
    'Character Design': torch.load('embeddings/character_design.pt'),
    'Portrait Photo': torch.load('embeddings/portrait_photo.pt'),
    'Architecture': torch.load('embeddings/architecture.pt'),
}


def infer(prompt, steps, scale, seed,
          editing_prompt_1=None, reverse_editing_direction_1=False,
          edit_warmup_steps_1=10, edit_guidance_scale_1=5, edit_threshold_1=0.95,
          editing_prompt_2=None, reverse_editing_direction_2=False,
          edit_warmup_steps_2=10, edit_guidance_scale_2=5, edit_threshold_2=0.95,
          edit_style=None, reverse_editing_direction_style=False,
          edit_warmup_steps_style=5, edit_guidance_scale_style=7, edit_threshold_style=0.8,
          edit_momentum_scale=0.5, edit_mom_beta=0.6):
    # First pass: generate the unedited image from the given seed for comparison.
    gen.manual_seed(seed)
    images = pipe(prompt, guidance_scale=scale, num_inference_steps=steps, generator=gen).images

    # Collect the per-edit settings, then drop empty edit prompts together with
    # their associated settings.
    editing_prompt = [editing_prompt_1, editing_prompt_2]
    reverse_editing_direction = [reverse_editing_direction_1, reverse_editing_direction_2]
    edit_warmup_steps = [edit_warmup_steps_1, edit_warmup_steps_2]
    edit_guidance_scale = [edit_guidance_scale_1, edit_guidance_scale_2]
    edit_threshold = [edit_threshold_1, edit_threshold_2]

    indices = [ind for ind, val in enumerate(editing_prompt) if val is None or len(val) <= 1]
    for index in sorted(indices, reverse=True):
        del editing_prompt[index]
        del reverse_editing_direction[index]
        del edit_warmup_steps[index]
        del edit_guidance_scale[index]
        del edit_threshold[index]

    # A selected style preset overrides the free-text edit prompts and is passed
    # to the pipeline as a precomputed embedding instead. Truthiness is checked
    # so that an empty style string from the example rows is treated as "none".
    editing_prompt_embeddings = None
    if edit_style:
        editing_prompt = None
        reverse_editing_direction = reverse_editing_direction_style
        edit_warmup_steps = edit_warmup_steps_style
        edit_guidance_scale = edit_guidance_scale_style
        edit_threshold = edit_threshold_style
        editing_prompt_embeddings = style_embeddings[edit_style]

    # Second pass: rerun the identical generation with semantic guidance applied.
    gen.manual_seed(seed)
    images.extend(pipe(prompt, guidance_scale=scale, num_inference_steps=steps, generator=gen,
                       editing_prompt=editing_prompt,
                       editing_prompt_embeddings=editing_prompt_embeddings,
                       reverse_editing_direction=reverse_editing_direction,
                       edit_warmup_steps=edit_warmup_steps,
                       edit_guidance_scale=edit_guidance_scale,
                       edit_threshold=edit_threshold,
                       edit_momentum_scale=edit_momentum_scale,
                       edit_mom_beta=edit_mom_beta,
                       ).images)

    return zip(images, ['Original', edit_style if edit_style else 'SEGA'])
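
# A minimal usage sketch (illustrative, not part of the app): `infer` can be
# called directly without the UI. The argument values below are assumptions
# borrowed from the example rows further down.
#
#   results = infer('a photo of a cat', 50, 7, 3, editing_prompt_1='sunglasses')
#   for image, label in results:
#       image.save(f'{label}.png')  # writes 'Original.png' and 'SEGA.png'
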
def reset_style():
    radio = gr.Radio(label='Style',
                     choices=['Concept Art', 'Animation', 'Character Design', 'Portrait Photo', 'Architecture'])
    return radio


def reset_text():
    text_1 = gr.Textbox(
        label="Edit Prompt 1",
        show_label=False,
        max_lines=1,
        placeholder="Enter your 1st edit prompt",
    ).style(
        border=(True, False, True, True),
        rounded=(True, False, False, True),
        container=False,
    )
    text_2 = gr.Textbox(
        label="Edit Prompt 2",
        show_label=False,
        max_lines=1,
        placeholder="Enter your 2nd edit prompt",
    ).style(
        border=(True, False, True, True),
        rounded=(True, False, False, True),
        container=False,
    )
    return text_1, text_2


css = """
        a {
            color: inherit;
            text-decoration: underline;
        }
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: #9d66e5;
            background: #9d66e5;
        }
        input[type='range'] {
            accent-color: #9d66e5;
        }
        .dark input[type='range'] {
            accent-color: #dfdfdf;
        }
        .container {
            max-width: 730px;
            margin: auto;
            padding-top: 1.5rem;
        }
        #gallery {
            min-height: 22rem;
            margin-bottom: 15px;
            margin-left: auto;
            margin-right: auto;
            border-bottom-right-radius: .5rem !important;
            border-bottom-left-radius: .5rem !important;
        }
        #gallery>div>.h-full {
            min-height: 20rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        #advanced-options {
            margin-bottom: 20px;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .acknowledgments h4 {
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
"""

block = gr.Blocks(css=css)

examples = [
    [
        'a photo of a cat',
        50, 7, 3,
        'sunglasses', False, 10, 5, 0.95,
        '', False, 10, 5, 0.95,
        '', False, 5, 7, 0.8,
    ],
    [
        'an image of a crowded boulevard, realistic, 4k',
        50, 7, 9,
        'crowd, crowded, people', True, 10, 8.3, 0.9,
        '', False, 10, 5, 0.95,
        '', False, 5, 7, 0.8,
    ],
    [
        'a castle next to a river',
        50, 7, 48,
        'boat on a river', False, 15, 6, 0.9,
        'monet, impression, sunrise', False, 18, 6, 0.8,
        '', False, 5, 7, 0.8,
    ],
    [
        'a portrait of a king, full body shot, 8k',
        50, 7, 33,
        'male', True, 5, 5, 0.9,
        'female', False, 5, 5, 0.9,
        '', False, 5, 7, 0.8,
    ],
    [
        'a photo of a flowerpot',
        50, 7, 2,
        'glasses', False, 12, 5, 0.975,
        '', False, 10, 5, 0.95,
        '', False, 5, 7, 0.8,
    ],
    [
        'a photo of the face of a woman',
        50, 7, 21,
        'smiling, smile', False, 15, 3, 0.99,
        'curls, wavy hair, curly hair', False, 13, 3, 0.925,
        '', False, 5, 7, 0.8,
    ],
    [
        'temple in ruines, forest, stairs, columns',
        50, 7, 11,
        '', False, 10, 5, 0.95,
        '', False, 10, 5, 0.95,
        'Animation', False, 5, 7, 0.8,
    ],
    [
        'city made out of glass',
        50, 7, 16,
        '', False, 10, 5, 0.95,
        '', False, 10, 5, 0.95,
        'Concept Art', False, 10, 8, 0.8,
    ],
    [
        'a man riding a horse',
        50, 7, 11,
        '', False, 10, 5, 0.95,
        '', False, 10, 5, 0.95,
        'Character Design', False, 11, 8, 0.9,
    ],
]
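
# Each example row above fills the UI inputs in order. The grouping shown here
# is an assumed reading derived from the `infer` signature, noted for clarity:
#   [prompt, steps, scale, seed,
#    edit prompt 1, reverse 1, warmup 1, guidance 1, threshold 1,
#    edit prompt 2, reverse 2, warmup 2, guidance 2, threshold 2,
#    style, reverse style, warmup style, guidance style, threshold style]
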
with block:
    gr.HTML(
        """
            <p>
                Interact with semantic concepts during the diffusion process. Details can be found in the paper
                <i>SEGA: Instructing Diffusion using Semantic Dimensions</i>.
            </p>
            <p>
                Simply use the edit prompts to make arbitrary changes to the generation.
            </p>
            <p>
                Created by Manuel Brack and Patrick Schramowski at AIML Lab.
            </p>