"""QR Code AI Art Generator.

Gradio app that generates images with Stable Diffusion v1.5 guided by a
QR-code ControlNet, so the result blends with the text prompt while (ideally)
still scanning as a QR code. The QR code doubles as the img2img init image.
"""

import os

# Must be set BEFORE torch initializes CUDA (i.e. before the model loads
# below), otherwise the allocator config is silently ignored.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import io
from multiprocessing import cpu_count
from pathlib import Path

import gradio as gr
import qrcode
import requests
import spaces
import torch
from PIL import Image
from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionControlNetImg2ImgPipeline,
    ControlNetModel,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    DEISMultistepScheduler,
    HeunDiscreteScheduler,
    EulerDiscreteScheduler,
)

qrcode_generator = qrcode.QRCode(
    version=1,
    error_correction=qrcode.constants.ERROR_CORRECT_H,
    box_size=10,
    border=4,
)

controlnet = ControlNetModel.from_pretrained(
    "DionTimmer/controlnet_qrcode-control_v1p_sd15", torch_dtype=torch.float16
).to("cuda")

pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")
pipe.enable_xformers_memory_efficient_attention()


def resize_for_condition_image(
    input_image: Image.Image,
    resolution: int,
    background_image: Image.Image | None = None,
    canvas_width: int | None = None,
    canvas_height: int | None = None,
) -> Image.Image:
    """Composite the QR code onto a canvas sized for the diffusion model.

    The QR code is scaled to the smaller canvas dimension (keeping it square),
    then pasted at 2/3 of the canvas width, vertically centered, using its own
    alpha channel as the paste mask so transparent QR regions show the canvas.

    Args:
        input_image: The QR code image; converted to RGBA internally.
        resolution: Unused; kept for backward call compatibility.
        background_image: Optional backdrop, stretched to the canvas size.
            When omitted, a fully transparent white canvas is used.
        canvas_width: Canvas width; defaults to the QR code's width.
        canvas_height: Canvas height; defaults to the QR code's height.

    Returns:
        The composited RGBA canvas.
    """
    input_image = input_image.convert("RGBA")
    qr_w, qr_h = input_image.size

    canvas_size = (canvas_width or qr_w, canvas_height or qr_h)
    if background_image:
        background_image = background_image.convert("RGBA")
        canvas = background_image.resize(canvas_size, Image.LANCZOS)
    else:
        # Fully transparent white canvas (alpha 0).
        canvas = Image.new("RGBA", canvas_size, (255, 255, 255, 0))

    # Scale the QR code to the smaller canvas dimension so it stays square and
    # fully visible. (The original `768 / min(1024, 768)` ratio was exactly 1.0.)
    qr_target_size = min(canvas.size)
    input_image = input_image.resize(
        (qr_target_size, qr_target_size), resample=Image.LANCZOS
    )
    qr_w, qr_h = input_image.size

    # Anchor the QR code at 2/3 of the canvas width, vertically centered.
    qr_x = int(canvas.size[0] * (2 / 3)) - (qr_w // 2)
    qr_y = (canvas.size[1] - qr_h) // 2

    # Use the QR code's alpha channel as the paste mask.
    canvas.paste(input_image, (qr_x, qr_y), mask=input_image.split()[3])

    return canvas


SAMPLER_MAP = {
    # NOTE: the diffusers kwarg is `use_karras_sigmas`; the previous
    # `use_karras=True` was an unknown kwarg that `from_config` silently
    # ignored, so the "Karras" entries were not actually using Karras sigmas.
    "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(
        config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"
    ),
    "DPM++ Karras": lambda config: DPMSolverMultistepScheduler.from_config(
        config, use_karras_sigmas=True
    ),
    "Heun": lambda config: HeunDiscreteScheduler.from_config(config),
    "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
    "DDIM": lambda config: DDIMScheduler.from_config(config),
    "DEIS": lambda config: DEISMultistepScheduler.from_config(config),
}


@spaces.GPU()
def inference(
    qr_code_content: str,
    prompt: str,
    negative_prompt: str,
    guidance_scale: float = 10.0,
    controlnet_conditioning_scale: float = 2.0,
    strength: float = 0.8,
    seed: int = -1,
    init_image: Image.Image | None = None,
    qrcode_image: Image.Image | None = None,
    background_image: Image.Image | None = None,
    use_qr_code_as_init_image=True,
    sampler="DPM++ Karras SDE",
    width: int = 768,
    height: int = 768,
):
    """Generate a stylized QR code image.

    Either generates a QR code from `qr_code_content` or uses the uploaded
    `qrcode_image`; the (composited) QR code is used as BOTH the img2img init
    image and the ControlNet conditioning image.

    Args:
        qr_code_content: Text/URL to encode; takes priority over `qrcode_image`.
        prompt: Positive prompt guiding the generation (required).
        negative_prompt: Negative prompt.
        guidance_scale: Classifier-free guidance scale.
        controlnet_conditioning_scale: ControlNet strength.
        strength: img2img noise strength (how far from the init image to drift).
        seed: RNG seed; -1 means unseeded.
        init_image: Ignored — overwritten by the QR code (gradio-examples hack).
        qrcode_image: Optional pre-made QR code image.
        background_image: Optional backdrop composited behind the QR code.
        use_qr_code_as_init_image: UI-only flag; the QR code is always the init.
        sampler: Key into SAMPLER_MAP.
        width: Output width in pixels.
        height: Output height in pixels.

    Returns:
        The generated PIL image.

    Raises:
        gr.Error: If the prompt is missing, or neither QR content nor a QR
            image was provided.
    """
    # `not prompt` also covers None, which `== ""` did not.
    if not prompt:
        raise gr.Error("Prompt is required")

    if qrcode_image is None and not qr_code_content:
        raise gr.Error("QR Code Image or QR Code Content is required")

    pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config)

    generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()

    # Guard qrcode_image before dereferencing .size (it may be None when
    # qr_code_content is falsy only if the earlier check raised, but a 1x1
    # placeholder image from gradio examples also triggers generation).
    if qr_code_content or (qrcode_image is not None and qrcode_image.size == (1, 1)):
        print("Generating QR Code from content")
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_H,
            box_size=10,
            border=4,
        )
        qr.add_data(qr_code_content)
        qr.make(fit=True)

        qrcode_image = qr.make_image(
            fill_color="black", back_color="white"
        ).convert("RGBA")

        # Make the white QR background 50% transparent so the backdrop
        # shows through after compositing.
        data = qrcode_image.getdata()
        new_data = [
            (item[0], item[1], item[2], 128) if item[:3] == (255, 255, 255) else item
            for item in data
        ]
        qrcode_image.putdata(new_data)

        qrcode_image = resize_for_condition_image(
            qrcode_image, 768, background_image, width, height
        )
    else:
        print("Using QR Code Image")
        qrcode_image = resize_for_condition_image(
            qrcode_image, 768, background_image, width, height
        )

    # hack due to gradio examples
    init_image = qrcode_image

    out = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=qrcode_image,
        control_image=qrcode_image,  # type: ignore
        width=width,
        height=height,
        guidance_scale=float(guidance_scale),
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),  # type: ignore
        generator=generator,
        strength=float(strength),
        num_inference_steps=40,
    )
    return out.images[0]  # type: ignore


with gr.Blocks() as blocks:
    gr.Markdown(
        """
# QR Code AI Art Generator

## 💡 How to generate beautiful QR codes

We use the QR code image as the initial image **and** the control image, which allows you to generate
QR Codes that blend in **very naturally** with your provided prompt.
The strength parameter defines how much noise is added to your QR code and the noisy QR code is then guided towards both your prompt and the QR code image via Controlnet.
Use a high strength value between 0.8 and 0.95 and choose a conditioning scale between 0.6 and 2.0.
This mode arguably achieves the aesthetically most appealing QR code images, but also requires more tuning of the controlnet conditioning scale and the strength value. If the generated image
looks way too much like the original QR code, make sure to gently increase the *strength* value and reduce the *conditioning* scale. Also check out the examples below.

model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15

Duplicate Space for no queue on your own hardware.
"""
    )

    with gr.Row():
        with gr.Column():
            qr_code_content = gr.Textbox(
                label="QR Code Content",
                info="QR Code Content or URL",
                value="",
            )
            with gr.Accordion(label="QR Code Image (Optional)", open=False):
                qr_code_image = gr.Image(
                    label="QR Code Image (Optional). Leave blank to automatically generate QR code",
                    type="pil",
                )

            prompt = gr.Textbox(
                label="Prompt",
                info="Prompt that guides the generation towards",
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value="ugly, disfigured, low quality, blurry, nsfw",
            )
            use_qr_code_as_init_image = gr.Checkbox(
                label="Use QR code as init image",
                value=True,
                interactive=False,
                info="Whether init image should be QR code. Unclick to pass init image or generate init image with Stable Diffusion 2.1",
            )

            with gr.Accordion(label="Init Images (Optional)", open=False, visible=False) as init_image_acc:
                init_image = gr.Image(
                    label="Init Image (Optional). Leave blank to generate image with SD 2.1",
                    type="pil",
                )

            with gr.Accordion(label="Background Image (Optional)", open=False):
                background_image = gr.Image(
                    label="Background Image (Optional). Leave blank for default background",
                    type="pil",
                )

            with gr.Accordion(
                label="Params: The generated QR Code functionality is largely influenced by the parameters detailed below",
                open=True,
            ):
                width = gr.Slider(
                    minimum=64,
                    maximum=4096,
                    step=64,
                    value=768,
                    label="Width",
                )
                height = gr.Slider(
                    minimum=64,
                    maximum=4096,
                    step=64,
                    value=768,
                    label="Height",
                )
                controlnet_conditioning_scale = gr.Slider(
                    minimum=0.0,
                    maximum=5.0,
                    step=0.01,
                    value=1.1,
                    label="Controlnet Conditioning Scale",
                )
                strength = gr.Slider(
                    minimum=0.0, maximum=1.0, step=0.01, value=0.9, label="Strength"
                )
                guidance_scale = gr.Slider(
                    minimum=0.0,
                    maximum=50.0,
                    step=0.25,
                    value=7.5,
                    label="Guidance Scale",
                )
                sampler = gr.Dropdown(
                    choices=list(SAMPLER_MAP.keys()),
                    value="DPM++ Karras SDE",
                    label="Sampler",
                )
                seed = gr.Slider(
                    minimum=-1,
                    maximum=9999999999,
                    step=1,
                    value=2313123,
                    label="Seed",
                    randomize=True,
                )
            with gr.Row():
                run_btn = gr.Button("Run")
        with gr.Column():
            result_image = gr.Image(label="Result Image")

    run_btn.click(
        inference,
        inputs=[
            qr_code_content,
            prompt,
            negative_prompt,
            guidance_scale,
            controlnet_conditioning_scale,
            strength,
            seed,
            init_image,
            qr_code_image,
            background_image,
            use_qr_code_as_init_image,
            sampler,
            width,
            height,
        ],
        outputs=[result_image],
        concurrency_limit=1,
    )

    # NOTE: gr.Examples passes its `inputs` components positionally to `fn`.
    # The list therefore MUST include `background_image` between
    # `qr_code_image` and `use_qr_code_as_init_image` — previously it was
    # omitted, so `True` was bound to `background_image` (crashing cached
    # example runs) and the sampler string to `use_qr_code_as_init_image`.
    gr.Examples(
        examples=[
            [
                "https://huggingface.co/",
                "A sky view of a colorful lakes and rivers flowing through the desert",
                "ugly, disfigured, low quality, blurry, nsfw",
                7.5,
                1.3,
                0.9,
                5392011833,
                None,
                None,
                None,
                True,
                "DPM++ Karras SDE",
            ],
            [
                "https://huggingface.co/",
                "Bright sunshine coming through the cracks of a wet, cave wall of big rocks",
                "ugly, disfigured, low quality, blurry, nsfw",
                7.5,
                1.11,
                0.9,
                2523992465,
                None,
                None,
                None,
                True,
                "DPM++ Karras SDE",
            ],
            [
                "https://huggingface.co/",
                "Sky view of highly aesthetic, ancient greek thermal baths in beautiful nature",
                "ugly, disfigured, low quality, blurry, nsfw",
                7.5,
                1.5,
                0.9,
                2523992465,
                None,
                None,
                None,
                True,
                "DPM++ Karras SDE",
            ],
        ],
        fn=inference,
        inputs=[
            qr_code_content,
            prompt,
            negative_prompt,
            guidance_scale,
            controlnet_conditioning_scale,
            strength,
            seed,
            init_image,
            qr_code_image,
            background_image,
            use_qr_code_as_init_image,
            sampler,
        ],
        outputs=[result_image],
        cache_examples=True,
    )

blocks.queue(max_size=20, api_open=False)
blocks.launch(share=bool(os.environ.get("SHARE", False)), show_api=False)