import os

download_repo_loc = "./models/image_encoder/"
os.system("pip install -U peft")
# os.system(f"wget -O {download_repo_loc}config.json https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/config.json?download=true")
# os.system(f"wget -O {download_repo_loc}model.safetensors https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors?download=true")
# os.system(f"wget -O {download_repo_loc}pytorch_model.bin https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin?download=true")

import spaces
import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline
from PIL import Image
from ip_adapter import IPAdapterXL

base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
device = "cuda"
image_encoder_path = download_repo_loc  # "sdxl_models/image_encoder"
ip_ckpt = "./models/ip-adapter_sdxl.bin"

# Load the SDXL pipeline once at startup; the IP-Adapter wrapper is created per request.
pipe = StableDiffusionXLPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    add_watermarker=False,
)


# Generate image variations from an image prompt (plus optional text prompt).
@spaces.GPU(enable_queue=True)
def create_image(image_pil, target, prompt, n_prompt, scale,
                 guidance_scale, num_samples, num_inference_steps, seed):
    # Load the IP-Adapter with the attention blocks selected in the UI.
    if target == "Load original IP-Adapter":
        # target_blocks=["blocks"] injects the image prompt into all blocks (original IP-Adapter).
        ip_model = IPAdapterXL(pipe, image_encoder_path, ip_ckpt, device,
                               target_blocks=["blocks"])
    elif target == "Load only style blocks":
        # target_blocks=["up_blocks.0.attentions.1"] injects into the style blocks only.
        ip_model = IPAdapterXL(pipe, image_encoder_path, ip_ckpt, device,
                               target_blocks=["up_blocks.0.attentions.1"])
    elif target == "Load style+layout block":
        # target_blocks=["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"] for style + layout blocks.
        ip_model = IPAdapterXL(pipe, image_encoder_path, ip_ckpt, device,
                               target_blocks=["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"])

    image_pil = image_pil.resize((512, 512))
    images = ip_model.generate(
        pil_image=image_pil,
        prompt=prompt,
        negative_prompt=n_prompt,
        scale=scale,
        guidance_scale=guidance_scale,
        num_samples=num_samples,
        num_inference_steps=num_inference_steps,
        seed=seed,
        # neg_content_prompt="a rabbit",
        # neg_content_scale=0.5,
    )
    # images[0].save("result.png")
    del ip_model
    return images


DESCRIPTION = """
# InstantStyle: Free Lunch towards Style-Preserving in Text-to-Image Generation
**Demo by ameer azam - [Twitter](https://twitter.com/Ameerazam18) - [GitHub](https://github.com/AMEERAZAM08) - [Hugging Face](https://huggingface.co/ameerazam08)**
This is a demo of https://github.com/InstantStyle/InstantStyle.
"""

block = gr.Blocks(css="footer {visibility: hidden}").queue()
with block:
    with gr.Row():
        with gr.Column():
            gr.Markdown("##