import gradio as gr
import json
import logging
import torch
from PIL import Image
from diffusers import DiffusionPipeline, EulerDiscreteScheduler, DPMSolverMultistepScheduler
import spaces
# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
    loras = json.load(f)
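# Each entry in loras.json is expected to supply the fields used below
# ("title", "repo", "trigger_word", "image"). The entry shown here is an
# illustrative assumption, not the actual file contents:
# {
#     "title": "Example LoRA",
#     "repo": "author/example-lora-repo",
#     "trigger_word": "example style",
#     "image": "images/example.jpg"
# }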
# Initialize the base model
base_model = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.float16)
pipe.to("cuda")
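# Note: on ZeroGPU Spaces the GPU is attached only while a function decorated
# with @spaces.GPU is running; importing `spaces` before any CUDA use lets the
# runtime handle this module-level .to("cuda") call safely.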
def update_selection(evt: gr.SelectData):
    selected_lora = loras[evt.index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index
    )
@spaces.GPU
def run_lora(prompt, negative_prompt, cfg_scale, steps, selected_index, scheduler):
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.")

    selected_lora = loras[selected_index]
    lora_path = selected_lora["repo"]
    trigger_word = selected_lora["trigger_word"]

    # Load LoRA weights
    pipe.load_lora_weights(lora_path)

    # Set scheduler
    if scheduler == "Euler":
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    elif scheduler == "DPM++ 2M":
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

    # Generate image
    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        negative_prompt=negative_prompt,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
    ).images[0]

    # Unload LoRA weights
    pipe.unload_lora_weights()
    return image
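# Hypothetical direct call for local testing (assumes loras.json has an entry
# at index 0 and a GPU is available):
#     image = run_lora("a castle on a hill", "blurry, low quality", 7.5, 30, 0, "Euler")
#     image.save("sample.png")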
with gr.Blocks(css="custom.css") as app:
gr.Markdown("# artificialguybr LoRA portfolio")
gr.Markdown(
"### This is my portfolio. Follow me on Twitter [@artificialguybr](https://twitter.com/artificialguybr).\n"
"**Note**: Generation quality may vary. For best results, adjust the parameters.\n"
"Special thanks to Hugging Face for their Diffusers library and Spaces platform."
)
selected_index = gr.State(None)
with gr.Row():
gallery = gr.Gallery(
[(item["image"], item["title"]) for item in loras],
label="LoRA Gallery",
allow_preview=False,
columns=3
)
with gr.Column():
prompt_title = gr.Markdown("### Click on a LoRA in the gallery to select it")
selected_info = gr.Markdown("")
prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Type a prompt after selecting a LoRA")
negative_prompt = gr.Textbox(label="Negative Prompt", lines=2, value="low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry")
with gr.Row():
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=7.5)
steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=30)
scheduler = gr.Dropdown(label="Scheduler", choices=["Euler", "DPM++ 2M"], value="Euler")
generate_button = gr.Button("Generate")
result = gr.Image(label="Generated Image")
gallery.select(update_selection, outputs=[prompt, selected_info, selected_index])
generate_button.click(
fn=run_lora,
inputs=[prompt, negative_prompt, cfg_scale, steps, selected_index, scheduler],
outputs=[result]
)
app.queue()
app.launch()