import gradio as gr
from huggingface_hub import login, HfFileSystem, HfApi, ModelCard
import os
import random
import spaces
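# `spaces` supplies the @spaces.GPU decorator used below; on Hugging Face ZeroGPU
# Spaces it allocates a GPU for the duration of the decorated call.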
is_shared_ui = "fffiloni/sd-xl-custom-model" in os.environ.get("SPACE_ID", "")
hf_token = os.environ.get("HF_TOKEN")
login(token=hf_token)
fs = HfFileSystem(token=hf_token)
api = HfApi()
import torch
from diffusers import DiffusionPipeline, AutoencoderKL
device="cuda" if torch.cuda.is_available() else "cpu"
def get_files(file_paths):
    """Collapse a list of repo file paths to one file name per directory (the last one seen)."""
    last_files = {}  # Dictionary to store the last file for each directory
    for file_path in file_paths:
        # Split the file path into directory and file components
        directory, file_name = file_path.rsplit('/', 1)
        # Update the last file for the current directory
        last_files[directory] = file_name
    # Extract the last file names from the dictionary
    return list(last_files.values())
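# For example (illustrative paths, not a real repo listing):
#   get_files(["user/model/a.safetensors", "user/model/b.safetensors"])
#   -> ["b.safetensors"]   # only the last file per directory is kept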
def load_model(custom_model):
    if custom_model == "":
        gr.Warning("If you want to use a private model, you need to duplicate this space on your personal account.")
        raise gr.Error("You forgot to define Model ID.")

    # Get instance_prompt, a.k.a. the trigger word, from the model card
    card = ModelCard.load(custom_model)
    repo_data = card.data.to_dict()
    instance_prompt = repo_data.get("instance_prompt")

    if instance_prompt is not None:
        print(f"Trigger word: {instance_prompt}")
    else:
        instance_prompt = "no trigger word needed"
        print("Trigger word: no trigger word needed")

    # List all ".safetensors" files in the repo
    sfts_available_files = fs.glob(f"{custom_model}/*.safetensors")
    sfts_available_files = get_files(sfts_available_files)

    if not sfts_available_files:
        sfts_available_files = ["NO SAFETENSORS FILE"]

    print(f"Safetensors available: {sfts_available_files}")

    return gr.update(choices=sfts_available_files, value=sfts_available_files[0], visible=True), gr.update(value=instance_prompt, visible=True)
@spaces.GPU
def infer(custom_model, weight_name, prompt, inf_steps, guidance_scale, seed, lora_weight, progress=gr.Progress(track_tqdm=True)):
    # Load the fp16-safe SDXL VAE and the SDXL base pipeline
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        vae=vae, torch_dtype=torch.float16, variant="fp16",
        use_safetensors=True
    )
    pipe.to(device)

    # Load the custom LoRA weights on top of the base model
    if weight_name == "NO SAFETENSORS FILE":
        pipe.load_lora_weights(
            custom_model,
            low_cpu_mem_usage=True,
            use_auth_token=True
        )
    else:
        pipe.load_lora_weights(
            custom_model,
            weight_name=weight_name,
            low_cpu_mem_usage=True,
            use_auth_token=True
        )
    #pipe.fuse_lora(lora_weight)

    if seed < 0:
        seed = random.randint(0, 423538377342)
    generator = torch.Generator(device=device).manual_seed(seed)

    image = pipe(
        prompt=prompt,
        num_inference_steps=inf_steps,
        guidance_scale=guidance_scale,
        generator=generator,
        cross_attention_kwargs={"scale": lora_weight}
    ).images[0]
    #pipe.unfuse_lora()

    return image, seed
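# Note: the LoRA strength is applied at call time via cross_attention_kwargs={"scale": ...}
# rather than by fusing the weights into the base model (pipe.fuse_lora / pipe.unfuse_lora
# are left commented out above), so each request can use a different weight without unfusing.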
css="""
#col-container{
margin: 0 auto;
max-width: 680px;
text-align: left;
}
div#warning-duplicate {
background-color: #ebf5ff;
padding: 0 10px 5px;
margin: 20px 0;
}
div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
color: #0f4592!important;
}
div#warning-duplicate strong {
color: #0f4592;
}
p.actions {
display: flex;
align-items: center;
margin: 20px 0;
}
div#warning-duplicate .actions a {
display: inline-block;
margin-right: 10px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        if is_shared_ui:
            top_description = gr.HTML(f'''
            <div class="gr-prose">
                <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;" fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
                Note: you might want to use a private custom LoRA model</h2>
                <p class="main-message">
                    To do so, <strong>duplicate the Space</strong> and run it on your own profile using <strong>your own access token</strong> and optionally a GPU (T4-small or A10G-small) for faster inference without waiting in the queue.<br />
                </p>
                <p class="actions">
                    <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
                        <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
                    </a>
                    to start using private models and skip the queue
                </p>
            </div>
            ''', elem_id="warning-duplicate")
        gr.HTML("""
        <h2 style="text-align: center;">SD-XL Custom Model Inference</h2>
        <p style="text-align: center;">Use this demo to check results from your previously trained LoRA model.</p>
        """)
        with gr.Row():
            with gr.Column():
                if not is_shared_ui:
                    # On a duplicated Space, offer the user's own SDXL LoRA models as choices
                    your_username = api.whoami()["name"]
                    my_models = api.list_models(author=your_username, filter=["diffusers", "stable-diffusion-xl", "lora"])
                    model_names = [item.modelId for item in my_models]
                    custom_model = gr.Dropdown(
                        label="Your custom model ID",
                        choices=model_names,
                        allow_custom_value=True
                        #placeholder="username/model_id"
                    )
                else:
                    custom_model = gr.Textbox(
                        label="Your custom model ID",
                        placeholder="your_username/your_trained_model_name",
                        info="Make sure your model is set to PUBLIC"
                    )
                weight_name = gr.Dropdown(
                    label="Safetensors file",
                    #value="pytorch_lora_weights.safetensors",
                    info="specify which one if model has several .safetensors files",
                    allow_custom_value=True,
                    visible=False
                )
            with gr.Column():
                load_model_btn = gr.Button("Load my model")
                trigger_word = gr.Textbox(label="Trigger word", interactive=False, visible=False)
        prompt_in = gr.Textbox(label="Prompt")
        with gr.Row():
            inf_steps = gr.Slider(
                label="Inference steps",
                minimum=12,
                maximum=50,
                step=1,
                value=25
            )
            guidance_scale = gr.Slider(
                label="Guidance scale",
                minimum=0.0,
                maximum=50.0,
                step=0.1,
                value=7.5
            )
        with gr.Row():
            seed = gr.Slider(
                label="Seed",
                info="-1 denotes a random seed",
                minimum=-1,
                maximum=423538377342,
                step=1,
                value=-1
            )
            last_used_seed = gr.Number(
                label="Last used seed",
                info="the seed used in the last generation",
            )
        lora_weight = gr.Slider(
            label="LoRA weight",
            minimum=0.0,
            maximum=1.0,
            step=0.01,
            value=0.9
        )
        submit_btn = gr.Button("Submit")
        image_out = gr.Image(label="Image output")
    load_model_btn.click(
        fn=load_model,
        inputs=[custom_model],
        outputs=[weight_name, trigger_word],
        queue=False
    )
    submit_btn.click(
        fn=infer,
        inputs=[custom_model, weight_name, prompt_in, inf_steps, guidance_scale, seed, lora_weight],
        outputs=[image_out, last_used_seed]
    )
demo.queue().launch()
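# Running this locally assumes HF_TOKEN is set in the environment; SPACE_ID is only
# needed to reproduce the shared-UI duplicate banner, e.g.:
#   HF_TOKEN=hf_xxx python app.py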