import gradio as gr
import torch
import os
import shutil
import requests
import subprocess
from subprocess import getoutput
from huggingface_hub import snapshot_download, HfApi, create_repo
api = HfApi()
hf_token = os.environ.get("HF_TOKEN_WITH_WRITE_PERMISSION")
is_shared_ui = "fffiloni/train-dreambooth-lora-sdxl" in os.environ.get("SPACE_ID", "")
is_gpu_associated = torch.cuda.is_available()
which_gpu = "CPU"
if is_gpu_associated:
    gpu_info = getoutput('nvidia-smi')
    if "A10G" in gpu_info:
        which_gpu = "A10G"
    elif "T4" in gpu_info:
        which_gpu = "T4"
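# Map the selected training type to its default (max_train_steps, checkpointing_steps) pair for the UI.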
def change_training_setup(training_type):
if training_type == "style" :
return 1000, 500
elif training_type == "concept" :
return 2000, 1000
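# Request a hardware change for this Space through the Hugging Face hardware endpoint
# (currently unused; see the commented-out call in main()).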
def swap_hardware(hf_token, hardware="cpu-basic"):
hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
headers = { "authorization" : f"Bearer {hf_token}"}
body = {'flavor': hardware}
requests.post(hardware_url, json = body, headers=headers)
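# Set the Space's auto-sleep timeout (in seconds) through the Hugging Face Spaces API.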
def swap_sleep_time(sleep_time):
sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}/sleeptime"
headers = { "authorization" : f"Bearer {hf_token}"}
body = {'seconds':sleep_time}
requests.post(sleep_time_url,json=body,headers=headers)
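# Fetch the Space's current auto-sleep timeout (gcTimeout); returns None when no timeout is configured.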
def get_sleep_time():
sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}"
headers = { "authorization" : f"Bearer {hf_token}"}
response = requests.get(sleep_time_url,headers=headers)
    try:
        gcTimeout = response.json()['runtime']['gcTimeout']
    except (KeyError, TypeError, ValueError):
        gcTimeout = None
return gcTimeout
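# Reveal the training widgets only when the Space has no auto-sleep timeout set,
# so it cannot go to sleep in the middle of a training run.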
def check_sleep_time():
sleep_time = get_sleep_time()
if sleep_time is None :
return sleep_time, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
else :
return sleep_time, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
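# Launch the local B-LoRA DreamBooth SDXL training script with accelerate and push the trained weights to the Hub.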
def train_dreambooth_blora_sdxl(instance_data_dir, b_lora_trained_folder, instance_prompt, class_prompt, max_train_steps, checkpoint_steps):
script_filename = "train_dreambooth_b-lora_sdxl.py" # Assuming it's in the same folder
command = [
"accelerate",
"launch",
script_filename, # Use the local script
"--pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0",
f"--instance_data_dir={instance_data_dir}",
f"--output_dir={b_lora_trained_folder}",
f"--instance_prompt='{instance_prompt}'",
f"--class_prompt={class_prompt}",
#f"--validation_prompt=a teddy bear in {instance_prompt} style",
"--num_validation_images=1",
"--validation_epochs=500",
"--resolution=1024",
"--rank=64",
"--train_batch_size=1",
"--learning_rate=5e-5",
"--lr_scheduler=constant",
"--lr_warmup_steps=0",
f"--max_train_steps={max_train_steps}",
f"--checkpointing_steps={checkpoint_steps}",
"--seed=0",
"--gradient_checkpointing",
"--use_8bit_adam",
"--mixed_precision=fp16",
"--push_to_hub",
f"--hub_token={hf_token}"
]
try:
subprocess.run(command, check=True)
print("Training is finished!")
except subprocess.CalledProcessError as e:
print(f"An error occurred: {e}")
def clear_directory(directory_path):
# Check if the directory exists
if os.path.exists(directory_path):
# Iterate over all the files and directories inside the specified directory
for filename in os.listdir(directory_path):
file_path = os.path.join(directory_path, filename)
try:
# Check if it is a file or a directory and remove accordingly
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path) # Remove the file
elif os.path.isdir(file_path):
shutil.rmtree(file_path) # Remove the directory
except Exception as e:
print(f'Failed to delete {file_path}. Reason: {e}')
else:
print(f'The directory {directory_path} does not exist.')
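# Entry point called by the UI: validates inputs, stages the reference image, runs training,
# then sets the Space back to a short sleep time.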
def main(image_path, b_lora_trained_folder, instance_prompt, class_prompt, training_type, training_steps):
if is_shared_ui:
raise gr.Error("This Space only works in duplicated instances")
if not is_gpu_associated:
raise gr.Error("Please associate a T4 or A10G GPU for this Space")
    if image_path is None:
raise gr.Error("You forgot to specify an image reference")
if b_lora_trained_folder == "":
raise gr.Error("You forgot to specify a name for you model")
if instance_prompt == "":
raise gr.Error("You forgot to specify an instance prompt")
#sleep_time = get_sleep_time(hf_token)
#if sleep_time:
#swap_sleep_time(hf_token, 36000)
local_dir = "image_to_train"
# Check if the directory exists and create it if necessary
if not os.path.exists(local_dir):
os.makedirs(local_dir)
else :
directory_to_clear = local_dir
clear_directory(directory_to_clear)
shutil.copy(image_path, local_dir)
print(f"source image has been copied in {local_dir} directory")
if training_type == "style":
checkpoint_steps = 500
elif training_type == "concept" :
checkpoint_steps = 1000
max_train_steps = training_steps
train_dreambooth_blora_sdxl(local_dir, b_lora_trained_folder, instance_prompt, class_prompt, max_train_steps, checkpoint_steps)
your_username = api.whoami(token=hf_token)["name"]
#swap_hardware(hardware="cpu-basic")
swap_sleep_time(300)
return f"Done, your trained model has been stored in your models library: {your_username}/{b_lora_trained_folder}"
css = """
#col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
div#warning-ready {
background-color: #ecfdf5;
padding: 0 10px 5px;
margin: 20px 0;
}
div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
color: #057857!important;
}
div#warning-duplicate {
background-color: #ebf5ff;
padding: 0 10px 5px;
margin: 20px 0;
}
div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
color: #0f4592!important;
}
div#warning-duplicate strong {
color: #0f4592;
}
p.actions {
display: flex;
align-items: center;
margin: 20px 0;
}
div#warning-duplicate .actions a {
display: inline-block;
margin-right: 10px;
}
div#warning-setgpu {
background-color: #fff4eb;
padding: 0 10px 5px;
margin: 20px 0;
}
div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
color: #92220f!important;
}
div#warning-setgpu a, div#warning-setgpu b {
color: #91230f;
}
div#warning-setgpu p.actions > a {
display: inline-block;
background: #1f1f23;
border-radius: 40px;
padding: 6px 24px;
color: antiquewhite;
text-decoration: none;
font-weight: 600;
font-size: 1.2em;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
if is_shared_ui:
top_description = gr.HTML(f'''
<div class="gr-prose">
<h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
                Attention: this Space needs to be duplicated to work</h2>
<p class="main-message">
To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (T4-small or A10G-small).<br />
                A T4 costs <strong>US$0.60/h</strong>, so it should cost less than US$1 to train most models.
</p>
<p class="actions">
<a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
<img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
</a>
to start training your own B-LoRa model
</p>
</div>
''', elem_id="warning-duplicate")
else:
if(is_gpu_associated):
top_description = gr.HTML(f'''
<div class="gr-prose">
<h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
                You have successfully associated a {which_gpu} GPU to the B-LoRa Training Space 🎉</h2>
<p>
                You can now train your model! You will be billed by the minute from when you activate the GPU until it is turned off.
</p>
</div>
''', elem_id="warning-ready")
else:
top_description = gr.HTML(f'''
<div class="gr-prose">
<h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
                You have successfully duplicated the B-LoRa Training Space 🎉</h2>
                <p>There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">assign a <b>T4-small or A10G-small GPU</b> to it (via the Settings tab)</a> and run the training below.
                You will be billed by the minute from when you activate the GPU until it is turned off.</p>
<p class="actions">
<a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">π₯ Set recommended GPU</a>
</p>
</div>
''', elem_id="warning-setgpu")
gr.Markdown("# B-LoRa Training UI π")
with gr.Row():
image = gr.Image(label="Image Reference", sources=["upload"], type="filepath")
with gr.Column():
with gr.Row():
current_sleep_time = gr.Textbox(label="current space sleep time")
check_sleep_time_btn = gr.Button("check", scale=1)
training_type = gr.Radio(label="Training type", choices=["style", "concept"], value="style", visible=False)
b_lora_name = gr.Textbox(label="Name your B-LoRa model", placeholder="b_lora_trained_folder", visible=False)
with gr.Row():
instance_prompt = gr.Textbox(label="Create instance prompt", placeholder="A [v42] <class_prompt>", visible=False)
class_prompt = gr.Textbox(label="Specify class prompt", placeholder="style | person | dog ", visible=False)
training_steps = gr.Number(label="Training steps", value=1000, interactive=False, visible=False)
checkpoint_step = gr.Number(label="checkpoint step", visible=False, value=500)
train_btn = gr.Button("Train B-LoRa", visible=False)
status = gr.Textbox(label="status")
check_sleep_time_btn.click(
fn = check_sleep_time,
inputs = None,
outputs = [current_sleep_time, b_lora_name, instance_prompt, class_prompt, training_type, training_steps, train_btn]
)
training_type.change(
fn = change_training_setup,
inputs = [training_type],
outputs = [training_steps, checkpoint_step]
)
train_btn.click(
fn = main,
inputs = [image, b_lora_name, instance_prompt, class_prompt, training_type, training_steps],
outputs = [status]
)
demo.launch(debug=True)