WatchOutForMike committed on
Commit 8957c3f · 1 Parent(s): ef9df1a
Files changed (1)
  1. app.py +57 -103
app.py CHANGED
@@ -11,6 +11,7 @@ import torch
 from PIL import Image
 import gradio as gr
 
+
 from diffusers import (
     DiffusionPipeline,
     AutoencoderTiny,
@@ -59,7 +60,7 @@ def retrieve_timesteps(
     **kwargs,
 ):
     if timesteps is not None and sigmas is not None:
-        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+        raise ValueError("Only one of timesteps or sigmas can be passed. Please choose one to set custom values")
     if timesteps is not None:
         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
         timesteps = scheduler.timesteps
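For readers of this hunk: retrieve_timesteps accepts either a custom timesteps list or a custom sigmas list, never both, which is what the changed ValueError enforces. A minimal standalone sketch of that mutual-exclusion pattern (not part of the commit; the helper name pick_schedule is illustrative only):

    def pick_schedule(timesteps=None, sigmas=None):
        # Mirrors the guard above: custom timesteps and custom sigmas are mutually exclusive.
        if timesteps is not None and sigmas is not None:
            raise ValueError("Only one of timesteps or sigmas can be passed. Please choose one to set custom values")
        return timesteps if timesteps is not None else sigmas

    pick_schedule(timesteps=[999, 749, 499, 249])   # fine: only timesteps given
    # pick_schedule(timesteps=[999], sigmas=[1.0])  # would raise ValueError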
@@ -2148,20 +2149,19 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
 
-# TAEF1 is a very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE.
+#TAEF1 is very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE. FLUX.1 is useful for real-time previewing of the FLUX.1 generation process.#
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
-pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
-    base_model,
-    vae=good_vae,
-    transformer=pipe.transformer,
-    text_encoder=pipe.text_encoder,
-    tokenizer=pipe.tokenizer,
-    text_encoder_2=pipe.text_encoder_2,
-    tokenizer_2=pipe.tokenizer_2,
-    torch_dtype=dtype
-)
+pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
+                                                      vae=good_vae,
+                                                      transformer=pipe.transformer,
+                                                      text_encoder=pipe.text_encoder,
+                                                      tokenizer=pipe.tokenizer,
+                                                      text_encoder_2=pipe.text_encoder_2,
+                                                      tokenizer_2=pipe.tokenizer_2,
+                                                      torch_dtype=dtype
+                                                      )
 
 MAX_SEED = 2**32-1
 
@@ -2250,7 +2250,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
     selected_lora = loras[selected_index]
     lora_path = selected_lora["repo"]
    trigger_word = selected_lora["trigger_word"]
-    if trigger_word:
+    if(trigger_word):
        if "trigger_position" in selected_lora:
            if selected_lora["trigger_position"] == "prepend":
                prompt_mash = f"{trigger_word} {prompt}"
@@ -2265,7 +2265,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
         pipe.unload_lora_weights()
         pipe_i2i.unload_lora_weights()
 
-    # LoRA weights flow
+    #LoRA weights flow
     with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
         pipe_to_use = pipe_i2i if image_input is not None else pipe
         weight_name = selected_lora.get("weights", None)
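The pipe_to_use selection above switches between the two pipelines created in the setup hunk; since pipe_i2i is assembled from pipe's already-loaded modules, both share the same transformer and text encoders in memory and only the VAE differs (taef1 for cheap previews, good_vae for full-quality decoding). A hedged sketch of what that sharing implies, assuming the setup hunk has already run:

    # Both pipelines reference the same module objects, so the FLUX transformer and
    # text encoders are held in memory only once.
    assert pipe_i2i.transformer is pipe.transformer
    assert pipe_i2i.text_encoder is pipe.text_encoder
    assert pipe_i2i.text_encoder_2 is pipe.text_encoder_2

    # Only the decoders differ: the tiny TAEF1 for previews, the full FLUX VAE for final output.
    assert pipe.vae is taef1
    assert pipe_i2i.vae is good_vae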
@@ -2280,7 +2280,8 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    if image_input is not None:
+    if(image_input is not None):
+
         final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
         yield final_image, seed, gr.update(visible=False)
     else:
@@ -2289,7 +2290,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
         final_image = None
         step_counter = 0
         for image in image_generator:
-            step_counter += 1
+            step_counter+=1
             final_image = image
             progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
             yield image, seed, gr.update(value=progress_bar, visible=True)
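The yielded progress_bar string drives a pure-CSS progress indicator: the inline --current and --total custom properties are read by the .progress-bar rule, whose width: calc(var(--current) / var(--total) * 100%) declaration appears in both the removed and the added stylesheet further down. A small standalone sketch of the same markup (the helper name progress_html is illustrative only):

    def progress_html(step: int, total: int) -> str:
        # Same markup as the f-string above; the CSS rule does the percentage math.
        return (
            f'<div class="progress-container">'
            f'<div class="progress-bar" style="--current: {step}; --total: {total};"></div>'
            f'</div>'
        )

    print(progress_html(7, 28))  # the .progress-bar rule renders this as a bar filled to 25%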
@@ -2298,15 +2299,19 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
 
 def get_huggingface_safetensors(link):
     split_link = link.split("/")
-    if len(split_link) == 2:
+    if(len(split_link) == 2):
         model_card = ModelCard.load(link)
         base_model = model_card.data.get("base_model")
         print(base_model)
 
-        # Allows Both
-        if (base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell"):
+        #Allows Both
+        if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
             raise Exception("Flux LoRA Not Found!")
 
+        # Only allow "black-forest-labs/FLUX.1-dev"
+        #if base_model != "black-forest-labs/FLUX.1-dev":
+            #raise Exception("Only FLUX.1-dev is supported, other LoRA models are not allowed!")
+
         image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
         trigger_word = model_card.data.get("instance_prompt", "")
         image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
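The gate above reads the LoRA's model-card metadata and accepts adapters whose base_model is either FLUX.1-dev or FLUX.1-schnell; a stricter dev-only check is added but left commented out. A hedged sketch of what that lookup returns for a typical FLUX LoRA, reusing the repo id from the UI placeholder (requires network access and huggingface_hub):

    from huggingface_hub import ModelCard

    card = ModelCard.load("prithivMLmods/Canopus-LoRA-Flux-Anime")
    base_model = card.data.get("base_model")  # may be a string or a list, depending on the card
    print(base_model)  # expected to include "black-forest-labs/FLUX.1-dev" for a compatible LoRA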
@@ -2314,20 +2319,20 @@ def get_huggingface_safetensors(link):
         try:
             list_of_files = fs.ls(link, detail=False)
             for file in list_of_files:
-                if file.endswith(".safetensors"):
+                if(file.endswith(".safetensors")):
                     safetensors_name = file.split("/")[-1]
                 if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
                     image_elements = file.split("/")
                     image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
         except Exception as e:
             print(e)
-            gr.Warning("You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
-            raise Exception("You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+            gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+            raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
         return split_link[1], link, safetensors_name, trigger_word, image_url
 
 def check_custom_model(link):
-    if link.startswith("https://"):
-        if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
+    if(link.startswith("https://")):
+        if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
             link_split = link.split("huggingface.co/")
             return get_huggingface_safetensors(link_split[1])
     else:
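check_custom_model above accepts either a bare user/repo id or a full huggingface.co URL and reduces both to the repo id that get_huggingface_safetensors expects. A standalone sketch of that normalization (the helper name normalize_repo is illustrative only):

    def normalize_repo(link: str) -> str:
        # Strip a huggingface.co prefix if present; bare "user/repo" ids pass through unchanged.
        if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
            link = link.split("huggingface.co/")[1]
        return link

    print(normalize_repo("https://huggingface.co/prithivMLmods/Canopus-LoRA-Flux-Anime"))
    print(normalize_repo("prithivMLmods/Canopus-LoRA-Flux-Anime"))
    # both print "prithivMLmods/Canopus-LoRA-Flux-Anime"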
@@ -2335,7 +2340,7 @@ def check_custom_model(link):
 
 def add_custom_lora(custom_lora):
     global loras
-    if custom_lora:
+    if(custom_lora):
         try:
             title, repo, path, trigger_word, image = check_custom_model(custom_lora)
             print(f"Loaded custom LoRA: {repo}")
@@ -2352,7 +2357,7 @@ def add_custom_lora(custom_lora):
             </div>
             '''
             existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
-            if not existing_item_index:
+            if(not existing_item_index):
                 new_item = {
                     "image": image,
                     "title": title,
@@ -2366,8 +2371,8 @@ def add_custom_lora(custom_lora):
 
             return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
         except Exception as e:
-            gr.Warning("Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
-            return gr.update(visible=True, value="Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA"), gr.update(visible=False), gr.update(), "", None, ""
+            gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
+            return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=False), gr.update(), "", None, ""
     else:
         return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
 
@@ -2376,73 +2381,22 @@ def remove_custom_lora():
 
 run_lora.zerogpu = True
 
-# ─── UPDATED CSS WITH YOUR SITE COLORS ─────────────────────────────────────────────────────────────
 css = '''
-/* Force our background with !important */
-body {
-  background: #f4ecd8 url('https://www.transparenttextures.com/patterns/purty-wood.png') repeat !important;
-  font-family: 'Cinzel', serif;
-  color: #543d29;
-  margin: 0;
-  padding: 0;
-}
-
-/* Button & Layout */
-#gen_btn { height: 100%; }
-#gen_column { align-self: stretch; }
-
-/* Title / Header */
-#title { text-align: center; margin-bottom: 20px; }
-#title h1 {
-  font-size: 3em;
-  display: inline-flex;
-  align-items: center;
-  color: #795548; /* medium brown */
-  text-shadow: 2px 2px 4px #543d29; /* dark brown */
-}
-#title img { width: 100px; margin-right: 0.5em; }
-
-/* Gallery & LoRA List */
-#gallery .grid-wrap { height: 10vh; }
-#lora_list {
-  background: #f0e6d2;
-  padding: 0 1em 0.3em;
-  font-size: 90%;
-  color: #795548;
-  border: 1px solid #543d29;
-}
-.card_internal {
-  display: flex;
-  height: 100px;
-  margin-top: 0.5em;
-  border: 2px solid #543d29;
-  background: #f0e6d2;
-  border-radius: 8px;
-  padding: 5px;
-}
-.card_internal img {
-  margin-right: 1em;
-  border-radius: 5px;
-  border: 1px solid #543d29;
-}
-
-/* Progress Bar */
-#progress { height: 30px; margin-top: 10px; }
-#progress .generating { display: none; }
-.progress-container {
-  width: 100%;
-  height: 30px;
-  background-color: #795548;
-  border-radius: 15px;
-  overflow: hidden;
-  margin-bottom: 20px;
-}
-.progress-bar {
-  height: 100%;
-  background-color: #543d29;
-  width: calc(var(--current) / var(--total) * 100%);
-  transition: width 0.5s ease-in-out;
-}
+#gen_btn{height: 100%}
+#gen_column{align-self: stretch}
+#title{text-align: center}
+#title h1{font-size: 3em; display:inline-flex; align-items:center; color: #ffd700;}
+#title img{width: 100px; margin-right: 0.5em}
+#gallery .grid-wrap{height: 10vh}
+#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%; color: #ffd700;}
+.card_internal{display: flex;height: 100px;margin-top: .5em; border: 2px solid #8b4513; background: #3a3a3a;}
+.card_internal img{margin-right: 1em; border-radius: 5px;}
+.styler{--form-gap-width: 0px !important}
+#progress{height:30px}
+#progress .generating{display:none}
+.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
+.progress-bar {height: 100%;background-color: #d2691e;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
+body {background: url('https://www.transparenttextures.com/patterns/black-linen.png') center; font-family: 'Cinzel', serif; color: #ffd700;}
 '''
 
 with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
@@ -2451,10 +2405,10 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
         <div id="title">
             <h1>⚔️ ChatDnD.net ⚔️</h1>
             <p>
-                <strong>Forge Your Destiny!</strong> Create legendary heroes, intricate maps, epic quests, and awe‐inspiring battle scenes.
-                Whether you’re an adventurer or a Dungeon Master, let your imagination run wild. <br>
-                <a href="https://chatdnd.net" target="_blank">Visit Our Keep</a> |
-                <a href="https://buymeacoffee.com/watchoutformike" target="_blank">Support the Guild</a>
+                <strong>Unleash Your Imagination!</strong> Create heroes, maps, quests, and epic scenes to bring your campaigns to life.
+                Tailored for adventurers seeking inspiration or Dungeon Masters constructing their next grand story. <br>
+                <a href="https://chatdnd.net" target="_blank">Visit Our Website</a> |
+                <a href="https://buymeacoffee.com/watchoutformike" target="_blank">Support Us</a>
             </p>
         </div>
         """,
2465
  with gr.Row():
2466
  with gr.Column(scale=3):
2467
  prompt = gr.Textbox(
2468
- label="🎲 Your Epic Prompt",
2469
  lines=1,
2470
- placeholder="Describe your valiant hero, fearsome villain, or an epic clash of forces..."
2471
  )
2472
  with gr.Column(scale=1, elem_id="gen_column"):
2473
  generate_button = gr.Button("Forge Your Vision", variant="primary", elem_id="gen_btn")
@@ -2476,7 +2430,7 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
             selected_info = gr.Markdown("")
             gallery = gr.Gallery(
                 [(item["image"], item["title"]) for item in loras],
-                label="🛡️ LoRA Artifacts",
+                label="🛡️ LoRA Artifacts 🛡️",
                 allow_preview=False,
                 columns=3,
                 elem_id="gallery",
@@ -2488,7 +2442,7 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
                 placeholder="prithivMLmods/Canopus-LoRA-Flux-Anime"
             )
             gr.Markdown(
-                """[Explore our FLUX Artifacts Collection 📜](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)""",
+                """[Explore FLUX Artifacts Collection 📜](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)""",
                 elem_id="lora_list"
             )
             custom_lora_info = gr.HTML(visible=False)
@@ -2513,7 +2467,7 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
                 minimum=1, maximum=20, step=0.5, value=3.5
             )
             steps = gr.Slider(
-                label="⏱️ Generation Steps",
+                label="⏱️ Steps for Generation",
                 minimum=1, maximum=50, step=1, value=28
             )
 
 