WatchOutForMike committed on
Commit
b739efd
·
1 Parent(s): 8533d8a
Files changed (1)
  1. app.py +103 -56
app.py CHANGED
@@ -11,7 +11,6 @@ import torch
11
  from PIL import Image
12
  import gradio as gr
13
 
14
-
15
  from diffusers import (
16
  DiffusionPipeline,
17
  AutoencoderTiny,
@@ -2148,19 +2147,20 @@ dtype = torch.bfloat16
2148
  device = "cuda" if torch.cuda.is_available() else "cpu"
2149
  base_model = "black-forest-labs/FLUX.1-dev"
2150
 
2151
- #TAEF1 is very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE. FLUX.1 is useful for real-time previewing of the FLUX.1 generation process.#
2152
  taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
2153
  good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
2154
  pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
2155
- pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
2156
- vae=good_vae,
2157
- transformer=pipe.transformer,
2158
- text_encoder=pipe.text_encoder,
2159
- tokenizer=pipe.tokenizer,
2160
- text_encoder_2=pipe.text_encoder_2,
2161
- tokenizer_2=pipe.tokenizer_2,
2162
- torch_dtype=dtype
2163
- )
 
2164
 
2165
  MAX_SEED = 2**32-1
2166
 
@@ -2249,7 +2249,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
2249
  selected_lora = loras[selected_index]
2250
  lora_path = selected_lora["repo"]
2251
  trigger_word = selected_lora["trigger_word"]
2252
- if(trigger_word):
2253
  if "trigger_position" in selected_lora:
2254
  if selected_lora["trigger_position"] == "prepend":
2255
  prompt_mash = f"{trigger_word} {prompt}"
@@ -2264,7 +2264,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
2264
  pipe.unload_lora_weights()
2265
  pipe_i2i.unload_lora_weights()
2266
 
2267
- #LoRA weights flow
2268
  with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
2269
  pipe_to_use = pipe_i2i if image_input is not None else pipe
2270
  weight_name = selected_lora.get("weights", None)
@@ -2279,8 +2279,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
2279
  if randomize_seed:
2280
  seed = random.randint(0, MAX_SEED)
2281
 
2282
- if(image_input is not None):
2283
-
2284
  final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
2285
  yield final_image, seed, gr.update(visible=False)
2286
  else:
@@ -2289,7 +2288,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
2289
  final_image = None
2290
  step_counter = 0
2291
  for image in image_generator:
2292
- step_counter+=1
2293
  final_image = image
2294
  progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
2295
  yield image, seed, gr.update(value=progress_bar, visible=True)
@@ -2298,19 +2297,15 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
2298
 
2299
  def get_huggingface_safetensors(link):
2300
  split_link = link.split("/")
2301
- if(len(split_link) == 2):
2302
  model_card = ModelCard.load(link)
2303
  base_model = model_card.data.get("base_model")
2304
  print(base_model)
2305
 
2306
- #Allows Both
2307
- if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
2308
  raise Exception("Flux LoRA Not Found!")
2309
 
2310
- # Only allow "black-forest-labs/FLUX.1-dev"
2311
- #if base_model != "black-forest-labs/FLUX.1-dev":
2312
- #raise Exception("Only FLUX.1-dev is supported, other LoRA models are not allowed!")
2313
-
2314
  image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
2315
  trigger_word = model_card.data.get("instance_prompt", "")
2316
  image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
@@ -2318,20 +2313,20 @@ def get_huggingface_safetensors(link):
2318
  try:
2319
  list_of_files = fs.ls(link, detail=False)
2320
  for file in list_of_files:
2321
- if(file.endswith(".safetensors")):
2322
  safetensors_name = file.split("/")[-1]
2323
  if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
2324
  image_elements = file.split("/")
2325
  image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
2326
  except Exception as e:
2327
  print(e)
2328
- gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
2329
- raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
2330
  return split_link[1], link, safetensors_name, trigger_word, image_url
2331
 
2332
  def check_custom_model(link):
2333
- if(link.startswith("https://")):
2334
- if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
2335
  link_split = link.split("huggingface.co/")
2336
  return get_huggingface_safetensors(link_split[1])
2337
  else:
@@ -2339,7 +2334,7 @@ def check_custom_model(link):
2339
 
2340
  def add_custom_lora(custom_lora):
2341
  global loras
2342
- if(custom_lora):
2343
  try:
2344
  title, repo, path, trigger_word, image = check_custom_model(custom_lora)
2345
  print(f"Loaded custom LoRA: {repo}")
@@ -2356,7 +2351,7 @@ def add_custom_lora(custom_lora):
2356
  </div>
2357
  '''
2358
  existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
2359
- if(not existing_item_index):
2360
  new_item = {
2361
  "image": image,
2362
  "title": title,
@@ -2370,8 +2365,8 @@ def add_custom_lora(custom_lora):
2370
 
2371
  return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
2372
  except Exception as e:
2373
- gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
2374
- return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=False), gr.update(), "", None, ""
2375
  else:
2376
  return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
2377
 
@@ -2380,22 +2375,74 @@ def remove_custom_lora():
2380
 
2381
  run_lora.zerogpu = True
2382
 
 
2383
  css = '''
2384
- #gen_btn{height: 100%}
2385
- #gen_column{align-self: stretch}
2386
- #title{text-align: center}
2387
- #title h1{font-size: 3em; display:inline-flex; align-items:center; color: #ffd700;}
2388
- #title img{width: 100px; margin-right: 0.5em}
2389
- #gallery .grid-wrap{height: 10vh}
2390
- #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%; color: #ffd700;}
2391
- .card_internal{display: flex;height: 100px;margin-top: .5em; border: 2px solid #8b4513; background: #3a3a3a;}
2392
- .card_internal img{margin-right: 1em; border-radius: 5px;}
2393
- .styler{--form-gap-width: 0px !important}
2394
- #progress{height:30px}
2395
- #progress .generating{display:none}
2396
- .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
2397
- .progress-bar {height: 100%;background-color: #d2691e;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
2398
- body {background: url('https://www.transparenttextures.com/patterns/black-linen.png') center; font-family: 'Cinzel', serif; color: #ffd700;}
2399
  '''
2400
 
2401
  with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
@@ -2404,10 +2451,10 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
2404
  <div id="title">
2405
  <h1>⚔️ ChatDnD.net ⚔️</h1>
2406
  <p>
2407
- <strong>Unleash Your Imagination!</strong> Create heroes, maps, quests, and epic scenes to bring your campaigns to life.
2408
- Tailored for adventurers seeking inspiration or Dungeon Masters constructing their next grand story. <br>
2409
- <a href="https://chatdnd.net" target="_blank">Visit Our Website</a> |
2410
- <a href="https://buymeacoffee.com/watchoutformike" target="_blank">Support Us</a>
2411
  </p>
2412
  </div>
2413
  """,
@@ -2418,9 +2465,9 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
2418
  with gr.Row():
2419
  with gr.Column(scale=3):
2420
  prompt = gr.Textbox(
2421
- label="🎲 Your Legendary Prompt",
2422
  lines=1,
2423
- placeholder="Describe your hero, villain, or epic battle scene..."
2424
  )
2425
  with gr.Column(scale=1, elem_id="gen_column"):
2426
  generate_button = gr.Button("Forge Your Vision", variant="primary", elem_id="gen_btn")
@@ -2429,7 +2476,7 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
2429
  selected_info = gr.Markdown("")
2430
  gallery = gr.Gallery(
2431
  [(item["image"], item["title"]) for item in loras],
2432
- label="🛡️ LoRA Artifacts 🛡️",
2433
  allow_preview=False,
2434
  columns=3,
2435
  elem_id="gallery",
@@ -2441,7 +2488,7 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
2441
  placeholder="prithivMLmods/Canopus-LoRA-Flux-Anime"
2442
  )
2443
  gr.Markdown(
2444
- """[Explore FLUX Artifacts Collection πŸ“œ](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)""",
2445
  elem_id="lora_list"
2446
  )
2447
  custom_lora_info = gr.HTML(visible=False)
@@ -2466,7 +2513,7 @@ with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
2466
  minimum=1, maximum=20, step=0.5, value=3.5
2467
  )
2468
  steps = gr.Slider(
2469
- label="⏱️ Steps for Generation",
2470
  minimum=1, maximum=50, step=1, value=28
2471
  )
2472
 
 
11
  from PIL import Image
12
  import gradio as gr
13
 
 
14
  from diffusers import (
15
  DiffusionPipeline,
16
  AutoencoderTiny,
 
2147
  device = "cuda" if torch.cuda.is_available() else "cpu"
2148
  base_model = "black-forest-labs/FLUX.1-dev"
2149
 
2150
+ # TAEF1 is a very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE.
2151
  taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
2152
  good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
2153
  pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
2154
+ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
2155
+ base_model,
2156
+ vae=good_vae,
2157
+ transformer=pipe.transformer,
2158
+ text_encoder=pipe.text_encoder,
2159
+ tokenizer=pipe.tokenizer,
2160
+ text_encoder_2=pipe.text_encoder_2,
2161
+ tokenizer_2=pipe.tokenizer_2,
2162
+ torch_dtype=dtype
2163
+ )
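For reference, a minimal sketch of how a pipeline assembled this way could be invoked directly with the standard diffusers image-to-image arguments; the file name, prompt, and parameter values below are illustrative and are not part of app.py:

# Sketch only: assumes pipe_i2i, device, and the imports above are in scope.
init_image = Image.open("reference.png").convert("RGB")   # illustrative input image
result = pipe_i2i(
    prompt="a dwarven blacksmith forging a rune-etched axe",
    image=init_image,
    strength=0.75,                  # how far the result may depart from the input image
    num_inference_steps=28,
    guidance_scale=3.5,
    generator=torch.Generator(device=device).manual_seed(42),
).images[0]

Because the transformer and both text encoders are passed in from pipe, this second pipeline reuses those weights rather than loading a second copy; only the full-quality VAE (good_vae) is specific to the image-to-image path.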
2164
 
2165
  MAX_SEED = 2**32-1
2166
 
 
2249
  selected_lora = loras[selected_index]
2250
  lora_path = selected_lora["repo"]
2251
  trigger_word = selected_lora["trigger_word"]
2252
+ if trigger_word:
2253
  if "trigger_position" in selected_lora:
2254
  if selected_lora["trigger_position"] == "prepend":
2255
  prompt_mash = f"{trigger_word} {prompt}"
 
2264
  pipe.unload_lora_weights()
2265
  pipe_i2i.unload_lora_weights()
2266
 
2267
+ # LoRA weights flow
2268
  with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
2269
  pipe_to_use = pipe_i2i if image_input is not None else pipe
2270
  weight_name = selected_lora.get("weights", None)
 
2279
  if randomize_seed:
2280
  seed = random.randint(0, MAX_SEED)
2281
 
2282
+ if image_input is not None:
 
2283
  final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
2284
  yield final_image, seed, gr.update(visible=False)
2285
  else:
 
2288
  final_image = None
2289
  step_counter = 0
2290
  for image in image_generator:
2291
+ step_counter += 1
2292
  final_image = image
2293
  progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
2294
  yield image, seed, gr.update(value=progress_bar, visible=True)
 
2297
 
2298
  def get_huggingface_safetensors(link):
2299
  split_link = link.split("/")
2300
+ if len(split_link) == 2:
2301
  model_card = ModelCard.load(link)
2302
  base_model = model_card.data.get("base_model")
2303
  print(base_model)
2304
 
2305
+ # Allows Both
2306
+ if (base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell"):
2307
  raise Exception("Flux LoRA Not Found!")
2308
 
2309
  image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
2310
  trigger_word = model_card.data.get("instance_prompt", "")
2311
  image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
 
2313
  try:
2314
  list_of_files = fs.ls(link, detail=False)
2315
  for file in list_of_files:
2316
+ if file.endswith(".safetensors"):
2317
  safetensors_name = file.split("/")[-1]
2318
  if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
2319
  image_elements = file.split("/")
2320
  image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
2321
  except Exception as e:
2322
  print(e)
2323
+ gr.Warning("You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
2324
+ raise Exception("You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
2325
  return split_link[1], link, safetensors_name, trigger_word, image_url
2326
 
2327
  def check_custom_model(link):
2328
+ if link.startswith("https://"):
2329
+ if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
2330
  link_split = link.split("huggingface.co/")
2331
  return get_huggingface_safetensors(link_split[1])
2332
  else:
 
2334
 
2335
  def add_custom_lora(custom_lora):
2336
  global loras
2337
+ if custom_lora:
2338
  try:
2339
  title, repo, path, trigger_word, image = check_custom_model(custom_lora)
2340
  print(f"Loaded custom LoRA: {repo}")
 
2351
  </div>
2352
  '''
2353
  existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
2354
+ if not existing_item_index:
2355
  new_item = {
2356
  "image": image,
2357
  "title": title,
 
2365
 
2366
  return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
2367
  except Exception as e:
2368
+ gr.Warning("Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
2369
+ return gr.update(visible=True, value="Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA"), gr.update(visible=False), gr.update(), "", None, ""
2370
  else:
2371
  return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
2372
 
 
2375
 
2376
  run_lora.zerogpu = True
2377
 
2378
+ # ─── UPDATED CSS FOR A D&D THEME ─────────────────────────────────────────────────────────────
2379
  css = '''
2380
+ /* Button & Layout */
2381
+ #gen_btn { height: 100%; }
2382
+ #gen_column { align-self: stretch; }
2383
+
2384
+ /* Title / Header */
2385
+ #title { text-align: center; margin-bottom: 20px; }
2386
+ #title h1 {
2387
+ font-size: 3em;
2388
+ display: inline-flex;
2389
+ align-items: center;
2390
+ color: #d4af37; /* Antique gold */
2391
+ text-shadow: 2px 2px 4px #000000;
2392
+ }
2393
+ #title img { width: 100px; margin-right: 0.5em; }
2394
+
2395
+ /* Gallery & LoRA List */
2396
+ #gallery .grid-wrap { height: 10vh; }
2397
+ #lora_list {
2398
+ background: var(--block-background-fill);
2399
+ padding: 0 1em 0.3em;
2400
+ font-size: 90%;
2401
+ color: #d4af37;
2402
+ border: 1px solid #8b4513;
2403
+ }
2404
+ .card_internal {
2405
+ display: flex;
2406
+ height: 100px;
2407
+ margin-top: 0.5em;
2408
+ border: 2px solid #8b4513;
2409
+ background: #f5f5dc; /* light parchment */
2410
+ border-radius: 8px;
2411
+ padding: 5px;
2412
+ }
2413
+ .card_internal img {
2414
+ margin-right: 1em;
2415
+ border-radius: 5px;
2416
+ border: 1px solid #8b4513;
2417
+ }
2418
+
2419
+ /* Progress Bar */
2420
+ #progress { height: 30px; margin-top: 10px; }
2421
+ #progress .generating { display: none; }
2422
+ .progress-container {
2423
+ width: 100%;
2424
+ height: 30px;
2425
+ background-color: #deb887; /* burlywood */
2426
+ border-radius: 15px;
2427
+ overflow: hidden;
2428
+ margin-bottom: 20px;
2429
+ }
2430
+ .progress-bar {
2431
+ height: 100%;
2432
+ background-color: #a0522d; /* sienna */
2433
+ width: calc(var(--current) / var(--total) * 100%);
2434
+ transition: width 0.5s ease-in-out;
2435
+ }
2436
+
2437
+ /* Body / Overall Page Background */
2438
+ body {
2439
+ /* A parchment/wood background for that medieval feel */
2440
+ background: #fdf6e3 url('https://www.transparenttextures.com/patterns/purty-wood.png') repeat;
2441
+ font-family: 'Cinzel', serif;
2442
+ color: #4b3621;
2443
+ margin: 0;
2444
+ padding: 0;
2445
+ }
2446
  '''
2447
 
2448
  with gr.Blocks(theme="YTheme/Minecraft", css=css, delete_cache=(60, 60)) as app:
 
2451
  <div id="title">
2452
  <h1>⚔️ ChatDnD.net ⚔️</h1>
2453
  <p>
2454
+ <strong>Forge Your Destiny!</strong> Create legendary heroes, intricate maps, epic quests, and awe‐inspiring battle scenes.
2455
+ Whether you’re an adventurer or a Dungeon Master, let your imagination run wild. <br>
2456
+ <a href="https://chatdnd.net" target="_blank">Visit Our Keep</a> |
2457
+ <a href="https://buymeacoffee.com/watchoutformike" target="_blank">Support the Guild</a>
2458
  </p>
2459
  </div>
2460
  """,
 
2465
  with gr.Row():
2466
  with gr.Column(scale=3):
2467
  prompt = gr.Textbox(
2468
+ label="🎲 Your Epic Prompt",
2469
  lines=1,
2470
+ placeholder="Describe your valiant hero, fearsome villain, or an epic clash of forces..."
2471
  )
2472
  with gr.Column(scale=1, elem_id="gen_column"):
2473
  generate_button = gr.Button("Forge Your Vision", variant="primary", elem_id="gen_btn")
 
2476
  selected_info = gr.Markdown("")
2477
  gallery = gr.Gallery(
2478
  [(item["image"], item["title"]) for item in loras],
2479
+ label="πŸ›‘οΈ LoRA Artifacts",
2480
  allow_preview=False,
2481
  columns=3,
2482
  elem_id="gallery",
 
2488
  placeholder="prithivMLmods/Canopus-LoRA-Flux-Anime"
2489
  )
2490
  gr.Markdown(
2491
+ """[Explore our FLUX Artifacts Collection πŸ“œ](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)""",
2492
  elem_id="lora_list"
2493
  )
2494
  custom_lora_info = gr.HTML(visible=False)
 
2513
  minimum=1, maximum=20, step=0.5, value=3.5
2514
  )
2515
  steps = gr.Slider(
2516
+ label="⏱️ Generation Steps",
2517
  minimum=1, maximum=50, step=1, value=28
2518
  )
2519