ginipick committed on
Commit 706e9b1 · verified · 1 Parent(s): 43a5ea5

Update app.py

Files changed (1)
  1. app.py +771 -24
app.py CHANGED
@@ -21,7 +21,7 @@ pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev
21
  # Load LoRA data
22
  flux_loras_raw = [
23
  {
24
- "image": "examples/1.webp",
25
  "title": "Studio Ghibli",
26
  "repo": "openfree/flux-chatgpt-ghibli-lora",
27
  "trigger_word": "ghibli",
@@ -29,7 +29,7 @@ flux_loras_raw = [
29
  "likes": 0
30
  },
31
  {
32
- "image": "examples/2.webp",
33
  "title": "Winslow Homer",
34
  "repo": "openfree/winslow-homer",
35
  "trigger_word": "homer",
@@ -37,7 +37,7 @@ flux_loras_raw = [
37
  "likes": 0
38
  },
39
  {
40
- "image": "examples/3.webp",
41
  "title": "Van Gogh",
42
  "repo": "openfree/van-gogh",
43
  "trigger_word": "gogh",
@@ -45,7 +45,7 @@ flux_loras_raw = [
45
  "likes": 0
46
  },
47
  {
48
- "image": "examples/4.webp",
49
  "title": "Paul CΓ©zanne",
50
  "repo": "openfree/paul-cezanne",
51
  "trigger_word": "Cezanne",
@@ -53,7 +53,7 @@ flux_loras_raw = [
53
  "likes": 0
54
  },
55
  {
56
- "image": "examples/5.webp",
57
  "title": "Renoir",
58
  "repo": "openfree/pierre-auguste-renoir",
59
  "trigger_word": "Renoir",
@@ -61,7 +61,7 @@ flux_loras_raw = [
61
  "likes": 0
62
  },
63
  {
64
- "image": "examples/6.webp",
65
  "title": "Claude Monet",
66
  "repo": "openfree/claude-monet",
67
  "trigger_word": "claude monet",
@@ -69,7 +69,7 @@ flux_loras_raw = [
69
  "likes": 0
70
  },
71
  {
72
- "image": "examples/7.webp",
73
  "title": "Fantasy Art",
74
  "repo": "openfree/myt-flux-fantasy",
75
  "trigger_word": "fantasy",
@@ -227,36 +227,783 @@ def classify_gallery(flux_loras):
227
  sorted_gallery = sorted(flux_loras, key=lambda x: x.get("likes", 0), reverse=True)
228
  gallery_items = []
229
 
230
- # Color mapping for each style
231
- style_colors = {
232
- "Studio Ghibli": "81C784/FFFFFF", # Green
233
- "Winslow Homer": "64B5F6/FFFFFF", # Blue
234
- "Van Gogh": "FFD54F/333333", # Yellow
235
- "Paul CΓ©zanne": "FF8A65/FFFFFF", # Orange
236
- "Renoir": "F06292/FFFFFF", # Pink
237
- "Claude Monet": "9575CD/FFFFFF", # Purple
238
- "Fantasy Art": "4FC3F7/FFFFFF" # Light Blue
239
  }
240
 
241
  for item in sorted_gallery:
242
  if "image" in item and "title" in item:
243
  image_url = item["image"]
244
  title = item["title"]
245
 
246
- # If image is a local file path, use a styled placeholder
247
- if isinstance(image_url, str) and (image_url.startswith("examples/") or image_url.startswith("/home/") or image_url.startswith("samples/") or not image_url.startswith("http")):
248
- color = style_colors.get(title, "E0E7FF/818CF8")
249
- image_url = f"https://via.placeholder.com/512x512/{color}?text={title.replace(' ', '+')}"
250
-
251
- gallery_items.append((image_url, title))
252
 
253
  if not gallery_items:
254
- print("No gallery items found after filtering")
255
  return [], sorted_gallery
256
 
 
257
  return gallery_items, sorted_gallery
258
  except Exception as e:
259
  print(f"Error in classify_gallery: {e}")
260
  return [], []
261
 
262
  def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
@@ -707,4 +1454,4 @@ with gr.Blocks(css=css) as demo:
707
  )
708
 
709
  demo.queue(default_concurrency_limit=None)
710
- demo.launch()
 
21
  # Load LoRA data
22
  flux_loras_raw = [
23
  {
24
+ "image": "examples/1.png",
25
  "title": "Studio Ghibli",
26
  "repo": "openfree/flux-chatgpt-ghibli-lora",
27
  "trigger_word": "ghibli",
 
29
  "likes": 0
30
  },
31
  {
32
+ "image": "examples/2.png",
33
  "title": "Winslow Homer",
34
  "repo": "openfree/winslow-homer",
35
  "trigger_word": "homer",
 
37
  "likes": 0
38
  },
39
  {
40
+ "image": "examples/3.png",
41
  "title": "Van Gogh",
42
  "repo": "openfree/van-gogh",
43
  "trigger_word": "gogh",
 
45
  "likes": 0
46
  },
47
  {
48
+ "image": "examples/4.png",
49
  "title": "Paul CΓ©zanne",
50
  "repo": "openfree/paul-cezanne",
51
  "trigger_word": "Cezanne",
 
53
  "likes": 0
54
  },
55
  {
56
+ "image": "examples/5.png",
57
  "title": "Renoir",
58
  "repo": "openfree/pierre-auguste-renoir",
59
  "trigger_word": "Renoir",
 
61
  "likes": 0
62
  },
63
  {
64
+ "image": "examples/6.png",
65
  "title": "Claude Monet",
66
  "repo": "openfree/claude-monet",
67
  "trigger_word": "claude monet",
 
69
  "likes": 0
70
  },
71
  {
72
+ "image": "examples/7.png",
73
  "title": "Fantasy Art",
74
  "repo": "openfree/myt-flux-fantasy",
75
  "trigger_word": "fantasy",
 
227
  sorted_gallery = sorted(flux_loras, key=lambda x: x.get("likes", 0), reverse=True)
228
  gallery_items = []
229
 
230
+ for item in sorted_gallery:
231
+ if "image" in item and "title" in item:
232
+ image_url = item["image"]
233
+ title = item["title"]
234
+
235
+ # Try to load local images with PIL
236
+ if isinstance(image_url, str) and image_url.startswith("examples/"):
237
+ try:
238
+ import os
239
+ # Try different possible paths
240
+ possible_paths = [
241
+ image_url,
242
+ os.path.join(os.getcwd(), image_url),
243
+ f"/home/user/app/{image_url}"
244
+ ]
245
+
246
+ image_loaded = False
247
+ for path in possible_paths:
248
+ if os.path.exists(path):
249
+ try:
250
+ pil_image = Image.open(path)
251
+ gallery_items.append((pil_image, title))
252
+ image_loaded = True
253
+ print(f"βœ“ Successfully loaded image from: {path}")
254
+ break
255
+ except Exception as e:
256
+ print(f"Failed to open image at {path}: {e}")
257
+
258
+ if not image_loaded:
259
+ print(f"βœ— Could not load image: {image_url}")
260
+ # Use the original path as fallback
261
+ gallery_items.append((image_url, title))
262
+ except Exception as e:
263
+ print(f"Error processing image {image_url}: {e}")
264
+ gallery_items.append((image_url, title))
265
+ else:
266
+ # For URLs or other paths, use as-is
267
+ gallery_items.append((image_url, title))
268
+
269
+ if not gallery_items:
270
+ print("No gallery items found")
271
+ return [], sorted_gallery
272
+
273
+ print(f"Gallery loaded with {len(gallery_items)} items")
274
+ return gallery_items, sorted_gallery
275
+ except Exception as e:
276
+ print(f"Error in classify_gallery: {e}")
277
+ import traceback
278
+ traceback.print_exc()
279
+ return [], []
280
+
281
+ def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
282
+ """Wrapper function to handle state serialization"""
283
+ return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, guidance_scale, lora_scale, flux_loras, progress)
284
+
285
+ @spaces.GPU
286
+ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
287
+ """Generate image with selected LoRA"""
288
+ global current_lora, pipe
289
+
290
+ # Check if input image is provided
291
+ if input_image is None:
292
+ gr.Warning("Please upload your portrait photo first! πŸ“Έ")
293
+ return None, seed, gr.update(visible=False)
294
+
295
+ if randomize_seed:
296
+ seed = random.randint(0, MAX_SEED)
297
+
298
+ # Determine which LoRA to use
299
+ lora_to_use = None
300
+ if custom_lora:
301
+ lora_to_use = custom_lora
302
+ elif selected_index is not None and flux_loras and selected_index < len(flux_loras):
303
+ lora_to_use = flux_loras[selected_index]
304
+ # Load LoRA if needed
305
+ if lora_to_use and lora_to_use != current_lora:
306
+ try:
307
+ # Unload current LoRA
308
+ if current_lora:
309
+ pipe.unload_lora_weights()
310
+ print(f"Unloaded previous LoRA")
311
+
312
+ # Load new LoRA
313
+ repo_id = lora_to_use.get("repo", "unknown")
314
+ weights_file = lora_to_use.get("weights", "pytorch_lora_weights.safetensors")
315
+ print(f"Loading LoRA: {repo_id} with weights: {weights_file}")
316
+
317
+ lora_path = load_lora_weights(repo_id, weights_file)
318
+ if lora_path:
319
+ pipe.load_lora_weights(lora_path, adapter_name="selected_lora")
320
+ pipe.set_adapters(["selected_lora"], adapter_weights=[lora_scale])
321
+ print(f"Successfully loaded: {lora_path} with scale {lora_scale}")
322
+ current_lora = lora_to_use
323
+ else:
324
+ print(f"Failed to load LoRA from {repo_id}")
325
+ gr.Warning(f"Failed to load {lora_to_use.get('title', 'style')}. Please try a different art style.")
326
+ return None, seed, gr.update(visible=False)
327
+
328
+ except Exception as e:
329
+ print(f"Error loading LoRA: {e}")
330
+ # Continue without LoRA
331
+ else:
332
+ if lora_to_use:
333
+ print(f"Using already loaded LoRA: {lora_to_use.get('repo', 'unknown')}")
334
+
335
+ try:
336
+ # Convert image to RGB
337
+ input_image = input_image.convert("RGB")
338
+ except Exception as e:
339
+ print(f"Error processing image: {e}")
340
+ gr.Warning("Error processing the uploaded image. Please try a different photo. πŸ“Έ")
341
+ return None, seed, gr.update(visible=False)
342
+
343
+ # Check if LoRA is selected
344
+ if lora_to_use is None:
345
+ gr.Warning("Please select an art style from the gallery first! 🎨")
346
+ return None, seed, gr.update(visible=False)
347
+
348
+ # Add trigger word to prompt
349
+ trigger_word = lora_to_use.get("trigger_word", "")
350
+
351
+ # Special handling for different art styles
352
+ if trigger_word == "ghibli":
353
+ prompt = f"Create a Studio Ghibli anime style portrait of the person in the photo, {prompt}. Maintain the facial identity while transforming into whimsical anime art style."
354
+ elif trigger_word == "homer":
355
+ prompt = f"Paint the person in Winslow Homer's American realist style, {prompt}. Keep facial features while applying watercolor and marine art techniques."
356
+ elif trigger_word == "gogh":
357
+ prompt = f"Transform the portrait into Van Gogh's post-impressionist style with swirling brushstrokes, {prompt}. Maintain facial identity with expressive colors."
358
+ elif trigger_word == "Cezanne":
359
+ prompt = f"Render the person in Paul CΓ©zanne's geometric post-impressionist style, {prompt}. Keep facial structure while applying structured brushwork."
360
+ elif trigger_word == "Renoir":
361
+ prompt = f"Paint the portrait in Pierre-Auguste Renoir's impressionist style with soft light, {prompt}. Maintain identity with luminous skin tones."
362
+ elif trigger_word == "claude monet":
363
+ prompt = f"Create an impressionist portrait in Claude Monet's style with visible brushstrokes, {prompt}. Keep facial features while using light and color."
364
+ elif trigger_word == "fantasy":
365
+ prompt = f"Transform into an epic fantasy character portrait, {prompt}. Maintain facial identity while adding magical and fantastical elements."
366
+ elif trigger_word == ", How2Draw":
367
+ prompt = f"create a How2Draw sketch of the person of the photo {prompt}, maintain the facial identity of the person and general features"
368
+ elif trigger_word == ", video game screenshot in the style of THSMS":
369
+ prompt = f"create a video game screenshot in the style of THSMS with the person from the photo, {prompt}. maintain the facial identity of the person and general features"
370
+ else:
371
+ prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features, while still changing the overall style to {trigger_word}."
372
+
373
+ try:
374
+ image = pipe(
375
+ image=input_image,
376
+ prompt=prompt,
377
+ guidance_scale=guidance_scale,
378
+ generator=torch.Generator().manual_seed(seed),
379
+ ).images[0]
380
+
381
+ return image, seed, gr.update(visible=True)
382
+
383
+ except Exception as e:
384
+ print(f"Error during inference: {e}")
385
+ return None, seed, gr.update(visible=False)
386
+
387
+ # CSS styling with beautiful gradient pastel design
388
+ css = """
389
+ /* Global background and container styling */
390
+ .gradio-container {
391
+ background: linear-gradient(135deg, #ffeef8 0%, #e6f3ff 25%, #fff4e6 50%, #f0e6ff 75%, #e6fff9 100%);
392
+ font-family: 'Inter', sans-serif;
393
+ }
394
+
395
+ /* Main app container */
396
+ #main_app {
397
+ display: flex;
398
+ gap: 24px;
399
+ padding: 20px;
400
+ background: rgba(255, 255, 255, 0.85);
401
+ backdrop-filter: blur(20px);
402
+ border-radius: 24px;
403
+ box-shadow: 0 10px 40px rgba(0, 0, 0, 0.08);
404
+ }
405
+
406
+ /* Box column styling */
407
+ #box_column {
408
+ min-width: 400px;
409
+ }
410
+
411
+ /* Gallery box with glassmorphism */
412
+ #gallery_box {
413
+ background: linear-gradient(135deg, rgba(255, 255, 255, 0.9) 0%, rgba(240, 248, 255, 0.9) 100%);
414
+ border-radius: 20px;
415
+ padding: 20px;
416
+ box-shadow: 0 8px 32px rgba(135, 206, 250, 0.2);
417
+ border: 1px solid rgba(255, 255, 255, 0.8);
418
+ }
419
+
420
+ /* Input image styling */
421
+ .image-container {
422
+ border-radius: 16px;
423
+ overflow: hidden;
424
+ box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1);
425
+ }
426
+
427
+ /* Gallery styling */
428
+ #gallery {
429
+ overflow-y: scroll !important;
430
+ max-height: 400px;
431
+ padding: 12px;
432
+ background: rgba(255, 255, 255, 0.5);
433
+ border-radius: 16px;
434
+ scrollbar-width: thin;
435
+ scrollbar-color: #ddd6fe #f5f3ff;
436
+ }
437
+
438
+ #gallery::-webkit-scrollbar {
439
+ width: 8px;
440
+ }
441
+
442
+ #gallery::-webkit-scrollbar-track {
443
+ background: #f5f3ff;
444
+ border-radius: 10px;
445
+ }
446
+
447
+ #gallery::-webkit-scrollbar-thumb {
448
+ background: linear-gradient(180deg, #c7d2fe 0%, #ddd6fe 100%);
449
+ border-radius: 10px;
450
+ }
451
+
452
+ /* Selected LoRA text */
453
+ #selected_lora {
454
+ background: linear-gradient(135deg, #818cf8 0%, #a78bfa 100%);
455
+ -webkit-background-clip: text;
456
+ -webkit-text-fill-color: transparent;
457
+ background-clip: text;
458
+ font-weight: 700;
459
+ font-size: 18px;
460
+ text-align: center;
461
+ padding: 12px;
462
+ margin-bottom: 16px;
463
+ }
464
+
465
+ /* Prompt input field */
466
+ #prompt {
467
+ flex-grow: 1;
468
+ border: 2px solid transparent;
469
+ background: linear-gradient(white, white) padding-box,
470
+ linear-gradient(135deg, #a5b4fc 0%, #e9d5ff 100%) border-box;
471
+ border-radius: 12px;
472
+ padding: 12px 16px;
473
+ font-size: 16px;
474
+ transition: all 0.3s ease;
475
+ }
476
+
477
+ #prompt:focus {
478
+ box-shadow: 0 0 0 4px rgba(165, 180, 252, 0.25);
479
+ }
480
+
481
+ /* Run button with animated gradient */
482
+ #run_button {
483
+ background: linear-gradient(135deg, #a78bfa 0%, #818cf8 25%, #60a5fa 50%, #34d399 75%, #fbbf24 100%);
484
+ background-size: 200% 200%;
485
+ animation: gradient-shift 3s ease infinite;
486
+ color: white;
487
+ border: none;
488
+ padding: 12px 32px;
489
+ border-radius: 12px;
490
+ font-weight: 600;
491
+ font-size: 16px;
492
+ cursor: pointer;
493
+ transition: all 0.3s ease;
494
+ box-shadow: 0 4px 20px rgba(167, 139, 250, 0.4);
495
+ }
496
+
497
+ #run_button:hover {
498
+ transform: translateY(-2px);
499
+ box-shadow: 0 6px 30px rgba(167, 139, 250, 0.6);
500
+ }
501
+
502
+ @keyframes gradient-shift {
503
+ 0% { background-position: 0% 50%; }
504
+ 50% { background-position: 100% 50%; }
505
+ 100% { background-position: 0% 50%; }
506
+ }
507
+
508
+ /* Custom LoRA card */
509
+ .custom_lora_card {
510
+ background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
511
+ border: 1px solid #fcd34d;
512
+ border-radius: 12px;
513
+ padding: 16px;
514
+ margin: 12px 0;
515
+ box-shadow: 0 4px 12px rgba(251, 191, 36, 0.2);
516
+ }
517
+
518
+ /* Result image container */
519
+ .output-image {
520
+ border-radius: 16px;
521
+ overflow: hidden;
522
+ box-shadow: 0 8px 32px rgba(0, 0, 0, 0.12);
523
+ margin-top: 20px;
524
+ }
525
+
526
+ /* Accordion styling */
527
+ .accordion {
528
+ background: rgba(249, 250, 251, 0.9);
529
+ border-radius: 12px;
530
+ border: 1px solid rgba(229, 231, 235, 0.8);
531
+ margin-top: 16px;
532
+ }
533
+
534
+ /* Slider styling */
535
+ .slider-container {
536
+ padding: 8px 0;
537
+ }
538
+
539
+ input[type="range"] {
540
+ background: linear-gradient(to right, #e0e7ff 0%, #c7d2fe 100%);
541
+ border-radius: 8px;
542
+ height: 6px;
543
+ }
544
+
545
+ /* Reuse button */
546
+ button:not(#run_button) {
547
+ background: linear-gradient(135deg, #f0abfc 0%, #c084fc 100%);
548
+ color: white;
549
+ border: none;
550
+ padding: 8px 20px;
551
+ border-radius: 8px;
552
+ font-weight: 500;
553
+ cursor: pointer;
554
+ transition: all 0.3s ease;
555
+ }
556
+
557
+ button:not(#run_button):hover {
558
+ transform: translateY(-1px);
559
+ box-shadow: 0 4px 16px rgba(192, 132, 252, 0.4);
560
+ }
561
+
562
+ /* Title styling */
563
+ h1 {
564
+ background: linear-gradient(135deg, #6366f1 0%, #a855f7 25%, #ec4899 50%, #f43f5e 75%, #f59e0b 100%);
565
+ -webkit-background-clip: text;
566
+ -webkit-text-fill-color: transparent;
567
+ background-clip: text;
568
+ text-align: center;
569
+ font-size: 3.5rem;
570
+ font-weight: 800;
571
+ margin-bottom: 8px;
572
+ text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.1);
573
+ }
574
+
575
+ h1 small {
576
+ display: block;
577
+ background: linear-gradient(135deg, #94a3b8 0%, #64748b 100%);
578
+ -webkit-background-clip: text;
579
+ -webkit-text-fill-color: transparent;
580
+ background-clip: text;
581
+ font-size: 1rem;
582
+ font-weight: 500;
583
+ margin-top: 8px;
584
+ }
585
+
586
+ /* Checkbox styling */
587
+ input[type="checkbox"] {
588
+ accent-color: #8b5cf6;
589
+ }
590
+
591
+ /* Label styling */
592
+ label {
593
+ color: #4b5563;
594
+ font-weight: 500;
595
+ }
596
+
597
+ /* Group containers */
598
+ .gr-group {
599
+ background: rgba(255, 255, 255, 0.7);
600
+ border-radius: 16px;
601
+ padding: 20px;
602
+ border: 1px solid rgba(255, 255, 255, 0.9);
603
+ box-shadow: 0 4px 16px rgba(0, 0, 0, 0.05);
604
+ }
605
+ """
606
+
607
+ # Create Gradio interface
608
+ with gr.Blocks(css=css) as demo:
609
+ gr_flux_loras = gr.State(value=flux_loras_raw)
610
+
611
+ title = gr.HTML(
612
+ """<h1>✨ Flux-Kontext FaceLORA
613
+ <small>Transform your portraits with AI-powered style transfer 🎨</small></h1>""",
614
+ )
615
+
616
+ selected_state = gr.State(value=None)
617
+ custom_loaded_lora = gr.State(value=None)
618
+
619
+ with gr.Row(elem_id="main_app"):
620
+ with gr.Column(scale=4, elem_id="box_column"):
621
+ with gr.Group(elem_id="gallery_box"):
622
+ input_image = gr.Image(label="Upload your portrait photo 📸", type="pil", height=300)
623
+
624
+ gallery = gr.Gallery(
625
+ label="Choose Your Art Style",
626
+ allow_preview=False,
627
+ columns=3,
628
+ elem_id="gallery",
629
+ show_share_button=False,
630
+ height=400
631
+ )
632
+
633
+ custom_model = gr.Textbox(
634
+ label="πŸ”— Or use a custom LoRA from HuggingFace",
635
+ placeholder="e.g., username/lora-name",
636
+ visible=True
637
+ )
638
+ custom_model_card = gr.HTML(visible=False)
639
+ custom_model_button = gr.Button("❌ Remove custom LoRA", visible=False)
640
+
641
+ with gr.Column(scale=5):
642
+ with gr.Row():
643
+ prompt = gr.Textbox(
644
+ label="Additional Details (optional)",
645
+ show_label=False,
646
+ lines=1,
647
+ max_lines=1,
648
+ placeholder="Describe additional details, e.g., 'wearing a red hat' or 'smiling'",
649
+ elem_id="prompt"
650
+ )
651
+ run_button = gr.Button("Generate ✨", elem_id="run_button")
652
+
653
+ result = gr.Image(label="Your Artistic Portrait", interactive=False)
654
+ reuse_button = gr.Button("🔄 Reuse this image", visible=False)
655
+
656
+ with gr.Accordion("⚙️ Advanced Settings", open=False):
657
+ lora_scale = gr.Slider(
658
+ label="Style Strength",
659
+ minimum=0,
660
+ maximum=2,
661
+ step=0.1,
662
+ value=1.0,
663
+ info="How strongly to apply the art style (1.0 = balanced)"
664
+ )
665
+ seed = gr.Slider(
666
+ label="Random Seed",
667
+ minimum=0,
668
+ maximum=MAX_SEED,
669
+ step=1,
670
+ value=0,
671
+ info="Set to 0 for random results"
672
+ )
673
+ randomize_seed = gr.Checkbox(label="🎲 Randomize seed for each generation", value=True)
674
+ guidance_scale = gr.Slider(
675
+ label="Image Guidance",
676
+ minimum=1,
677
+ maximum=10,
678
+ step=0.1,
679
+ value=2.5,
680
+ info="How closely to follow the input image (lower = more creative)"
681
+ )
682
+
683
+ prompt_title = gr.Markdown(
684
+ value="### 🎨 Select an art style from the gallery",
685
+ visible=True,
686
+ elem_id="selected_lora",
687
+ )
688
+
689
+ # Event handlers
690
+ custom_model.input(
691
+ fn=load_custom_lora,
692
+ inputs=[custom_model],
693
+ outputs=[custom_model_card, custom_model_card, custom_model_button, custom_loaded_lora, gallery, prompt_title, selected_state],
694
+ )
695
+
696
+ custom_model_button.click(
697
+ fn=remove_custom_lora,
698
+ outputs=[custom_model, custom_model_button, custom_model_card, custom_loaded_lora, selected_state]
699
+ )
700
+
701
+ gallery.select(
702
+ fn=update_selection,
703
+ inputs=[gr_flux_loras],
704
+ outputs=[prompt_title, prompt, selected_state],
705
+ show_progress=False
706
+ )
707
+
708
+ gr.on(
709
+ triggers=[run_button.click, prompt.submit],
710
+ fn=infer_with_lora_wrapper,
711
+ inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, gr_flux_loras],
712
+ outputs=[result, seed, reuse_button]
713
+ )
714
+
715
+ reuse_button.click(
716
+ fn=lambda image: image,
717
+ inputs=[result],
718
+ outputs=[input_image]
719
+ )
720
+
721
+ # Initialize gallery
722
+ demo.load(
723
+ fn=classify_gallery,
724
+ inputs=[gr_flux_loras],
725
+ outputs=[gallery, gr_flux_loras]
726
+ )
727
+
728
+ demo.queue(default_concurrency_limit=None)
729
+ demo.launch(allowed_paths=["examples/"])import gradio as gr
730
+ import numpy as np
731
+ import spaces
732
+ import torch
733
+ import random
734
+ import json
735
+ import os
736
+ from PIL import Image
737
+ from diffusers import FluxKontextPipeline
738
+ from diffusers.utils import load_image
739
+ from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, list_repo_files
740
+ from safetensors.torch import load_file
741
+ import requests
742
+ import re
743
+
744
+ # Load Kontext model
745
+ MAX_SEED = np.iinfo(np.int32).max
746
+
747
+ pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
748
+
749
+ # Load LoRA data
750
+ flux_loras_raw = [
751
+ {
752
+ "image": "examples/1.png",
753
+ "title": "Studio Ghibli",
754
+ "repo": "openfree/flux-chatgpt-ghibli-lora",
755
+ "trigger_word": "ghibli",
756
+ "weights": "pytorch_lora_weights.safetensors",
757
+ "likes": 0
758
+ },
759
+ {
760
+ "image": "examples/2.png",
761
+ "title": "Winslow Homer",
762
+ "repo": "openfree/winslow-homer",
763
+ "trigger_word": "homer",
764
+ "weights": "pytorch_lora_weights.safetensors",
765
+ "likes": 0
766
+ },
767
+ {
768
+ "image": "examples/3.png",
769
+ "title": "Van Gogh",
770
+ "repo": "openfree/van-gogh",
771
+ "trigger_word": "gogh",
772
+ "weights": "pytorch_lora_weights.safetensors",
773
+ "likes": 0
774
+ },
775
+ {
776
+ "image": "examples/4.png",
777
+ "title": "Paul CΓ©zanne",
778
+ "repo": "openfree/paul-cezanne",
779
+ "trigger_word": "Cezanne",
780
+ "weights": "pytorch_lora_weights.safetensors",
781
+ "likes": 0
782
+ },
783
+ {
784
+ "image": "examples/5.png",
785
+ "title": "Renoir",
786
+ "repo": "openfree/pierre-auguste-renoir",
787
+ "trigger_word": "Renoir",
788
+ "weights": "pytorch_lora_weights.safetensors",
789
+ "likes": 0
790
+ },
791
+ {
792
+ "image": "examples/6.png",
793
+ "title": "Claude Monet",
794
+ "repo": "openfree/claude-monet",
795
+ "trigger_word": "claude monet",
796
+ "weights": "pytorch_lora_weights.safetensors",
797
+ "likes": 0
798
+ },
799
+ {
800
+ "image": "examples/7.png",
801
+ "title": "Fantasy Art",
802
+ "repo": "openfree/myt-flux-fantasy",
803
+ "trigger_word": "fantasy",
804
+ "weights": "pytorch_lora_weights.safetensors",
805
+ "likes": 0
806
+ }
807
+ ]
808
+ print(f"Loaded {len(flux_loras_raw)} LoRAs")
809
+ # Global variables for LoRA management
810
+ current_lora = None
811
+ lora_cache = {}
812
+
813
+ def load_lora_weights(repo_id, weights_filename):
814
+ """Load LoRA weights from HuggingFace"""
815
+ try:
816
+ # First try with the specified filename
817
+ try:
818
+ lora_path = hf_hub_download(repo_id=repo_id, filename=weights_filename)
819
+ if repo_id not in lora_cache:
820
+ lora_cache[repo_id] = lora_path
821
+ return lora_path
822
+ except Exception as e:
823
+ print(f"Failed to load {weights_filename}, trying to find alternative LoRA files...")
824
+
825
+ # If the specified file doesn't exist, try to find any .safetensors file
826
+ from huggingface_hub import list_repo_files
827
+ try:
828
+ files = list_repo_files(repo_id)
829
+ safetensors_files = [f for f in files if f.endswith(('.safetensors', '.bin')) and 'lora' in f.lower()]
830
+
831
+ if not safetensors_files:
832
+ # Try without 'lora' in filename
833
+ safetensors_files = [f for f in files if f.endswith('.safetensors')]
834
+
835
+ if safetensors_files:
836
+ # Try the first available file
837
+ for file in safetensors_files:
838
+ try:
839
+ print(f"Trying alternative file: {file}")
840
+ lora_path = hf_hub_download(repo_id=repo_id, filename=file)
841
+ if repo_id not in lora_cache:
842
+ lora_cache[repo_id] = lora_path
843
+ print(f"Successfully loaded alternative LoRA file: {file}")
844
+ return lora_path
845
+ except:
846
+ continue
847
+
848
+ print(f"No suitable LoRA files found in {repo_id}")
849
+ return None
850
+
851
+ except Exception as list_error:
852
+ print(f"Error listing files in repo {repo_id}: {list_error}")
853
+ return None
854
+
855
+ except Exception as e:
856
+ print(f"Error loading LoRA from {repo_id}: {e}")
857
+ return None
858
+
859
+ def update_selection(selected_state: gr.SelectData, flux_loras):
860
+ """Update UI when a LoRA is selected"""
861
+ if selected_state.index >= len(flux_loras):
862
+ return "### No LoRA selected", gr.update(), None
863
+
864
+ lora = flux_loras[selected_state.index]
865
+ lora_title = lora["title"]
866
+ lora_repo = lora["repo"]
867
+ trigger_word = lora["trigger_word"]
868
+
869
+ # Create a more informative selected text
870
+ updated_text = f"### 🎨 Selected Style: {lora_title}"
871
+ new_placeholder = f"Describe additional details, e.g., 'wearing a red hat' or 'smiling'"
872
+
873
+ return updated_text, gr.update(placeholder=new_placeholder), selected_state.index
874
+
875
+ def get_huggingface_lora(link):
876
+ """Download LoRA from HuggingFace link"""
877
+ split_link = link.split("/")
878
+ if len(split_link) == 2:
879
+ try:
880
+ model_card = ModelCard.load(link)
881
+ trigger_word = model_card.data.get("instance_prompt", "")
882
+
883
+ # Try to find the correct safetensors file
884
+ files = list_repo_files(link)
885
+ safetensors_files = [f for f in files if f.endswith('.safetensors')]
886
+
887
+ # Prioritize files with 'lora' in the name
888
+ lora_files = [f for f in safetensors_files if 'lora' in f.lower()]
889
+ if lora_files:
890
+ safetensors_file = lora_files[0]
891
+ elif safetensors_files:
892
+ safetensors_file = safetensors_files[0]
893
+ else:
894
+ # Try .bin files as fallback
895
+ bin_files = [f for f in files if f.endswith('.bin') and 'lora' in f.lower()]
896
+ if bin_files:
897
+ safetensors_file = bin_files[0]
898
+ else:
899
+ safetensors_file = "pytorch_lora_weights.safetensors" # Default fallback
900
+
901
+ print(f"Found LoRA file: {safetensors_file} in {link}")
902
+ return split_link[1], safetensors_file, trigger_word
903
+
904
+ except Exception as e:
905
+ print(f"Error in get_huggingface_lora: {e}")
906
+ # Try basic detection
907
+ try:
908
+ files = list_repo_files(link)
909
+ safetensors_file = next((f for f in files if f.endswith('.safetensors')), "pytorch_lora_weights.safetensors")
910
+ return split_link[1], safetensors_file, ""
911
+ except:
912
+ raise Exception(f"Error loading LoRA: {e}")
913
+ else:
914
+ raise Exception("Invalid HuggingFace repository format")
915
+
916
+ def load_custom_lora(link):
917
+ """Load custom LoRA from user input"""
918
+ if not link:
919
+ return gr.update(visible=False), "", gr.update(visible=False), None, gr.Gallery(selected_index=None), "### 🎨 Select an art style from the gallery", None
920
+
921
+ try:
922
+ repo_name, weights_file, trigger_word = get_huggingface_lora(link)
923
+
924
+ card = f'''
925
+ <div class="custom_lora_card">
926
+ <div style="display: flex; align-items: center; margin-bottom: 12px;">
927
+ <span style="font-size: 18px; margin-right: 8px;">✅</span>
928
+ <strong style="font-size: 16px;">Custom LoRA Loaded!</strong>
929
+ </div>
930
+ <div style="background: rgba(255, 255, 255, 0.8); padding: 12px; border-radius: 8px;">
931
+ <h4 style="margin: 0 0 8px 0; color: #333;">{repo_name}</h4>
932
+ <small style="color: #666;">{"Trigger: <code style='background: #f0f0f0; padding: 2px 6px; border-radius: 4px;'><b>"+trigger_word+"</b></code>" if trigger_word else "No trigger word found"}</small>
933
+ </div>
934
+ </div>
935
+ '''
936
+
937
+ custom_lora_data = {
938
+ "repo": link,
939
+ "weights": weights_file,
940
+ "trigger_word": trigger_word
941
  }
942
 
943
+ return gr.update(visible=True), card, gr.update(visible=True), custom_lora_data, gr.Gallery(selected_index=None), f"🎨 Custom Style: {repo_name}", None
944
+
945
+ except Exception as e:
946
+ return gr.update(visible=True), f"Error: {str(e)}", gr.update(visible=False), None, gr.update(), "### 🎨 Select an art style from the gallery", None
947
+
948
+ def remove_custom_lora():
949
+ """Remove custom LoRA"""
950
+ return "", gr.update(visible=False), gr.update(visible=False), None, None
951
+
952
+ def classify_gallery(flux_loras):
953
+ """Sort gallery by likes"""
954
+ try:
955
+ sorted_gallery = sorted(flux_loras, key=lambda x: x.get("likes", 0), reverse=True)
956
+ gallery_items = []
957
+
958
  for item in sorted_gallery:
959
  if "image" in item and "title" in item:
960
  image_url = item["image"]
961
  title = item["title"]
962
 
963
+ # Try to load local images with PIL
964
+ if isinstance(image_url, str) and image_url.startswith("examples/"):
965
+ try:
966
+ import os
967
+ # Try different possible paths
968
+ possible_paths = [
969
+ image_url,
970
+ os.path.join(os.getcwd(), image_url),
971
+ f"/home/user/app/{image_url}"
972
+ ]
973
+
974
+ image_loaded = False
975
+ for path in possible_paths:
976
+ if os.path.exists(path):
977
+ try:
978
+ pil_image = Image.open(path)
979
+ gallery_items.append((pil_image, title))
980
+ image_loaded = True
981
+ print(f"βœ“ Successfully loaded image from: {path}")
982
+ break
983
+ except Exception as e:
984
+ print(f"Failed to open image at {path}: {e}")
985
+
986
+ if not image_loaded:
987
+ print(f"βœ— Could not load image: {image_url}")
988
+ # Use the original path as fallback
989
+ gallery_items.append((image_url, title))
990
+ except Exception as e:
991
+ print(f"Error processing image {image_url}: {e}")
992
+ gallery_items.append((image_url, title))
993
+ else:
994
+ # For URLs or other paths, use as-is
995
+ gallery_items.append((image_url, title))
996
 
997
  if not gallery_items:
998
+ print("No gallery items found")
999
  return [], sorted_gallery
1000
 
1001
+ print(f"Gallery loaded with {len(gallery_items)} items")
1002
  return gallery_items, sorted_gallery
1003
  except Exception as e:
1004
  print(f"Error in classify_gallery: {e}")
1005
+ import traceback
1006
+ traceback.print_exc()
1007
  return [], []
1008
 
1009
  def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
 
1454
  )
1455
 
1456
  demo.queue(default_concurrency_limit=None)
1457
+ demo.launch(allowed_paths=["examples/"])
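The bulk of this commit swaps the gallery's placeholder URLs for locally loaded example images, returned as (PIL.Image, caption) tuples, and serves the files via allowed_paths. Below is a minimal standalone sketch of that pattern, assuming hypothetical examples/*.png files and captions; it illustrates the approach rather than reproducing the Space's exact code.

import os
from PIL import Image
import gradio as gr

# Hypothetical entries; the real app derives these from flux_loras_raw.
EXAMPLES = [("examples/1.png", "Studio Ghibli"), ("examples/3.png", "Van Gogh")]

def build_gallery_items(entries):
    """Return (PIL.Image, caption) tuples, falling back to the raw path if a file is missing."""
    items = []
    for path, title in entries:
        if os.path.exists(path):
            items.append((Image.open(path), title))
        else:
            # gr.Gallery also accepts file paths or URLs directly.
            items.append((path, title))
    return items

with gr.Blocks() as demo:
    gallery = gr.Gallery(label="Choose Your Art Style", columns=3)
    # Populate the gallery on page load, mirroring the demo.load() wiring in the diff above.
    demo.load(fn=lambda: build_gallery_items(EXAMPLES), outputs=[gallery])

if __name__ == "__main__":
    # allowed_paths lets Gradio serve images from the local examples/ directory.
    demo.launch(allowed_paths=["examples/"])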