import gradio as gr
import numpy as np
import spaces
import torch
import random
import json
import os
from PIL import Image
from diffusers import FluxKontextPipeline
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard
from safetensors.torch import load_file
import requests
import re

MAX_SEED = np.iinfo(np.int32).max

# Load the FLUX.1 Kontext base model from the Hugging Face Hub
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=torch.bfloat16
).to("cuda")

# Load LoRA data from our custom JSON file
with open("kontext_loras.json", "r") as file:
    data = json.load(file)

# Add default values for keys that might be missing, to prevent errors
flux_loras_raw = [
    {
        "image": item["image"],
        "title": item["title"],
        "repo": item["repo"],
        "weights": item.get("weights", "pytorch_lora_weights.safetensors"),
        "prompt": item.get("prompt", f"Turn this image into {item['title']} style."),
        # The following keys are kept for compatibility with the original demo structure,
        # but our simplified logic doesn't heavily rely on them.
        "lora_type": item.get("lora_type", "flux"),
        "lora_scale_config": item.get("lora_scale", 1.0),  # Default scale set to 1.0
        "prompt_placeholder": item.get("prompt_placeholder", "You can edit the prompt here..."),
    }
    for item in data
]
print(f"Loaded {len(flux_loras_raw)} LoRAs from kontext_loras.json")
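
# For reference, each entry in kontext_loras.json is expected to carry the keys read
# above. A minimal, hypothetical example entry (placeholder values, not a real repo):
# [
#   {
#     "image": "thumbnails/pencil_sketch.png",
#     "title": "Pencil Sketch",
#     "repo": "your-username/flux-kontext-pencil-sketch",
#     "weights": "pytorch_lora_weights.safetensors",
#     "prompt": "Turn this image into Pencil Sketch style.",
#     "lora_scale": 1.0
#   }
# ]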

def update_selection(selected_state: gr.SelectData, flux_loras):
    """Update UI when a LoRA is selected"""
    if selected_state.index >= len(flux_loras):
        return "### No LoRA selected", gr.update(), None, gr.update()

    selected_lora = flux_loras[selected_state.index]
    lora_repo = selected_lora["repo"]
    default_prompt = selected_lora.get("prompt")
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo})"
    optimal_scale = selected_lora.get("lora_scale_config", 1.0)

    print("Selected Style: ", selected_lora["title"])
    print("Optimal Scale: ", optimal_scale)

    return updated_text, gr.update(value=default_prompt), selected_state.index, optimal_scale


# This wrapper is kept for compatibility with the Gradio event triggers
def infer_with_lora_wrapper(input_image, prompt, selected_index, lora_state, custom_lora, seed=0, guidance_scale=2.5, num_inference_steps=28, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
    """Wrapper function to handle state serialization"""
    # The 'custom_lora' and 'lora_state' arguments are no longer used but kept in the signature
    return infer_with_lora(input_image, prompt, selected_index, seed, guidance_scale, num_inference_steps, lora_scale, flux_loras, progress)


@spaces.GPU  # This decorator is only needed on Hugging Face Spaces hardware, not for local execution
def infer_with_lora(input_image, prompt, selected_index, seed=0, guidance_scale=2.5, num_inference_steps=28, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
    """Generate image with selected LoRA"""
    global pipe

    # The seed is always taken directly from the input; randomization has been removed.
    # Unload any previous LoRA to ensure a clean state
    if "selected_lora" in pipe.get_active_adapters():
        pipe.unload_lora_weights()

    # Determine which LoRA to use from our gallery
    lora_to_use = None
    if selected_index is not None and flux_loras and selected_index < len(flux_loras):
        lora_to_use = flux_loras[selected_index]

    if lora_to_use:
        print(f"Applying LoRA: {lora_to_use['title']}")
        try:
            # Load the LoRA directly from the Hugging Face Hub
            pipe.load_lora_weights(
                lora_to_use["repo"],
                weight_name=lora_to_use["weights"],
                adapter_name="selected_lora"
            )
            pipe.set_adapters(["selected_lora"], adapter_weights=[lora_scale])
            print(f"Loaded {lora_to_use['repo']} with scale {lora_scale}")
        except Exception as e:
            print(f"Error loading LoRA: {e}")

    # Use the prompt from the textbox directly.
    final_prompt = prompt
    print(f"Using prompt: {final_prompt}")

    input_image = input_image.convert("RGB")

    try:
        image = pipe(
            image=input_image,
            width=input_image.size[0],
            height=input_image.size[1],
            prompt=final_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            generator=torch.Generator().manual_seed(seed)
        ).images[0]

        # The seed value is no longer returned, as it is not being changed.
        return image, lora_scale
    except Exception as e:
        print(f"Error during inference: {e}")
        # Return an error state for all outputs
        return None, lora_scale


# CSS styling
css = """
#gen_btn{height: 100%}
#gen_column{align-self: stretch}
#main_app {display: flex; gap: 20px;}
#box_column {min-width: 400px;}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#selected_lora {color: #2563eb; font-weight: bold;}
#prompt {flex-grow: 1;}
#run_button {background: linear-gradient(45deg, #2563eb, #3b82f6); color: white; border: none; padding: 8px 16px; border-radius: 6px; font-weight: bold;}
.custom_lora_card {background: #f8fafc; border: 1px solid #e2e8f0; border-radius: 8px; padding: 12px; margin: 8px 0;}
#gallery{overflow: scroll !important}
/* Custom CSS to ensure the input image is fully visible */
#input_image_display div[data-testid="image"] img {object-fit: contain !important;}
"""

# Create Gradio interface
with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
    gr_flux_loras = gr.State(value=flux_loras_raw)

    title = gr.HTML(
        """<h1>FLUX Kontext Super LoRAs🖖</h1>
""", elem_id="title", ) selected_state = gr.State(value=None) # The following states are no longer used by the simplified logic but kept for component structure custom_loaded_lora = gr.State(value=None) lora_state = gr.State(value=1.0) with gr.Row(elem_id="main_app"): with gr.Column(scale=4, elem_id="box_column"): with gr.Group(elem_id="gallery_box"): input_image = gr.Image( label="Upload a picture of yourself", type="pil", height=300, elem_id="input_image_display" ) gallery = gr.Gallery( label="Pick a LoRA", allow_preview=False, columns=4, elem_id="gallery", show_share_button=False, height=300, object_fit="contain" ) custom_model = gr.Textbox( label="Or enter a custom HuggingFace FLUX LoRA", placeholder="e.g., username/lora-name", visible=False ) custom_model_card = gr.HTML(visible=False) custom_model_button = gr.Button("Remove custom LoRA", visible=False) with gr.Column(scale=5): with gr.Row(): prompt = gr.Textbox( label="Editing Prompt", show_label=False, lines=1, max_lines=1, placeholder="opt - describe the person/subject, e.g. 'a man with glasses and a beard'", elem_id="prompt" ) run_button = gr.Button("Generate", variant="primary", elem_id="gen_btn") result = gr.Image(label="Generated Image", interactive=False, height=512) with gr.Accordion("Advanced Settings", open=False): lora_scale = gr.Slider( label="LoRA Scale", minimum=0, maximum=2, step=0.1, value=1.0, info="Controls the strength of the LoRA effect" ) seed = gr.Slider( label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, ) guidance_scale = gr.Slider( label="Guidance Scale", minimum=1, maximum=10, step=0.1, value=2.5, ) num_inference_steps = gr.Slider( label="Timesteps", minimum=1, maximum=100, step=1, value=28, info="Number of inference steps" ) prompt_title = gr.Markdown( value="### Click on a LoRA in the gallery to select it", visible=True, elem_id="selected_lora", ) # Event handlers # The custom model inputs are no longer needed as we've hidden them. gallery.select( fn=update_selection, inputs=[gr_flux_loras], outputs=[prompt_title, prompt, selected_state, lora_scale], show_progress=False ) gr.on( triggers=[run_button.click, prompt.submit], fn=infer_with_lora_wrapper, inputs=[input_image, prompt, selected_state, lora_state, custom_loaded_lora, seed, guidance_scale, num_inference_steps, lora_scale, gr_flux_loras], outputs=[result, lora_state] ) # Initialize gallery demo.load( fn=lambda loras: ([(item["image"], item["title"]) for item in loras], loras), inputs=[gr_flux_loras], outputs=[gallery, gr_flux_loras] ) demo.queue(default_concurrency_limit=None) demo.launch()