import gradio as gr
from huggingface_hub import InferenceClient

def generate_image(prompt, seed):
    # Query the hosted LoRA model through the Hugging Face Inference API
    client = InferenceClient(model="tryonlabs/FLUX.1-dev-LoRA-Lehenga-Generator")
    image = client.text_to_image(
        prompt=prompt,
        # A seed of -1 means "random": omit the seed so the API picks one
        seed=seed if seed >= 0 else None,
    )
    return image

# Simple Gradio UI: a prompt box and a seed slider, returning the generated image
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here..."),
        gr.Slider(label="Seed", minimum=-1, maximum=1000000, step=1, value=-1, info="Set to -1 for a random seed"),
    ],
    outputs=gr.Image(label="Generated Lehenga Image"),
    title="FLUX.1-dev Lehenga Generator",
    description="Generate custom lehenga designs using FLUX.1-dev with a LoRA adapter. Adjust the seed for reproducible results.",
)

interface.launch()
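
# Note (assumption, not part of the original script): when running outside a
# Hugging Face Space, the Inference API may require authentication; if so, pass
# a token explicitly, e.g.
#   client = InferenceClient(model="tryonlabs/FLUX.1-dev-LoRA-Lehenga-Generator", token="hf_...")
# For local runs, interface.launch(share=True) additionally exposes a temporary public URL.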