import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator
import asyncio
from PIL import Image
from gradio_client import Client, handle_file
import uuid
MAX_SEED = np.iinfo(np.int32).max

# Initialize the AsyncInferenceClient globally
client = AsyncInferenceClient()
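# Deployment assumption (not configured in this file): if a selected model requires
# authentication, the client is expected to pick up credentials from the usual
# Hugging Face sources, e.g. an HF_TOKEN environment variable or a cached
# `huggingface-cli login`.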


def enable_lora(lora_add, basemodel):
    # Use the LoRA repository as the model when one is provided; otherwise keep the base model.
    return basemodel if not lora_add else lora_add


async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    """Translate the prompt to English and request an image from the Inference API."""
    try:
        # Use a random seed when the caller passes -1
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)

        # Translate the prompt and append the LoRA trigger word
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word

        # Generate the image (the seed is returned to the caller but is not
        # forwarded to the inference call in this code)
        image = await client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model
        )
        return image, seed
    except Exception as e:
        print(f"Error generating image: {e}")
        return None, None


def get_upscale_finegrain(prompt, img_path, upscale_factor):
    """Upscale an image via the finegrain/finegrain-image-enhancer Space and return the result path."""
    try:
        # Separate gradio_client connection; named to avoid shadowing the global inference client
        enhancer_client = Client("finegrain/finegrain-image-enhancer")
        result = enhancer_client.predict(
            input_image=handle_file(img_path),
            prompt=prompt,
            negative_prompt="",
            seed=42,
            upscale_factor=upscale_factor,
            controlnet_scale=0.6,
            controlnet_decay=1,
            condition_scale=6,
            tile_width=112,
            tile_height=144,
            denoise_strength=0.35,
            num_inference_steps=18,
            solver="DDIM",
            api_name="/process"
        )
        # The "/process" endpoint returns several values; this app uses index 1 as the enhanced image path
        return result[1]
    except Exception as e:
        print(f"Error upscaling image: {e}")
        return None


async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    """Run the full pipeline; on success return [generated image path, upscaled (or duplicate) image path]."""
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel

    # Unique file names so concurrent requests do not overwrite each other's files
    temp_image_path = f"temp_image_{uuid.uuid4().hex}.jpg"
    upscale_image_path = f"upscale_image_{uuid.uuid4().hex}.jpg"

    try:
        # Generate the image
        image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
        if image is None:
            return ["Generation failed", None]

        # Save the image locally so it can be passed to the upscaler and served by Gradio
        image.save(temp_image_path, format="JPEG")

        # Process upscale if required
        if process_upscale:
            upscale_result_path = get_upscale_finegrain(prompt, temp_image_path, upscale_factor)
            if upscale_result_path is not None:
                upscale_image = Image.open(upscale_result_path)
                upscale_image.save(upscale_image_path, format="JPEG")
                return [temp_image_path, upscale_image_path]
            else:
                return ["Upscale failed", temp_image_path]
        else:
            return [temp_image_path, temp_image_path]
    except Exception as e:
        print(f"Error in generation pipeline: {e}")
        # Clean up partially written files after a failure; files from successful runs
        # are left in place so Gradio can still read the returned paths
        try:
            if os.path.exists(temp_image_path):
                os.remove(temp_image_path)
            if os.path.exists(upscale_image_path):
                os.remove(upscale_image_path)
        except Exception as cleanup_error:
            print(f"Error during cleanup: {cleanup_error}")
        return ["Error", None]
css = """
#col-container{ margin: 0 auto; max-width: 1024px;}
"""


with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column(scale=3):
                output_res = gr.Image(label="Generated Image / Upscaled Image")
            with gr.Column(scale=2):
                prompt = gr.Textbox(label="Image Description")
                basemodel_choice = gr.Dropdown(
                    label="Model",
                    choices=[
                        "black-forest-labs/FLUX.1-schnell",
                        "black-forest-labs/FLUX.1-DEV",
                        "enhanceaiteam/Flux-uncensored",
                        "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
                        "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
                        "city96/FLUX.1-dev-gguf"
                    ],
                    value="black-forest-labs/FLUX.1-schnell"
                )
                lora_model_choice = gr.Dropdown(
                    label="LoRA",
                    choices=[
                        "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
                        "XLabs-AI/flux-RealismLora",
                        "enhanceaiteam/Flux-uncensored"
                    ],
                    value="XLabs-AI/flux-RealismLora"
                )
                process_lora = gr.Checkbox(label="LoRA Process")
                process_upscale = gr.Checkbox(label="Scale Process")
                upscale_factor = gr.Radio(label="Scaling Factor", choices=[2, 4, 8], value=2)

                with gr.Accordion(label="Advanced Options", open=False):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
                    scales = gr.Slider(label="Scale", minimum=1, maximum=20, step=1, value=8)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=8)
                    seed = gr.Number(label="Seed", value=-1)

                btn = gr.Button("Generate")
                btn.click(
                    fn=gen,
                    inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
                    outputs=output_res,
                )
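# Illustrative smoke test (a sketch, not part of the original app): the pipeline can
# be exercised without the UI by running the coroutine directly. The prompt and
# parameter values below are placeholder assumptions.
#
#   result = asyncio.run(
#       gen("a watercolor fox in a forest", "black-forest-labs/FLUX.1-schnell",
#           1024, 768, 8, 8, -1, 2, False, "", False)
#   )
#   print(result)
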
demo.launch()