import gradio as gr
import random
import os
from PIL import Image
from typing import Optional
from huggingface_hub import InferenceClient
import tempfile
import time
import json
import uuid
import re
# Project by Nymbo - Converted to MCP Server
# Configuration
API_TOKEN = os.getenv("HF_READ_TOKEN")
timeout = 100
# Helper: build a public base URL for the running app (HF Spaces or local)
def _slugify_for_subdomain(s: str) -> str:
s = s.strip().lower()
s = s.replace(".", "-").replace("_", "-").replace(" ", "-")
s = re.sub(r"[^a-z0-9-]", "-", s)
s = re.sub(r"-+", "-", s).strip("-")
return s
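# Illustrative examples of the slug rules above (hypothetical inputs):
#   "My Space.Name" -> "my-space-name"
#   "FLUX.1_Krea"   -> "flux-1-krea"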
def _build_public_base_url() -> str:
# Allow explicit override via env
for var in ("HF_SPACE_URL", "SPACE_URL", "PUBLIC_SPACE_URL"):
val = os.getenv(var)
if val:
return val.rstrip("/")
space_id = os.getenv("SPACE_ID")
if space_id:
# If a full URL was provided, use it directly
if space_id.startswith("http://") or space_id.startswith("https://"):
return space_id.rstrip("/")
# Typical HF Spaces SPACE_ID is "owner/space-name"
if "/" in space_id:
owner, space = space_id.split("/", 1)
sub = f"{_slugify_for_subdomain(owner)}-{_slugify_for_subdomain(space)}"
else:
# Fall back to slugifying the whole string
sub = _slugify_for_subdomain(space_id)
return f"https://{sub}.hf.space"
# Local fallback
host = os.getenv("GRADIO_SERVER_NAME", "localhost")
port = os.getenv("GRADIO_SERVER_PORT", os.getenv("PORT", "7860"))
return f"http://{host}:{port}"
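# Illustrative behavior, derived from the code above (the Space name is hypothetical):
#   SPACE_ID="Nymbo/FLUX.1-Krea-Serverless" -> "https://nymbo-flux-1-krea-serverless.hf.space"
#   no SPACE_ID and no overrides            -> "http://localhost:7860"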
def flux_krea_generate(
prompt: str,
negative_prompt: str = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
steps: int = 35,
cfg_scale: float = 7.0,
sampler: str = "DPM++ 2M Karras",
seed: int = -1,
strength: float = 0.7,
width: int = 1024,
height: int = 1024
) -> str:
"""
Generate high-quality professional images using FLUX.1-Krea-dev via HuggingFace Serverless Inference.
This MCP tool creates realistic, professional-quality images from text prompts using the
FLUX.1-Krea-dev model through HuggingFace's serverless inference infrastructure. The model
excels at generating natural-looking images without typical AI artifacts, making it ideal
for product photography, e-commerce visuals, concept art, fashion photography, and stock images.
Args:
        prompt: Detailed text description of the image to generate. Prompts of roughly 60-70 words work best. Include technical photography terms, specific lighting, and material textures for photorealistic results.
negative_prompt: Elements to avoid in the generated image (deformities, poor anatomy, etc.).
steps: Number of denoising steps (1-100). Higher values generally improve quality but increase generation time.
cfg_scale: Classifier-free guidance scale (1-20). Higher values make the model follow the prompt more closely but may reduce creativity.
        sampler: Sampling method for the diffusion process. Options: "DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM". Recorded in the output metadata; not forwarded to the serverless inference call.
seed: Random seed for reproducible results. Use -1 for random generation each time.
        strength: Generation strength parameter (0-1). Accepted for interface compatibility; not currently used by the serverless inference call.
width: Output image width in pixels (64-1216). Recommend multiples of 32 for best results.
height: Output image height in pixels (64-1216). Recommend multiples of 32 for best results.
Returns:
JSON string containing the image URL and generation metadata that can be accessed by LLMs and MCP clients.
"""
if not prompt or prompt.strip() == "":
return json.dumps({
"success": False,
"error": "Prompt is required and cannot be empty",
"image_url": None
})
generation_id = random.randint(0, 999)
# Enhance prompt for better quality
enhanced_prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
print(f'\033[1mMCP Generation {generation_id}:\033[0m {enhanced_prompt}')
try:
# Initialize the Hugging Face Inference Client with fallback providers
providers = ["auto", "replicate", "fal-ai"]
for provider in providers:
try:
client = InferenceClient(
api_key=API_TOKEN,
provider=provider
)
# Generate the image using serverless inference
image = client.text_to_image(
prompt=enhanced_prompt,
negative_prompt=negative_prompt,
model="black-forest-labs/FLUX.1-Krea-dev",
width=width,
height=height,
num_inference_steps=steps,
guidance_scale=cfg_scale,
seed=seed if seed != -1 else random.randint(1, 1000000000)
)
# Save the image and return a Gradio-accessible URL
if image:
# Save to a temporary file that Gradio can serve
temp_file = tempfile.NamedTemporaryFile(
delete=False,
suffix=".png",
prefix=f"flux_krea_mcp_{generation_id}_"
)
image.save(temp_file.name)
temp_file.close()
# Create the Gradio file URL that will be accessible to MCP clients
                    # Gradio serves local files at URLs of the form /gradio_api/file=<file_path>
gradio_file_url = f"/gradio_api/file={temp_file.name}"
# Build a proper public base URL (HF Spaces or local)
base_url = _build_public_base_url()
full_url = f"{base_url}{gradio_file_url}"
print(f'\033[1mMCP Generation {generation_id} completed with {provider}!\033[0m')
print(f'🌐 Image accessible at: {full_url}')
# Return JSON with accessible URLs and metadata
result = {
"success": True,
"image_url": full_url,
"gradio_file_url": gradio_file_url,
"local_path": temp_file.name,
"generation_id": generation_id,
"provider": provider,
"model": "black-forest-labs/FLUX.1-Krea-dev",
"prompt": enhanced_prompt,
"parameters": {
"width": width,
"height": height,
"steps": steps,
"cfg_scale": cfg_scale,
"seed": seed if seed != -1 else "random",
"sampler": sampler
                        },
                        "metadata": {
                            "tool": "flux_krea_generate",
                            "timestamp": str(int(time.time())),
"mcp_compatible": True,
"accessible_url": full_url
}
}
return json.dumps(result)
except Exception as provider_error:
print(f"Provider {provider} failed: {provider_error}")
if provider == providers[-1]: # Last provider failed
raise provider_error
continue
except Exception as e:
print(f"Error during MCP image generation: {e}")
error_message = "Image generation failed due to an unknown error."
if "404" in str(e):
error_message = "Model not found. Please ensure the FLUX.1-Krea-dev model is accessible with your API token."
elif "503" in str(e):
error_message = "The model is currently being loaded. Please try again in a moment."
elif "401" in str(e) or "403" in str(e):
error_message = "Authentication failed. Please check your HF_READ_TOKEN environment variable."
return json.dumps({
"success": False,
"error": error_message,
"image_url": None,
"gradio_file_url": None,
"local_path": None,
"generation_id": generation_id,
"metadata": {
"tool": "flux_krea_generate",
"mcp_compatible": True,
"error": True
}
})
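# Illustrative sketch (not part of the original app): how the JSON returned by
# flux_krea_generate can be consumed programmatically. FLUX_KREA_SMOKE_TEST is a
# hypothetical opt-in environment variable, so this block never runs by default.
if os.getenv("FLUX_KREA_SMOKE_TEST") == "1":
    _demo_result = json.loads(flux_krea_generate("a studio photograph of a red ceramic mug"))
    if _demo_result.get("success"):
        print("Smoke test image URL:", _demo_result["image_url"])
    else:
        print("Smoke test error:", _demo_result.get("error"))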
# For UI compatibility - this function returns a PIL Image for the Gradio interface
def flux_krea_generate_ui(*args) -> Optional[Image.Image]:
"""UI wrapper that returns PIL Image for Gradio interface"""
result_json = flux_krea_generate(*args)
try:
result = json.loads(result_json)
if result.get("success") and result.get("local_path"):
# Return the PIL Image for the UI
return Image.open(result["local_path"])
except Exception as e:
print(f"UI wrapper error: {e}")
return None
# CSS for improved styling
css = """
#app-container {
max-width: 900px;
margin-left: auto;
margin-right: auto;
}
.mcp-badge {
background: linear-gradient(45deg, #ff6b6b, #4ecdc4);
color: white;
padding: 5px 10px;
border-radius: 15px;
font-size: 12px;
font-weight: bold;
}
"""
# Build the Gradio app
with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css, title="FLUX.1-Krea MCP Server") as app:
# Header
gr.HTML("""
<center>
<h1>🚀 FLUX.1-Krea-dev <span class="mcp-badge">MCP SERVER</span></h1>
<p>High-quality serverless image generation via Model Context Protocol</p>
<p><em>Professional-grade images • No AI artifacts • MCP-compatible</em></p>
</center>
""")
# Main interface
with gr.Column(elem_id="app-container"):
# Prompt input
with gr.Row():
text_prompt = gr.Textbox(
label="Image Prompt",
placeholder="Describe the image you want to generate (60-70 words recommended)",
lines=3,
elem_id="prompt-text-input"
)
# Advanced settings accordion
with gr.Row():
with gr.Accordion("🔧 Advanced Generation Settings", open=False):
negative_prompt = gr.Textbox(
label="Negative Prompt",
placeholder="What should NOT be in the image",
value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
lines=3,
elem_id="negative-prompt-text-input"
)
with gr.Row():
width = gr.Slider(
label="Width",
value=1024,
minimum=64,
maximum=1216,
step=32,
info="Output width in pixels"
)
height = gr.Slider(
label="Height",
value=1024,
minimum=64,
maximum=1216,
step=32,
info="Output height in pixels"
)
with gr.Row():
steps = gr.Slider(
label="Sampling Steps",
value=35,
minimum=1,
maximum=100,
step=1,
info="More steps = higher quality, longer generation time"
)
cfg = gr.Slider(
label="CFG Scale",
value=7,
minimum=1,
maximum=20,
step=1,
info="How closely to follow the prompt"
)
with gr.Row():
strength = gr.Slider(
label="Strength",
value=0.7,
minimum=0,
maximum=1,
step=0.01,
info="Generation strength parameter"
)
seed = gr.Slider(
label="Seed",
value=-1,
minimum=-1,
maximum=1000000000,
step=1,
info="Use -1 for random seed"
)
sampler = gr.Radio(
label="Sampling Method",
value="DPM++ 2M Karras",
choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
info="Algorithm used for image generation"
)
# Generation button
with gr.Row():
generate_button = gr.Button("🎨 Generate Image", variant='primary', elem_id="gen-button", scale=1)
# Output area
with gr.Row():
image_output = gr.Image(
label="Generated Image",
elem_id="gallery",
show_share_button=True,
show_download_button=True
)
# MCP Information
with gr.Row():
gr.HTML("""
<div style="background: #f0f0f0; padding: 15px; border-radius: 10px; margin-top: 20px;">
<h3>📡 MCP Server Information</h3>
<p><strong>Server Endpoint:</strong> <code>/gradio_api/mcp/sse</code></p>
<p><strong>Tool Name:</strong> <code>flux_krea_generate</code></p>
<p><strong>Image URLs:</strong> Returns accessible Gradio file URLs like <code>/gradio_api/file=&lt;path&gt;</code></p>
<p>This server exposes the image generation function as an MCP tool that returns JSON with accessible image URLs for LLM integration.</p>
<p><em>✅ Fixed: LLMs can now access generated images via proper Gradio file URLs</em></p>
</div>
""")
# Wire up the UI event (this is separate from the MCP tool)
generate_button.click(
flux_krea_generate_ui,
inputs=[text_prompt, negative_prompt, steps, cfg, sampler, seed, strength, width, height],
outputs=image_output,
show_api=False # Hide from API docs since we have dedicated MCP tool
)
# Expose the MCP tool with clear API documentation
gr.api(
flux_krea_generate,
api_name="flux_krea_generate",
api_description=(
"MCP Tool: Generate professional-quality images using FLUX.1-Krea-dev via serverless inference. "
"Returns JSON with image URL and metadata for MCP clients and LLMs. "
"Optimized for natural, realistic images without AI artifacts."
)
)
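    # Illustrative example (not part of the original app): the kind of configuration an
    # SSE-capable MCP client would use to reach this server. Exact keys vary by client,
    # and the Space URL below is a placeholder rather than a real deployment.
    example_mcp_client_config = {
        "mcpServers": {
            "flux-krea": {
                "url": "https://<owner>-<space>.hf.space/gradio_api/mcp/sse"
            }
        }
    }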
# Launch with MCP server enabled
if __name__ == "__main__":
# Enable MCP server functionality
app.launch(
mcp_server=True,
show_api=True,
share=False,
server_name="0.0.0.0",
server_port=7860,
show_error=True
)
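    # Once running, the MCP wiring can be checked manually (paths per Gradio's MCP support;
    # adjust host/port if the launch settings change):
    #   SSE endpoint: http://localhost:7860/gradio_api/mcp/sse
    #   Tool schema:  http://localhost:7860/gradio_api/mcp/schema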