import gradio as gr
import numpy as np
import random
import os
import base64
import requests
import io
from PIL import Image, ImageOps
import pillow_heif  # For HEIF/AVIF support

# --- Constants ---
MAX_SEED = np.iinfo(np.int32).max
API_URL = "https://router.huggingface.co/fal-ai/fal-ai/flux-kontext/dev?_subdomain=queue"


def get_headers():
    """Get headers for API requests."""
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise gr.Error(
            "HF_TOKEN environment variable not found. "
            "Please add your Hugging Face token to the Space settings."
        )
    return {
        "Authorization": f"Bearer {hf_token}",
        "X-HF-Bill-To": "huggingface"
    }


def query_api(payload):
    """Send a request to the API and return the image as raw bytes."""
    headers = get_headers()
    response = requests.post(API_URL, headers=headers, json=payload)

    if response.status_code != 200:
        raise gr.Error(f"API request failed with status {response.status_code}: {response.text}")

    # Debug: inspect the response before trying to decode it
    print(f"Response status: {response.status_code}")
    print(f"Response headers: {dict(response.headers)}")
    print(f"Response content type: {response.headers.get('content-type', 'unknown')}")
    print(f"Response content length: {len(response.content)}")
    print(f"First 200 bytes of response: {response.content[:200]}")

    # Check if the response is JSON (error or base64 image) or binary (direct image)
    content_type = response.headers.get('content-type', '').lower()

    if 'application/json' in content_type:
        # JSON response: might contain a base64 image or an error
        try:
            json_response = response.json()
            print(f"JSON response: {json_response}")
            if 'image' in json_response:
                # Decode the base64 image
                return base64.b64decode(json_response['image'])
            elif 'images' in json_response and len(json_response['images']) > 0:
                # Multiple images: take the first one
                return base64.b64decode(json_response['images'][0])
            else:
                raise gr.Error(f"Unexpected JSON response format: {json_response}")
        except gr.Error:
            # Don't re-wrap errors raised above
            raise
        except Exception as e:
            raise gr.Error(f"Failed to parse JSON response: {str(e)}")
    elif 'image/' in content_type:
        # Response is direct image bytes
        return response.content
    else:
        # Unknown content type: try base64 first, then fall back to raw bytes
        try:
            # Maybe the entire response is base64 encoded
            return base64.b64decode(response.content)
        except Exception:
            # Return as-is and let PIL try to handle it
            return response.content

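# Note on query_api's return shapes (a best-effort sketch of what the router
# may send back; the exact schema is not guaranteed by this code):
#
#   1. JSON containing a base64-encoded image, e.g.
#        {"image": "<base64>"}  or  {"images": ["<base64>", ...]}
#   2. Raw image bytes served with an image/* content type.
#
# Anything else is decoded on a best-effort basis (base64 first, then raw).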

# --- Core Inference Function for ChatInterface ---
def chat_fn(message, chat_history, seed, randomize_seed, guidance_scale, steps, progress=gr.Progress()):
    """
    Performs image generation or editing based on user input from the chat interface.
    """
    # Register the HEIF opener with PIL for AVIF/HEIF support
    pillow_heif.register_heif_opener()

    prompt = message["text"]
    files = message["files"]

    if not prompt and not files:
        raise gr.Error("Please provide a prompt and/or upload an image.")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Prepare the payload
    payload = {
        "parameters": {
            "prompt": prompt,
            "seed": seed,
            "guidance_scale": guidance_scale,
            "num_inference_steps": steps
        }
    }

    if files:
        print(f"Received image: {files[0]}")
        try:
            input_image = Image.open(files[0])
            # Auto-orient based on EXIF data first; mode conversion can drop EXIF
            input_image = ImageOps.exif_transpose(input_image)
            # Convert to RGB if needed (handles RGBA, P, etc.)
            if input_image.mode != "RGB":
                input_image = input_image.convert("RGB")

            # Convert the PIL image to base64 for the API
            img_byte_arr = io.BytesIO()
            input_image.save(img_byte_arr, format='PNG')
            img_byte_arr.seek(0)
            image_base64 = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')

            # Add the image to the payload for image-to-image
            payload["inputs"] = image_base64
        except Exception as e:
            raise gr.Error(
                f"Could not process the uploaded image: {str(e)}. "
                "Please try uploading a different image format (JPEG, PNG, WebP)."
            )
        progress(0.1, desc="Processing image...")
    else:
        print(f"Received prompt for text-to-image: {prompt}")
        # For text-to-image, the inputs field is not needed
        progress(0.1, desc="Generating image...")

    try:
        # Make the API request
        image_bytes = query_api(payload)

        # Try to convert the response bytes to a PIL Image
        try:
            image = Image.open(io.BytesIO(image_bytes))
        except Exception as img_error:
            print(f"Failed to open image directly: {img_error}")
            # Maybe it's a different format; save it for offline inspection
            with open('/tmp/debug_response.bin', 'wb') as f:
                f.write(image_bytes)
            print("Saved response to /tmp/debug_response.bin for debugging")
            # If direct opening failed, try decoding as base64
            try:
                decoded_bytes = base64.b64decode(image_bytes)
                image = Image.open(io.BytesIO(decoded_bytes))
            except Exception:
                raise gr.Error(
                    f"Could not process API response as image. "
                    f"Response type: {type(image_bytes)}, "
                    f"Length: {len(image_bytes) if isinstance(image_bytes, (bytes, str)) else 'unknown'}"
                )

        progress(1.0, desc="Complete!")
        return gr.Image(value=image)

    except gr.Error:
        # Re-raise gradio errors as-is
        raise
    except Exception as e:
        raise gr.Error(f"Failed to generate image: {str(e)}")


# --- UI Definition using gr.ChatInterface ---
seed_slider = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
randomize_checkbox = gr.Checkbox(label="Randomize seed", value=False)
guidance_slider = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=2.5)
steps_slider = gr.Slider(label="Steps", minimum=1, maximum=30, value=28, step=1)

demo = gr.ChatInterface(
    fn=chat_fn,
    title="FLUX.1 Kontext [dev] - Direct API",
    description="""

A simple chat UI for the FLUX.1 Kontext model using direct API calls with requests.
To edit an image, upload it and type your instructions (e.g., "Add a hat").
To generate an image, just type a prompt (e.g., "A photo of an astronaut on a horse").
Find the model on Hugging Face.

""", multimodal=True, textbox=gr.MultimodalTextbox( file_types=["image"], placeholder="Type a prompt and/or upload an image...", render=False ), additional_inputs=[ seed_slider, randomize_checkbox, guidance_slider, steps_slider ], theme="soft" ) if __name__ == "__main__": demo.launch()