import gradio as gr
import requests
import io
import random
import os
import time
import base64  # Needed to decode base64-encoded images returned by some providers
from PIL import Image
import json

# Project by Nymbo

# Retrieve the API token from environment variables
API_TOKEN = os.getenv("HF_READ_TOKEN")

# Create a list of available API tokens for load balancing
API_TOKENS = [
    os.getenv("HF_READ_TOKEN"),
    os.getenv("HF_READ_TOKEN_2"),
    os.getenv("HF_READ_TOKEN_3"),
    os.getenv("HF_READ_TOKEN_4"),
    os.getenv("HF_READ_TOKEN_5")
]

# Timeout (in seconds) for outgoing requests
timeout = 100
def query(
    prompt,
    model,
    custom_lora,
    is_negative=False,
    steps=35,
    cfg_scale=7,
    sampler="DPM++ 2M Karras",  # Accepted from the UI but not forwarded to providers
    seed=-1,
    strength=0.7,
    width=1024,
    height=1024,
    provider="hf-inference"  # Inference provider selected in the UI
):
    # Debug log to indicate function start
    print("Starting query function...")
    # Print the parameters for debugging purposes
    print(f"Prompt: {prompt}")
    print(f"Model: {model}")
    print(f"Custom LoRA: {custom_lora}")
    print(f"Provider: {provider}")
    print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")

    # Check if the prompt is empty or None
    if prompt == "" or prompt is None:
        print("Prompt is empty or None. Exiting query function.")
        return None

    # Generate a unique key for tracking the generation process
    key = random.randint(0, 999)
    print(f"Generated key: {key}")

    # Randomly select an API token from the available options to distribute the load
    available_tokens = [token for token in API_TOKENS if token]
    if not available_tokens:
        raise gr.Error("No API tokens are configured. Set HF_READ_TOKEN (and optionally HF_READ_TOKEN_2 through HF_READ_TOKEN_5).")
    selected_token = random.choice(available_tokens)
    print("Selected an API token")  # Do not print the actual token

    # Initialize the default headers with authorization
    headers = {"Authorization": f"Bearer {selected_token}"}

    # Select provider-specific headers if needed
    if provider == "fal-ai":
        headers = {
            "Authorization": f"Bearer {os.getenv('FAL_API_KEY')}",
            "Content-Type": "application/json"
        }
    elif provider == "together":
        headers = {
            "Authorization": f"Bearer {os.getenv('TOGETHER_API_KEY')}"
        }
    elif provider == "replicate":
        headers = {
            "Authorization": f"Token {os.getenv('REPLICATE_API_TOKEN')}",
            "Content-Type": "application/json"
        }
    elif provider == "nebius":
        headers = {
            "Authorization": f"Api-Key {os.getenv('NEBIUS_API_KEY')}",
            "Content-Type": "application/json"
        }

    # Enhance the prompt with a model-specific prefix and a quality suffix for better results
    enhanced_prompt = prompt
    if provider == "hf-inference":
        prefix = get_model_prompt_prefixes(model)
        enhanced_prompt = f"{prefix}{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'Generation {key}: {enhanced_prompt}')

    # Set the API URL based on the selected provider, model, or custom LoRA
    API_URL = get_model_api_url(model, custom_lora, provider)
    print(f"API URL set to: {API_URL}")

    # Define the payload for the request (provider-specific), using the enhanced prompt
    payload = build_request_payload(enhanced_prompt, is_negative, steps, cfg_scale, seed, strength, width, height, provider)
    print(f"Payload created for provider: {provider}")

    # Make a request to the API to generate the image
    try:
        response = make_provider_request(API_URL, headers, payload, provider, timeout)
        print(f"Response status code: {response.status_code}")
    except requests.exceptions.RequestException as e:
        print(f"Request failed: {e}")
        raise gr.Error(f"Request failed: {e}")

    # Check if the response status is not successful
    if response.status_code != 200:
        print(f"Error: Failed to retrieve image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        handle_error_response(response)

    try:
        # Process the response based on the provider
        image = process_provider_response(response, provider)
        print(f'Generation {key} completed! ({prompt})')
        return image
    except Exception as e:
        print(f"Error while processing the response: {e}")
        return None
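
# Illustrative sketch (not wired into the UI): query() can also be called directly from
# Python, e.g. to smoke-test a provider. The prompt, model choice, and output filename
# below are arbitrary placeholders; the call requires the relevant API tokens to be set
# in the environment.
def _example_direct_query():
    image = query(
        prompt="a lighthouse on a cliff at dusk, dramatic lighting",
        model="FLUX.1 [Schnell]",
        custom_lora="",
        provider="hf-inference"
    )
    if image is not None:
        image.save("example_output.png")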
def get_model_api_url(model, custom_lora, provider):
    """
    Determine the correct API URL based on model, custom LoRA, and provider
    """
    # If a custom LoRA is specified, use it (only for HF Inference)
    if custom_lora.strip() != "" and provider == "hf-inference":
        return f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"

    # Provider-specific base URLs
    if provider == "fal-ai":
        return "https://gateway.fal.ai/inference"
    elif provider == "replicate":
        return "https://api.replicate.com/v1/predictions"
    elif provider == "nebius":
        return "https://llm.api.cloud.yandex.net/foundationModels/v1/image/generate"
    elif provider == "together":
        return "https://api.together.xyz/v1/images/generations"

    # Default to Hugging Face Inference with the selected model
    # (model-specific prompt prefixes are applied in query(), not here)
    # Map model names to their respective API URLs
    model_urls = {
        'Stable Diffusion XL': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
        'FLUX.1 [Dev]': "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev",
        'FLUX.1 [Schnell]': "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell",
        'HiDream-I1-Full': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Full",
        'HiDream-I1-Dev': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Dev",
        'HiDream-I1-Fast': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Fast",
        'Animagine 4.0': "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-4.0",
        'Flux Icon Kit': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Icon-Kit-LoRA",
        'Pixel Background': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Pixel-Background-LoRA",
        'Meme XD': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Meme-Xd-LoRA",
        'Chill Guy': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Chill-Guy-Zone",
        'Pepe': "https://api-inference.huggingface.co/models/openfree/pepe",
        'NSFWmodel': "https://api-inference.huggingface.co/models/lexa862/NSFWmodel",
        'Claude Art': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Claude-Art",
        'Open Genmoji': "https://api-inference.huggingface.co/models/EvanZhouDev/open-genmoji",
        'EBook Creative Cover': "https://api-inference.huggingface.co/models/prithivMLmods/EBook-Creative-Cover-Flux-LoRA",
        'Flux Logo Design 2': "https://api-inference.huggingface.co/models/prithivMLmods/Logo-Design-Flux-LoRA",
        'Isometric 3D': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Isometric-3D-LoRA",
        'Flux Condensation': "https://api-inference.huggingface.co/models/fofr/flux-condensation",
        'Flux Handwriting': "https://api-inference.huggingface.co/models/fofr/flux-handwriting",
        'Shou Xin': "https://api-inference.huggingface.co/models/Datou1111/shou_xin",
        'Sketch Smudge': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Sketch-Smudge-LoRA",
        '80s Cyberpunk': "https://api-inference.huggingface.co/models/fofr/flux-80s-cyberpunk",
        'Coloring Book Flux': "https://api-inference.huggingface.co/models/renderartist/coloringbookflux",
        'Flux Miniature LoRA': "https://api-inference.huggingface.co/models/gokaygokay/Flux-Miniature-LoRA",
        'Sketch Paint': "https://api-inference.huggingface.co/models/strangerzonehf/Sketch-Paint",
        'Flux UltraRealism 2.0': "https://api-inference.huggingface.co/models/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0",
        'Midjourney Mix': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix-LoRA",
        'Midjourney Mix 2': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix2-LoRA",
        'Flux Logo Design': "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
        'Flux Uncensored': "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored",
        'Flux Uncensored V2': "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2",
        'Flux Tarot Cards': "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA",
        'Pixel Art Sprites': "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux",
        '3D Sketchfab': "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA",
        'Retro Comic Flux': "https://api-inference.huggingface.co/models/renderartist/retrocomicflux",
        'Caricature': "https://api-inference.huggingface.co/models/TheAwakenOne/caricature",
        'Huggieverse': "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse",
        'Propaganda Poster': "https://api-inference.huggingface.co/models/AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion",
        'Flux Game Assets V2': "https://api-inference.huggingface.co/models/gokaygokay/Flux-Game-Assets-LoRA-v2",
        'SDXL HS Card Style': "https://api-inference.huggingface.co/models/Norod78/sdxl-hearthstone-card-style-lora",
        'SLDR FLUX NSFW v2 Studio': "https://api-inference.huggingface.co/models/xey/sldr_flux_nsfw_v2-studio",
        'SoftPasty Flux': "https://api-inference.huggingface.co/models/alvdansen/softpasty-flux-dev",
        'Flux Stickers': "https://api-inference.huggingface.co/models/diabolic6045/Flux_Sticker_Lora",
        'Flux Animex V2': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animex-v2-LoRA",
        'Flux Animeo V1': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animeo-v1-LoRA",
        'Movie Board': "https://api-inference.huggingface.co/models/prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA",
        'Purple Dreamy': "https://api-inference.huggingface.co/models/prithivMLmods/Purple-Dreamy-Flux-LoRA",
        'PS1 Style Flux': "https://api-inference.huggingface.co/models/veryVANYA/ps1-style-flux",
        'Softserve Anime': "https://api-inference.huggingface.co/models/alvdansen/softserve_anime",
        'Flux Tarot v1': "https://api-inference.huggingface.co/models/multimodalart/flux-tarot-v1",
        'Half Illustration': "https://api-inference.huggingface.co/models/davisbro/half_illustration",
        'OpenDalle v1.1': "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1",
        'Flux Ghibsky Illustration': "https://api-inference.huggingface.co/models/aleksa-codes/flux-ghibsky-illustration",
        'Flux Koda': "https://api-inference.huggingface.co/models/alvdansen/flux-koda",
        'Soviet Diffusion XL': "https://api-inference.huggingface.co/models/openskyml/soviet-diffusion-xl",
        'Flux Realism LoRA': "https://api-inference.huggingface.co/models/XLabs-AI/flux-RealismLora",
        'Frosting Lane Flux': "https://api-inference.huggingface.co/models/alvdansen/frosting_lane_flux",
        'Phantasma Anime': "https://api-inference.huggingface.co/models/alvdansen/phantasma-anime",
        'Boreal': "https://api-inference.huggingface.co/models/kudzueye/Boreal",
        'How2Draw': "https://api-inference.huggingface.co/models/glif/how2draw",
        'Flux AestheticAnime': "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-AestheticAnime",
        'Fashion Hut Modeling LoRA': "https://api-inference.huggingface.co/models/prithivMLmods/Fashion-Hut-Modeling-LoRA",
        'Flux SyntheticAnime': "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-SyntheticAnime",
        'Flux Midjourney Anime': "https://api-inference.huggingface.co/models/brushpenbob/flux-midjourney-anime",
        'Coloring Book Generator': "https://api-inference.huggingface.co/models/robert123231/coloringbookgenerator",
        'Collage Flux': "https://api-inference.huggingface.co/models/prithivMLmods/Castor-Collage-Dim-Flux-LoRA",
        'Flux Product Ad Backdrop': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Product-Ad-Backdrop",
        'Product Design': "https://api-inference.huggingface.co/models/multimodalart/product-design",
        '90s Anime Art': "https://api-inference.huggingface.co/models/glif/90s-anime-art",
        'Brain Melt Acid Art': "https://api-inference.huggingface.co/models/glif/Brain-Melt-Acid-Art",
        'Lustly Flux Uncensored v1': "https://api-inference.huggingface.co/models/lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1",
        'NSFW Master Flux': "https://api-inference.huggingface.co/models/Keltezaa/NSFW_MASTER_FLUX",
        'Flux Outfit Generator': "https://api-inference.huggingface.co/models/tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator",
        'Midjourney': "https://api-inference.huggingface.co/models/Jovie/Midjourney",
        'DreamPhotoGASM': "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM",
        'Flux Super Realism LoRA': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Super-Realism-LoRA",
        'Stable Diffusion 2-1': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1-base",
        'Stable Diffusion 3.5 Large': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large",
        'Stable Diffusion 3.5 Large Turbo': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo",
        'Stable Diffusion 3 Medium': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers",
        'Duchaiten Real3D NSFW XL': "https://api-inference.huggingface.co/models/stablediffusionapi/duchaiten-real3d-nsfw-xl",
        'Pixel Art XL': "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
        'Character Design': "https://api-inference.huggingface.co/models/KappaNeuro/character-design",
        'Sketched Out Manga': "https://api-inference.huggingface.co/models/alvdansen/sketchedoutmanga",
        'Archfey Anime': "https://api-inference.huggingface.co/models/alvdansen/archfey_anime",
        'Lofi Cuties': "https://api-inference.huggingface.co/models/alvdansen/lofi-cuties",
        'YiffyMix': "https://api-inference.huggingface.co/models/Yntec/YiffyMix",
        'Analog Madness Realistic v7': "https://api-inference.huggingface.co/models/digiplay/AnalogMadness-realistic-model-v7",
        'Selfie Photography': "https://api-inference.huggingface.co/models/artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl",
        'Filmgrain': "https://api-inference.huggingface.co/models/artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl",
        'Leonardo AI Style Illustration': "https://api-inference.huggingface.co/models/goofyai/Leonardo_Ai_Style_Illustration",
        'Cyborg Style XL': "https://api-inference.huggingface.co/models/goofyai/cyborg_style_xl",
        'Little Tinies': "https://api-inference.huggingface.co/models/alvdansen/littletinies",
        'NSFW XL': "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl",
        'Analog Redmond': "https://api-inference.huggingface.co/models/artificialguybr/analogredmond",
        'Pixel Art Redmond': "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond",
        'Ascii Art': "https://api-inference.huggingface.co/models/CiroN2022/ascii-art",
        'Analog': "https://api-inference.huggingface.co/models/Yntec/Analog",
        'Maple Syrup': "https://api-inference.huggingface.co/models/Yntec/MapleSyrup",
        'Perfect Lewd Fantasy': "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01",
        'AbsoluteReality 1.8.1': "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1",
        'Disney': "https://api-inference.huggingface.co/models/goofyai/disney_style_xl",
        'Redmond SDXL': "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2",
        'epiCPhotoGasm': "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm",
    }
    # Return the corresponding URL, or fall back to FLUX.1 Schnell for unknown names
    return model_urls.get(model, "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell")
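
# Illustrative sketch (not called anywhere): how the URL lookup behaves. A known
# featured-model name maps to its hosted endpoint, while an unrecognized name falls
# back to the FLUX.1 Schnell endpoint; the names used here are just examples.
def _example_url_lookup():
    dev_url = get_model_api_url("FLUX.1 [Dev]", "", "hf-inference")
    # -> https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev
    fallback_url = get_model_api_url("Some Unknown Model", "", "hf-inference")
    # -> https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell
    return dev_url, fallback_url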
def get_model_prompt_prefixes(model):
    """
    Returns prompt prefixes for specific models
    """
    prefixes = {
        'Animagine 4.0': "masterpiece, high score, great score, absurdres, ",
        'Flux Icon Kit': "Icon Kit, ",
        'Pixel Background': "Pixel Background, ",
        'Meme XD': "meme, ",
        'Chill Guy': "chill guy, ",
        'Pepe': "pepe, ",
        'NSFWmodel': "nude, ",
        'Claude Art': "claude art, ",
        'Open Genmoji': "emoji, ",
        'EBook Creative Cover': "EBook Cover, ",
        'Flux Logo Design 2': "Logo Design, ",
        'Isometric 3D': "Isometric 3D, ",
        'Flux Condensation': "CONDENSATION, ",
        'Flux Handwriting': "HWRIT handwriting, ",
        'Shou Xin': "shou_xin, pencil sketch, ",
        'Sketch Smudge': "Sketch Smudge, ",
        '80s Cyberpunk': "80s cyberpunk, ",
        'Coloring Book Flux': "c0l0ringb00k, coloring book, coloring book page, ",
        'Flux Miniature LoRA': "MNTR, miniature drawing, ",
        'Sketch Paint': "Sketch paint, ",
        'Flux UltraRealism 2.0': "Ultra realistic, ",
        'Midjourney Mix': "midjourney mix, ",
        'Midjourney Mix 2': "MJ v6, ",
        'Flux Logo Design': "wablogo, logo, Minimalist, ",
        'Flux Tarot Cards': "Tarot card, ",
        'Pixel Art Sprites': "a pixel image, ",
        '3D Sketchfab': "3D Sketchfab, ",
        'Retro Comic Flux': "c0m1c, comic book panel, ",
        'Caricature': "CCTUR3, ",
        'Huggieverse': "HGGRE, ",
        'Propaganda Poster': "propaganda poster, ",
        'Flux Game Assets V2': "wbgmsst, white background, ",
        'SDXL HS Card Style': "Hearthstone Card, ",
        'SoftPasty Flux': "araminta_illus illustration style, ",
        'Flux Stickers': "5t1cker 5ty1e, ",
        'Flux Animex V2': "Animex, ",
        'Flux Animeo V1': "Animeo, ",
        'Movie Board': "movieboard, ",
        'Purple Dreamy': "Purple Dreamy, ",
        'PS1 Style Flux': "ps1 game screenshot, ",
        'Softserve Anime': "sftsrv style illustration, ",
        'Flux Tarot v1': "in the style of TOK a trtcrd tarot style, ",
        'Half Illustration': "in the style of TOK, ",
        'Flux Ghibsky Illustration': "GHIBSKY style, ",
        'Flux Koda': "flmft style, ",
        'Soviet Diffusion XL': "soviet poster, ",
        'Frosting Lane Flux': "frstingln illustration, ",
        'Boreal': "photo, ",
        'How2Draw': "How2Draw, ",
        'Fashion Hut Modeling LoRA': "Modeling of, ",
        'Flux SyntheticAnime': "1980s anime screengrab, VHS quality, syntheticanime, ",
        'Flux Midjourney Anime': "egmid, ",
        'Collage Flux': "collage, ",
        'Flux Product Ad Backdrop': "Product Ad, ",
        'Product Design': "product designed by prdsgn, ",
        'Brain Melt Acid Art': "maximalism, in an acid surrealism style, ",
        'NSFW Master Flux': "NSFW, ",
        'Disney': "Disney style, ",
        'Pixel Art XL': "pixel art, ",
        'Character Design': "Character Design, ",
        'Selfie Photography': "instagram model, discord profile picture, ",
        'Filmgrain': "Film Grain, FilmGrainAF, ",
        'Leonardo AI Style Illustration': "leonardo style, illustration, vector art, ",
        'Cyborg Style XL': "cyborg style, ",
        'Analog Redmond': "timeless style, ",
        'Pixel Art Redmond': "Pixel Art, ",
        'Ascii Art': "ascii art, ",
        'Stable Diffusion 3 Medium': "A, ",
    }
    return prefixes.get(model, "")
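
# Illustrative sketch (not called anywhere): how a prefix combines with a user prompt
# in query() above. "Pixel Art XL" is one of the keys in the prefix table; any model
# without an entry simply gets an empty prefix.
def _example_prefix_usage():
    user_prompt = "a cozy cabin in the woods"
    prefix = get_model_prompt_prefixes("Pixel Art XL")  # "pixel art, "
    return f"{prefix}{user_prompt} | ultra detail, ultra elaboration, ultra quality, perfect."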
def build_request_payload(prompt, is_negative, steps, cfg_scale, seed, strength, width, height, provider):
    """
    Builds the appropriate payload for the selected provider
    """
    # Set a random seed if -1 is provided
    actual_seed = seed if seed != -1 else random.randint(1, 1000000000)

    # Provider-specific payloads
    if provider == "fal-ai":
        return {
            "model": "fal-stable-diffusion-xl",
            "prompt": prompt,
            "negative_prompt": is_negative,
            "num_inference_steps": steps,
            "guidance_scale": cfg_scale,
            "seed": actual_seed,
            "width": width,
            "height": height
        }
    elif provider == "replicate":
        return {
            "version": "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
            "input": {
                "prompt": prompt,
                "negative_prompt": is_negative,
                "num_inference_steps": steps,
                "guidance_scale": cfg_scale,
                "seed": actual_seed,
                "width": width,
                "height": height
            }
        }
    elif provider == "nebius":
        return {
            "model": "yandex/imagen",
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            "generationOptions": {
                "size": f"{width}x{height}",
                "negativePrompt": is_negative,
                "seed": actual_seed,
                "cfgScale": cfg_scale,
                "steps": steps
            }
        }
    elif provider == "together":
        return {
            "model": "stabilityai/stable-diffusion-xl-base-1.0",
            "prompt": prompt,
            "negative_prompt": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": actual_seed,
            "width": width,
            "height": height
        }
    else:  # Default for HF Inference
        return {
            "inputs": prompt,
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": actual_seed,
            "strength": strength,
            "parameters": {
                "width": width,
                "height": height
            }
        }
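
# Illustrative sketch (not called anywhere): the payload shape produced for the default
# HF Inference path. The prompt and negative prompt are placeholders; the numeric values
# are the UI defaults, and the seed is randomized because -1 is passed through.
def _example_hf_payload():
    return build_request_payload(
        prompt="a watercolor fox",
        is_negative="blurry, low quality",
        steps=35,
        cfg_scale=7,
        seed=-1,
        strength=0.7,
        width=1024,
        height=1024,
        provider="hf-inference"
    )
    # -> {"inputs": "a watercolor fox", "is_negative": "blurry, low quality", "steps": 35,
    #     "cfg_scale": 7, "seed": <random>, "strength": 0.7,
    #     "parameters": {"width": 1024, "height": 1024}}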
def make_provider_request(api_url, headers, payload, provider, timeout):
    """
    Makes the appropriate request for the selected provider
    """
    if provider == "replicate":
        # Replicate uses a two-step process: create a prediction, then poll for the result
        create_response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
        if create_response.status_code != 201:
            return create_response
        # Get the prediction ID
        prediction = create_response.json()
        get_url = f"{api_url}/{prediction['id']}"
        # Poll until the prediction completes (or fails)
        while True:
            response = requests.get(get_url, headers=headers, timeout=timeout)
            status = response.json()["status"]
            if status == "succeeded":
                # Create a mock response object to match the expected interface
                class MockResponse:
                    def __init__(self, content, status_code):
                        self.content = content
                        self.status_code = status_code
                # Get the image URL from the prediction and download it
                image_url = response.json()["output"][0]
                image_response = requests.get(image_url, timeout=timeout)
                # Return a mock response carrying the image bytes
                return MockResponse(image_response.content, 200)
            if status in ("failed", "canceled"):
                # Surface the provider's response instead of polling forever
                return response
            # Wait briefly before polling again to avoid hammering the API
            time.sleep(1)
    # Standard request for other providers
    return requests.post(api_url, headers=headers, json=payload, timeout=timeout)
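
# Optional variation (a sketch, not used above): bound the total time spent polling a
# Replicate prediction instead of relying only on the per-request timeout. The deadline
# value is an arbitrary example.
def _example_bounded_poll(get_url, headers, deadline_seconds=120):
    start = time.time()
    while time.time() - start < deadline_seconds:
        response = requests.get(get_url, headers=headers, timeout=timeout)
        if response.json()["status"] in ("succeeded", "failed", "canceled"):
            return response
        time.sleep(1)
    raise TimeoutError("Prediction did not finish before the deadline")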
def process_provider_response(response, provider):
    """
    Processes the response based on the provider
    """
    if provider == "fal-ai":
        # Fal AI returns JSON containing an image URL
        result = response.json()
        image_url = result["images"][0]["url"]
        image_response = requests.get(image_url, timeout=timeout)
        image = Image.open(io.BytesIO(image_response.content))
        return image
    elif provider == "nebius":
        # Nebius returns JSON with a base64-encoded image
        result = response.json()
        image_data = result["result"]["images"][0]
        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
        return image
    elif provider == "together":
        # Together.ai returns JSON with a base64-encoded image
        result = response.json()
        image_data = result["images"][0]["base64"]
        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
        return image
    else:
        # Default for HF Inference and Replicate (which uses a MockResponse)
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        return image
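
# Illustrative sketch (not called anywhere): the decode path used by the Nebius and
# Together branches above, shown in isolation. The base64 string is a placeholder
# standing in for the provider's response field.
def _example_decode_base64_image(b64_string):
    raw = base64.b64decode(b64_string)
    return Image.open(io.BytesIO(raw))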
def handle_error_response(response):
    """
    Handles error responses from the API
    """
    if response.status_code == 400:
        raise gr.Error(f"{response.status_code}: Bad Request - There might be an issue with the input parameters.")
    elif response.status_code == 401:
        raise gr.Error(f"{response.status_code}: Unauthorized - Please check your API token.")
    elif response.status_code == 403:
        raise gr.Error(f"{response.status_code}: Forbidden - You do not have permission to access this model.")
    elif response.status_code == 404:
        raise gr.Error(f"{response.status_code}: Not Found - The requested model could not be found.")
    elif response.status_code == 503:
        raise gr.Error(f"{response.status_code}: The model is being loaded. Please try again later.")
    else:
        raise gr.Error(f"{response.status_code}: An unexpected error occurred.")
# Custom CSS to hide the footer in the interface
css = """
footer {visibility: hidden !important;}
"""

print("Initializing Gradio interface...")  # Debug log
# Define the Gradio interface (the custom CSS above is passed in so it takes effect)
with gr.Blocks(theme='Nymbo/Nymbo_Theme_5', css=css) as dalle:
    # Tab for basic settings
    with gr.Tab("Basic Settings"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    # Textbox for the user to input the prompt
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
                with gr.Row():
                    # Provider selection
                    providers_list = [
                        "hf-inference",  # Default Hugging Face Inference
                        "fal-ai",        # Fal AI
                        "nebius",        # Nebius
                        "replicate",     # Replicate
                        "together",      # Together AI
                    ]
                    provider_radio = gr.Radio(
                        choices=providers_list,
                        value="hf-inference",
                        label="Inference Provider",
                        info="Select the image generation provider"
                    )
                    # Read-only status line, updated by the provider change handler below
                    provider_status = gr.Textbox(
                        label="Provider Status",
                        value="Select a model or provide a custom LoRA",
                        interactive=False
                    )
                with gr.Row():
                    # Textbox for custom LoRA input
                    custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional, works with HF Inference only)", placeholder="multimodalart/vintage-ads-flux")
                with gr.Row():
                    # Accordion for selecting the model (kept as a variable so it can be hidden per provider)
                    with gr.Accordion("Featured Models", open=False) as model_accordion:
                        # Textbox for searching models
                        model_search = gr.Textbox(label="Filter Models", placeholder="Search for a featured model...", lines=1, elem_id="model-search-input")
                        models_list = (
                            "3D Sketchfab",
                            "80s Cyberpunk",
                            "90s Anime Art",
                            "AbsoluteReality 1.8.1",
                            "Analog",
                            "Analog Madness Realistic v7",
                            "Analog Redmond",
                            "Animagine 4.0",
                            "Archfey Anime",
                            "Ascii Art",
                            "Brain Melt Acid Art",
                            "Boreal",
                            "Caricature",
                            "Collage Flux",
                            "Coloring Book Flux",
                            "Character Design",
                            "Chill Guy",
                            "Claude Art",
                            "Coloring Book Generator",
                            "Cyborg Style XL",
                            "Disney",
                            "DreamPhotoGASM",
                            "Duchaiten Real3D NSFW XL",
                            "EBook Creative Cover",
                            "epiCPhotoGasm",
                            "Fashion Hut Modeling LoRA",
                            "Filmgrain",
                            "FLUX.1 [Dev]",
                            "FLUX.1 [Schnell]",
                            "Flux Condensation",
                            "Flux Handwriting",
                            "Flux Realism LoRA",
                            "Flux Super Realism LoRA",
                            "Flux Uncensored",
                            "Flux Uncensored V2",
                            "Flux Game Assets V2",
                            "Flux Icon Kit",
                            "Flux Ghibsky Illustration",
                            "Flux Animex V2",
                            "Flux Animeo V1",
                            "Flux AestheticAnime",
                            "Flux SyntheticAnime",
                            "Flux Stickers",
                            "Flux Koda",
                            "Flux Tarot v1",
                            "Flux Tarot Cards",
                            "Flux UltraRealism 2.0",
                            "Flux Midjourney Anime",
                            "Flux Miniature LoRA",
                            "Flux Logo Design",
                            "Flux Logo Design 2",
                            "Flux Product Ad Backdrop",
                            "Flux Outfit Generator",
                            "Frosting Lane Flux",
                            "Half Illustration",
                            "HiDream-I1-Full",
                            "HiDream-I1-Dev",
                            "HiDream-I1-Fast",
                            "How2Draw",
                            "Huggieverse",
                            "Isometric 3D",
                            "Leonardo AI Style Illustration",
                            "Little Tinies",
                            "Lofi Cuties",
                            "Lustly Flux Uncensored v1",
                            "Maple Syrup",
                            "Meme XD",
                            "Midjourney",
                            "Midjourney Mix",
                            "Midjourney Mix 2",
                            "Movie Board",
                            "NSFWmodel",
                            "NSFW Master Flux",
                            "NSFW XL",
                            "OpenDalle v1.1",
                            "Open Genmoji",
                            "Pepe",
                            "Perfect Lewd Fantasy",
                            "Pixel Art Redmond",
                            "Pixel Art XL",
                            "Pixel Art Sprites",
                            "Pixel Background",
                            "Product Design",
                            "Propaganda Poster",
                            "Purple Dreamy",
                            "Phantasma Anime",
                            "PS1 Style Flux",
                            "Redmond SDXL",
                            "Retro Comic Flux",
                            "SDXL HS Card Style",
                            "Sketch Smudge",
                            "Shou Xin",
                            "Softserve Anime",
                            "SoftPasty Flux",
                            "Soviet Diffusion XL",
                            "Sketched Out Manga",
                            "Sketch Paint",
                            "SLDR FLUX NSFW v2 Studio",
                            "Selfie Photography",
                            "Stable Diffusion 2-1",
                            "Stable Diffusion XL",
                            "Stable Diffusion 3 Medium",
                            "Stable Diffusion 3.5 Large",
                            "Stable Diffusion 3.5 Large Turbo",
                            "YiffyMix",
                        )

                        # Radio buttons to select the desired model
                        model = gr.Radio(label="Select a model below", value="FLUX.1 [Schnell]", choices=models_list, interactive=True, elem_id="model-radio", info="Note: Some models may only be available with specific providers")

                        # Filter the model list based on the search input (case-insensitive substring match)
                        def filter_models(search_term):
                            filtered_models = [m for m in models_list if search_term.lower() in m.lower()]
                            return gr.update(choices=filtered_models)

                        # Update the model list when the search box is used
                        model_search.change(filter_models, inputs=model_search, outputs=model)
    # Tab for advanced settings
    with gr.Tab("Advanced Settings"):
        with gr.Row():
            # Textbox for specifying elements to exclude from the image
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
        with gr.Row():
            # Slider for selecting the image width
            width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
            # Slider for selecting the image height
            height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
        with gr.Row():
            # Slider for setting the number of sampling steps
            steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
        with gr.Row():
            # Slider for adjusting the CFG scale (guidance scale)
            cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
        with gr.Row():
            # Slider for adjusting the transformation strength
            strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
        with gr.Row():
            # Slider for setting the seed for reproducibility
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
        with gr.Row():
            # Radio buttons for selecting the sampling method
            method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
    # Tab for image editing options
    with gr.Tab("Image Editor"):
        # Helper to simulate a delay for processing (not currently wired to an event)
        def sleep(im):
            print("Sleeping for 5 seconds...")  # Debug log
            time.sleep(5)
            return [im["background"], im["layers"][0], im["layers"][1], im["composite"]]

        # Helper to return the composite image (not currently wired to an event)
        def predict(im):
            print("Predicting composite image...")  # Debug log
            return im["composite"]

        with gr.Row():
            # Image editor component for user adjustments
            # (placed directly in the tab; nesting a second gr.Blocks here is not supported)
            im = gr.ImageEditor(
                type="numpy",
                crop_size="1:1",  # Set crop size to a square aspect ratio
            )
    # Tab to provide information to the user
    with gr.Tab("Information"):
        with gr.Row():
            # Display a sample prompt for guidance
            gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")

        # Accordion displaying featured models
        with gr.Accordion("Featured Models (WiP)", open=False):
            gr.HTML(
                """
                <p><a href="https://huggingface.co/models?inference=warm&pipeline_tag=text-to-image&sort=trending">See all available models</a></p>
                <table style="width:100%; text-align:center; margin:auto;">
                    <tr>
                        <th>Model Name</th>
                        <th>Typography</th>
                        <th>Notes</th>
                    </tr>
                    <tr>
                        <td>FLUX.1 Dev</td>
                        <td>✅</td>
                        <td></td>
                    </tr>
                    <tr>
                        <td>FLUX.1 Schnell</td>
                        <td>✅</td>
                        <td></td>
                    </tr>
                    <tr>
                        <td>Stable Diffusion 3.5 Large</td>
                        <td>✅</td>
                        <td></td>
                    </tr>
                </table>
                """
            )
        # Accordion providing information about providers
        with gr.Accordion("Provider Information", open=False):
            gr.Markdown(
                """
                ## Inference Providers

                ### HF Inference
                Hugging Face's Inference API provides access to a wide range of models hosted on the Hugging Face Hub.
                - Supports all models listed in "Featured Models"
                - Custom LoRA support
                - Free tier available with API key

                ### Fal AI
                Fal AI offers high-speed inference for image generation models.
                - Optimized for speed
                - Limited model selection compared to HF
                - Requires Fal AI API key

                ### Nebius
                Nebius Cloud provides image generation capabilities.
                - Good performance for certain model types
                - Requires Nebius API key

                ### Replicate
                Replicate hosts many popular image generation models.
                - Wide variety of fine-tuned models
                - Simple API
                - Requires Replicate API token

                ### Together AI
                Together AI offers high-performance model hosting.
                - Optimized for speed and quality
                - Good selection of models
                - Requires Together API key
                """
            )
        # Accordion providing an overview of advanced settings
        with gr.Accordion("Advanced Settings Overview", open=False):
            gr.Markdown(
                """
                ## Negative Prompt
                ###### This box is for telling the AI what you don't want in your images. Think of it as a way to avoid certain elements. For instance, if you don't want blurry images or extra limbs showing up, this is where you'd mention it.

                ## Width & Height
                ###### These sliders let you specify the resolution of your image. The default value is 1024x1024, and the maximum output is 1216x1216.

                ## Sampling Steps
                ###### Think of this like the number of brushstrokes in a painting. A higher number can give you a more detailed picture, but it also takes a bit longer. A middle-ground value like 35 is generally a good balance between quality and speed.

                ## CFG Scale
                ###### CFG stands for "Classifier-Free Guidance." The scale adjusts how closely the AI follows your prompt. A lower number makes the AI more creative and free-flowing, while a higher number makes it stick closely to what you asked for. If you want the AI to take fewer artistic liberties, slide this towards a higher number. Just think "Control Freak Gauge."

                ## Sampling Method
                ###### This is the technique the AI uses to create your image. Each option is a different approach, like choosing between pencils, markers, or paint. You don't need to worry too much about this; the default setting is usually the best choice for most users.

                ## Strength
                ###### This setting is a bit like an 'intensity' knob. It determines how much the AI modifies the base image it starts with. If you're looking to make subtle changes, keep this low. For more drastic transformations, turn it up.

                ## Seed
                ###### You can think of the seed as a 'recipe' for creating an image. If you find a seed that gives you a result you love, you can use it again to create a similar image. If you leave it at -1, a new seed is generated every time.

                ### Remember, these settings are all about giving you control over the image generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
                """
            )
    # Row containing the 'Run' button to trigger the image generation
    with gr.Row():
        text_button = gr.Button("Run", variant='primary', elem_id="gen-button")

    # Row for displaying the generated image output
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

    # Set up the button click event to call the query function, including the provider selection
    text_button.click(
        query,
        inputs=[
            text_prompt,
            model,
            custom_lora,
            negative_prompt,
            steps,
            cfg,
            method,
            seed,
            strength,
            width,
            height,
            provider_radio  # Selected inference provider
        ],
        outputs=image_output
    )
    # Function to update the UI based on the provider selection
    def update_provider_ui(provider):
        if provider == "hf-inference":
            return [
                gr.update(visible=True),   # custom_lora
                gr.update(visible=True),   # featured models accordion
                "Select a model or provide a custom LoRA"
            ]
        else:
            return [
                gr.update(visible=False),  # custom_lora
                gr.update(visible=False),  # featured models accordion
                f"Using {provider} provider - model selection handled by the provider"
            ]

    # Update the UI when the provider changes, targeting the existing components defined above
    # (creating new components in the outputs list would not affect the ones already rendered)
    provider_radio.change(
        update_provider_ui,
        inputs=[provider_radio],
        outputs=[
            custom_lora,
            model_accordion,
            provider_status
        ]
    )
print("Launching Gradio interface...") # Debug log | |
# Launch the Gradio interface without showing the API or sharing externally | |
dalle.launch(show_api=True, share=False) |