Nymbo committed
Commit d38d8d8 · verified
1 Parent(s): 018a7f4

Update app.py

Files changed (1):
  app.py +338 -536
app.py CHANGED
@@ -9,480 +9,377 @@ import json
9
 
10
  # Project by Nymbo
11
 
12
  # Retrieve the API token from environment variables
13
  API_TOKEN = os.getenv("HF_READ_TOKEN")
14
- # Create a list of available API tokens for load balancing
15
- API_TOKENS = [
16
- os.getenv("HF_READ_TOKEN"),
17
- os.getenv("HF_READ_TOKEN_2"),
18
- os.getenv("HF_READ_TOKEN_3"),
19
- os.getenv("HF_READ_TOKEN_4"),
20
- os.getenv("HF_READ_TOKEN_5")
21
- ]
22
  # Timeout for requests
23
  timeout = 100
24
 
25
- def query(
26
- prompt,
27
- model,
28
- custom_lora,
29
- is_negative=False,
30
- steps=35,
31
- cfg_scale=7,
32
- sampler="DPM++ 2M Karras",
33
- seed=-1,
34
- strength=0.7,
35
- width=1024,
36
- height=1024,
37
- provider="hf-inference" # Added provider parameter
38
- ):
39
  # Debug log to indicate function start
40
  print("Starting query function...")
41
  # Print the parameters for debugging purposes
42
  print(f"Prompt: {prompt}")
43
  print(f"Model: {model}")
44
  print(f"Custom LoRA: {custom_lora}")
45
- print(f"Provider: {provider}") # Added provider debug log
46
  print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")
47
 
48
  # Check if the prompt is empty or None
49
  if prompt == "" or prompt is None:
50
- print("Prompt is empty or None. Exiting query function.")
51
  return None
52
 
53
  # Generate a unique key for tracking the generation process
54
  key = random.randint(0, 999)
55
- print(f"Generated key: {key}")
56
 
57
  # Randomly select an API token from available options to distribute the load
58
- selected_token = random.choice([token for token in API_TOKENS if token])
59
- print(f"Selected an API token") # Modified to not print the actual token
60
-
61
- # Initialize the default headers with authorization
62
- headers = {"Authorization": f"Bearer {selected_token}"}
63
-
64
- # Select provider-specific headers if needed
65
- if provider == "fal-ai":
66
- headers = {
67
- "Authorization": f"Bearer {os.getenv('FAL_API_KEY')}",
68
- "Content-Type": "application/json"
69
- }
70
- elif provider == "together":
71
- headers = {
72
- "Authorization": f"Bearer {os.getenv('TOGETHER_API_KEY')}"
73
- }
74
- elif provider == "replicate":
75
- headers = {
76
- "Authorization": f"Token {os.getenv('REPLICATE_API_TOKEN')}",
77
- "Content-Type": "application/json"
78
- }
79
- elif provider == "nebius":
80
- headers = {
81
- "Authorization": f"Api-Key {os.getenv('NEBIUS_API_KEY')}",
82
- "Content-Type": "application/json"
83
- }
84
 
85
  # Enhance the prompt with additional details for better quality
86
- enhanced_prompt = prompt
87
- if provider == "hf-inference":
88
- enhanced_prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
89
- print(f'Generation {key}: {enhanced_prompt}')
90
-
91
- # Set the API URL based on the selected provider, model, or custom LoRA
92
- API_URL = get_model_api_url(model, custom_lora, provider)
93
- print(f"API URL set to: {API_URL}")
94
 
95
- # Define the payload for the request (provider-specific)
96
- payload = build_request_payload(prompt, is_negative, steps, cfg_scale, seed, strength, width, height, provider)
97
- print(f"Payload created for provider: {provider}")
 
98
 
99
  # Make a request to the API to generate the image
100
  try:
101
- response = make_provider_request(API_URL, headers, payload, provider, timeout)
102
- print(f"Response status code: {response.status_code}")
103
  except requests.exceptions.RequestException as e:
104
- print(f"Request failed: {e}")
 
105
  raise gr.Error(f"Request failed: {e}")
106
 
107
  # Check if the response status is not successful
108
  if response.status_code != 200:
109
- print(f"Error: Failed to retrieve image. Response status: {response.status_code}")
110
- print(f"Response content: {response.text}")
111
- handle_error_response(response)
112
 
113
  try:
114
- # Process the response based on the provider
115
- image = process_provider_response(response, provider)
116
- print(f'Generation {key} completed! ({prompt})')
117
- return image
118
- except Exception as e:
119
- print(f"Error while processing the response: {e}")
120
- return None
121
-
122
- def get_model_api_url(model, custom_lora, provider):
123
- """
124
- Determine the correct API URL based on model, custom LoRA, and provider
125
- """
126
- # If a custom LoRA is specified, use it (only for HF Inference)
127
- if custom_lora.strip() != "" and provider == "hf-inference":
128
- return f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
129
-
130
- # Provider-specific base URLs
131
- if provider == "fal-ai":
132
- return "https://gateway.fal.ai/inference"
133
- elif provider == "replicate":
134
- return "https://api.replicate.com/v1/predictions"
135
- elif provider == "nebius":
136
- return "https://llm.api.cloud.yandex.net/foundationModels/v1/image/generate"
137
- elif provider == "together":
138
- return "https://api.together.xyz/v1/images/generations"
139
-
140
- # Default to HuggingFace with the selected model
141
- if provider == "hf-inference":
142
- # Map model names to their respective API URLs
143
- model_urls = {
144
- 'Stable Diffusion XL': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
145
- 'FLUX.1 [Dev]': "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev",
146
- 'FLUX.1 [Schnell]': "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell",
147
- 'HiDream-I1-Full': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Full",
148
- 'HiDream-I1-Dev': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Dev",
149
- 'HiDream-I1-Fast': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Fast",
150
- 'Animagine 4.0': "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-4.0",
151
- 'Flux Icon Kit': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Icon-Kit-LoRA",
152
- 'Pixel Background': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Pixel-Background-LoRA",
153
- 'Meme XD': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Meme-Xd-LoRA",
154
- 'Chill Guy': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Chill-Guy-Zone",
155
- 'Pepe': "https://api-inference.huggingface.co/models/openfree/pepe",
156
- 'NSFWmodel': "https://api-inference.huggingface.co/models/lexa862/NSFWmodel",
157
- 'Claude Art': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Claude-Art",
158
- 'Open Genmoji': "https://api-inference.huggingface.co/models/EvanZhouDev/open-genmoji",
159
- 'EBook Creative Cover': "https://api-inference.huggingface.co/models/prithivMLmods/EBook-Creative-Cover-Flux-LoRA",
160
- 'Flux Logo Design 2': "https://api-inference.huggingface.co/models/prithivMLmods/Logo-Design-Flux-LoRA",
161
- 'Isometric 3D': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Isometric-3D-LoRA",
162
- 'Flux Condensation': "https://api-inference.huggingface.co/models/fofr/flux-condensation",
163
- 'Flux Handwriting': "https://api-inference.huggingface.co/models/fofr/flux-handwriting",
164
- 'Shou Xin': "https://api-inference.huggingface.co/models/Datou1111/shou_xin",
165
- 'Sketch Smudge': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Sketch-Smudge-LoRA",
166
- '80s Cyberpunk': "https://api-inference.huggingface.co/models/fofr/flux-80s-cyberpunk",
167
- 'Coloring Book Flux': "https://api-inference.huggingface.co/models/renderartist/coloringbookflux",
168
- 'Flux Miniature LoRA': "https://api-inference.huggingface.co/models/gokaygokay/Flux-Miniature-LoRA",
169
- 'Sketch Paint': "https://api-inference.huggingface.co/models/strangerzonehf/Sketch-Paint",
170
- 'Flux UltraRealism 2.0': "https://api-inference.huggingface.co/models/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0",
171
- 'Midjourney Mix': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix-LoRA",
172
- 'Midjourney Mix 2': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix2-LoRA",
173
- 'Flux Logo Design': "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
174
- 'Flux Uncensored': "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored",
175
- 'Flux Uncensored V2': "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2",
176
- 'Flux Tarot Cards': "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA",
177
- 'Pixel Art Sprites': "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux",
178
- '3D Sketchfab': "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA",
179
- 'Retro Comic Flux': "https://api-inference.huggingface.co/models/renderartist/retrocomicflux",
180
- 'Caricature': "https://api-inference.huggingface.co/models/TheAwakenOne/caricature",
181
- 'Huggieverse': "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse",
182
- 'Propaganda Poster': "https://api-inference.huggingface.co/models/AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion",
183
- 'Flux Game Assets V2': "https://api-inference.huggingface.co/models/gokaygokay/Flux-Game-Assets-LoRA-v2",
184
- 'SDXL HS Card Style': "https://api-inference.huggingface.co/models/Norod78/sdxl-hearthstone-card-style-lora",
185
- 'SLDR FLUX NSFW v2 Studio': "https://api-inference.huggingface.co/models/xey/sldr_flux_nsfw_v2-studio",
186
- 'SoftPasty Flux': "https://api-inference.huggingface.co/models/alvdansen/softpasty-flux-dev",
187
- 'Flux Stickers': "https://api-inference.huggingface.co/models/diabolic6045/Flux_Sticker_Lora",
188
- 'Flux Animex V2': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animex-v2-LoRA",
189
- 'Flux Animeo V1': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animeo-v1-LoRA",
190
- 'Movie Board': "https://api-inference.huggingface.co/models/prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA",
191
- 'Purple Dreamy': "https://api-inference.huggingface.co/models/prithivMLmods/Purple-Dreamy-Flux-LoRA",
192
- 'PS1 Style Flux': "https://api-inference.huggingface.co/models/veryVANYA/ps1-style-flux",
193
- 'Softserve Anime': "https://api-inference.huggingface.co/models/alvdansen/softserve_anime",
194
- 'Flux Tarot v1': "https://api-inference.huggingface.co/models/multimodalart/flux-tarot-v1",
195
- 'Half Illustration': "https://api-inference.huggingface.co/models/davisbro/half_illustration",
196
- 'OpenDalle v1.1': "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1",
197
- 'Flux Ghibsky Illustration': "https://api-inference.huggingface.co/models/aleksa-codes/flux-ghibsky-illustration",
198
- 'Flux Koda': "https://api-inference.huggingface.co/models/alvdansen/flux-koda",
199
- 'Soviet Diffusion XL': "https://api-inference.huggingface.co/models/openskyml/soviet-diffusion-xl",
200
- 'Flux Realism LoRA': "https://api-inference.huggingface.co/models/XLabs-AI/flux-RealismLora",
201
- 'Frosting Lane Flux': "https://api-inference.huggingface.co/models/alvdansen/frosting_lane_flux",
202
- 'Phantasma Anime': "https://api-inference.huggingface.co/models/alvdansen/phantasma-anime",
203
- 'Boreal': "https://api-inference.huggingface.co/models/kudzueye/Boreal",
204
- 'How2Draw': "https://api-inference.huggingface.co/models/glif/how2draw",
205
- 'Flux AestheticAnime': "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-AestheticAnime",
206
- 'Fashion Hut Modeling LoRA': "https://api-inference.huggingface.co/models/prithivMLmods/Fashion-Hut-Modeling-LoRA",
207
- 'Flux SyntheticAnime': "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-SyntheticAnime",
208
- 'Flux Midjourney Anime': "https://api-inference.huggingface.co/models/brushpenbob/flux-midjourney-anime",
209
- 'Coloring Book Generator': "https://api-inference.huggingface.co/models/robert123231/coloringbookgenerator",
210
- 'Collage Flux': "https://api-inference.huggingface.co/models/prithivMLmods/Castor-Collage-Dim-Flux-LoRA",
211
- 'Flux Product Ad Backdrop': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Product-Ad-Backdrop",
212
- 'Product Design': "https://api-inference.huggingface.co/models/multimodalart/product-design",
213
- '90s Anime Art': "https://api-inference.huggingface.co/models/glif/90s-anime-art",
214
- 'Brain Melt Acid Art': "https://api-inference.huggingface.co/models/glif/Brain-Melt-Acid-Art",
215
- 'Lustly Flux Uncensored v1': "https://api-inference.huggingface.co/models/lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1",
216
- 'NSFW Master Flux': "https://api-inference.huggingface.co/models/Keltezaa/NSFW_MASTER_FLUX",
217
- 'Flux Outfit Generator': "https://api-inference.huggingface.co/models/tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator",
218
- 'Midjourney': "https://api-inference.huggingface.co/models/Jovie/Midjourney",
219
- 'DreamPhotoGASM': "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM",
220
- 'Flux Super Realism LoRA': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Super-Realism-LoRA",
221
- 'Stable Diffusion 2-1': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1-base",
222
- 'Stable Diffusion 3.5 Large': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large",
223
- 'Stable Diffusion 3.5 Large Turbo': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo",
224
- 'Stable Diffusion 3 Medium': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers",
225
- 'Duchaiten Real3D NSFW XL': "https://api-inference.huggingface.co/models/stablediffusionapi/duchaiten-real3d-nsfw-xl",
226
- 'Pixel Art XL': "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
227
- 'Character Design': "https://api-inference.huggingface.co/models/KappaNeuro/character-design",
228
- 'Sketched Out Manga': "https://api-inference.huggingface.co/models/alvdansen/sketchedoutmanga",
229
- 'Archfey Anime': "https://api-inference.huggingface.co/models/alvdansen/archfey_anime",
230
- 'Lofi Cuties': "https://api-inference.huggingface.co/models/alvdansen/lofi-cuties",
231
- 'YiffyMix': "https://api-inference.huggingface.co/models/Yntec/YiffyMix",
232
- 'Analog Madness Realistic v7': "https://api-inference.huggingface.co/models/digiplay/AnalogMadness-realistic-model-v7",
233
- 'Selfie Photography': "https://api-inference.huggingface.co/models/artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl",
234
- 'Filmgrain': "https://api-inference.huggingface.co/models/artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl",
235
- 'Leonardo AI Style Illustration': "https://api-inference.huggingface.co/models/goofyai/Leonardo_Ai_Style_Illustration",
236
- 'Cyborg Style XL': "https://api-inference.huggingface.co/models/goofyai/cyborg_style_xl",
237
- 'Little Tinies': "https://api-inference.huggingface.co/models/alvdansen/littletinies",
238
- 'NSFW XL': "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl",
239
- 'Analog Redmond': "https://api-inference.huggingface.co/models/artificialguybr/analogredmond",
240
- 'Pixel Art Redmond': "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond",
241
- 'Ascii Art': "https://api-inference.huggingface.co/models/CiroN2022/ascii-art",
242
- 'Analog': "https://api-inference.huggingface.co/models/Yntec/Analog",
243
- 'Maple Syrup': "https://api-inference.huggingface.co/models/Yntec/MapleSyrup",
244
- 'Perfect Lewd Fantasy': "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01",
245
- 'AbsoluteReality 1.8.1': "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1",
246
- 'Disney': "https://api-inference.huggingface.co/models/goofyai/disney_style_xl",
247
- 'Redmond SDXL': "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2",
248
- 'epiCPhotoGasm': "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm",
249
- }
250
-
251
- # Add prompt prefixes based on the model
252
- prompt_prefixes = get_model_prompt_prefixes(model)
253
-
254
- # Return the corresponding URL or a default one
255
- return model_urls.get(model, "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell")
256
-
257
- def get_model_prompt_prefixes(model):
258
- """
259
- Returns prompt prefixes for specific models
260
- """
261
- prefixes = {
262
- 'Animagine 4.0': "masterpiece, high score, great score, absurdres, ",
263
- 'Flux Icon Kit': "Icon Kit, ",
264
- 'Pixel Background': "Pixel Background, ",
265
- 'Meme XD': "meme, ",
266
- 'Chill Guy': "chill guy, ",
267
- 'Pepe': "pepe, ",
268
- 'NSFWmodel': "nude, ",
269
- 'Claude Art': "claude art, ",
270
- 'Open Genmoji': "emoji, ",
271
- 'EBook Creative Cover': "EBook Cover, ",
272
- 'Flux Logo Design 2': "Logo Design, ",
273
- 'Isometric 3D': "Isometric 3D, ",
274
- 'Flux Condensation': "CONDENSATION, ",
275
- 'Flux Handwriting': "HWRIT handwriting, ",
276
- 'Shou Xin': "shou_xin, pencil sketch, ",
277
- 'Sketch Smudge': "Sketch Smudge, ",
278
- '80s Cyberpunk': "80s cyberpunk, ",
279
- 'Coloring Book Flux': "c0l0ringb00k, coloring book, coloring book page, ",
280
- 'Flux Miniature LoRA': "MNTR, miniature drawing, ",
281
- 'Sketch Paint': "Sketch paint, ",
282
- 'Flux UltraRealism 2.0': "Ultra realistic, ",
283
- 'Midjourney Mix': "midjourney mix, ",
284
- 'Midjourney Mix 2': "MJ v6, ",
285
- 'Flux Logo Design': "wablogo, logo, Minimalist, ",
286
- 'Flux Tarot Cards': "Tarot card, ",
287
- 'Pixel Art Sprites': "a pixel image, ",
288
- '3D Sketchfab': "3D Sketchfab, ",
289
- 'Retro Comic Flux': "c0m1c, comic book panel, ",
290
- 'Caricature': "CCTUR3, ",
291
- 'Huggieverse': "HGGRE, ",
292
- 'Propaganda Poster': "propaganda poster, ",
293
- 'Flux Game Assets V2': "wbgmsst, white background, ",
294
- 'SDXL HS Card Style': "Hearthstone Card, ",
295
- 'SoftPasty Flux': "araminta_illus illustration style, ",
296
- 'Flux Stickers': "5t1cker 5ty1e, ",
297
- 'Flux Animex V2': "Animex, ",
298
- 'Flux Animeo V1': "Animeo, ",
299
- 'Movie Board': "movieboard, ",
300
- 'Purple Dreamy': "Purple Dreamy, ",
301
- 'PS1 Style Flux': "ps1 game screenshot, ",
302
- 'Softserve Anime': "sftsrv style illustration, ",
303
- 'Flux Tarot v1': "in the style of TOK a trtcrd tarot style, ",
304
- 'Half Illustration': "in the style of TOK, ",
305
- 'Flux Ghibsky Illustration': "GHIBSKY style, ",
306
- 'Flux Koda': "flmft style, ",
307
- 'Soviet Diffusion XL': "soviet poster, ",
308
- 'Frosting Lane Flux': "frstingln illustration, ",
309
- 'Boreal': "photo, ",
310
- 'How2Draw': "How2Draw, ",
311
- 'Fashion Hut Modeling LoRA': "Modeling of, ",
312
- 'Flux SyntheticAnime': "1980s anime screengrab, VHS quality, syntheticanime, ",
313
- 'Flux Midjourney Anime': "egmid, ",
314
- 'Collage Flux': "collage, ",
315
- 'Flux Product Ad Backdrop': "Product Ad, ",
316
- 'Product Design': "product designed by prdsgn, ",
317
- 'Brain Melt Acid Art': "maximalism, in an acid surrealism style, ",
318
- 'NSFW Master Flux': "NSFW, ",
319
- 'Disney': "Disney style, ",
320
- 'Pixel Art XL': "pixel art, ",
321
- 'Character Design': "Character Design, ",
322
- 'Selfie Photography': "instagram model, discord profile picture, ",
323
- 'Filmgrain': "Film Grain, FilmGrainAF, ",
324
- 'Leonardo AI Style Illustration': "leonardo style, illustration, vector art, ",
325
- 'Cyborg Style XL': "cyborg style, ",
326
- 'Analog Redmond': "timeless style, ",
327
- 'Pixel Art Redmond': "Pixel Art, ",
328
- 'Ascii Art': "ascii art, ",
329
- 'Stable Diffusion 3 Medium': "A, ",
330
- }
331
- return prefixes.get(model, "")
332
-
333
- def build_request_payload(prompt, is_negative, steps, cfg_scale, seed, strength, width, height, provider):
334
- """
335
- Builds the appropriate payload for the selected provider
336
- """
337
- # Set a random seed if -1 is provided
338
- actual_seed = seed if seed != -1 else random.randint(1, 1000000000)
339
-
340
- # Provider-specific payloads
341
- if provider == "fal-ai":
342
- return {
343
- "model": "fal-stable-diffusion-xl",
344
- "prompt": prompt,
345
- "negative_prompt": is_negative,
346
- "num_inference_steps": steps,
347
- "guidance_scale": cfg_scale,
348
- "seed": actual_seed,
349
- "width": width,
350
- "height": height
351
- }
352
- elif provider == "replicate":
353
- return {
354
- "version": "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
355
- "input": {
356
- "prompt": prompt,
357
- "negative_prompt": is_negative,
358
- "num_inference_steps": steps,
359
- "guidance_scale": cfg_scale,
360
- "seed": actual_seed,
361
- "width": width,
362
- "height": height
363
- }
364
- }
365
- elif provider == "nebius":
366
- return {
367
- "model": "yandex/imagen",
368
- "messages": [
369
- {
370
- "role": "user",
371
- "content": prompt
372
- }
373
- ],
374
- "generationOptions": {
375
- "size": f"{width}x{height}",
376
- "negativePrompt": is_negative,
377
- "seed": actual_seed,
378
- "cfgScale": cfg_scale,
379
- "steps": steps
380
- }
381
- }
382
- elif provider == "together":
383
- return {
384
- "model": "stabilityai/stable-diffusion-xl-base-1.0",
385
- "prompt": prompt,
386
- "negative_prompt": is_negative,
387
- "steps": steps,
388
- "cfg_scale": cfg_scale,
389
- "seed": actual_seed,
390
- "width": width,
391
- "height": height
392
- }
393
- else: # Default for HF Inference
394
- return {
395
- "inputs": prompt,
396
- "is_negative": is_negative,
397
- "steps": steps,
398
- "cfg_scale": cfg_scale,
399
- "seed": actual_seed,
400
- "strength": strength,
401
- "parameters": {
402
- "width": width,
403
- "height": height
404
- }
405
- }
406
-
407
- def make_provider_request(api_url, headers, payload, provider, timeout):
408
- """
409
- Makes the appropriate request for the selected provider
410
- """
411
- if provider == "replicate":
412
- # Replicate uses a two-step process: create prediction and get results
413
- create_response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
414
- if create_response.status_code != 201:
415
- return create_response
416
-
417
- # Get the prediction ID
418
- prediction = create_response.json()
419
- get_url = f"{api_url}/{prediction['id']}"
420
-
421
- # Poll until the prediction is complete
422
- while True:
423
- response = requests.get(get_url, headers=headers, timeout=timeout)
424
- if response.json()["status"] == "succeeded":
425
- # Create a mock response object to match the expected interface
426
- class MockResponse:
427
- def __init__(self, content, status_code):
428
- self.content = content
429
- self.status_code = status_code
430
-
431
- # Get the image URL from the prediction
432
- image_url = response.json()["output"][0]
433
- image_response = requests.get(image_url, timeout=timeout)
434
-
435
- # Return a mock response with the image content
436
- return MockResponse(image_response.content, 200)
437
-
438
- # Standard request for other providers
439
- return requests.post(api_url, headers=headers, json=payload, timeout=timeout)
440
-
441
- def process_provider_response(response, provider):
442
- """
443
- Processes the response based on the provider
444
- """
445
- if provider == "fal-ai":
446
- # Fal AI returns a JSON with an image URL
447
- result = response.json()
448
- image_url = result["images"][0]["url"]
449
- image_response = requests.get(image_url, timeout=timeout)
450
- image = Image.open(io.BytesIO(image_response.content))
451
- return image
452
- elif provider == "nebius":
453
- # Nebius returns a JSON with Base64 encoded image
454
- result = response.json()
455
- image_data = result["result"]["images"][0]
456
- image = Image.open(io.BytesIO(base64.b64decode(image_data)))
457
- return image
458
- elif provider == "together":
459
- # Together.ai returns a JSON with Base64 encoded image
460
- result = response.json()
461
- image_data = result["images"][0]["base64"]
462
- image = Image.open(io.BytesIO(base64.b64decode(image_data)))
463
- return image
464
- else:
465
- # Default for HF Inference and Replicate (which uses a MockResponse)
466
  image_bytes = response.content
467
  image = Image.open(io.BytesIO(image_bytes))
 
468
  return image
469
-
470
- def handle_error_response(response):
471
- """
472
- Handles error responses from the API
473
- """
474
- if response.status_code == 400:
475
- raise gr.Error(f"{response.status_code}: Bad Request - There might be an issue with the input parameters.")
476
- elif response.status_code == 401:
477
- raise gr.Error(f"{response.status_code}: Unauthorized - Please check your API token.")
478
- elif response.status_code == 403:
479
- raise gr.Error(f"{response.status_code}: Forbidden - You do not have permission to access this model.")
480
- elif response.status_code == 404:
481
- raise gr.Error(f"{response.status_code}: Not Found - The requested model could not be found.")
482
- elif response.status_code == 503:
483
- raise gr.Error(f"{response.status_code}: The model is being loaded. Please try again later.")
484
- else:
485
- raise gr.Error(f"{response.status_code}: An unexpected error occurred.")
486
 
487
  # Custom CSS to hide the footer in the interface
488
  css = """
@@ -501,24 +398,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
501
  with gr.Row():
502
  # Textbox for user to input the prompt
503
  text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
504
- with gr.Row():
505
- # Provider selection (new component)
506
- providers_list = [
507
- "hf-inference", # Default Hugging Face Inference
508
- "fal-ai", # Fal AI
509
- "nebius", # Nebius
510
- "replicate", # Replicate
511
- "together", # Together AI
512
- ]
513
- provider_radio = gr.Radio(
514
- choices=providers_list,
515
- value="hf-inference",
516
- label="Inference Provider",
517
- info="Select the image generation provider"
518
- )
519
  with gr.Row():
520
  # Textbox for custom LoRA input
521
- custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional, works with HF Inference only)", placeholder="multimodalart/vintage-ads-flux")
522
  with gr.Row():
523
  # Accordion for selecting the model
524
  with gr.Accordion("Featured Models", open=False):
@@ -633,7 +515,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
633
  )
634
 
635
  # Radio buttons to select the desired model
636
- model = gr.Radio(label="Select a model below", value="FLUX.1 [Schnell]", choices=models_list, interactive=True, elem_id="model-radio", info="Note: Some models may only be available with specific providers")
637
 
638
  # Filtering models based on search input
639
  def filter_models(search_term):
@@ -726,43 +608,6 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
726
  """
727
  )
728
 
729
- # Accordion providing information about providers
730
- with gr.Accordion("Provider Information", open=False):
731
- gr.Markdown(
732
- """
733
- ## Inference Providers
734
-
735
- ### HF Inference
736
- Hugging Face's Inference API provides access to a wide range of models hosted on the Hugging Face Hub.
737
- - Supports all models listed in "Featured Models"
738
- - Custom LoRA support
739
- - Free tier available with API key
740
-
741
- ### Fal AI
742
- Fal AI offers high-speed inference for image generation models.
743
- - Optimized for speed
744
- - Limited model selection compared to HF
745
- - Requires Fal AI API key
746
-
747
- ### Nebius
748
- Nebius Cloud provides image generation capabilities.
749
- - Good performance for certain model types
750
- - Requires Nebius API key
751
-
752
- ### Replicate
753
- Replicate hosts many popular image generation models.
754
- - Wide variety of fine-tuned models
755
- - Simple API
756
- - Requires Replicate API token
757
-
758
- ### Together AI
759
- Together AI offers high-performance model hosting.
760
- - Optimized for speed and quality
761
- - Good selection of models
762
- - Requires Together API key
763
- """
764
- )
765
-
766
  # Accordion providing an overview of advanced settings
767
  with gr.Accordion("Advanced Settings Overview", open=False):
768
  gr.Markdown(
@@ -799,51 +644,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
799
  with gr.Row():
800
  image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
801
 
802
- # Set up button click event to call the query function with the provider parameter
803
- text_button.click(
804
- query,
805
- inputs=[
806
- text_prompt,
807
- model,
808
- custom_lora,
809
- negative_prompt,
810
- steps,
811
- cfg,
812
- method,
813
- seed,
814
- strength,
815
- width,
816
- height,
817
- provider_radio # Added provider parameter
818
- ],
819
- outputs=image_output
820
- )
821
-
822
- # Function to update UI based on provider selection
823
- def update_provider_ui(provider):
824
- if provider == "hf-inference":
825
- return [
826
- gr.update(visible=True), # custom_lora
827
- gr.update(visible=True), # models accordion
828
- "Select a model or provide a custom LoRA"
829
- ]
830
- else:
831
- return [
832
- gr.update(visible=False), # custom_lora
833
- gr.update(visible=False), # models accordion
834
- f"Using {provider} provider - model selection handled by the provider"
835
- ]
836
-
837
- # Update UI when provider changes
838
- provider_radio.change(
839
- update_provider_ui,
840
- inputs=[provider_radio],
841
- outputs=[
842
- custom_lora,
843
- gr.Accordion("Featured Models"),
844
- gr.Textbox(label="Provider Status")
845
- ]
846
- )
847
 
848
  print("Launching Gradio interface...") # Debug log
849
  # Launch the Gradio interface without showing the API or sharing externally
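For reference, the removed multi-provider path above follows a single dispatch pattern: rotate across several HF read tokens for load balancing, attach provider-specific auth headers, build a provider-specific payload, then decode a provider-specific response. Below is a minimal sketch of the token rotation and header selection; the env var names and header formats are taken from this diff, only the HF-style payload and response path is shown, and Replicate's two-step create-then-poll flow plus error handling are omitted.

import os
import random
import requests

# Read tokens named as in app.py; random choice spreads requests across them.
HF_TOKENS = [os.getenv(f"HF_READ_TOKEN{suffix}") for suffix in ("", "_2", "_3", "_4", "_5")]

def build_headers(provider):
    # Header formats mirror the removed code; env var names match the diff.
    if provider == "hf-inference":
        token = random.choice([t for t in HF_TOKENS if t])  # simple load balancing
        return {"Authorization": f"Bearer {token}"}
    if provider == "together":
        return {"Authorization": f"Bearer {os.getenv('TOGETHER_API_KEY')}"}
    if provider == "replicate":
        return {"Authorization": f"Token {os.getenv('REPLICATE_API_TOKEN')}",
                "Content-Type": "application/json"}
    if provider == "nebius":
        return {"Authorization": f"Api-Key {os.getenv('NEBIUS_API_KEY')}",
                "Content-Type": "application/json"}
    if provider == "fal-ai":
        return {"Authorization": f"Bearer {os.getenv('FAL_API_KEY')}",
                "Content-Type": "application/json"}
    raise ValueError(f"unsupported provider: {provider}")

def generate_hf(prompt, api_url, timeout=100):
    # HF-style payload; the inference endpoint returns raw image bytes on success.
    payload = {"inputs": prompt, "parameters": {"width": 1024, "height": 1024}}
    response = requests.post(api_url, headers=build_headers("hf-inference"),
                             json=payload, timeout=timeout)
    response.raise_for_status()
    return response.content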
 
9
 
10
  # Project by Nymbo
11
 
12
+ # Base API URL for Hugging Face inference
13
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
14
  # Retrieve the API token from environment variables
15
  API_TOKEN = os.getenv("HF_READ_TOKEN")
16
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
17
  # Timeout for requests
18
  timeout = 100
19
 
20
+ def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
21
  # Debug log to indicate function start
22
  print("Starting query function...")
23
  # Print the parameters for debugging purposes
24
  print(f"Prompt: {prompt}")
25
  print(f"Model: {model}")
26
  print(f"Custom LoRA: {custom_lora}")
 
27
  print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")
28
 
29
  # Check if the prompt is empty or None
30
  if prompt == "" or prompt is None:
31
+ print("Prompt is empty or None. Exiting query function.") # Debug log
32
  return None
33
 
34
  # Generate a unique key for tracking the generation process
35
  key = random.randint(0, 999)
36
+ print(f"Generated key: {key}") # Debug log
37
 
38
  # Randomly select an API token from available options to distribute the load
39
+ API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])
40
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
41
+ print(f"Selected API token: {API_TOKEN}") # Debug log
42
 
43
  # Enhance the prompt with additional details for better quality
44
+ prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
45
+ print(f'Generation {key}: {prompt}') # Debug log
46
 
47
+ # Set the API URL based on the selected model or custom LoRA
48
+ if custom_lora.strip() != "":
49
+ API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
50
+ else:
51
+ if model == 'Stable Diffusion XL':
52
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
53
+ if model == 'FLUX.1 [Dev]':
54
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
55
+ if model == 'FLUX.1 [Schnell]':
56
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
57
+ if model == 'HiDream-I1-Full':
58
+ API_URL = "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Full"
59
+ if model == 'HiDream-I1-Dev':
60
+ API_URL = "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Dev"
61
+ if model == 'HiDream-I1-Fast':
62
+ API_URL = "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Fast"
63
+ if model == 'Animagine 4.0':
64
+ API_URL = "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-4.0"
65
+ prompt = f"masterpiece, high score, great score, absurdres, {prompt}"
66
+ if model == 'Flux Icon Kit':
67
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Icon-Kit-LoRA"
68
+ prompt = f"Icon Kit, {prompt}"
69
+ if model == 'Pixel Background':
70
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Pixel-Background-LoRA"
71
+ prompt = f"Pixel Background, {prompt}"
72
+ if model == 'Meme XD':
73
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Meme-Xd-LoRA"
74
+ prompt = f"meme, {prompt}"
75
+ if model == 'Chill Guy':
76
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Chill-Guy-Zone"
77
+ prompt = f"chill guy, {prompt}"
78
+ if model == 'Pepe':
79
+ API_URL = "https://api-inference.huggingface.co/models/openfree/pepe"
80
+ prompt = f"pepe, {prompt}"
81
+ if model == 'NSFWmodel':
82
+ API_URL = "https://api-inference.huggingface.co/models/lexa862/NSFWmodel"
83
+ prompt = f"nude, {prompt}"
84
+ if model == 'Claude Art':
85
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Claude-Art"
86
+ prompt = f"claude art, {prompt}"
87
+ if model == 'Open Genmoji':
88
+ API_URL = "https://api-inference.huggingface.co/models/EvanZhouDev/open-genmoji"
89
+ prompt = f"emoji, {prompt}"
90
+ if model == 'EBook Creative Cover':
91
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/EBook-Creative-Cover-Flux-LoRA"
92
+ prompt = f"EBook Cover, {prompt}"
93
+ if model == 'Flux Logo Design 2':
94
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Logo-Design-Flux-LoRA"
95
+ prompt = f"Logo Design, {prompt}"
96
+ if model == 'Isometric 3D':
97
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Isometric-3D-LoRA"
98
+ prompt = f"Isometric 3D, {prompt}"
99
+ if model == 'Flux Condensation':
100
+ API_URL = "https://api-inference.huggingface.co/models/fofr/flux-condensation"
101
+ prompt = f"CONDENSATION, {prompt}"
102
+ if model == 'Flux Handwriting':
103
+ API_URL = "https://api-inference.huggingface.co/models/fofr/flux-handwriting"
104
+ prompt = f"HWRIT handwriting, {prompt}"
105
+ if model == 'Shou Xin':
106
+ API_URL = "https://api-inference.huggingface.co/models/Datou1111/shou_xin"
107
+ prompt = f"shou_xin, pencil sketch, {prompt}"
108
+ if model == 'Sketch Smudge':
109
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Sketch-Smudge-LoRA"
110
+ prompt = f"Sketch Smudge, {prompt}"
111
+ if model == '80s Cyberpunk':
112
+ API_URL = "https://api-inference.huggingface.co/models/fofr/flux-80s-cyberpunk"
113
+ prompt = f"80s cyberpunk, {prompt}"
114
+ if model == 'Coloring Book Flux':
115
+ API_URL = "https://api-inference.huggingface.co/models/renderartist/coloringbookflux"
116
+ prompt = f"c0l0ringb00k, coloring book, coloring book page, {prompt}"
117
+ if model == 'Flux Miniature LoRA':
118
+ API_URL = "https://api-inference.huggingface.co/models/gokaygokay/Flux-Miniature-LoRA"
119
+ prompt = f"MNTR, miniature drawing, {prompt}"
120
+ if model == 'Sketch Paint':
121
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Sketch-Paint"
122
+ prompt = f"Sketch paint, {prompt}"
123
+ if model == 'Flux UltraRealism 2.0':
124
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0"
125
+ prompt = f"Ultra realistic, {prompt}"
126
+ if model == 'Midjourney Mix':
127
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix-LoRA"
128
+ prompt = f"midjourney mix, {prompt}"
129
+ if model == 'Midjourney Mix 2':
130
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix2-LoRA"
131
+ prompt = f"MJ v6, {prompt}"
132
+ if model == 'Flux Logo Design':
133
+ API_URL = "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design"
134
+ prompt = f"wablogo, logo, Minimalist, {prompt}"
135
+ if model == 'Flux Uncensored':
136
+ API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
137
+ if model == 'Flux Uncensored V2':
138
+ API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2"
139
+ if model == 'Flux Tarot Cards':
140
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA"
141
+ prompt = f"Tarot card, {prompt}"
142
+ if model == 'Pixel Art Sprites':
143
+ API_URL = "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux"
144
+ prompt = f"a pixel image, {prompt}"
145
+ if model == '3D Sketchfab':
146
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA"
147
+ prompt = f"3D Sketchfab, {prompt}"
148
+ if model == 'Retro Comic Flux':
149
+ API_URL = "https://api-inference.huggingface.co/models/renderartist/retrocomicflux"
150
+ prompt = f"c0m1c, comic book panel, {prompt}"
151
+ if model == 'Caricature':
152
+ API_URL = "https://api-inference.huggingface.co/models/TheAwakenOne/caricature"
153
+ prompt = f"CCTUR3, {prompt}"
154
+ if model == 'Huggieverse':
155
+ API_URL = "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse"
156
+ prompt = f"HGGRE, {prompt}"
157
+ if model == 'Propaganda Poster':
158
+ API_URL = "https://api-inference.huggingface.co/models/AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion"
159
+ prompt = f"propaganda poster, {prompt}"
160
+ if model == 'Flux Game Assets V2':
161
+ API_URL = "https://api-inference.huggingface.co/models/gokaygokay/Flux-Game-Assets-LoRA-v2"
162
+ prompt = f"wbgmsst, white background, {prompt}"
163
+ if model == 'SDXL HS Card Style':
164
+ API_URL = "https://api-inference.huggingface.co/models/Norod78/sdxl-hearthstone-card-style-lora"
165
+ prompt = f"Hearthstone Card, {prompt}"
166
+ if model == 'SLDR FLUX NSFW v2 Studio':
167
+ API_URL = "https://api-inference.huggingface.co/models/xey/sldr_flux_nsfw_v2-studio"
168
+ if model == 'SoftPasty Flux':
169
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/softpasty-flux-dev"
170
+ prompt = f"araminta_illus illustration style, {prompt}"
171
+ if model == 'Flux Stickers':
172
+ API_URL = "https://api-inference.huggingface.co/models/diabolic6045/Flux_Sticker_Lora"
173
+ prompt = f"5t1cker 5ty1e, {prompt}"
174
+ if model == 'Flux Animex V2':
175
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animex-v2-LoRA"
176
+ prompt = f"Animex, {prompt}"
177
+ if model == 'Flux Animeo V1':
178
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animeo-v1-LoRA"
179
+ prompt = f"Animeo, {prompt}"
180
+ if model == 'Movie Board':
181
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA"
182
+ prompt = f"movieboard, {prompt}"
183
+ if model == 'Purple Dreamy':
184
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Purple-Dreamy-Flux-LoRA"
185
+ prompt = f"Purple Dreamy, {prompt}"
186
+ if model == 'PS1 Style Flux':
187
+ API_URL = "https://api-inference.huggingface.co/models/veryVANYA/ps1-style-flux"
188
+ prompt = f"ps1 game screenshot, {prompt}"
189
+ if model == 'Softserve Anime':
190
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/softserve_anime"
191
+ prompt = f"sftsrv style illustration, {prompt}"
192
+ if model == 'Flux Tarot v1':
193
+ API_URL = "https://api-inference.huggingface.co/models/multimodalart/flux-tarot-v1"
194
+ prompt = f"in the style of TOK a trtcrd tarot style, {prompt}"
195
+ if model == 'Half Illustration':
196
+ API_URL = "https://api-inference.huggingface.co/models/davisbro/half_illustration"
197
+ prompt = f"in the style of TOK, {prompt}"
198
+ if model == 'OpenDalle v1.1':
199
+ API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1"
200
+ if model == 'Flux Ghibsky Illustration':
201
+ API_URL = "https://api-inference.huggingface.co/models/aleksa-codes/flux-ghibsky-illustration"
202
+ prompt = f"GHIBSKY style, {prompt}"
203
+ if model == 'Flux Koda':
204
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/flux-koda"
205
+ prompt = f"flmft style, {prompt}"
206
+ if model == 'Soviet Diffusion XL':
207
+ API_URL = "https://api-inference.huggingface.co/models/openskyml/soviet-diffusion-xl"
208
+ prompt = f"soviet poster, {prompt}"
209
+ if model == 'Flux Realism LoRA':
210
+ API_URL = "https://api-inference.huggingface.co/models/XLabs-AI/flux-RealismLora"
211
+ if model == 'Frosting Lane Flux':
212
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/frosting_lane_flux"
213
+ prompt = f"frstingln illustration, {prompt}"
214
+ if model == 'Phantasma Anime':
215
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/phantasma-anime"
216
+ if model == 'Boreal':
217
+ API_URL = "https://api-inference.huggingface.co/models/kudzueye/Boreal"
218
+ prompt = f"photo, {prompt}"
219
+ if model == 'How2Draw':
220
+ API_URL = "https://api-inference.huggingface.co/models/glif/how2draw"
221
+ prompt = f"How2Draw, {prompt}"
222
+ if model == 'Flux AestheticAnime':
223
+ API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-AestheticAnime"
224
+ if model == 'Fashion Hut Modeling LoRA':
225
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Fashion-Hut-Modeling-LoRA"
226
+ prompt = f"Modeling of, {prompt}"
227
+ if model == 'Flux SyntheticAnime':
228
+ API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-SyntheticAnime"
229
+ prompt = f"1980s anime screengrab, VHS quality, syntheticanime, {prompt}"
230
+ if model == 'Flux Midjourney Anime':
231
+ API_URL = "https://api-inference.huggingface.co/models/brushpenbob/flux-midjourney-anime"
232
+ prompt = f"egmid, {prompt}"
233
+ if model == 'Coloring Book Generator':
234
+ API_URL = "https://api-inference.huggingface.co/models/robert123231/coloringbookgenerator"
235
+ if model == 'Collage Flux':
236
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-Collage-Dim-Flux-LoRA"
237
+ prompt = f"collage, {prompt}"
238
+ if model == 'Flux Product Ad Backdrop':
239
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Product-Ad-Backdrop"
240
+ prompt = f"Product Ad, {prompt}"
241
+ if model == 'Product Design':
242
+ API_URL = "https://api-inference.huggingface.co/models/multimodalart/product-design"
243
+ prompt = f"product designed by prdsgn, {prompt}"
244
+ if model == '90s Anime Art':
245
+ API_URL = "https://api-inference.huggingface.co/models/glif/90s-anime-art"
246
+ if model == 'Brain Melt Acid Art':
247
+ API_URL = "https://api-inference.huggingface.co/models/glif/Brain-Melt-Acid-Art"
248
+ prompt = f"maximalism, in an acid surrealism style, {prompt}"
249
+ if model == 'Lustly Flux Uncensored v1':
250
+ API_URL = "https://api-inference.huggingface.co/models/lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1"
251
+ if model == 'NSFW Master Flux':
252
+ API_URL = "https://api-inference.huggingface.co/models/Keltezaa/NSFW_MASTER_FLUX"
253
+ prompt = f"NSFW, {prompt}"
254
+ if model == 'Flux Outfit Generator':
255
+ API_URL = "https://api-inference.huggingface.co/models/tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator"
256
+ if model == 'Midjourney':
257
+ API_URL = "https://api-inference.huggingface.co/models/Jovie/Midjourney"
258
+ if model == 'DreamPhotoGASM':
259
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM"
260
+ if model == 'Flux Super Realism LoRA':
261
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Super-Realism-LoRA"
262
+ if model == 'Stable Diffusion 2-1':
263
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1-base"
264
+ if model == 'Stable Diffusion 3.5 Large':
265
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
266
+ if model == 'Stable Diffusion 3.5 Large Turbo':
267
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo"
268
+ if model == 'Stable Diffusion 3 Medium':
269
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers"
270
+ prompt = f"A, {prompt}"
271
+ if model == 'Duchaiten Real3D NSFW XL':
272
+ API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/duchaiten-real3d-nsfw-xl"
273
+ if model == 'Pixel Art XL':
274
+ API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
275
+ prompt = f"pixel art, {prompt}"
276
+ if model == 'Character Design':
277
+ API_URL = "https://api-inference.huggingface.co/models/KappaNeuro/character-design"
278
+ prompt = f"Character Design, {prompt}"
279
+ if model == 'Sketched Out Manga':
280
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/sketchedoutmanga"
281
+ prompt = f"daiton, {prompt}"
282
+ if model == 'Archfey Anime':
283
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/archfey_anime"
284
+ if model == 'Lofi Cuties':
285
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/lofi-cuties"
286
+ if model == 'YiffyMix':
287
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/YiffyMix"
288
+ if model == 'Analog Madness Realistic v7':
289
+ API_URL = "https://api-inference.huggingface.co/models/digiplay/AnalogMadness-realistic-model-v7"
290
+ if model == 'Selfie Photography':
291
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl"
292
+ prompt = f"instagram model, discord profile picture, {prompt}"
293
+ if model == 'Filmgrain':
294
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl"
295
+ prompt = f"Film Grain, FilmGrainAF, {prompt}"
296
+ if model == 'Leonardo AI Style Illustration':
297
+ API_URL = "https://api-inference.huggingface.co/models/goofyai/Leonardo_Ai_Style_Illustration"
298
+ prompt = f"leonardo style, illustration, vector art, {prompt}"
299
+ if model == 'Cyborg Style XL':
300
+ API_URL = "https://api-inference.huggingface.co/models/goofyai/cyborg_style_xl"
301
+ prompt = f"cyborg style, {prompt}"
302
+ if model == 'Little Tinies':
303
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/littletinies"
304
+ if model == 'NSFW XL':
305
+ API_URL = "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl"
306
+ if model == 'Analog Redmond':
307
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/analogredmond"
308
+ prompt = f"timeless style, {prompt}"
309
+ if model == 'Pixel Art Redmond':
310
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond"
311
+ prompt = f"Pixel Art, {prompt}"
312
+ if model == 'Ascii Art':
313
+ API_URL = "https://api-inference.huggingface.co/models/CiroN2022/ascii-art"
314
+ prompt = f"ascii art, {prompt}"
315
+ if model == 'Analog':
316
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/Analog"
317
+ if model == 'Maple Syrup':
318
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/MapleSyrup"
319
+ if model == 'Perfect Lewd Fantasy':
320
+ API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01"
321
+ if model == 'AbsoluteReality 1.8.1':
322
+ API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
323
+ if model == 'Disney':
324
+ API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
325
+ prompt = f"Disney style, {prompt}"
326
+ if model == 'Redmond SDXL':
327
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
328
+ if model == 'epiCPhotoGasm':
329
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
330
+ print(f"API URL set to: {API_URL}") # Debug log
331
+
332
+ # Define the payload for the request
333
+ payload = {
334
+ "inputs": prompt,
335
+ "is_negative": is_negative, # Whether to use a negative prompt
336
+ "steps": steps, # Number of sampling steps
337
+ "cfg_scale": cfg_scale, # Scale for controlling adherence to prompt
338
+ "seed": seed if seed != -1 else random.randint(1, 1000000000), # Random seed for reproducibility
339
+ "strength": strength, # How strongly the model should transform the image
340
+ "parameters": {
341
+ "width": width, # Width of the generated image
342
+ "height": height # Height of the generated image
343
+ }
344
+ }
345
+ print(f"Payload: {json.dumps(payload, indent=2)}") # Debug log
346
 
347
  # Make a request to the API to generate the image
348
  try:
349
+ response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
350
+ print(f"Response status code: {response.status_code}") # Debug log
351
  except requests.exceptions.RequestException as e:
352
+ # Log any request exceptions and raise an error for the user
353
+ print(f"Request failed: {e}") # Debug log
354
  raise gr.Error(f"Request failed: {e}")
355
 
356
  # Check if the response status is not successful
357
  if response.status_code != 200:
358
+ print(f"Error: Failed to retrieve image. Response status: {response.status_code}") # Debug log
359
+ print(f"Response content: {response.text}") # Debug log
360
+ if response.status_code == 400:
361
+ raise gr.Error(f"{response.status_code}: Bad Request - There might be an issue with the input parameters.")
362
+ elif response.status_code == 401:
363
+ raise gr.Error(f"{response.status_code}: Unauthorized - Please check your API token.")
364
+ elif response.status_code == 403:
365
+ raise gr.Error(f"{response.status_code}: Forbidden - You do not have permission to access this model.")
366
+ elif response.status_code == 404:
367
+ raise gr.Error(f"{response.status_code}: Not Found - The requested model could not be found.")
368
+ elif response.status_code == 503:
369
+ raise gr.Error(f"{response.status_code}: The model is being loaded. Please try again later.")
370
+ else:
371
+ raise gr.Error(f"{response.status_code}: An unexpected error occurred.")
372
 
373
  try:
374
+ # Attempt to read the image from the response content
375
  image_bytes = response.content
376
  image = Image.open(io.BytesIO(image_bytes))
377
+ print(f'Generation {key} completed! ({prompt})') # Debug log
378
  return image
379
+ except Exception as e:
380
+ # Handle any errors that occur when opening the image
381
+ print(f"Error while trying to open image: {e}") # Debug log
382
+ return None
383
 
384
  # Custom CSS to hide the footer in the interface
385
  css = """
 
398
  with gr.Row():
399
  # Textbox for user to input the prompt
400
  text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
401
  with gr.Row():
402
  # Textbox for custom LoRA input
403
+ custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional)", placeholder="multimodalart/vintage-ads-flux")
404
  with gr.Row():
405
  # Accordion for selecting the model
406
  with gr.Accordion("Featured Models", open=False):
 
515
  )
516
 
517
  # Radio buttons to select the desired model
518
+ model = gr.Radio(label="Select a model below", value="FLUX.1 [Schnell]", choices=models_list, interactive=True, elem_id="model-radio")
519
 
520
  # Filtering models based on search input
521
  def filter_models(search_term):
 
608
  """
609
  )
610
 
611
  # Accordion providing an overview of advanced settings
612
  with gr.Accordion("Advanced Settings Overview", open=False):
613
  gr.Markdown(
 
644
  with gr.Row():
645
  image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
646
 
647
+ # Set up button click event to call the query function
648
+ text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
649
 
650
  print("Launching Gradio interface...") # Debug log
651
  # Launch the Gradio interface without showing the API or sharing externally
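For reference, a minimal standalone sketch of the round trip that the updated query() performs: build the payload, POST it to the selected Hugging Face model endpoint, and open the returned bytes with PIL. It assumes HF_READ_TOKEN is set; the model URL and payload keys are taken from this diff, and the prompt string is only an illustrative example.

import io
import os
import random
import requests
from PIL import Image

# Default model in the UI is FLUX.1 [Schnell]; URL as listed in app.py.
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

payload = {
    "inputs": "a cozy cabin in the woods | ultra detail, ultra elaboration, ultra quality, perfect.",
    "steps": 35,
    "cfg_scale": 7,
    "seed": random.randint(1, 1000000000),
    "strength": 0.7,
    "parameters": {"width": 1024, "height": 1024},
}

response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
response.raise_for_status()  # the app maps these status codes to gr.Error messages instead
image = Image.open(io.BytesIO(response.content))
image.save("output.png")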