Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,162 +1,127 @@
 import gradio as gr
-from models import models
 from PIL import Image
 import requests
-import
-import io
 import base64
-from transforms import RGBTransform
-import concurrent.futures
-import time
 
-#
 
-for model in models:
     try:
-        # Attempt to load the model
     except Exception as e:
-        print(f"Failed to load {model}: {e}")
-return loaded_models
 
-def generate_single_image(model_name, model, prompt, color, tint_strength):
-    """
-    Generates a single image from a specific model with optional color tinting
-    Returns tuple of (image, error_message, model_name)
-    """
-    try:
-        # Generate image
-        out_img = model(prompt)
-
-        # Process the image
-        if isinstance(out_img, str):  # If URL is returned
-            r = requests.get(f'https://omnibus-top-20.hf.space/file={out_img}', stream=True)
-            if r.status_code != 200:
-                return None, f"HTTP Error: {r.status_code}", model_name
-
-            img = Image.open(io.BytesIO(r.content)).convert('RGB')
-        else:
-            img = Image.open(out_img).convert('RGB')
-
-        # Apply color tinting if specified
-        if color is not None:
-            h = color.lstrip('#')
-            rgb_color = tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
-            img = RGBTransform().mix_with(rgb_color, factor=float(tint_strength)).applied_to(img)
-
-        return img, None, model_name
-
-    except Exception as e:
-        return None, str(e), model_name
 
-def run_all_models(prompt, color, tint_strength):
-    with concurrent.futures.ThreadPoolExecutor() as executor:
-        future_to_model = {
-            executor.submit(
-                generate_single_image,
-                model_name,
-                model,
-                prompt,
-                color,
-                tint_strength
-            ): model_name
-            for model_name, model in zip(models, loaded_models)
-        }
-
-        for future in concurrent.futures.as_completed(future_to_model):
-            img, error, model_name = future.result()
-            if error:
-                errors.append(f"{model_name}: {error}")
-                model_status[model_name]['status'] = 'failed'
-                model_status[model_name]['error'] = error
-            if img:
-                results.append((img, model_name))
-
-    # Generate HTML report
-    html_report = "<div class='results-grid'>"
-    for model in models:
-        status = model_status[model]
-        status_color = {
-            'available': 'green',
-            'unavailable': 'red',
-            'failed': 'orange'
-        }.get(status['status'], 'gray')
-
-        html_report += f"""
-        <div class='model-status'>
-            <h3>{model}</h3>
-            <p style='color: {status_color}'>Status: {status['status']}</p>
-            {f"<p class='error'>Error: {status['error']}</p>" if status['error'] else ""}
         </div>
         """
-    html_report += "</div>"
 
-    return results, html_report
-
-# Gradio interface
-css = """
-.results-grid {
-    display: grid;
-    grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
-    gap: 1rem;
-    padding: 1rem;
-}
-.model-status {
-    border: 1px solid #ddd;
-    padding: 1rem;
-    border-radius: 4px;
-}
-.error {
-    color: red;
-    font-size: 0.9em;
-    word-break: break-word;
-}
-"""
 
-with gr.Blocks(css=css) as app:
     with gr.Row():
         with gr.Column():
         with gr.Column():
-            gallery = gr.Gallery()
-
-    def process_and_display(prompt, color, tint_strength):
-        results, html_report = run_all_models(prompt, color, tint_strength)
-        return (
-            [img for img, _ in results],
-            html_report
-        )
-
-    btn.click(
-        process_and_display,
-        inputs=[inp, col, tint],
-        outputs=[gallery, status_html]
-    )
 
 app.launch()
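The commit drops the concurrent, tint-capable pipeline above (models supplied by a separate models module, fanned out with concurrent.futures, color tinting via RGBTransform) and replaces it with the simpler sequential implementation below.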
 import gradio as gr
 from PIL import Image
 import requests
+import io
 import base64
 
+# List of models to process
+models = [
+    "dreamlike-art/dreamlike-photoreal-2.0",
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    "black-forest-labs/FLUX.1-dev",
+    "black-forest-labs/FLUX.1-schnell",
+    "veryVANYA/ps1-style-flux",
+    "alvdansen/softserve_anime",
+    "multimodalart/flux-tarot-v1",
+    "davisbro/half_illustration",
+    "dataautogpt3/OpenDalleV1.1",
+    "aleksa-codes/flux-ghibsky-illustration",
+    "alvdansen/flux-koda",
+    "openskyml/soviet-diffusion-xl",
+    "XLabs-AI/flux-RealismLora",
+    "alvdansen/frosting_lane_flux",
+    "alvdansen/phantasma-anime",
+    "kudzueye/Boreal",
+    "glif/how2draw",
+    "dataautogpt3/FLUX-AestheticAnime",
+    "prithivMLmods/Fashion-Hut-Modeling-LoRA",
+    "dataautogpt3/FLUX-SyntheticAnime",
+    "brushpenbob/flux-midjourney-anime",
+    "robert123231/coloringbookgenerator",
+    "prithivMLmods/Castor-Collage-Dim-Flux-LoRA",
+    "prithivMLmods/Flux-Product-Ad-Backdrop",
+    "multimodalart/product-design",
+    "glif/90s-anime-art",
+    "glif/Brain-Melt-Acid-Art",
+    "lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1",
+    "Keltezaa/NSFW_MASTER_FLUX",
+    "tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator",
+    "Jovie/Midjourney",
+    "Yntec/DreamPhotoGASM",
+    "strangerzonehf/Flux-Super-Realism-LoRA",
+    "stabilityai/stable-diffusion-2-1-base",
+    "stabilityai/stable-diffusion-3.5-large",
+    "stabilityai/stable-diffusion-3.5-large-turbo",
+    "stabilityai/stable-diffusion-3-medium-diffusers",
+    "stablediffusionapi/duchaiten-real3d-nsfw-xl",
+    "nerijs/pixel-art-xl",
+    "KappaNeuro/character-design",
+    "alvdansen/sketchedoutmanga",
+    "alvdansen/archfey_anime",
+    "alvdansen/lofi-cuties",
+    "Yntec/YiffyMix",
+    "digiplay/AnalogMadness-realistic-model-v7",
+    "artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl",
+    "artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl",
+    "goofyai/Leonardo_Ai_Style_Illustration",
+    "goofyai/cyborg_style_xl",
+    "alvdansen/littletinies",
+    "Dremmar/nsfw-xl",
+    "artificialguybr/analogredmond",
+    "artificialguybr/PixelArtRedmond",
+    "CiroN2022/ascii-art",
+    "Yntec/Analog",
+    "Yntec/MapleSyrup",
+    "digiplay/perfectLewdFantasy_v1.01",
+    "digiplay/AbsoluteReality_v1.8.1",
+    "goofyai/disney_style_xl",
+    "artificialguybr/LogoRedmond-LogoLoraForSDXL-V2",
+    "Yntec/epiCPhotoGasm",
+]
 
+# Initialize a dictionary to track models and their status
+model_results = {"success": {}, "failed": []}
+
+def generate_images(prompt):
+    output_images = []
+    failed_models = []
+    for model_name in models:
         try:
+            # Attempt to load and generate an image with the model
+            model = gr.Interface.load(f"models/{model_name}")
+            img_path = model.predict(prompt)
+
+            # Fetch image
+            response = requests.get(img_path, stream=True)
+            if response.status_code == 200:
+                img = Image.open(io.BytesIO(response.content))
+                output_images.append((model_name, img))
+            else:
+                failed_models.append(model_name)
         except Exception as e:
+            print(f"Error with model {model_name}: {e}")
+            failed_models.append(model_name)
 
+    return output_images, failed_models
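Note on the loading step: gr.Interface.load was deprecated in Gradio 3.x and removed in 4.x, which is a plausible cause of this Space's "Runtime error" badge. A minimal sketch of the same step against a current Gradio release, assuming gr.load and that a loaded model can be called like a plain function returning a file path (generate_with_model is an illustrative name, not part of the commit):

import gradio as gr
from PIL import Image

def generate_with_model(model_name, prompt):
    # gr.load replaces the removed gr.Interface.load (assumes Gradio >= 4)
    model = gr.load(f"models/{model_name}")
    # A loaded model is callable; for image models the call typically
    # returns a local file path rather than a URL (assumption)
    img_path = model(prompt)
    return Image.open(img_path)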
 
+def app_interface(prompt):
+    output_images, failed_models = generate_images(prompt)
+    images_html = ""
+    for model_name, img in output_images:
+        # Convert images for display in Gradio
+        img_buffer = io.BytesIO()
+        img.save(img_buffer, format="PNG")
+        img_str = base64.b64encode(img_buffer.getvalue()).decode()
+        images_html += f"""
+        <div>
+            <strong>{model_name}</strong>
+            <img src='data:image/png;base64,{img_str}' style="max-width: 200px;" />
         </div>
         """
 
+    failed_html = "<ul>" + "".join(f"<li>{model}</li>" for model in failed_models) + "</ul>"
+    return images_html, f"<h3>Failed Models:</h3>{failed_html}"
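The hand-rolled base64 <img> grid above works, but the previous version's gr.Gallery does the same job without manual encoding: a Gallery accepts (image, caption) tuples directly. A hedged sketch (handler and component names are illustrative):

def app_interface_gallery(prompt):
    output_images, failed_models = generate_images(prompt)
    # gr.Gallery takes (PIL.Image, caption) tuples, so no base64 step is needed
    items = [(img, model_name) for model_name, img in output_images]
    failed_list = "<ul>" + "".join(f"<li>{m}</li>" for m in failed_models) + "</ul>"
    return items, f"<h3>Failed Models:</h3>{failed_list}"

# Wired to a gallery instead of raw HTML:
# gallery = gr.Gallery(label="Generated Images")
# generate_button.click(app_interface_gallery, inputs=[prompt_input], outputs=[gallery, failed_html])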
 
+# Define Gradio UI
+with gr.Blocks() as app:
     with gr.Row():
         with gr.Column():
+            prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt")
+            generate_button = gr.Button("Generate Images")
         with gr.Column():
+            output_html = gr.HTML(label="Generated Images")
+            failed_html = gr.HTML(label="Failed Models")
+
+    generate_button.click(app_interface, inputs=[prompt_input], outputs=[output_html, failed_html])
 
 app.launch()
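A second likely failure mode, independent of the Gradio version: generate_images passes img_path straight to requests.get, but loaded models frequently return a local temp-file path rather than a URL. A defensive sketch for the fetch step, assuming path vs. URL can be distinguished by prefix:

if str(img_path).startswith("http"):
    response = requests.get(img_path, stream=True)
    response.raise_for_status()
    img = Image.open(io.BytesIO(response.content))
else:
    # Loaded models often write their output to a local temp file
    img = Image.open(img_path)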