Nymbo committed on
Commit 928b6cd · verified · 1 Parent: 446eb57

Update app.py

Files changed (1)
  1. app.py +108 -143
app.py CHANGED
@@ -1,162 +1,127 @@
  import gradio as gr
- from models import models
  from PIL import Image
  import requests
- import uuid
- import io
  import base64
- from transforms import RGBTransform
- import concurrent.futures
- import time

- # Dictionary to track model availability status
- model_status = {}

- def load_models():
-     """
-     Attempts to load all models and tracks their availability status
-     Returns a list of successfully loaded models
-     """
-     loaded_models = []
-     for model in models:
          try:
-             # Attempt to load the model
-             loaded_model = gr.load(f'models/{model}')
-             loaded_models.append(loaded_model)
-             model_status[model] = {'status': 'available', 'error': None}
          except Exception as e:
-             # Track failed model loads
-             model_status[model] = {'status': 'unavailable', 'error': str(e)}
-             print(f"Failed to load {model}: {e}")
-     return loaded_models

- def generate_single_image(model_name, model, prompt, color=None, tint_strength=0.3):
-     """
-     Generates a single image from a specific model with optional color tinting
-     Returns tuple of (image, error_message, model_name)
-     """
-     try:
-         # Generate image
-         out_img = model(prompt)
-
-         # Process the image
-         if isinstance(out_img, str):  # If URL is returned
-             r = requests.get(f'https://omnibus-top-20.hf.space/file={out_img}', stream=True)
-             if r.status_code != 200:
-                 return None, f"HTTP Error: {r.status_code}", model_name
-
-             img = Image.open(io.BytesIO(r.content)).convert('RGB')
-         else:
-             img = Image.open(out_img).convert('RGB')
-
-         # Apply color tinting if specified
-         if color is not None:
-             h = color.lstrip('#')
-             rgb_color = tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
-             img = RGBTransform().mix_with(rgb_color, factor=float(tint_strength)).applied_to(img)
-
-         return img, None, model_name
-
-     except Exception as e:
-         return None, str(e), model_name

- def run_all_models(prompt, color=None, tint_strength=0.3):
-     """
-     Generates images from all available models in parallel
-     """
-     results = []
-     errors = []
-
-     # Load models if not already loaded
-     loaded_models = load_models()
-
-     # Use ThreadPoolExecutor for parallel execution
-     with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
-         future_to_model = {
-             executor.submit(
-                 generate_single_image,
-                 model_name,
-                 model,
-                 prompt,
-                 color,
-                 tint_strength
-             ): model_name
-             for model_name, model in zip(models, loaded_models)
-         }
-
-         for future in concurrent.futures.as_completed(future_to_model):
-             img, error, model_name = future.result()
-             if error:
-                 errors.append(f"{model_name}: {error}")
-                 model_status[model_name]['status'] = 'failed'
-                 model_status[model_name]['error'] = error
-             if img:
-                 results.append((img, model_name))
-
-     # Generate HTML report
-     html_report = "<div class='results-grid'>"
-     for model in models:
-         status = model_status[model]
-         status_color = {
-             'available': 'green',
-             'unavailable': 'red',
-             'failed': 'orange'
-         }.get(status['status'], 'gray')
-
-         html_report += f"""
-         <div class='model-status'>
-             <h3>{model}</h3>
-             <p style='color: {status_color}'>Status: {status['status']}</p>
-             {f"<p class='error'>Error: {status['error']}</p>" if status['error'] else ""}
          </div>
          """
-     html_report += "</div>"

-     return results, html_report
-
- # Gradio interface
- css = """
- .results-grid {
-     display: grid;
-     grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
-     gap: 1rem;
-     padding: 1rem;
- }
- .model-status {
-     border: 1px solid #ddd;
-     padding: 1rem;
-     border-radius: 4px;
- }
- .error {
-     color: red;
-     font-size: 0.9em;
-     word-break: break-word;
- }
- """

- with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as app:
      with gr.Row():
          with gr.Column():
-             inp = gr.Textbox(label="Prompt")
-             btn = gr.Button("Generate from All Models")
          with gr.Column():
-             col = gr.ColorPicker(label="Color Tint (Optional)")
-             tint = gr.Slider(label="Tint Strength", minimum=0, maximum=1, step=0.01, value=0.30)
-
-     status_html = gr.HTML(label="Model Status")
-     gallery = gr.Gallery()
-
-     def process_and_display(prompt, color, tint_strength):
-         results, html_report = run_all_models(prompt, color, tint_strength)
-         return (
-             [img for img, _ in results],
-             html_report
-         )
-
-     btn.click(
-         process_and_display,
-         inputs=[inp, col, tint],
-         outputs=[gallery, status_html]
-     )

  app.launch()
 
  import gradio as gr
  from PIL import Image
  import requests
+ import io
  import base64

+ # List of models to process
+ models = [
+     "dreamlike-art/dreamlike-photoreal-2.0",
+     "stabilityai/stable-diffusion-xl-base-1.0",
+     "black-forest-labs/FLUX.1-dev",
+     "black-forest-labs/FLUX.1-schnell",
+     "veryVANYA/ps1-style-flux",
+     "alvdansen/softserve_anime",
+     "multimodalart/flux-tarot-v1",
+     "davisbro/half_illustration",
+     "dataautogpt3/OpenDalleV1.1",
+     "aleksa-codes/flux-ghibsky-illustration",
+     "alvdansen/flux-koda",
+     "openskyml/soviet-diffusion-xl",
+     "XLabs-AI/flux-RealismLora",
+     "alvdansen/frosting_lane_flux",
+     "alvdansen/phantasma-anime",
+     "kudzueye/Boreal",
+     "glif/how2draw",
+     "dataautogpt3/FLUX-AestheticAnime",
+     "prithivMLmods/Fashion-Hut-Modeling-LoRA",
+     "dataautogpt3/FLUX-SyntheticAnime",
+     "brushpenbob/flux-midjourney-anime",
+     "robert123231/coloringbookgenerator",
+     "prithivMLmods/Castor-Collage-Dim-Flux-LoRA",
+     "prithivMLmods/Flux-Product-Ad-Backdrop",
+     "multimodalart/product-design",
+     "glif/90s-anime-art",
+     "glif/Brain-Melt-Acid-Art",
+     "lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1",
+     "Keltezaa/NSFW_MASTER_FLUX",
+     "tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator",
+     "Jovie/Midjourney",
+     "Yntec/DreamPhotoGASM",
+     "strangerzonehf/Flux-Super-Realism-LoRA",
+     "stabilityai/stable-diffusion-2-1-base",
+     "stabilityai/stable-diffusion-3.5-large",
+     "stabilityai/stable-diffusion-3.5-large-turbo",
+     "stabilityai/stable-diffusion-3-medium-diffusers",
+     "stablediffusionapi/duchaiten-real3d-nsfw-xl",
+     "nerijs/pixel-art-xl",
+     "KappaNeuro/character-design",
+     "alvdansen/sketchedoutmanga",
+     "alvdansen/archfey_anime",
+     "alvdansen/lofi-cuties",
+     "Yntec/YiffyMix",
+     "digiplay/AnalogMadness-realistic-model-v7",
+     "artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl",
+     "artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl",
+     "goofyai/Leonardo_Ai_Style_Illustration",
+     "goofyai/cyborg_style_xl",
+     "alvdansen/littletinies",
+     "Dremmar/nsfw-xl",
+     "artificialguybr/analogredmond",
+     "artificialguybr/PixelArtRedmond",
+     "CiroN2022/ascii-art",
+     "Yntec/Analog",
+     "Yntec/MapleSyrup",
+     "digiplay/perfectLewdFantasy_v1.01",
+     "digiplay/AbsoluteReality_v1.8.1",
+     "goofyai/disney_style_xl",
+     "artificialguybr/LogoRedmond-LogoLoraForSDXL-V2",
+     "Yntec/epiCPhotoGasm",
+ ]

+ # Initialize a dictionary to track models and their status
+ model_results = {"success": {}, "failed": []}
+
+ def generate_images(prompt):
+     output_images = []
+     failed_models = []
+     for model_name in models:
          try:
+             # Attempt to load and generate an image with the model
+             model = gr.Interface.load(f"models/{model_name}")
+             img_path = model.predict(prompt)
+
+             # Fetch image
+             response = requests.get(img_path, stream=True)
+             if response.status_code == 200:
+                 img = Image.open(io.BytesIO(response.content))
+                 output_images.append((model_name, img))
+             else:
+                 failed_models.append(model_name)
          except Exception as e:
+             print(f"Error with model {model_name}: {e}")
+             failed_models.append(model_name)

+     return output_images, failed_models

+ def app_interface(prompt):
+     output_images, failed_models = generate_images(prompt)
+     images_html = ""
+     for model_name, img in output_images:
+         # Convert images for display in Gradio
+         img_buffer = io.BytesIO()
+         img.save(img_buffer, format="PNG")
+         img_str = base64.b64encode(img_buffer.getvalue()).decode()
+         images_html += f"""
+         <div>
+             <strong>{model_name}</strong>
+             <img src='data:image/png;base64,{img_str}' style="max-width: 200px;" />
          </div>
          """

+     failed_html = "<ul>" + "".join(f"<li>{model}</li>" for model in failed_models) + "</ul>"
+     return images_html, f"<h3>Failed Models:</h3>{failed_html}"

+ # Define Gradio UI
+ with gr.Blocks() as app:
      with gr.Row():
          with gr.Column():
+             prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt")
+             generate_button = gr.Button("Generate Images")
          with gr.Column():
+             output_html = gr.HTML(label="Generated Images")
+             failed_html = gr.HTML(label="Failed Models")
+
+     generate_button.click(app_interface, inputs=[prompt_input], outputs=[output_html, failed_html])

  app.launch()
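
Note: the new code path loads each model through the legacy gr.Interface.load(...).predict(...) API and then fetches the returned path with requests.get, so a model that returns a local file path rather than a URL will raise inside the try block and land in failed_models. For a quick standalone check of any entry in `models`, here is a minimal sketch (an assumption for illustration, not part of this commit) using huggingface_hub.InferenceClient, which returns a PIL image directly and avoids the URL fetch:

# Hypothetical smoke test (assumptions: huggingface_hub is installed and an
# HF token with Inference API access is available in the environment).
from huggingface_hub import InferenceClient

client = InferenceClient()  # picks up HF_TOKEN from the environment if set
image = client.text_to_image(
    "a lighthouse at dusk, photoreal",                 # any test prompt
    model="stabilityai/stable-diffusion-xl-base-1.0",  # any entry from `models`
)
image.save("smoke_test.png")  # a PIL.Image.Image, same type generate_images collects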