Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -28,265 +28,360 @@ def get_device():
|
|
28 |
|
29 |
DEVICE = get_device()
|
30 |
|
31 |
-
class
|
32 |
"""
|
33 |
-
|
34 |
"""
|
35 |
|
36 |
def __init__(self):
|
37 |
self.forbidden_elements = ["++", "weights", "white background [en dev]"]
|
38 |
|
39 |
-
#
|
40 |
-
|
41 |
-
|
42 |
-
"
|
43 |
-
"
|
|
|
44 |
}
|
45 |
|
46 |
self.facial_features = {
|
47 |
-
"
|
48 |
-
"
|
49 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
}
|
51 |
|
52 |
-
self.
|
53 |
-
"
|
54 |
-
"
|
|
|
|
|
|
|
55 |
}
|
56 |
|
57 |
-
self.
|
58 |
-
"
|
59 |
-
"
|
60 |
-
"
|
|
|
|
|
61 |
}
|
62 |
|
63 |
-
self.
|
64 |
-
"portrait": ["
|
65 |
-
"
|
66 |
-
"
|
|
|
67 |
}
|
68 |
|
69 |
-
self.
|
70 |
-
"
|
71 |
-
|
72 |
-
|
73 |
-
|
|
|
|
|
|
|
74 |
}
|
75 |
|
76 |
-
def
|
77 |
-
"""
|
78 |
-
|
|
|
|
|
|
|
79 |
analysis = {
|
80 |
-
"subjects": [],
|
81 |
"age": None,
|
82 |
-
"
|
83 |
-
"
|
|
|
|
|
|
|
|
|
84 |
"setting": None,
|
|
|
|
|
85 |
"mood": None,
|
86 |
-
"
|
87 |
}
|
88 |
|
89 |
-
#
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
else:
|
98 |
-
analysis["
|
99 |
-
analysis["age"] = "middle"
|
100 |
|
101 |
-
if any(word in
|
102 |
-
|
103 |
-
analysis["subjects"].append("elderly woman")
|
104 |
-
analysis["age"] = "elderly"
|
105 |
-
else:
|
106 |
-
analysis["subjects"].append("woman")
|
107 |
|
108 |
-
|
109 |
-
|
110 |
-
if any(word in clip_lower for word in ["gray", "grey", "silver", "white"]):
|
111 |
-
analysis["features"].append("silver beard")
|
112 |
-
else:
|
113 |
-
analysis["features"].append("beard")
|
114 |
|
115 |
-
|
116 |
-
|
|
|
|
|
117 |
|
118 |
-
#
|
119 |
-
if any(word in
|
120 |
-
analysis["
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
121 |
|
122 |
-
if any(word in clip_lower for word in ["suit", "formal", "dress", "shirt"]):
|
123 |
-
analysis["clothing"].append("formal wear")
|
124 |
-
|
125 |
-
# Setting detection
|
126 |
-
if any(word in clip_lower for word in ["indoor", "inside", "interior", "room"]):
|
127 |
-
analysis["setting"] = "indoor"
|
128 |
-
elif any(word in clip_lower for word in ["outdoor", "outside", "landscape", "street"]):
|
129 |
-
analysis["setting"] = "outdoor"
|
130 |
-
elif any(word in clip_lower for word in ["studio", "backdrop"]):
|
131 |
-
analysis["setting"] = "studio"
|
132 |
-
|
133 |
-
# Mood and composition
|
134 |
-
if any(word in clip_lower for word in ["portrait", "headshot", "face", "close-up"]):
|
135 |
-
analysis["composition"] = "portrait"
|
136 |
-
elif any(word in clip_lower for word in ["sitting", "seated", "chair"]):
|
137 |
-
analysis["composition"] = "seated"
|
138 |
-
elif any(word in clip_lower for word in ["standing", "upright"]):
|
139 |
-
analysis["composition"] = "standing"
|
140 |
-
|
141 |
return analysis
|
142 |
|
143 |
-
def
|
144 |
-
"""Build
|
145 |
components = []
|
146 |
|
147 |
-
# 1.
|
148 |
-
if analysis["
|
149 |
-
|
150 |
-
article = "An" if
|
|
|
|
|
151 |
else:
|
152 |
article = "A"
|
153 |
components.append(article)
|
154 |
|
155 |
-
# 2.
|
156 |
adjectives = []
|
157 |
-
if analysis["age"] == "elderly":
|
158 |
-
adjectives.extend(["distinguished", "weathered"])
|
159 |
-
elif analysis["age"] == "young":
|
160 |
-
adjectives.extend(["young", "fresh-faced"])
|
161 |
-
else:
|
162 |
-
adjectives.extend(["professional", "elegant"])
|
163 |
|
164 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
165 |
components.extend(adjectives[:2])
|
166 |
|
167 |
-
# 3.
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
|
|
|
|
|
|
|
|
|
|
173 |
else:
|
174 |
-
|
175 |
-
components.append(main_subject)
|
176 |
|
177 |
-
|
178 |
-
|
179 |
-
if "glasses" in analysis["features"]:
|
180 |
-
feature_descriptions.append("with distinctive wire-frame glasses")
|
181 |
-
if any("beard" in f for f in analysis["features"]):
|
182 |
-
if "silver beard" in analysis["features"]:
|
183 |
-
feature_descriptions.append("with a distinguished silver beard")
|
184 |
-
else:
|
185 |
-
feature_descriptions.append("with a full beard")
|
186 |
|
187 |
-
|
188 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
189 |
|
190 |
-
|
191 |
-
|
192 |
-
if "hat" in analysis["clothing"]:
|
193 |
-
clothing_desc.append("wearing a traditional black hat")
|
194 |
-
if "formal wear" in analysis["clothing"]:
|
195 |
-
clothing_desc.append("in formal attire")
|
196 |
-
|
197 |
-
if clothing_desc:
|
198 |
-
components.extend(clothing_desc)
|
199 |
-
|
200 |
-
# 6. Verb/Action (based on composition analysis)
|
201 |
-
if analysis["composition"] == "seated":
|
202 |
-
action = "seated contemplatively"
|
203 |
-
elif analysis["composition"] == "standing":
|
204 |
-
action = "standing with dignity"
|
205 |
else:
|
206 |
-
action = "positioned
|
207 |
components.append(action)
|
208 |
|
209 |
-
# 7.
|
210 |
-
|
211 |
-
"indoor": "in
|
212 |
-
"
|
213 |
-
"
|
|
|
|
|
214 |
}
|
215 |
|
216 |
if analysis["setting"]:
|
217 |
-
context =
|
218 |
else:
|
219 |
-
context = "
|
220 |
components.append(context)
|
221 |
|
222 |
-
# 8.
|
223 |
-
|
224 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
225 |
else:
|
226 |
-
|
227 |
-
components.append(env_detail)
|
228 |
|
229 |
-
|
230 |
-
|
231 |
-
|
|
|
|
|
|
|
|
|
|
|
232 |
else:
|
233 |
-
|
234 |
-
components.append(tech_spec)
|
235 |
|
236 |
-
|
237 |
-
components.append("professional photography")
|
238 |
|
239 |
-
#
|
|
|
|
|
|
|
240 |
prompt = ", ".join(components)
|
241 |
|
242 |
-
# Clean up
|
243 |
prompt = re.sub(r'\s+', ' ', prompt) # Remove extra spaces
|
244 |
-
prompt =
|
|
|
|
|
|
|
|
|
245 |
|
246 |
return prompt
|
247 |
|
248 |
-
def
|
249 |
-
"""Calculate
|
250 |
score = 0
|
|
|
251 |
|
252 |
-
# Structure compliance (
|
253 |
if prompt.startswith(("A", "An")):
|
254 |
score += 10
|
255 |
-
|
256 |
-
# Feature
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
#
|
261 |
-
if analysis["
|
262 |
-
score += 15
|
263 |
-
|
264 |
-
# Subject detail depth
|
265 |
-
if len(analysis["subjects"]) > 0:
|
266 |
score += 15
|
267 |
-
|
268 |
-
|
269 |
-
|
|
|
|
|
270 |
score += 15
|
271 |
-
|
272 |
-
# Lighting specification
|
273 |
-
if "lighting" in prompt:
|
274 |
score += 10
|
275 |
-
|
276 |
-
#
|
277 |
-
if
|
|
|
|
|
|
|
|
|
278 |
score += 10
|
279 |
-
|
280 |
-
# Forbidden elements check
|
281 |
if not any(forbidden in prompt for forbidden in self.forbidden_elements):
|
282 |
score += 10
|
283 |
-
|
284 |
-
return min(score,
|
285 |
|
286 |
-
class
|
287 |
def __init__(self):
|
288 |
self.interrogator = None
|
289 |
-
self.analyzer =
|
290 |
self.usage_count = 0
|
291 |
self.device = DEVICE
|
292 |
self.is_initialized = False
|
@@ -337,7 +432,7 @@ class FluxPromptOptimizer:
|
|
337 |
return image
|
338 |
|
339 |
@spaces.GPU
|
340 |
-
def
|
341 |
try:
|
342 |
if not self.is_initialized:
|
343 |
if not self.initialize_model():
|
@@ -354,17 +449,23 @@ class FluxPromptOptimizer:
|
|
354 |
|
355 |
start_time = datetime.now()
|
356 |
|
357 |
-
#
|
358 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
359 |
|
360 |
-
#
|
361 |
-
deep_analysis = self.analyzer.
|
362 |
|
363 |
-
#
|
364 |
-
optimized_prompt = self.analyzer.
|
365 |
|
366 |
-
#
|
367 |
-
score = self.analyzer.
|
368 |
|
369 |
end_time = datetime.now()
|
370 |
duration = (end_time - start_time).total_seconds()
|
@@ -375,67 +476,93 @@ class FluxPromptOptimizer:
|
|
375 |
else:
|
376 |
torch.cuda.empty_cache()
|
377 |
|
378 |
-
#
|
379 |
gpu_status = "⚡ ZeroGPU" if torch.cuda.is_available() else "💻 CPU"
|
380 |
|
381 |
-
|
382 |
-
|
|
|
|
|
383 |
|
384 |
-
analysis_info = f"""**
|
385 |
|
386 |
-
**Processing:** {gpu_status} • {duration:.1f}s
|
387 |
**Intelligence Score:** {score}/100
|
|
|
388 |
**Generation:** #{self.usage_count}
|
389 |
|
390 |
-
**
|
391 |
-
• **
|
392 |
-
• **
|
393 |
-
• **
|
394 |
-
• **
|
|
|
|
|
|
|
395 |
|
396 |
-
**CLIP
|
397 |
-
**
|
|
|
|
|
|
|
|
|
398 |
|
399 |
return optimized_prompt, analysis_info, score
|
400 |
|
401 |
except Exception as e:
|
402 |
-
logger.error(f"
|
403 |
return f"❌ Error: {str(e)}", "Please try with a different image.", 0
|
404 |
|
405 |
-
optimizer =
|
406 |
|
407 |
-
def
|
408 |
-
"""
|
409 |
try:
|
410 |
-
prompt, info, score = optimizer.
|
411 |
-
|
412 |
-
#
|
413 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
414 |
score_html = f'''
|
415 |
-
<div style="text-align: center; padding:
|
416 |
-
<div style="font-size:
|
417 |
-
<div style="font-size:
|
|
|
418 |
</div>
|
419 |
'''
|
420 |
|
421 |
return prompt, info, score_html
|
422 |
|
423 |
except Exception as e:
|
424 |
-
logger.error(f"
|
425 |
return "❌ Processing failed", f"Error: {str(e)}", '<div style="text-align: center; color: red;">Error</div>'
|
426 |
|
427 |
def clear_outputs():
|
428 |
gc.collect()
|
429 |
if torch.cuda.is_available():
|
430 |
torch.cuda.empty_cache()
|
431 |
-
return "", "", '<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Intelligence Score</div></div>'
|
432 |
|
433 |
def create_interface():
|
434 |
css = """
|
435 |
-
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
|
436 |
|
437 |
.gradio-container {
|
438 |
-
max-width:
|
439 |
margin: 0 auto !important;
|
440 |
font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
|
441 |
background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%) !important;
|
@@ -444,117 +571,123 @@ def create_interface():
|
|
444 |
.main-header {
|
445 |
text-align: center;
|
446 |
padding: 2rem 0 3rem 0;
|
447 |
-
background: linear-gradient(135deg, #
|
448 |
color: white;
|
449 |
margin: -2rem -2rem 2rem -2rem;
|
450 |
border-radius: 0 0 24px 24px;
|
|
|
451 |
}
|
452 |
|
453 |
.main-title {
|
454 |
-
font-size:
|
455 |
-
font-weight:
|
456 |
margin: 0 0 0.5rem 0 !important;
|
457 |
letter-spacing: -0.025em !important;
|
458 |
-
background: linear-gradient(135deg, #60a5fa 0%, #3b82f6 100%);
|
459 |
-webkit-background-clip: text;
|
460 |
-webkit-text-fill-color: transparent;
|
461 |
background-clip: text;
|
462 |
}
|
463 |
|
464 |
.subtitle {
|
465 |
-
font-size: 1.
|
466 |
font-weight: 400 !important;
|
467 |
-
opacity: 0.
|
468 |
margin: 0 !important;
|
469 |
}
|
470 |
|
471 |
.prompt-output {
|
472 |
font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', monospace !important;
|
473 |
font-size: 14px !important;
|
474 |
-
line-height: 1.
|
475 |
background: linear-gradient(135deg, #ffffff 0%, #f8fafc 100%) !important;
|
476 |
border: 1px solid #e2e8f0 !important;
|
477 |
-
border-radius:
|
478 |
-
padding:
|
479 |
-
box-shadow: 0
|
480 |
}
|
481 |
"""
|
482 |
|
483 |
with gr.Blocks(
|
484 |
theme=gr.themes.Soft(),
|
485 |
-
title="Flux Prompt Optimizer",
|
486 |
css=css
|
487 |
) as interface:
|
488 |
|
489 |
gr.HTML("""
|
490 |
<div class="main-header">
|
491 |
-
<div class="main-title"
|
492 |
-
<div class="subtitle">
|
493 |
</div>
|
494 |
""")
|
495 |
|
496 |
with gr.Row():
|
497 |
with gr.Column(scale=1):
|
498 |
-
gr.Markdown("##
|
499 |
|
500 |
image_input = gr.Image(
|
501 |
-
label="Upload your image",
|
502 |
type="pil",
|
503 |
-
height=
|
504 |
)
|
505 |
|
506 |
-
|
507 |
-
"
|
508 |
variant="primary",
|
509 |
size="lg"
|
510 |
)
|
511 |
|
512 |
gr.Markdown("""
|
513 |
-
###
|
514 |
|
515 |
-
|
|
|
|
|
|
|
516 |
|
517 |
-
|
518 |
-
•
|
519 |
-
•
|
520 |
-
•
|
|
|
|
|
521 |
|
522 |
-
No
|
523 |
""")
|
524 |
|
525 |
with gr.Column(scale=1):
|
526 |
-
gr.Markdown("##
|
527 |
|
528 |
prompt_output = gr.Textbox(
|
529 |
-
label="
|
530 |
-
placeholder="Upload an image to see the
|
531 |
-
lines=
|
532 |
-
max_lines=
|
533 |
elem_classes=["prompt-output"],
|
534 |
show_copy_button=True
|
535 |
)
|
536 |
|
537 |
score_output = gr.HTML(
|
538 |
-
value='<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Intelligence Score</div></div>'
|
539 |
)
|
540 |
|
541 |
info_output = gr.Markdown(value="")
|
542 |
|
543 |
-
clear_btn = gr.Button("🗑️ Clear", size="sm")
|
544 |
|
545 |
gr.Markdown("""
|
546 |
---
|
547 |
-
### 🔬
|
548 |
|
549 |
-
This
|
550 |
-
|
551 |
|
552 |
-
**Research
|
553 |
""")
|
554 |
|
555 |
-
#
|
556 |
-
|
557 |
-
fn=
|
558 |
inputs=[image_input],
|
559 |
outputs=[prompt_output, info_output, score_output]
|
560 |
)
|
@@ -567,7 +700,7 @@ def create_interface():
|
|
567 |
return interface
|
568 |
|
569 |
if __name__ == "__main__":
|
570 |
-
logger.info("🚀 Starting
|
571 |
interface = create_interface()
|
572 |
interface.launch(
|
573 |
server_name="0.0.0.0",
|
|
|
28 |
|
29 |
DEVICE = get_device()
|
30 |
|
31 |
+
class MaximumFluxAnalyzer:
|
32 |
"""
|
33 |
+
Maximum depth analysis engine - extracts EVERYTHING possible from images
|
34 |
"""
|
35 |
|
36 |
def __init__(self):
|
37 |
self.forbidden_elements = ["++", "weights", "white background [en dev]"]
|
38 |
|
39 |
+
# EXPANDED VOCABULARIES FOR MAXIMUM DETECTION
|
40 |
+
|
41 |
+
self.age_keywords = {
|
42 |
+
"elderly": ["old", "elderly", "aged", "senior", "mature", "weathered", "wrinkled", "gray", "grey", "white hair", "silver", "graying", "ancient", "vintage"],
|
43 |
+
"middle": ["middle-aged", "adult", "grown", "middle", "forties", "fifties"],
|
44 |
+
"young": ["young", "youth", "teenage", "boy", "girl", "child", "kid", "adolescent"]
|
45 |
}
|
46 |
|
47 |
self.facial_features = {
|
48 |
+
"beard_full": ["beard", "bearded", "facial hair", "full beard", "thick beard", "heavy beard"],
|
49 |
+
"beard_color": ["gray beard", "grey beard", "silver beard", "white beard", "salt pepper", "graying beard"],
|
50 |
+
"mustache": ["mustache", "moustache", "facial hair"],
|
51 |
+
"glasses": ["glasses", "spectacles", "eyeglasses", "wire-frame", "rimmed glasses", "reading glasses"],
|
52 |
+
"eyes": ["eyes", "gaze", "stare", "looking", "piercing", "intense", "deep eyes"],
|
53 |
+
"wrinkles": ["wrinkled", "lines", "aged", "weathered", "creased"],
|
54 |
+
"expression": ["serious", "contemplative", "thoughtful", "stern", "wise", "solemn"]
|
55 |
+
}
|
56 |
+
|
57 |
+
self.religious_cultural = {
|
58 |
+
"jewish": ["jewish", "orthodox", "hasidic", "rabbi", "religious", "traditional", "ceremonial"],
|
59 |
+
"hat_types": ["hat", "cap", "yarmulke", "kippah", "black hat", "traditional hat", "religious headwear"],
|
60 |
+
"clothing": ["suit", "jacket", "formal", "black clothing", "traditional dress", "religious attire"]
|
61 |
+
}
|
62 |
+
|
63 |
+
self.hair_descriptors = {
|
64 |
+
"color": ["gray", "grey", "silver", "white", "black", "brown", "blonde", "salt and pepper"],
|
65 |
+
"texture": ["curly", "wavy", "straight", "thick", "thin", "coarse", "fine"],
|
66 |
+
"style": ["long", "short", "receding", "balding", "full head"]
|
67 |
}
|
68 |
|
69 |
+
self.setting_environments = {
|
70 |
+
"indoor": ["indoor", "inside", "interior", "room", "office", "home", "building"],
|
71 |
+
"formal": ["formal setting", "office", "meeting room", "conference", "official"],
|
72 |
+
"religious": ["synagogue", "temple", "religious", "ceremonial", "sacred"],
|
73 |
+
"studio": ["studio", "backdrop", "professional", "photography studio"],
|
74 |
+
"casual": ["casual", "relaxed", "informal", "comfortable"]
|
75 |
}
|
76 |
|
77 |
+
self.lighting_types = {
|
78 |
+
"natural": ["natural light", "window light", "daylight", "sunlight"],
|
79 |
+
"artificial": ["artificial light", "lamp", "electric", "indoor lighting"],
|
80 |
+
"dramatic": ["dramatic", "contrast", "shadow", "chiaroscuro", "moody"],
|
81 |
+
"soft": ["soft", "gentle", "diffused", "even", "flattering"],
|
82 |
+
"harsh": ["harsh", "direct", "strong", "bright", "intense"]
|
83 |
}
|
84 |
|
85 |
+
self.composition_styles = {
|
86 |
+
"portrait": ["portrait", "headshot", "face", "facial", "close-up", "bust"],
|
87 |
+
"seated": ["sitting", "seated", "chair", "sitting down"],
|
88 |
+
"standing": ["standing", "upright", "vertical"],
|
89 |
+
"three_quarter": ["three quarter", "three-quarter", "angled", "turned"]
|
90 |
}
|
91 |
|
92 |
+
self.quality_adjectives = {
|
93 |
+
"age_based": {
|
94 |
+
"elderly": ["distinguished", "dignified", "venerable", "wise", "weathered", "experienced"],
|
95 |
+
"middle": ["professional", "mature", "confident", "established"],
|
96 |
+
"young": ["youthful", "fresh", "vibrant", "energetic"]
|
97 |
+
},
|
98 |
+
"cultural": ["traditional", "Orthodox", "religious", "ceremonial", "devout"],
|
99 |
+
"general": ["elegant", "refined", "sophisticated", "classic", "timeless"]
|
100 |
}
|
101 |
|
102 |
+
def extract_maximum_info(self, clip_fast, clip_classic, clip_best):
|
103 |
+
"""Combine all three CLIP analyses for maximum information extraction"""
|
104 |
+
|
105 |
+
# Combine all analyses
|
106 |
+
combined_text = f"{clip_fast} {clip_classic} {clip_best}".lower()
|
107 |
+
|
108 |
analysis = {
|
|
|
109 |
"age": None,
|
110 |
+
"age_confidence": 0,
|
111 |
+
"gender": None,
|
112 |
+
"facial_features": [],
|
113 |
+
"hair_description": [],
|
114 |
+
"clothing_items": [],
|
115 |
+
"cultural_religious": [],
|
116 |
"setting": None,
|
117 |
+
"lighting": None,
|
118 |
+
"composition": None,
|
119 |
"mood": None,
|
120 |
+
"technical_suggestions": {}
|
121 |
}
|
122 |
|
123 |
+
# DEEP AGE DETECTION
|
124 |
+
age_scores = {"elderly": 0, "middle": 0, "young": 0}
|
125 |
+
for age_type, keywords in self.age_keywords.items():
|
126 |
+
for keyword in keywords:
|
127 |
+
if keyword in combined_text:
|
128 |
+
age_scores[age_type] += 1
|
129 |
+
|
130 |
+
if max(age_scores.values()) > 0:
|
131 |
+
analysis["age"] = max(age_scores, key=age_scores.get)
|
132 |
+
analysis["age_confidence"] = age_scores[analysis["age"]]
|
133 |
+
|
134 |
+
# GENDER DETECTION
|
135 |
+
if any(word in combined_text for word in ["man", "male", "gentleman", "guy", "he", "his"]):
|
136 |
+
analysis["gender"] = "man"
|
137 |
+
elif any(word in combined_text for word in ["woman", "female", "lady", "she", "her"]):
|
138 |
+
analysis["gender"] = "woman"
|
139 |
+
|
140 |
+
# COMPREHENSIVE FACIAL FEATURES
|
141 |
+
if any(word in combined_text for word in self.facial_features["beard_full"]):
|
142 |
+
if any(word in combined_text for word in self.facial_features["beard_color"]):
|
143 |
+
analysis["facial_features"].append("silver beard")
|
144 |
else:
|
145 |
+
analysis["facial_features"].append("full beard")
|
|
|
146 |
|
147 |
+
if any(word in combined_text for word in self.facial_features["glasses"]):
|
148 |
+
analysis["facial_features"].append("wire-frame glasses")
|
|
|
|
|
|
|
|
|
149 |
|
150 |
+
if any(word in combined_text for word in self.facial_features["wrinkles"]):
|
151 |
+
analysis["facial_features"].append("weathered features")
|
|
|
|
|
|
|
|
|
152 |
|
153 |
+
# HAIR ANALYSIS
|
154 |
+
hair_colors = [color for color in self.hair_descriptors["color"] if color in combined_text]
|
155 |
+
if hair_colors:
|
156 |
+
analysis["hair_description"].extend(hair_colors)
|
157 |
|
158 |
+
# CULTURAL/RELIGIOUS DETECTION
|
159 |
+
if any(word in combined_text for word in self.religious_cultural["jewish"]):
|
160 |
+
analysis["cultural_religious"].append("Orthodox Jewish")
|
161 |
+
|
162 |
+
if any(word in combined_text for word in self.religious_cultural["hat_types"]):
|
163 |
+
analysis["clothing_items"].append("traditional black hat")
|
164 |
+
|
165 |
+
if any(word in combined_text for word in self.religious_cultural["clothing"]):
|
166 |
+
analysis["clothing_items"].append("formal religious attire")
|
167 |
+
|
168 |
+
# ENHANCED SETTING DETECTION
|
169 |
+
setting_scores = {}
|
170 |
+
for setting_type, keywords in self.setting_environments.items():
|
171 |
+
score = sum(1 for keyword in keywords if keyword in combined_text)
|
172 |
+
if score > 0:
|
173 |
+
setting_scores[setting_type] = score
|
174 |
+
|
175 |
+
if setting_scores:
|
176 |
+
analysis["setting"] = max(setting_scores, key=setting_scores.get)
|
177 |
+
|
178 |
+
# LIGHTING ANALYSIS
|
179 |
+
lighting_detected = []
|
180 |
+
for light_type, keywords in self.lighting_types.items():
|
181 |
+
if any(keyword in combined_text for keyword in keywords):
|
182 |
+
lighting_detected.append(light_type)
|
183 |
+
|
184 |
+
if lighting_detected:
|
185 |
+
analysis["lighting"] = lighting_detected[0] # Take first/strongest match
|
186 |
+
|
187 |
+
# COMPOSITION DETECTION
|
188 |
+
for comp_type, keywords in self.composition_styles.items():
|
189 |
+
if any(keyword in combined_text for keyword in keywords):
|
190 |
+
analysis["composition"] = comp_type
|
191 |
+
break
|
192 |
+
|
193 |
+
# TECHNICAL SUGGESTIONS BASED ON ANALYSIS
|
194 |
+
if analysis["composition"] == "portrait":
|
195 |
+
analysis["technical_suggestions"] = {
|
196 |
+
"lens": "85mm lens",
|
197 |
+
"aperture": "f/2.8 aperture",
|
198 |
+
"camera": "Shot on Phase One XF"
|
199 |
+
}
|
200 |
+
elif analysis["composition"] == "seated":
|
201 |
+
analysis["technical_suggestions"] = {
|
202 |
+
"lens": "85mm lens",
|
203 |
+
"aperture": "f/4 aperture",
|
204 |
+
"camera": "Shot on Phase One"
|
205 |
+
}
|
206 |
+
else:
|
207 |
+
analysis["technical_suggestions"] = {
|
208 |
+
"lens": "50mm lens",
|
209 |
+
"aperture": "f/2.8 aperture",
|
210 |
+
"camera": "Shot on Phase One"
|
211 |
+
}
|
212 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
213 |
return analysis
|
214 |
|
215 |
+
def build_maximum_flux_prompt(self, analysis, original_clips):
|
216 |
+
"""Build the most detailed Flux prompt possible"""
|
217 |
components = []
|
218 |
|
219 |
+
# 1. INTELLIGENT ARTICLE SELECTION
|
220 |
+
if analysis["cultural_religious"] and analysis["age"]:
|
221 |
+
# "An elderly Orthodox Jewish man"
|
222 |
+
article = "An" if analysis["age"] == "elderly" else "A"
|
223 |
+
elif analysis["gender"]:
|
224 |
+
article = "A"
|
225 |
else:
|
226 |
article = "A"
|
227 |
components.append(article)
|
228 |
|
229 |
+
# 2. CONTEXT-AWARE ADJECTIVES (max 2-3 per Flux rules)
|
230 |
adjectives = []
|
|
|
|
|
|
|
|
|
|
|
|
|
231 |
|
232 |
+
if analysis["age"] and analysis["age"] in self.quality_adjectives["age_based"]:
|
233 |
+
adjectives.extend(self.quality_adjectives["age_based"][analysis["age"]][:2])
|
234 |
+
|
235 |
+
if analysis["cultural_religious"]:
|
236 |
+
adjectives.extend(self.quality_adjectives["cultural"][:1])
|
237 |
+
|
238 |
+
if not adjectives:
|
239 |
+
adjectives = self.quality_adjectives["general"][:2]
|
240 |
+
|
241 |
+
# Limit to 2-3 adjectives as per Flux rules
|
242 |
components.extend(adjectives[:2])
|
243 |
|
244 |
+
# 3. ENHANCED SUBJECT DESCRIPTION
|
245 |
+
subject_parts = []
|
246 |
+
|
247 |
+
if analysis["cultural_religious"]:
|
248 |
+
subject_parts.extend(analysis["cultural_religious"])
|
249 |
+
|
250 |
+
if analysis["age"] and analysis["age"] != "middle":
|
251 |
+
subject_parts.append(analysis["age"])
|
252 |
+
|
253 |
+
if analysis["gender"]:
|
254 |
+
subject_parts.append(analysis["gender"])
|
255 |
else:
|
256 |
+
subject_parts.append("person")
|
|
|
257 |
|
258 |
+
main_subject = " ".join(subject_parts)
|
259 |
+
components.append(main_subject)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
260 |
|
261 |
+
# 4. DETAILED FACIAL FEATURES
|
262 |
+
if analysis["facial_features"]:
|
263 |
+
feature_desc = "with " + " and ".join(analysis["facial_features"])
|
264 |
+
components.append(feature_desc)
|
265 |
+
|
266 |
+
# 5. CLOTHING AND ACCESSORIES
|
267 |
+
if analysis["clothing_items"]:
|
268 |
+
clothing_desc = "wearing " + " and ".join(analysis["clothing_items"])
|
269 |
+
components.append(clothing_desc)
|
270 |
+
|
271 |
+
# 6. ACTION/POSE (based on composition)
|
272 |
+
action_map = {
|
273 |
+
"seated": "seated in contemplative pose",
|
274 |
+
"standing": "standing with dignified presence",
|
275 |
+
"portrait": "captured in intimate portrait style",
|
276 |
+
"three_quarter": "positioned in three-quarter view"
|
277 |
+
}
|
278 |
|
279 |
+
if analysis["composition"]:
|
280 |
+
action = action_map.get(analysis["composition"], "positioned thoughtfully")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
281 |
else:
|
282 |
+
action = "positioned with natural composure"
|
283 |
components.append(action)
|
284 |
|
285 |
+
# 7. ENHANCED ENVIRONMENTAL CONTEXT
|
286 |
+
setting_descriptions = {
|
287 |
+
"indoor": "in a warmly lit indoor environment",
|
288 |
+
"formal": "in a professional formal setting",
|
289 |
+
"religious": "in a traditional religious space",
|
290 |
+
"studio": "in a controlled studio environment",
|
291 |
+
"casual": "in a comfortable informal setting"
|
292 |
}
|
293 |
|
294 |
if analysis["setting"]:
|
295 |
+
context = setting_descriptions.get(analysis["setting"], "in a thoughtfully composed environment")
|
296 |
else:
|
297 |
+
context = "within a carefully arranged scene"
|
298 |
components.append(context)
|
299 |
|
300 |
+
# 8. SOPHISTICATED LIGHTING DESCRIPTION
|
301 |
+
lighting_descriptions = {
|
302 |
+
"natural": "bathed in gentle natural lighting that enhances facial texture and depth",
|
303 |
+
"dramatic": "illuminated by dramatic lighting that creates compelling shadows and highlights",
|
304 |
+
"soft": "softly lit to emphasize character and warmth",
|
305 |
+
"artificial": "under controlled artificial lighting for optimal detail capture"
|
306 |
+
}
|
307 |
+
|
308 |
+
if analysis["lighting"]:
|
309 |
+
lighting_desc = lighting_descriptions.get(analysis["lighting"], "with professional lighting that emphasizes facial features and texture")
|
310 |
else:
|
311 |
+
lighting_desc = "captured with sophisticated portrait lighting that brings out intricate facial details"
|
|
|
312 |
|
313 |
+
components.append(lighting_desc)
|
314 |
+
|
315 |
+
# 9. TECHNICAL SPECIFICATIONS
|
316 |
+
tech_parts = []
|
317 |
+
if analysis["technical_suggestions"]:
|
318 |
+
tech_parts.append(analysis["technical_suggestions"]["camera"])
|
319 |
+
tech_parts.append(analysis["technical_suggestions"]["lens"])
|
320 |
+
tech_parts.append(analysis["technical_suggestions"]["aperture"])
|
321 |
else:
|
322 |
+
tech_parts = ["Shot on Phase One", "85mm lens", "f/2.8 aperture"]
|
|
|
323 |
|
324 |
+
components.append(", ".join(tech_parts))
|
|
|
325 |
|
326 |
+
# 10. QUALITY MARKER
|
327 |
+
components.append("professional portrait photography")
|
328 |
+
|
329 |
+
# FINAL ASSEMBLY AND OPTIMIZATION
|
330 |
prompt = ", ".join(components)
|
331 |
|
332 |
+
# Clean up the prompt
|
333 |
prompt = re.sub(r'\s+', ' ', prompt) # Remove extra spaces
|
334 |
+
prompt = re.sub(r',\s*,', ',', prompt) # Remove double commas
|
335 |
+
prompt = prompt.replace(" ,", ",") # Fix spacing around commas
|
336 |
+
|
337 |
+
# Ensure proper capitalization
|
338 |
+
prompt = prompt[0].upper() + prompt[1:] if prompt else ""
|
339 |
|
340 |
return prompt
|
341 |
|
342 |
+
def calculate_maximum_score(self, prompt, analysis):
|
343 |
+
"""Calculate intelligence score based on depth of analysis"""
|
344 |
score = 0
|
345 |
+
max_possible = 100
|
346 |
|
347 |
+
# Structure compliance (10 points)
|
348 |
if prompt.startswith(("A", "An")):
|
349 |
score += 10
|
350 |
+
|
351 |
+
# Feature detection depth (20 points)
|
352 |
+
feature_score = len(analysis["facial_features"]) * 5
|
353 |
+
score += min(feature_score, 20)
|
354 |
+
|
355 |
+
# Cultural/contextual awareness (20 points)
|
356 |
+
if analysis["cultural_religious"]:
|
|
|
|
|
|
|
|
|
357 |
score += 15
|
358 |
+
if analysis["age"]:
|
359 |
+
score += 5
|
360 |
+
|
361 |
+
# Technical appropriateness (15 points)
|
362 |
+
if "85mm" in prompt and analysis["composition"] in ["portrait", "seated"]:
|
363 |
score += 15
|
364 |
+
elif "50mm" in prompt:
|
|
|
|
|
365 |
score += 10
|
366 |
+
|
367 |
+
# Lighting sophistication (15 points)
|
368 |
+
if "lighting" in prompt and len(prompt.split("lighting")[1].split(",")[0]) > 10:
|
369 |
+
score += 15
|
370 |
+
|
371 |
+
# Setting context (10 points)
|
372 |
+
if analysis["setting"]:
|
373 |
score += 10
|
374 |
+
|
375 |
+
# Forbidden elements check (10 points)
|
376 |
if not any(forbidden in prompt for forbidden in self.forbidden_elements):
|
377 |
score += 10
|
378 |
+
|
379 |
+
return min(score, max_possible)
|
380 |
|
381 |
+
class MaximumFluxOptimizer:
|
382 |
def __init__(self):
|
383 |
self.interrogator = None
|
384 |
+
self.analyzer = MaximumFluxAnalyzer()
|
385 |
self.usage_count = 0
|
386 |
self.device = DEVICE
|
387 |
self.is_initialized = False
|
|
|
432 |
return image
|
433 |
|
434 |
@spaces.GPU
|
435 |
+
def generate_maximum_prompt(self, image):
|
436 |
try:
|
437 |
if not self.is_initialized:
|
438 |
if not self.initialize_model():
|
|
|
449 |
|
450 |
start_time = datetime.now()
|
451 |
|
452 |
+
# TRIPLE CLIP ANALYSIS FOR MAXIMUM INFORMATION
|
453 |
+
logger.info("Starting MAXIMUM analysis - Triple CLIP interrogation")
|
454 |
+
|
455 |
+
clip_fast = self.interrogator.interrogate_fast(image)
|
456 |
+
clip_classic = self.interrogator.interrogate_classic(image)
|
457 |
+
clip_best = self.interrogator.interrogate(image)
|
458 |
+
|
459 |
+
logger.info(f"CLIP Results:\nFast: {clip_fast}\nClassic: {clip_classic}\nBest: {clip_best}")
|
460 |
|
461 |
+
# MAXIMUM DEPTH ANALYSIS
|
462 |
+
deep_analysis = self.analyzer.extract_maximum_info(clip_fast, clip_classic, clip_best)
|
463 |
|
464 |
+
# BUILD MAXIMUM QUALITY FLUX PROMPT
|
465 |
+
optimized_prompt = self.analyzer.build_maximum_flux_prompt(deep_analysis, [clip_fast, clip_classic, clip_best])
|
466 |
|
467 |
+
# CALCULATE INTELLIGENCE SCORE
|
468 |
+
score = self.analyzer.calculate_maximum_score(optimized_prompt, deep_analysis)
|
469 |
|
470 |
end_time = datetime.now()
|
471 |
duration = (end_time - start_time).total_seconds()
|
|
|
476 |
else:
|
477 |
torch.cuda.empty_cache()
|
478 |
|
479 |
+
# COMPREHENSIVE ANALYSIS REPORT
|
480 |
gpu_status = "⚡ ZeroGPU" if torch.cuda.is_available() else "💻 CPU"
|
481 |
|
482 |
+
# Format detected elements
|
483 |
+
features = ", ".join(deep_analysis["facial_features"]) if deep_analysis["facial_features"] else "None detected"
|
484 |
+
cultural = ", ".join(deep_analysis["cultural_religious"]) if deep_analysis["cultural_religious"] else "None detected"
|
485 |
+
clothing = ", ".join(deep_analysis["clothing_items"]) if deep_analysis["clothing_items"] else "None detected"
|
486 |
|
487 |
+
analysis_info = f"""**MAXIMUM ANALYSIS COMPLETE**
|
488 |
|
489 |
+
**Processing:** {gpu_status} • {duration:.1f}s • Triple CLIP interrogation
|
490 |
**Intelligence Score:** {score}/100
|
491 |
+
**Analysis Confidence:** {deep_analysis.get("age_confidence", 0)} age indicators detected
|
492 |
**Generation:** #{self.usage_count}
|
493 |
|
494 |
+
**DEEP DETECTION RESULTS:**
|
495 |
+
• **Age Category:** {deep_analysis.get("age", "Unspecified").title()}
|
496 |
+
• **Cultural Context:** {cultural}
|
497 |
+
• **Facial Features:** {features}
|
498 |
+
• **Clothing/Accessories:** {clothing}
|
499 |
+
• **Setting:** {deep_analysis.get("setting", "Standard").title()}
|
500 |
+
• **Composition:** {deep_analysis.get("composition", "Standard").title()}
|
501 |
+
• **Lighting:** {deep_analysis.get("lighting", "Standard").title()}
|
502 |
|
503 |
+
**CLIP ANALYSIS SOURCES:**
|
504 |
+
• **Fast:** {clip_fast[:60]}...
|
505 |
+
• **Classic:** {clip_classic[:60]}...
|
506 |
+
• **Best:** {clip_best[:60]}...
|
507 |
+
|
508 |
+
**FLUX OPTIMIZATION:** Applied maximum depth analysis with Pariente AI research rules"""
|
509 |
|
510 |
return optimized_prompt, analysis_info, score
|
511 |
|
512 |
except Exception as e:
|
513 |
+
logger.error(f"Maximum generation error: {e}")
|
514 |
return f"❌ Error: {str(e)}", "Please try with a different image.", 0
|
515 |
|
516 |
+
optimizer = MaximumFluxOptimizer()
|
517 |
|
518 |
+
def process_maximum_analysis(image):
|
519 |
+
"""Maximum analysis wrapper"""
|
520 |
try:
|
521 |
+
prompt, info, score = optimizer.generate_maximum_prompt(image)
|
522 |
+
|
523 |
+
# Enhanced score display
|
524 |
+
if score >= 90:
|
525 |
+
color = "#10b981"
|
526 |
+
grade = "EXCELLENT"
|
527 |
+
elif score >= 80:
|
528 |
+
color = "#22c55e"
|
529 |
+
grade = "VERY GOOD"
|
530 |
+
elif score >= 70:
|
531 |
+
color = "#f59e0b"
|
532 |
+
grade = "GOOD"
|
533 |
+
elif score >= 60:
|
534 |
+
color = "#f97316"
|
535 |
+
grade = "FAIR"
|
536 |
+
else:
|
537 |
+
color = "#ef4444"
|
538 |
+
grade = "NEEDS WORK"
|
539 |
+
|
540 |
score_html = f'''
|
541 |
+
<div style="text-align: center; padding: 1.5rem; background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%); border: 2px solid {color}; border-radius: 12px; margin: 1rem 0; box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);">
|
542 |
+
<div style="font-size: 2.5rem; font-weight: 700; color: {color}; margin: 0;">{score}</div>
|
543 |
+
<div style="font-size: 1rem; color: #15803d; margin: 0; text-transform: uppercase; letter-spacing: 0.05em; font-weight: 600;">{grade}</div>
|
544 |
+
<div style="font-size: 0.875rem; color: #15803d; margin: 0; text-transform: uppercase; letter-spacing: 0.05em;">Maximum Intelligence Score</div>
|
545 |
</div>
|
546 |
'''
|
547 |
|
548 |
return prompt, info, score_html
|
549 |
|
550 |
except Exception as e:
|
551 |
+
logger.error(f"Maximum wrapper error: {e}")
|
552 |
return "❌ Processing failed", f"Error: {str(e)}", '<div style="text-align: center; color: red;">Error</div>'
|
553 |
|
554 |
def clear_outputs():
|
555 |
gc.collect()
|
556 |
if torch.cuda.is_available():
|
557 |
torch.cuda.empty_cache()
|
558 |
+
return "", "", '<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Maximum Intelligence Score</div></div>'
|
559 |
|
560 |
def create_interface():
|
561 |
css = """
|
562 |
+
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&display=swap');
|
563 |
|
564 |
.gradio-container {
|
565 |
+
max-width: 1400px !important;
|
566 |
margin: 0 auto !important;
|
567 |
font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
|
568 |
background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%) !important;
|
|
|
571 |
.main-header {
|
572 |
text-align: center;
|
573 |
padding: 2rem 0 3rem 0;
|
574 |
+
background: linear-gradient(135deg, #0f172a 0%, #1e293b 50%, #334155 100%);
|
575 |
color: white;
|
576 |
margin: -2rem -2rem 2rem -2rem;
|
577 |
border-radius: 0 0 24px 24px;
|
578 |
+
box-shadow: 0 10px 25px -5px rgba(0, 0, 0, 0.1);
|
579 |
}
|
580 |
|
581 |
.main-title {
|
582 |
+
font-size: 3rem !important;
|
583 |
+
font-weight: 800 !important;
|
584 |
margin: 0 0 0.5rem 0 !important;
|
585 |
letter-spacing: -0.025em !important;
|
586 |
+
background: linear-gradient(135deg, #60a5fa 0%, #3b82f6 50%, #2563eb 100%);
|
587 |
-webkit-background-clip: text;
|
588 |
-webkit-text-fill-color: transparent;
|
589 |
background-clip: text;
|
590 |
}
|
591 |
|
592 |
.subtitle {
|
593 |
+
font-size: 1.25rem !important;
|
594 |
font-weight: 400 !important;
|
595 |
+
opacity: 0.9 !important;
|
596 |
margin: 0 !important;
|
597 |
}
|
598 |
|
599 |
.prompt-output {
|
600 |
font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', monospace !important;
|
601 |
font-size: 14px !important;
|
602 |
+
line-height: 1.7 !important;
|
603 |
background: linear-gradient(135deg, #ffffff 0%, #f8fafc 100%) !important;
|
604 |
border: 1px solid #e2e8f0 !important;
|
605 |
+
border-radius: 16px !important;
|
606 |
+
padding: 2rem !important;
|
607 |
+
box-shadow: 0 8px 25px -5px rgba(0, 0, 0, 0.1) !important;
|
608 |
}
|
609 |
"""
|
610 |
|
611 |
with gr.Blocks(
|
612 |
theme=gr.themes.Soft(),
|
613 |
+
title="Maximum Flux Prompt Optimizer",
|
614 |
css=css
|
615 |
) as interface:
|
616 |
|
617 |
gr.HTML("""
|
618 |
<div class="main-header">
|
619 |
+
<div class="main-title">🧠 Maximum Flux Optimizer</div>
|
620 |
+
<div class="subtitle">Triple CLIP Analysis • Maximum Intelligence • Zero Configuration</div>
|
621 |
</div>
|
622 |
""")
|
623 |
|
624 |
with gr.Row():
|
625 |
with gr.Column(scale=1):
|
626 |
+
gr.Markdown("## 🔬 Maximum Analysis")
|
627 |
|
628 |
image_input = gr.Image(
|
629 |
+
label="Upload your image for maximum analysis",
|
630 |
type="pil",
|
631 |
+
height=450
|
632 |
)
|
633 |
|
634 |
+
analyze_btn = gr.Button(
|
635 |
+
"🚀 MAXIMUM ANALYSIS",
|
636 |
variant="primary",
|
637 |
size="lg"
|
638 |
)
|
639 |
|
640 |
gr.Markdown("""
|
641 |
+
### Maximum Intelligence Engine
|
642 |
|
643 |
+
**Triple CLIP Interrogation:**
|
644 |
+
• Fast analysis for broad context
|
645 |
+
• Classic analysis for detailed features
|
646 |
+
• Best analysis for maximum depth
|
647 |
|
648 |
+
**Deep Feature Extraction:**
|
649 |
+
• Age, gender, cultural context
|
650 |
+
• Facial features, expressions, accessories
|
651 |
+
• Clothing, religious/cultural indicators
|
652 |
+
• Environmental setting and lighting
|
653 |
+
• Composition and technical optimization
|
654 |
|
655 |
+
**No configuration needed** - Maximum intelligence applied automatically.
|
656 |
""")
|
657 |
|
658 |
with gr.Column(scale=1):
|
659 |
+
gr.Markdown("## ⚡ Maximum Result")
|
660 |
|
661 |
prompt_output = gr.Textbox(
|
662 |
+
label="Maximum Optimized Flux Prompt",
|
663 |
+
placeholder="Upload an image to see the maximum intelligence analysis...",
|
664 |
+
lines=10,
|
665 |
+
max_lines=15,
|
666 |
elem_classes=["prompt-output"],
|
667 |
show_copy_button=True
|
668 |
)
|
669 |
|
670 |
score_output = gr.HTML(
|
671 |
+
value='<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Maximum Intelligence Score</div></div>'
|
672 |
)
|
673 |
|
674 |
info_output = gr.Markdown(value="")
|
675 |
|
676 |
+
clear_btn = gr.Button("🗑️ Clear Analysis", size="sm")
|
677 |
|
678 |
gr.Markdown("""
|
679 |
---
|
680 |
+
### 🔬 Maximum Research Foundation
|
681 |
|
682 |
+
This system represents the absolute maximum in image analysis and Flux prompt optimization. Using triple CLIP interrogation
|
683 |
+
and deep feature extraction, it identifies every possible detail and applies research-validated Flux rules with maximum intelligence.
|
684 |
|
685 |
+
**Pariente AI Research Laboratory** • Maximum Intelligence • Research-Driven • Zero Compromise
|
686 |
""")
|
687 |
|
688 |
+
# Maximum event handlers
|
689 |
+
analyze_btn.click(
|
690 |
+
fn=process_maximum_analysis,
|
691 |
inputs=[image_input],
|
692 |
outputs=[prompt_output, info_output, score_output]
|
693 |
)
|
|
|
700 |
return interface
|
701 |
|
702 |
if __name__ == "__main__":
|
703 |
+
logger.info("🚀 Starting MAXIMUM Flux Prompt Optimizer")
|
704 |
interface = create_interface()
|
705 |
interface.launch(
|
706 |
server_name="0.0.0.0",
|