Malaji71 commited on
Commit
398d82e
Β·
verified Β·
1 Parent(s): 6a59263

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +530 -403
app.py CHANGED
@@ -10,6 +10,7 @@ import warnings
10
  from datetime import datetime
11
  import gc
12
  import re
 
13
 
14
  warnings.filterwarnings("ignore", category=FutureWarning)
15
  warnings.filterwarnings("ignore", category=UserWarning)
@@ -28,360 +29,482 @@ def get_device():
28
 
29
  DEVICE = get_device()
30
 
31
- class MaximumFluxAnalyzer:
32
  """
33
- Maximum depth analysis engine - extracts EVERYTHING possible from images
34
  """
35
 
36
  def __init__(self):
37
  self.forbidden_elements = ["++", "weights", "white background [en dev]"]
38
 
39
- # EXPANDED VOCABULARIES FOR MAXIMUM DETECTION
40
 
41
- self.age_keywords = {
42
- "elderly": ["old", "elderly", "aged", "senior", "mature", "weathered", "wrinkled", "gray", "grey", "white hair", "silver", "graying", "ancient", "vintage"],
43
- "middle": ["middle-aged", "adult", "grown", "middle", "forties", "fifties"],
44
- "young": ["young", "youth", "teenage", "boy", "girl", "child", "kid", "adolescent"]
 
 
 
 
45
  }
46
 
47
- self.facial_features = {
48
- "beard_full": ["beard", "bearded", "facial hair", "full beard", "thick beard", "heavy beard"],
49
- "beard_color": ["gray beard", "grey beard", "silver beard", "white beard", "salt pepper", "graying beard"],
50
- "mustache": ["mustache", "moustache", "facial hair"],
51
- "glasses": ["glasses", "spectacles", "eyeglasses", "wire-frame", "rimmed glasses", "reading glasses"],
52
- "eyes": ["eyes", "gaze", "stare", "looking", "piercing", "intense", "deep eyes"],
53
- "wrinkles": ["wrinkled", "lines", "aged", "weathered", "creased"],
54
- "expression": ["serious", "contemplative", "thoughtful", "stern", "wise", "solemn"]
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  }
56
 
57
- self.religious_cultural = {
58
- "jewish": ["jewish", "orthodox", "hasidic", "rabbi", "religious", "traditional", "ceremonial"],
59
- "hat_types": ["hat", "cap", "yarmulke", "kippah", "black hat", "traditional hat", "religious headwear"],
60
- "clothing": ["suit", "jacket", "formal", "black clothing", "traditional dress", "religious attire"]
61
  }
62
 
63
- self.hair_descriptors = {
64
- "color": ["gray", "grey", "silver", "white", "black", "brown", "blonde", "salt and pepper"],
65
- "texture": ["curly", "wavy", "straight", "thick", "thin", "coarse", "fine"],
66
- "style": ["long", "short", "receding", "balding", "full head"]
 
 
 
 
 
 
67
  }
68
 
69
- self.setting_environments = {
70
- "indoor": ["indoor", "inside", "interior", "room", "office", "home", "building"],
71
- "formal": ["formal setting", "office", "meeting room", "conference", "official"],
72
- "religious": ["synagogue", "temple", "religious", "ceremonial", "sacred"],
73
- "studio": ["studio", "backdrop", "professional", "photography studio"],
74
- "casual": ["casual", "relaxed", "informal", "comfortable"]
 
75
  }
76
 
77
- self.lighting_types = {
78
- "natural": ["natural light", "window light", "daylight", "sunlight"],
79
- "artificial": ["artificial light", "lamp", "electric", "indoor lighting"],
80
- "dramatic": ["dramatic", "contrast", "shadow", "chiaroscuro", "moody"],
81
- "soft": ["soft", "gentle", "diffused", "even", "flattering"],
82
- "harsh": ["harsh", "direct", "strong", "bright", "intense"]
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  }
84
 
85
- self.composition_styles = {
86
- "portrait": ["portrait", "headshot", "face", "facial", "close-up", "bust"],
87
- "seated": ["sitting", "seated", "chair", "sitting down"],
88
- "standing": ["standing", "upright", "vertical"],
89
- "three_quarter": ["three quarter", "three-quarter", "angled", "turned"]
 
 
90
  }
91
 
92
- self.quality_adjectives = {
93
- "age_based": {
94
- "elderly": ["distinguished", "dignified", "venerable", "wise", "weathered", "experienced"],
95
- "middle": ["professional", "mature", "confident", "established"],
96
- "young": ["youthful", "fresh", "vibrant", "energetic"]
97
- },
98
- "cultural": ["traditional", "Orthodox", "religious", "ceremonial", "devout"],
99
- "general": ["elegant", "refined", "sophisticated", "classic", "timeless"]
100
  }
101
-
102
- def extract_maximum_info(self, clip_fast, clip_classic, clip_best):
103
- """Combine all three CLIP analyses for maximum information extraction"""
104
-
105
- # Combine all analyses
106
- combined_text = f"{clip_fast} {clip_classic} {clip_best}".lower()
107
-
108
- analysis = {
109
- "age": None,
110
- "age_confidence": 0,
111
- "gender": None,
112
- "facial_features": [],
113
- "hair_description": [],
114
- "clothing_items": [],
115
- "cultural_religious": [],
116
- "setting": None,
117
- "lighting": None,
118
- "composition": None,
119
- "mood": None,
120
- "technical_suggestions": {}
121
- }
122
-
123
- # DEEP AGE DETECTION
124
- age_scores = {"elderly": 0, "middle": 0, "young": 0}
125
- for age_type, keywords in self.age_keywords.items():
126
- for keyword in keywords:
127
- if keyword in combined_text:
128
- age_scores[age_type] += 1
129
-
130
- if max(age_scores.values()) > 0:
131
- analysis["age"] = max(age_scores, key=age_scores.get)
132
- analysis["age_confidence"] = age_scores[analysis["age"]]
133
-
134
- # GENDER DETECTION
135
- if any(word in combined_text for word in ["man", "male", "gentleman", "guy", "he", "his"]):
136
- analysis["gender"] = "man"
137
- elif any(word in combined_text for word in ["woman", "female", "lady", "she", "her"]):
138
- analysis["gender"] = "woman"
139
-
140
- # COMPREHENSIVE FACIAL FEATURES
141
- if any(word in combined_text for word in self.facial_features["beard_full"]):
142
- if any(word in combined_text for word in self.facial_features["beard_color"]):
143
- analysis["facial_features"].append("silver beard")
144
- else:
145
- analysis["facial_features"].append("full beard")
146
 
147
- if any(word in combined_text for word in self.facial_features["glasses"]):
148
- analysis["facial_features"].append("wire-frame glasses")
149
-
150
- if any(word in combined_text for word in self.facial_features["wrinkles"]):
151
- analysis["facial_features"].append("weathered features")
152
-
153
- # HAIR ANALYSIS
154
- hair_colors = [color for color in self.hair_descriptors["color"] if color in combined_text]
155
- if hair_colors:
156
- analysis["hair_description"].extend(hair_colors)
157
-
158
- # CULTURAL/RELIGIOUS DETECTION
159
- if any(word in combined_text for word in self.religious_cultural["jewish"]):
160
- analysis["cultural_religious"].append("Orthodox Jewish")
161
 
162
- if any(word in combined_text for word in self.religious_cultural["hat_types"]):
163
- analysis["clothing_items"].append("traditional black hat")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
 
165
- if any(word in combined_text for word in self.religious_cultural["clothing"]):
166
- analysis["clothing_items"].append("formal religious attire")
 
 
 
 
 
 
 
 
167
 
168
- # ENHANCED SETTING DETECTION
169
- setting_scores = {}
170
- for setting_type, keywords in self.setting_environments.items():
171
- score = sum(1 for keyword in keywords if keyword in combined_text)
172
  if score > 0:
173
- setting_scores[setting_type] = score
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
 
175
  if setting_scores:
176
- analysis["setting"] = max(setting_scores, key=setting_scores.get)
177
 
178
  # LIGHTING ANALYSIS
179
- lighting_detected = []
180
- for light_type, keywords in self.lighting_types.items():
181
- if any(keyword in combined_text for keyword in keywords):
182
- lighting_detected.append(light_type)
183
-
184
- if lighting_detected:
185
- analysis["lighting"] = lighting_detected[0] # Take first/strongest match
186
-
187
- # COMPOSITION DETECTION
188
- for comp_type, keywords in self.composition_styles.items():
189
- if any(keyword in combined_text for keyword in keywords):
190
- analysis["composition"] = comp_type
 
 
 
191
  break
192
 
193
- # TECHNICAL SUGGESTIONS BASED ON ANALYSIS
194
- if analysis["composition"] == "portrait":
195
- analysis["technical_suggestions"] = {
196
- "lens": "85mm lens",
197
- "aperture": "f/2.8 aperture",
198
- "camera": "Shot on Phase One XF"
199
- }
200
- elif analysis["composition"] == "seated":
201
- analysis["technical_suggestions"] = {
202
- "lens": "85mm lens",
203
- "aperture": "f/4 aperture",
204
- "camera": "Shot on Phase One"
205
- }
206
- else:
207
- analysis["technical_suggestions"] = {
208
- "lens": "50mm lens",
209
- "aperture": "f/2.8 aperture",
210
- "camera": "Shot on Phase One"
211
- }
212
 
213
- return analysis
214
 
215
- def build_maximum_flux_prompt(self, analysis, original_clips):
216
- """Build the most detailed Flux prompt possible"""
 
217
  components = []
218
 
219
- # 1. INTELLIGENT ARTICLE SELECTION
220
- if analysis["cultural_religious"] and analysis["age"]:
221
- # "An elderly Orthodox Jewish man"
222
- article = "An" if analysis["age"] == "elderly" else "A"
223
- elif analysis["gender"]:
224
- article = "A"
 
 
 
 
 
 
225
  else:
226
  article = "A"
227
  components.append(article)
228
 
229
- # 2. CONTEXT-AWARE ADJECTIVES (max 2-3 per Flux rules)
230
  adjectives = []
231
 
232
- if analysis["age"] and analysis["age"] in self.quality_adjectives["age_based"]:
233
- adjectives.extend(self.quality_adjectives["age_based"][analysis["age"]][:2])
 
 
234
 
235
- if analysis["cultural_religious"]:
236
- adjectives.extend(self.quality_adjectives["cultural"][:1])
 
 
237
 
 
238
  if not adjectives:
239
- adjectives = self.quality_adjectives["general"][:2]
240
 
241
- # Limit to 2-3 adjectives as per Flux rules
242
- components.extend(adjectives[:2])
243
 
244
- # 3. ENHANCED SUBJECT DESCRIPTION
245
- subject_parts = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
 
247
- if analysis["cultural_religious"]:
248
- subject_parts.extend(analysis["cultural_religious"])
249
 
250
- if analysis["age"] and analysis["age"] != "middle":
251
- subject_parts.append(analysis["age"])
252
 
253
- if analysis["gender"]:
254
- subject_parts.append(analysis["gender"])
255
- else:
256
- subject_parts.append("person")
257
-
258
- main_subject = " ".join(subject_parts)
259
- components.append(main_subject)
260
-
261
- # 4. DETAILED FACIAL FEATURES
262
- if analysis["facial_features"]:
263
- feature_desc = "with " + " and ".join(analysis["facial_features"])
264
- components.append(feature_desc)
265
-
266
- # 5. CLOTHING AND ACCESSORIES
267
- if analysis["clothing_items"]:
268
- clothing_desc = "wearing " + " and ".join(analysis["clothing_items"])
269
- components.append(clothing_desc)
270
-
271
- # 6. ACTION/POSE (based on composition)
272
- action_map = {
273
- "seated": "seated in contemplative pose",
274
- "standing": "standing with dignified presence",
275
- "portrait": "captured in intimate portrait style",
276
- "three_quarter": "positioned in three-quarter view"
277
- }
278
 
279
- if analysis["composition"]:
280
- action = action_map.get(analysis["composition"], "positioned thoughtfully")
281
- else:
282
- action = "positioned with natural composure"
283
- components.append(action)
284
-
285
- # 7. ENHANCED ENVIRONMENTAL CONTEXT
286
- setting_descriptions = {
287
- "indoor": "in a warmly lit indoor environment",
288
- "formal": "in a professional formal setting",
289
- "religious": "in a traditional religious space",
290
- "studio": "in a controlled studio environment",
291
- "casual": "in a comfortable informal setting"
292
- }
293
 
294
- if analysis["setting"]:
295
- context = setting_descriptions.get(analysis["setting"], "in a thoughtfully composed environment")
296
- else:
297
- context = "within a carefully arranged scene"
298
- components.append(context)
299
-
300
- # 8. SOPHISTICATED LIGHTING DESCRIPTION
301
- lighting_descriptions = {
302
- "natural": "bathed in gentle natural lighting that enhances facial texture and depth",
303
- "dramatic": "illuminated by dramatic lighting that creates compelling shadows and highlights",
304
- "soft": "softly lit to emphasize character and warmth",
305
- "artificial": "under controlled artificial lighting for optimal detail capture"
306
- }
307
 
308
- if analysis["lighting"]:
309
- lighting_desc = lighting_descriptions.get(analysis["lighting"], "with professional lighting that emphasizes facial features and texture")
310
- else:
311
- lighting_desc = "captured with sophisticated portrait lighting that brings out intricate facial details"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
312
 
313
  components.append(lighting_desc)
314
 
315
- # 9. TECHNICAL SPECIFICATIONS
316
- tech_parts = []
317
- if analysis["technical_suggestions"]:
318
- tech_parts.append(analysis["technical_suggestions"]["camera"])
319
- tech_parts.append(analysis["technical_suggestions"]["lens"])
320
- tech_parts.append(analysis["technical_suggestions"]["aperture"])
321
  else:
322
- tech_parts = ["Shot on Phase One", "85mm lens", "f/2.8 aperture"]
 
 
323
 
324
- components.append(", ".join(tech_parts))
 
325
 
326
- # 10. QUALITY MARKER
327
- components.append("professional portrait photography")
 
 
328
 
329
- # FINAL ASSEMBLY AND OPTIMIZATION
 
 
330
  prompt = ", ".join(components)
331
 
332
- # Clean up the prompt
333
- prompt = re.sub(r'\s+', ' ', prompt) # Remove extra spaces
334
- prompt = re.sub(r',\s*,', ',', prompt) # Remove double commas
335
- prompt = prompt.replace(" ,", ",") # Fix spacing around commas
 
336
 
337
- # Ensure proper capitalization
338
- prompt = prompt[0].upper() + prompt[1:] if prompt else ""
339
 
340
  return prompt
341
 
342
- def calculate_maximum_score(self, prompt, analysis):
343
- """Calculate intelligence score based on depth of analysis"""
 
344
  score = 0
345
- max_possible = 100
346
 
347
- # Structure compliance (10 points)
 
348
  if prompt.startswith(("A", "An")):
349
- score += 10
350
-
351
- # Feature detection depth (20 points)
352
- feature_score = len(analysis["facial_features"]) * 5
353
- score += min(feature_score, 20)
354
-
355
- # Cultural/contextual awareness (20 points)
356
- if analysis["cultural_religious"]:
357
- score += 15
358
- if analysis["age"]:
359
- score += 5
360
-
361
- # Technical appropriateness (15 points)
362
- if "85mm" in prompt and analysis["composition"] in ["portrait", "seated"]:
363
- score += 15
364
- elif "50mm" in prompt:
365
- score += 10
366
-
367
- # Lighting sophistication (15 points)
368
- if "lighting" in prompt and len(prompt.split("lighting")[1].split(",")[0]) > 10:
369
- score += 15
370
-
371
- # Setting context (10 points)
372
- if analysis["setting"]:
373
- score += 10
374
-
375
- # Forbidden elements check (10 points)
376
- if not any(forbidden in prompt for forbidden in self.forbidden_elements):
377
- score += 10
378
-
379
- return min(score, max_possible)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
380
 
381
- class MaximumFluxOptimizer:
382
  def __init__(self):
383
  self.interrogator = None
384
- self.analyzer = MaximumFluxAnalyzer()
385
  self.usage_count = 0
386
  self.device = DEVICE
387
  self.is_initialized = False
@@ -432,40 +555,40 @@ class MaximumFluxOptimizer:
432
  return image
433
 
434
  @spaces.GPU
435
- def generate_maximum_prompt(self, image):
436
  try:
437
  if not self.is_initialized:
438
  if not self.initialize_model():
439
- return "❌ Model initialization failed.", "Please refresh and try again.", 0
440
 
441
  if image is None:
442
- return "❌ Please upload an image.", "No image provided.", 0
443
 
444
  self.usage_count += 1
445
 
446
  image = self.optimize_image(image)
447
  if image is None:
448
- return "❌ Image processing failed.", "Invalid image format.", 0
449
 
450
  start_time = datetime.now()
451
 
452
- # TRIPLE CLIP ANALYSIS FOR MAXIMUM INFORMATION
453
- logger.info("Starting MAXIMUM analysis - Triple CLIP interrogation")
454
 
455
  clip_fast = self.interrogator.interrogate_fast(image)
456
  clip_classic = self.interrogator.interrogate_classic(image)
457
  clip_best = self.interrogator.interrogate(image)
458
 
459
- logger.info(f"CLIP Results:\nFast: {clip_fast}\nClassic: {clip_classic}\nBest: {clip_best}")
460
 
461
- # MAXIMUM DEPTH ANALYSIS
462
- deep_analysis = self.analyzer.extract_maximum_info(clip_fast, clip_classic, clip_best)
463
 
464
- # BUILD MAXIMUM QUALITY FLUX PROMPT
465
- optimized_prompt = self.analyzer.build_maximum_flux_prompt(deep_analysis, [clip_fast, clip_classic, clip_best])
466
 
467
- # CALCULATE INTELLIGENCE SCORE
468
- score = self.analyzer.calculate_maximum_score(optimized_prompt, deep_analysis)
469
 
470
  end_time = datetime.now()
471
  duration = (end_time - start_time).total_seconds()
@@ -476,52 +599,54 @@ class MaximumFluxOptimizer:
476
  else:
477
  torch.cuda.empty_cache()
478
 
479
- # COMPREHENSIVE ANALYSIS REPORT
480
  gpu_status = "⚑ ZeroGPU" if torch.cuda.is_available() else "πŸ’» CPU"
481
 
482
  # Format detected elements
483
- features = ", ".join(deep_analysis["facial_features"]) if deep_analysis["facial_features"] else "None detected"
484
- cultural = ", ".join(deep_analysis["cultural_religious"]) if deep_analysis["cultural_religious"] else "None detected"
485
- clothing = ", ".join(deep_analysis["clothing_items"]) if deep_analysis["clothing_items"] else "None detected"
486
 
487
- analysis_info = f"""**MAXIMUM ANALYSIS COMPLETE**
488
 
489
- **Processing:** {gpu_status} β€’ {duration:.1f}s β€’ Triple CLIP interrogation
490
- **Intelligence Score:** {score}/100
491
- **Analysis Confidence:** {deep_analysis.get("age_confidence", 0)} age indicators detected
492
  **Generation:** #{self.usage_count}
493
 
494
- **DEEP DETECTION RESULTS:**
495
- β€’ **Age Category:** {deep_analysis.get("age", "Unspecified").title()}
496
  β€’ **Cultural Context:** {cultural}
497
  β€’ **Facial Features:** {features}
498
- β€’ **Clothing/Accessories:** {clothing}
499
- β€’ **Setting:** {deep_analysis.get("setting", "Standard").title()}
500
- β€’ **Composition:** {deep_analysis.get("composition", "Standard").title()}
501
- β€’ **Lighting:** {deep_analysis.get("lighting", "Standard").title()}
502
 
503
- **CLIP ANALYSIS SOURCES:**
504
- β€’ **Fast:** {clip_fast[:60]}...
505
- β€’ **Classic:** {clip_classic[:60]}...
506
- β€’ **Best:** {clip_best[:60]}...
507
 
508
- **FLUX OPTIMIZATION:** Applied maximum depth analysis with Pariente AI research rules"""
509
 
510
- return optimized_prompt, analysis_info, score
511
 
512
  except Exception as e:
513
- logger.error(f"Maximum generation error: {e}")
514
- return f"❌ Error: {str(e)}", "Please try with a different image.", 0
515
 
516
- optimizer = MaximumFluxOptimizer()
517
 
518
- def process_maximum_analysis(image):
519
- """Maximum analysis wrapper"""
520
  try:
521
- prompt, info, score = optimizer.generate_maximum_prompt(image)
522
 
523
- # Enhanced score display
524
- if score >= 90:
 
 
 
525
  color = "#10b981"
526
  grade = "EXCELLENT"
527
  elif score >= 80:
@@ -538,31 +663,31 @@ def process_maximum_analysis(image):
538
  grade = "NEEDS WORK"
539
 
540
  score_html = f'''
541
- <div style="text-align: center; padding: 1.5rem; background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%); border: 2px solid {color}; border-radius: 12px; margin: 1rem 0; box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);">
542
- <div style="font-size: 2.5rem; font-weight: 700; color: {color}; margin: 0;">{score}</div>
543
- <div style="font-size: 1rem; color: #15803d; margin: 0; text-transform: uppercase; letter-spacing: 0.05em; font-weight: 600;">{grade}</div>
544
- <div style="font-size: 0.875rem; color: #15803d; margin: 0; text-transform: uppercase; letter-spacing: 0.05em;">Maximum Intelligence Score</div>
545
  </div>
546
  '''
547
 
548
  return prompt, info, score_html
549
 
550
  except Exception as e:
551
- logger.error(f"Maximum wrapper error: {e}")
552
  return "❌ Processing failed", f"Error: {str(e)}", '<div style="text-align: center; color: red;">Error</div>'
553
 
554
  def clear_outputs():
555
  gc.collect()
556
  if torch.cuda.is_available():
557
  torch.cuda.empty_cache()
558
- return "", "", '<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Maximum Intelligence Score</div></div>'
559
 
560
  def create_interface():
561
  css = """
562
- @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&display=swap');
563
 
564
  .gradio-container {
565
- max-width: 1400px !important;
566
  margin: 0 auto !important;
567
  font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
568
  background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%) !important;
@@ -570,140 +695,142 @@ def create_interface():
570
 
571
  .main-header {
572
  text-align: center;
573
- padding: 2rem 0 3rem 0;
574
- background: linear-gradient(135deg, #0f172a 0%, #1e293b 50%, #334155 100%);
575
  color: white;
576
- margin: -2rem -2rem 2rem -2rem;
577
- border-radius: 0 0 24px 24px;
578
- box-shadow: 0 10px 25px -5px rgba(0, 0, 0, 0.1);
 
 
 
 
 
 
 
 
 
 
 
 
 
579
  }
580
 
581
  .main-title {
582
- font-size: 3rem !important;
583
- font-weight: 800 !important;
584
- margin: 0 0 0.5rem 0 !important;
585
- letter-spacing: -0.025em !important;
586
- background: linear-gradient(135deg, #60a5fa 0%, #3b82f6 50%, #2563eb 100%);
587
  -webkit-background-clip: text;
588
  -webkit-text-fill-color: transparent;
589
  background-clip: text;
 
 
590
  }
591
 
592
  .subtitle {
593
- font-size: 1.25rem !important;
594
- font-weight: 400 !important;
595
- opacity: 0.9 !important;
596
  margin: 0 !important;
 
 
597
  }
598
 
599
  .prompt-output {
600
  font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', monospace !important;
601
- font-size: 14px !important;
602
- line-height: 1.7 !important;
603
  background: linear-gradient(135deg, #ffffff 0%, #f8fafc 100%) !important;
604
- border: 1px solid #e2e8f0 !important;
605
- border-radius: 16px !important;
606
- padding: 2rem !important;
607
- box-shadow: 0 8px 25px -5px rgba(0, 0, 0, 0.1) !important;
 
 
 
 
 
 
608
  }
609
  """
610
 
611
  with gr.Blocks(
612
  theme=gr.themes.Soft(),
613
- title="Maximum Flux Prompt Optimizer",
614
  css=css
615
  ) as interface:
616
 
617
  gr.HTML("""
618
  <div class="main-header">
619
- <div class="main-title">🧠 Maximum Flux Optimizer</div>
620
- <div class="subtitle">Triple CLIP Analysis β€’ Maximum Intelligence β€’ Zero Configuration</div>
621
  </div>
622
  """)
623
 
624
  with gr.Row():
625
  with gr.Column(scale=1):
626
- gr.Markdown("## πŸ”¬ Maximum Analysis")
627
 
628
  image_input = gr.Image(
629
- label="Upload your image for maximum analysis",
630
  type="pil",
631
- height=450
632
  )
633
 
634
  analyze_btn = gr.Button(
635
- "πŸš€ MAXIMUM ANALYSIS",
636
  variant="primary",
637
  size="lg"
638
  )
639
 
640
  gr.Markdown("""
641
- ### Maximum Intelligence Engine
642
 
643
- **Triple CLIP Interrogation:**
644
- β€’ Fast analysis for broad context
645
- β€’ Classic analysis for detailed features
646
- β€’ Best analysis for maximum depth
647
 
648
- **Deep Feature Extraction:**
649
- β€’ Age, gender, cultural context
650
- β€’ Facial features, expressions, accessories
651
- β€’ Clothing, religious/cultural indicators
652
- β€’ Environmental setting and lighting
653
- β€’ Composition and technical optimization
 
 
654
 
655
- **No configuration needed** - Maximum intelligence applied automatically.
656
  """)
657
 
658
  with gr.Column(scale=1):
659
- gr.Markdown("## ⚑ Maximum Result")
660
 
661
  prompt_output = gr.Textbox(
662
- label="Maximum Optimized Flux Prompt",
663
- placeholder="Upload an image to see the maximum intelligence analysis...",
664
- lines=10,
665
- max_lines=15,
666
  elem_classes=["prompt-output"],
667
  show_copy_button=True
668
  )
669
 
670
  score_output = gr.HTML(
671
- value='<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Maximum Intelligence Score</div></div>'
672
  )
673
 
674
  info_output = gr.Markdown(value="")
675
 
676
- clear_btn = gr.Button("πŸ—‘οΈ Clear Analysis", size="sm")
677
 
678
  gr.Markdown("""
679
  ---
680
- ### πŸ”¬ Maximum Research Foundation
681
 
682
- This system represents the absolute maximum in image analysis and Flux prompt optimization. Using triple CLIP interrogation
683
- and deep feature extraction, it identifies every possible detail and applies research-validated Flux rules with maximum intelligence.
 
684
 
685
- **Pariente AI Research Laboratory** β€’ Maximum Intelligence β€’ Research-Driven β€’ Zero Compromise
686
- """)
687
-
688
- # Maximum event handlers
689
- analyze_btn.click(
690
- fn=process_maximum_analysis,
691
- inputs=[image_input],
692
- outputs=[prompt_output, info_output, score_output]
693
- )
694
-
695
- clear_btn.click(
696
- fn=clear_outputs,
697
- outputs=[prompt_output, info_output, score_output]
698
- )
699
-
700
- return interface
701
-
702
- if __name__ == "__main__":
703
- logger.info("πŸš€ Starting MAXIMUM Flux Prompt Optimizer")
704
- interface = create_interface()
705
- interface.launch(
706
- server_name="0.0.0.0",
707
- server_port=7860,
708
- show_error=True
709
- )
 
10
  from datetime import datetime
11
  import gc
12
  import re
13
+ import math
14
 
15
  warnings.filterwarnings("ignore", category=FutureWarning)
16
  warnings.filterwarnings("ignore", category=UserWarning)
 
29
 
30
  DEVICE = get_device()
31
 
32
+ class UltraSupremeAnalyzer:
33
  """
34
+ ULTRA SUPREME ANALYSIS ENGINE - ABSOLUTE MAXIMUM INTELLIGENCE
35
  """
36
 
37
  def __init__(self):
38
  self.forbidden_elements = ["++", "weights", "white background [en dev]"]
39
 
40
+ # ULTRA COMPREHENSIVE VOCABULARIES - MAXIMUM DEPTH
41
 
42
+ self.micro_age_indicators = {
43
+ "infant": ["baby", "infant", "newborn", "toddler"],
44
+ "child": ["child", "kid", "young", "little", "small", "youth"],
45
+ "teen": ["teenager", "teen", "adolescent", "young adult", "student"],
46
+ "young_adult": ["young adult", "twenties", "thirty", "youthful", "fresh"],
47
+ "middle_aged": ["middle-aged", "forties", "fifties", "mature", "experienced"],
48
+ "senior": ["senior", "older", "elderly", "aged", "vintage", "seasoned"],
49
+ "elderly": ["elderly", "old", "ancient", "weathered", "aged", "gray", "grey", "white hair", "silver", "wrinkled", "lined", "creased", "time-worn", "distinguished by age"]
50
  }
51
 
52
+ self.ultra_facial_analysis = {
53
+ "eye_features": {
54
+ "shape": ["round eyes", "almond eyes", "narrow eyes", "wide eyes", "deep-set eyes", "prominent eyes"],
55
+ "expression": ["intense gaze", "piercing stare", "gentle eyes", "wise eyes", "tired eyes", "alert eyes", "contemplative stare", "focused gaze", "distant look"],
56
+ "color": ["brown eyes", "blue eyes", "green eyes", "hazel eyes", "dark eyes", "light eyes"],
57
+ "condition": ["clear eyes", "bloodshot", "bright eyes", "dull eyes", "sparkling eyes"]
58
+ },
59
+ "eyebrow_analysis": ["thick eyebrows", "thin eyebrows", "bushy eyebrows", "arched eyebrows", "straight eyebrows", "gray eyebrows"],
60
+ "nose_features": ["prominent nose", "straight nose", "aquiline nose", "small nose", "wide nose", "narrow nose"],
61
+ "mouth_expression": {
62
+ "shape": ["thin lips", "full lips", "small mouth", "wide mouth"],
63
+ "expression": ["slight smile", "serious expression", "frown", "neutral expression", "contemplative look", "stern look", "gentle expression"]
64
+ },
65
+ "facial_hair_ultra": {
66
+ "beard_types": ["full beard", "goatee", "mustache", "stubble", "clean-shaven", "five o'clock shadow"],
67
+ "beard_texture": ["thick beard", "thin beard", "coarse beard", "fine beard", "well-groomed beard", "unkempt beard"],
68
+ "beard_color": ["black beard", "brown beard", "gray beard", "grey beard", "silver beard", "white beard", "salt-and-pepper beard", "graying beard"],
69
+ "beard_length": ["long beard", "short beard", "trimmed beard", "full-length beard"]
70
+ },
71
+ "skin_analysis": ["smooth skin", "weathered skin", "wrinkled skin", "clear skin", "rough skin", "aged skin", "youthful skin", "tanned skin", "pale skin", "olive skin"],
72
+ "facial_structure": ["angular face", "round face", "oval face", "square jaw", "defined cheekbones", "high cheekbones", "strong jawline", "soft features", "sharp features"]
73
  }
74
 
75
+ self.emotion_micro_expressions = {
76
+ "primary_emotions": ["happy", "sad", "angry", "fearful", "surprised", "disgusted", "contemptuous"],
77
+ "complex_emotions": ["contemplative", "melancholic", "serene", "intense", "peaceful", "troubled", "confident", "uncertain", "wise", "stern", "gentle", "authoritative"],
78
+ "emotional_indicators": ["furrowed brow", "raised eyebrows", "squinted eyes", "pursed lips", "relaxed expression", "tense jaw", "soft eyes", "hard stare"]
79
  }
80
 
81
+ self.cultural_religious_ultra = {
82
+ "jewish_orthodox": ["Orthodox Jewish", "Hasidic", "Ultra-Orthodox", "religious Jewish", "traditional Jewish", "devout Jewish"],
83
+ "christian": ["Christian", "Catholic", "Protestant", "Orthodox Christian", "religious Christian"],
84
+ "muslim": ["Muslim", "Islamic", "religious Muslim", "devout Muslim"],
85
+ "buddhist": ["Buddhist", "monk", "religious Buddhist"],
86
+ "general_religious": ["religious", "devout", "pious", "spiritual", "faithful", "observant"],
87
+ "traditional_clothing": {
88
+ "jewish": ["yarmulke", "kippah", "tallit", "tzitzit", "black hat", "Orthodox hat", "religious hat", "traditional Jewish hat"],
89
+ "general": ["religious garment", "traditional clothing", "ceremonial dress", "formal religious attire"]
90
+ }
91
  }
92
 
93
+ self.clothing_accessories_ultra = {
94
+ "headwear": ["hat", "cap", "beret", "headband", "turban", "hood", "helmet", "crown", "headpiece"],
95
+ "eyewear": ["glasses", "spectacles", "sunglasses", "reading glasses", "wire-frame glasses", "thick-rimmed glasses", "designer glasses", "vintage glasses"],
96
+ "clothing_types": ["suit", "jacket", "shirt", "dress", "robe", "uniform", "casual wear", "formal wear", "business attire"],
97
+ "clothing_colors": ["black", "white", "gray", "blue", "red", "green", "brown", "navy", "dark", "light"],
98
+ "clothing_styles": ["formal", "casual", "business", "traditional", "modern", "vintage", "classic", "contemporary"],
99
+ "accessories": ["jewelry", "watch", "necklace", "ring", "bracelet", "earrings", "pin", "brooch"]
100
  }
101
 
102
+ self.environmental_ultra_analysis = {
103
+ "indoor_settings": {
104
+ "residential": ["home", "house", "apartment", "living room", "bedroom", "kitchen", "dining room"],
105
+ "office": ["office", "workplace", "conference room", "meeting room", "boardroom", "desk"],
106
+ "institutional": ["school", "hospital", "government building", "court", "library"],
107
+ "religious": ["church", "synagogue", "mosque", "temple", "chapel", "sanctuary"],
108
+ "commercial": ["store", "restaurant", "hotel", "mall", "shop"]
109
+ },
110
+ "outdoor_settings": {
111
+ "natural": ["park", "garden", "forest", "beach", "mountain", "countryside", "field"],
112
+ "urban": ["street", "city", "downtown", "plaza", "square", "avenue"],
113
+ "architectural": ["building", "monument", "bridge", "structure"]
114
+ },
115
+ "lighting_ultra": {
116
+ "natural_light": ["sunlight", "daylight", "morning light", "afternoon light", "evening light", "golden hour", "blue hour", "overcast light", "window light"],
117
+ "artificial_light": ["indoor lighting", "electric light", "lamp light", "overhead lighting", "side lighting", "fluorescent", "LED lighting"],
118
+ "dramatic_lighting": ["high contrast", "low key", "high key", "chiaroscuro", "dramatic shadows", "rim lighting", "backlighting", "spotlight"],
119
+ "quality": ["soft lighting", "hard lighting", "diffused light", "direct light", "ambient light", "mood lighting"]
120
+ }
121
  }
122
 
123
+ self.pose_body_language_ultra = {
124
+ "head_position": ["head up", "head down", "head tilted", "head straight", "head turned", "profile view", "three-quarter view"],
125
+ "posture": ["upright posture", "slouched", "relaxed posture", "formal posture", "casual stance", "dignified bearing"],
126
+ "hand_positions": ["hands clasped", "hands folded", "hands visible", "hands hidden", "gesturing", "pointing"],
127
+ "sitting_positions": ["sitting upright", "leaning forward", "leaning back", "sitting casually", "formal sitting"],
128
+ "eye_contact": ["looking at camera", "looking away", "direct gaze", "averted gaze", "looking down", "looking up"],
129
+ "overall_demeanor": ["confident", "reserved", "approachable", "authoritative", "gentle", "stern", "relaxed", "tense"]
130
  }
131
 
132
+ self.composition_photography_ultra = {
133
+ "shot_types": ["close-up", "medium shot", "wide shot", "extreme close-up", "portrait shot", "headshot", "bust shot", "full body"],
134
+ "angles": ["eye level", "high angle", "low angle", "bird's eye", "worm's eye", "Dutch angle"],
135
+ "framing": ["centered", "off-center", "rule of thirds", "tight framing", "loose framing"],
136
+ "depth_of_field": ["shallow depth", "deep focus", "bokeh", "sharp focus", "soft focus"],
137
+ "camera_movement": ["static", "handheld", "stabilized", "smooth"]
 
 
138
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
139
 
140
+ self.technical_photography_ultra = {
141
+ "camera_systems": {
142
+ "professional": ["Phase One XF", "Phase One XT", "Hasselblad X2D", "Fujifilm GFX", "Canon EOS R5", "Nikon Z9"],
143
+ "medium_format": ["Phase One", "Hasselblad", "Fujifilm GFX", "Pentax 645"],
144
+ "full_frame": ["Canon EOS R", "Nikon Z", "Sony A7", "Leica SL"]
145
+ },
146
+ "lenses_ultra": {
147
+ "portrait": ["85mm f/1.4", "135mm f/2", "105mm f/1.4", "200mm f/2.8"],
148
+ "standard": ["50mm f/1.4", "35mm f/1.4", "24-70mm f/2.8"],
149
+ "wide": ["24mm f/1.4", "16-35mm f/2.8", "14mm f/2.8"]
150
+ },
151
+ "aperture_settings": ["f/1.4", "f/2", "f/2.8", "f/4", "f/5.6", "f/8"],
152
+ "photography_styles": ["portrait photography", "documentary photography", "fine art photography", "commercial photography", "editorial photography"]
153
+ }
154
 
155
+ self.quality_descriptors_ultra = {
156
+ "based_on_age": {
157
+ "elderly": ["distinguished", "venerable", "dignified", "wise", "experienced", "seasoned", "time-honored", "revered", "weathered", "sage-like"],
158
+ "middle_aged": ["professional", "accomplished", "established", "confident", "mature", "refined", "sophisticated"],
159
+ "young_adult": ["vibrant", "energetic", "fresh", "youthful", "dynamic", "spirited", "lively"]
160
+ },
161
+ "based_on_emotion": {
162
+ "contemplative": ["thoughtful", "reflective", "meditative", "introspective"],
163
+ "confident": ["assured", "self-possessed", "commanding", "authoritative"],
164
+ "gentle": ["kind", "warm", "compassionate", "tender"],
165
+ "stern": ["serious", "grave", "solemn", "austere"]
166
+ },
167
+ "based_on_setting": {
168
+ "formal": ["professional", "official", "ceremonial", "dignified"],
169
+ "casual": ["relaxed", "informal", "comfortable", "natural"],
170
+ "artistic": ["creative", "expressive", "aesthetic", "artistic"]
171
+ }
172
+ }
173
+ def ultra_supreme_analysis(self, clip_fast, clip_classic, clip_best):
174
+ """ULTRA SUPREME ANALYSIS - MAXIMUM POSSIBLE INTELLIGENCE"""
175
+
176
+ combined_analysis = {
177
+ "fast": clip_fast.lower(),
178
+ "classic": clip_classic.lower(),
179
+ "best": clip_best.lower(),
180
+ "combined": f"{clip_fast} {clip_classic} {clip_best}".lower()
181
+ }
182
 
183
+ ultra_result = {
184
+ "demographic": {"age_category": None, "age_confidence": 0, "gender": None, "cultural_religious": []},
185
+ "facial_ultra": {"eyes": [], "eyebrows": [], "nose": [], "mouth": [], "facial_hair": [], "skin": [], "structure": []},
186
+ "emotional_state": {"primary_emotion": None, "emotion_confidence": 0, "micro_expressions": [], "overall_demeanor": []},
187
+ "clothing_accessories": {"headwear": [], "eyewear": [], "clothing": [], "accessories": []},
188
+ "environmental": {"setting_type": None, "specific_location": None, "lighting_analysis": [], "atmosphere": []},
189
+ "pose_composition": {"body_language": [], "head_position": [], "eye_contact": [], "posture": []},
190
+ "technical_analysis": {"shot_type": None, "angle": None, "lighting_setup": None, "suggested_equipment": {}},
191
+ "intelligence_metrics": {"total_features_detected": 0, "analysis_depth_score": 0, "cultural_awareness_score": 0, "technical_optimization_score": 0}
192
+ }
193
 
194
+ # ULTRA DEEP AGE ANALYSIS
195
+ age_scores = {}
196
+ for age_category, indicators in self.micro_age_indicators.items():
197
+ score = sum(1 for indicator in indicators if indicator in combined_analysis["combined"])
198
  if score > 0:
199
+ age_scores[age_category] = score
200
+
201
+ if age_scores:
202
+ ultra_result["demographic"]["age_category"] = max(age_scores, key=age_scores.get)
203
+ ultra_result["demographic"]["age_confidence"] = age_scores[ultra_result["demographic"]["age_category"]]
204
+
205
+ # GENDER DETECTION WITH CONFIDENCE
206
+ male_indicators = ["man", "male", "gentleman", "guy", "he", "his", "masculine"]
207
+ female_indicators = ["woman", "female", "lady", "she", "her", "feminine"]
208
+
209
+ male_score = sum(1 for indicator in male_indicators if indicator in combined_analysis["combined"])
210
+ female_score = sum(1 for indicator in female_indicators if indicator in combined_analysis["combined"])
211
+
212
+ if male_score > female_score:
213
+ ultra_result["demographic"]["gender"] = "man"
214
+ elif female_score > male_score:
215
+ ultra_result["demographic"]["gender"] = "woman"
216
+
217
+ # ULTRA CULTURAL/RELIGIOUS ANALYSIS
218
+ for culture_type, indicators in self.cultural_religious_ultra.items():
219
+ if isinstance(indicators, list):
220
+ for indicator in indicators:
221
+ if indicator.lower() in combined_analysis["combined"]:
222
+ ultra_result["demographic"]["cultural_religious"].append(indicator)
223
+
224
+ # COMPREHENSIVE FACIAL FEATURE ANALYSIS
225
+ for hair_category, features in self.ultra_facial_analysis["facial_hair_ultra"].items():
226
+ for feature in features:
227
+ if feature in combined_analysis["combined"]:
228
+ ultra_result["facial_ultra"]["facial_hair"].append(feature)
229
+
230
+ # Eyes analysis
231
+ for eye_category, features in self.ultra_facial_analysis["eye_features"].items():
232
+ for feature in features:
233
+ if feature in combined_analysis["combined"]:
234
+ ultra_result["facial_ultra"]["eyes"].append(feature)
235
+
236
+ # EMOTION AND MICRO-EXPRESSION ANALYSIS
237
+ emotion_scores = {}
238
+ for emotion in self.emotion_micro_expressions["complex_emotions"]:
239
+ if emotion in combined_analysis["combined"]:
240
+ emotion_scores[emotion] = combined_analysis["combined"].count(emotion)
241
+
242
+ if emotion_scores:
243
+ ultra_result["emotional_state"]["primary_emotion"] = max(emotion_scores, key=emotion_scores.get)
244
+ ultra_result["emotional_state"]["emotion_confidence"] = emotion_scores[ultra_result["emotional_state"]["primary_emotion"]]
245
+
246
+ # CLOTHING AND ACCESSORIES ANALYSIS
247
+ for category, items in self.clothing_accessories_ultra.items():
248
+ if isinstance(items, list):
249
+ for item in items:
250
+ if item in combined_analysis["combined"]:
251
+ ultra_result["clothing_accessories"][category].append(item)
252
+
253
+ # ENVIRONMENTAL ULTRA ANALYSIS
254
+ setting_scores = {}
255
+ for main_setting, sub_settings in self.environmental_ultra_analysis.items():
256
+ if isinstance(sub_settings, dict):
257
+ for sub_type, locations in sub_settings.items():
258
+ score = sum(1 for location in locations if location in combined_analysis["combined"])
259
+ if score > 0:
260
+ setting_scores[sub_type] = score
261
 
262
  if setting_scores:
263
+ ultra_result["environmental"]["setting_type"] = max(setting_scores, key=setting_scores.get)
264
 
265
  # LIGHTING ANALYSIS
266
+ for light_category, light_types in self.environmental_ultra_analysis["lighting_ultra"].items():
267
+ for light_type in light_types:
268
+ if light_type in combined_analysis["combined"]:
269
+ ultra_result["environmental"]["lighting_analysis"].append(light_type)
270
+
271
+ # POSE AND BODY LANGUAGE ANALYSIS
272
+ for pose_category, indicators in self.pose_body_language_ultra.items():
273
+ for indicator in indicators:
274
+ if indicator in combined_analysis["combined"]:
275
+ ultra_result["pose_composition"][pose_category].append(indicator)
276
+
277
+ # TECHNICAL PHOTOGRAPHY ANALYSIS
278
+ for shot_type in self.composition_photography_ultra["shot_types"]:
279
+ if shot_type in combined_analysis["combined"]:
280
+ ultra_result["technical_analysis"]["shot_type"] = shot_type
281
  break
282
 
283
+ # CALCULATE INTELLIGENCE METRICS
284
+ total_features = sum(len(v) if isinstance(v, list) else (1 if v else 0) for category in ultra_result.values() if isinstance(category, dict) for v in category.values())
285
+ ultra_result["intelligence_metrics"]["total_features_detected"] = total_features
286
+ ultra_result["intelligence_metrics"]["analysis_depth_score"] = min(total_features * 5, 100)
287
+ ultra_result["intelligence_metrics"]["cultural_awareness_score"] = len(ultra_result["demographic"]["cultural_religious"]) * 20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
288
 
289
+ return ultra_result
290
 
291
+ def build_ultra_supreme_prompt(self, ultra_analysis, clip_results):
292
+ """BUILD ULTRA SUPREME FLUX PROMPT - ABSOLUTE MAXIMUM QUALITY"""
293
+
294
  components = []
295
 
296
+ # 1. ULTRA INTELLIGENT ARTICLE SELECTION
297
+ subject_desc = []
298
+ if ultra_analysis["demographic"]["cultural_religious"]:
299
+ subject_desc.extend(ultra_analysis["demographic"]["cultural_religious"][:1])
300
+ if ultra_analysis["demographic"]["age_category"] and ultra_analysis["demographic"]["age_category"] != "middle_aged":
301
+ subject_desc.append(ultra_analysis["demographic"]["age_category"].replace("_", " "))
302
+ if ultra_analysis["demographic"]["gender"]:
303
+ subject_desc.append(ultra_analysis["demographic"]["gender"])
304
+
305
+ if subject_desc:
306
+ full_subject = " ".join(subject_desc)
307
+ article = "An" if full_subject[0].lower() in 'aeiou' else "A"
308
  else:
309
  article = "A"
310
  components.append(article)
311
 
312
+ # 2. ULTRA CONTEXTUAL ADJECTIVES (max 2-3 per Flux rules)
313
  adjectives = []
314
 
315
+ # Age-based adjectives
316
+ age_cat = ultra_analysis["demographic"]["age_category"]
317
+ if age_cat and age_cat in self.quality_descriptors_ultra["based_on_age"]:
318
+ adjectives.extend(self.quality_descriptors_ultra["based_on_age"][age_cat][:2])
319
 
320
+ # Emotion-based adjectives
321
+ emotion = ultra_analysis["emotional_state"]["primary_emotion"]
322
+ if emotion and emotion in self.quality_descriptors_ultra["based_on_emotion"]:
323
+ adjectives.extend(self.quality_descriptors_ultra["based_on_emotion"][emotion][:1])
324
 
325
+ # Default if none found
326
  if not adjectives:
327
+ adjectives = ["distinguished", "professional"]
328
 
329
+ components.extend(adjectives[:2]) # Flux rule: max 2-3 adjectives
 
330
 
331
+ # 3. ULTRA ENHANCED SUBJECT
332
+ if subject_desc:
333
+ components.append(" ".join(subject_desc))
334
+ else:
335
+ components.append("person")
336
+
337
+ # 4. ULTRA DETAILED FACIAL FEATURES
338
+ facial_details = []
339
+
340
+ # Eyes
341
+ if ultra_analysis["facial_ultra"]["eyes"]:
342
+ eye_desc = ultra_analysis["facial_ultra"]["eyes"][0]
343
+ facial_details.append(f"with {eye_desc}")
344
+
345
+ # Facial hair with ultra detail
346
+ if ultra_analysis["facial_ultra"]["facial_hair"]:
347
+ beard_details = ultra_analysis["facial_ultra"]["facial_hair"]
348
+ if any("silver" in detail or "gray" in detail or "grey" in detail for detail in beard_details):
349
+ facial_details.append("with a distinguished silver beard")
350
+ elif any("beard" in detail for detail in beard_details):
351
+ facial_details.append("with a full well-groomed beard")
352
+
353
+ if facial_details:
354
+ components.extend(facial_details)
355
+
356
+ # 5. CLOTHING AND ACCESSORIES ULTRA
357
+ clothing_details = []
358
+
359
+ # Eyewear
360
+ if ultra_analysis["clothing_accessories"]["eyewear"]:
361
+ eyewear = ultra_analysis["clothing_accessories"]["eyewear"][0]
362
+ clothing_details.append(f"wearing {eyewear}")
363
+
364
+ # Headwear
365
+ if ultra_analysis["clothing_accessories"]["headwear"]:
366
+ headwear = ultra_analysis["clothing_accessories"]["headwear"][0]
367
+ if ultra_analysis["demographic"]["cultural_religious"]:
368
+ clothing_details.append("wearing a traditional black hat")
369
+ else:
370
+ clothing_details.append(f"wearing a {headwear}")
371
 
372
+ if clothing_details:
373
+ components.extend(clothing_details)
374
 
375
+ # 6. ULTRA POSE AND BODY LANGUAGE
376
+ pose_description = "positioned with natural dignity"
377
 
378
+ if ultra_analysis["pose_composition"]["posture"]:
379
+ posture = ultra_analysis["pose_composition"]["posture"][0]
380
+ pose_description = f"maintaining {posture}"
381
+ elif ultra_analysis["technical_analysis"]["shot_type"] == "portrait":
382
+ pose_description = "captured in contemplative portrait pose"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
383
 
384
+ components.append(pose_description)
 
 
 
 
 
 
 
 
 
 
 
 
 
385
 
386
+ # 7. ULTRA ENVIRONMENTAL CONTEXT
387
+ environment_desc = "in a thoughtfully composed environment"
 
 
 
 
 
 
 
 
 
 
 
388
 
389
+ if ultra_analysis["environmental"]["setting_type"]:
390
+ setting_map = {
391
+ "residential": "in an intimate home setting",
392
+ "office": "in a professional office environment",
393
+ "religious": "in a sacred traditional space",
394
+ "formal": "in a distinguished formal setting"
395
+ }
396
+ environment_desc = setting_map.get(ultra_analysis["environmental"]["setting_type"], "in a carefully arranged professional setting")
397
+
398
+ components.append(environment_desc)
399
+
400
+ # 8. ULTRA SOPHISTICATED LIGHTING
401
+ lighting_desc = "illuminated by sophisticated portrait lighting that emphasizes character and facial texture"
402
+
403
+ if ultra_analysis["environmental"]["lighting_analysis"]:
404
+ primary_light = ultra_analysis["environmental"]["lighting_analysis"][0]
405
+ if "dramatic" in primary_light:
406
+ lighting_desc = "bathed in dramatic chiaroscuro lighting that creates compelling depth and shadow play"
407
+ elif "natural" in primary_light or "window" in primary_light:
408
+ lighting_desc = "graced by gentle natural lighting that brings out intricate facial details and warmth"
409
+ elif "soft" in primary_light:
410
+ lighting_desc = "softly illuminated to reveal nuanced expressions and character"
411
 
412
  components.append(lighting_desc)
413
 
414
+ # 9. ULTRA TECHNICAL SPECIFICATIONS
415
+ if ultra_analysis["technical_analysis"]["shot_type"] in ["portrait", "headshot", "close-up"]:
416
+ camera_setup = "Shot on Phase One XF IQ4, 85mm f/1.4 lens, f/2.8 aperture"
417
+ elif ultra_analysis["demographic"]["cultural_religious"]:
418
+ camera_setup = "Shot on Hasselblad X2D, 90mm lens, f/2.8 aperture"
 
419
  else:
420
+ camera_setup = "Shot on Phase One XF, 80mm lens, f/4 aperture"
421
+
422
+ components.append(camera_setup)
423
 
424
+ # 10. ULTRA QUALITY DESIGNATION
425
+ quality_designation = "professional portrait photography"
426
 
427
+ if ultra_analysis["demographic"]["cultural_religious"]:
428
+ quality_designation = "fine art documentary photography"
429
+ elif ultra_analysis["emotional_state"]["primary_emotion"]:
430
+ quality_designation = "expressive portrait photography"
431
 
432
+ components.append(quality_designation)
433
+
434
+ # ULTRA FINAL ASSEMBLY
435
  prompt = ", ".join(components)
436
 
437
+ # Ultra cleaning and optimization
438
+ prompt = re.sub(r'\s+', ' ', prompt)
439
+ prompt = re.sub(r',\s*,+', ',', prompt)
440
+ prompt = re.sub(r'\s*,\s*', ', ', prompt)
441
+ prompt = prompt.replace(" ,", ",")
442
 
443
+ if prompt:
444
+ prompt = prompt[0].upper() + prompt[1:]
445
 
446
  return prompt
447
 
448
+ def calculate_ultra_supreme_score(self, prompt, ultra_analysis):
449
+ """ULTRA SUPREME INTELLIGENCE SCORING"""
450
+
451
  score = 0
452
+ breakdown = {}
453
 
454
+ # Structure Excellence (15 points)
455
+ structure_score = 0
456
  if prompt.startswith(("A", "An")):
457
+ structure_score += 5
458
+ if prompt.count(",") >= 8:
459
+ structure_score += 10
460
+ score += structure_score
461
+ breakdown["structure"] = structure_score
462
+
463
+ # Feature Detection Depth (25 points)
464
+ features_score = min(ultra_analysis["intelligence_metrics"]["total_features_detected"] * 2, 25)
465
+ score += features_score
466
+ breakdown["features"] = features_score
467
+
468
+ # Cultural/Religious Awareness (20 points)
469
+ cultural_score = min(len(ultra_analysis["demographic"]["cultural_religious"]) * 10, 20)
470
+ score += cultural_score
471
+ breakdown["cultural"] = cultural_score
472
+
473
+ # Emotional Intelligence (15 points)
474
+ emotion_score = 0
475
+ if ultra_analysis["emotional_state"]["primary_emotion"]:
476
+ emotion_score += 10
477
+ if ultra_analysis["emotional_state"]["emotion_confidence"] > 1:
478
+ emotion_score += 5
479
+ score += emotion_score
480
+ breakdown["emotional"] = emotion_score
481
+
482
+ # Technical Sophistication (15 points)
483
+ tech_score = 0
484
+ if "Phase One" in prompt or "Hasselblad" in prompt:
485
+ tech_score += 5
486
+ if any(aperture in prompt for aperture in ["f/1.4", "f/2.8", "f/4"]):
487
+ tech_score += 5
488
+ if any(lens in prompt for lens in ["85mm", "90mm", "80mm"]):
489
+ tech_score += 5
490
+ score += tech_score
491
+ breakdown["technical"] = tech_score
492
+
493
+ # Environmental Context (10 points)
494
+ env_score = 0
495
+ if ultra_analysis["environmental"]["setting_type"]:
496
+ env_score += 5
497
+ if ultra_analysis["environmental"]["lighting_analysis"]:
498
+ env_score += 5
499
+ score += env_score
500
+ breakdown["environmental"] = env_score
501
+
502
+ return min(score, 100), breakdown
503
 
504
+ class UltraSupremeOptimizer:
505
  def __init__(self):
506
  self.interrogator = None
507
+ self.analyzer = UltraSupremeAnalyzer()
508
  self.usage_count = 0
509
  self.device = DEVICE
510
  self.is_initialized = False
 
555
  return image
556
 
557
  @spaces.GPU
558
+ def generate_ultra_supreme_prompt(self, image):
559
  try:
560
  if not self.is_initialized:
561
  if not self.initialize_model():
562
+ return "❌ Model initialization failed.", "Please refresh and try again.", 0, {}
563
 
564
  if image is None:
565
+ return "❌ Please upload an image.", "No image provided.", 0, {}
566
 
567
  self.usage_count += 1
568
 
569
  image = self.optimize_image(image)
570
  if image is None:
571
+ return "❌ Image processing failed.", "Invalid image format.", 0, {}
572
 
573
  start_time = datetime.now()
574
 
575
+ # ULTRA SUPREME TRIPLE CLIP ANALYSIS
576
+ logger.info("ULTRA SUPREME ANALYSIS - Maximum intelligence deployment")
577
 
578
  clip_fast = self.interrogator.interrogate_fast(image)
579
  clip_classic = self.interrogator.interrogate_classic(image)
580
  clip_best = self.interrogator.interrogate(image)
581
 
582
+ logger.info(f"ULTRA CLIP Results:\nFast: {clip_fast}\nClassic: {clip_classic}\nBest: {clip_best}")
583
 
584
+ # ULTRA SUPREME ANALYSIS
585
+ ultra_analysis = self.analyzer.ultra_supreme_analysis(clip_fast, clip_classic, clip_best)
586
 
587
+ # BUILD ULTRA SUPREME FLUX PROMPT
588
+ optimized_prompt = self.analyzer.build_ultra_supreme_prompt(ultra_analysis, [clip_fast, clip_classic, clip_best])
589
 
590
+ # CALCULATE ULTRA SUPREME SCORE
591
+ score, breakdown = self.analyzer.calculate_ultra_supreme_score(optimized_prompt, ultra_analysis)
592
 
593
  end_time = datetime.now()
594
  duration = (end_time - start_time).total_seconds()
 
599
  else:
600
  torch.cuda.empty_cache()
601
 
602
+ # ULTRA COMPREHENSIVE ANALYSIS REPORT
603
  gpu_status = "⚑ ZeroGPU" if torch.cuda.is_available() else "πŸ’» CPU"
604
 
605
  # Format detected elements
606
+ features = ", ".join(ultra_analysis["facial_ultra"]["facial_hair"]) if ultra_analysis["facial_ultra"]["facial_hair"] else "None detected"
607
+ cultural = ", ".join(ultra_analysis["demographic"]["cultural_religious"]) if ultra_analysis["demographic"]["cultural_religious"] else "None detected"
608
+ clothing = ", ".join(ultra_analysis["clothing_accessories"]["eyewear"] + ultra_analysis["clothing_accessories"]["headwear"]) if ultra_analysis["clothing_accessories"]["eyewear"] or ultra_analysis["clothing_accessories"]["headwear"] else "None detected"
609
 
610
+ analysis_info = f"""**πŸš€ ULTRA SUPREME ANALYSIS COMPLETE**
611
 
612
+ **Processing:** {gpu_status} β€’ {duration:.1f}s β€’ Triple CLIP Ultra Intelligence
613
+ **Ultra Score:** {score}/100 β€’ Breakdown: Structure({breakdown.get('structure',0)}) Features({breakdown.get('features',0)}) Cultural({breakdown.get('cultural',0)}) Emotional({breakdown.get('emotional',0)}) Technical({breakdown.get('technical',0)})
 
614
  **Generation:** #{self.usage_count}
615
 
616
+ **🧠 ULTRA DEEP DETECTION:**
617
+ β€’ **Age Category:** {ultra_analysis["demographic"].get("age_category", "Unspecified").replace("_", " ").title()} (Confidence: {ultra_analysis["demographic"].get("age_confidence", 0)})
618
  β€’ **Cultural Context:** {cultural}
619
  β€’ **Facial Features:** {features}
620
+ β€’ **Accessories:** {clothing}
621
+ β€’ **Setting:** {ultra_analysis["environmental"].get("setting_type", "Standard").title()}
622
+ β€’ **Emotion:** {ultra_analysis["emotional_state"].get("primary_emotion", "Neutral").title()}
623
+ β€’ **Total Features:** {ultra_analysis["intelligence_metrics"]["total_features_detected"]}
624
 
625
+ **πŸ“Š CLIP ANALYSIS SOURCES:**
626
+ β€’ **Fast:** {clip_fast[:50]}...
627
+ β€’ **Classic:** {clip_classic[:50]}...
628
+ β€’ **Best:** {clip_best[:50]}...
629
 
630
+ **⚑ ULTRA OPTIMIZATION:** Applied absolute maximum depth analysis with Pariente AI research rules"""
631
 
632
+ return optimized_prompt, analysis_info, score, breakdown
633
 
634
  except Exception as e:
635
+ logger.error(f"Ultra supreme generation error: {e}")
636
+ return f"❌ Error: {str(e)}", "Please try with a different image.", 0, {}
637
 
638
# Module-level singleton shared by the Gradio callbacks below.
optimizer = UltraSupremeOptimizer()
639
 
640
+ def process_ultra_supreme_analysis(image):
641
+ """Ultra supreme analysis wrapper"""
642
  try:
643
+ prompt, info, score, breakdown = optimizer.generate_ultra_supreme_prompt(image)
644
 
645
+ # Ultra enhanced score display
646
+ if score >= 95:
647
+ color = "#059669"
648
+ grade = "LEGENDARY"
649
+ elif score >= 90:
650
  color = "#10b981"
651
  grade = "EXCELLENT"
652
  elif score >= 80:
 
663
  grade = "NEEDS WORK"
664
 
665
  score_html = f'''
666
+ <div style="text-align: center; padding: 2rem; background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%); border: 3px solid {color}; border-radius: 16px; margin: 1rem 0; box-shadow: 0 8px 25px -5px rgba(0, 0, 0, 0.1);">
667
+ <div style="font-size: 3rem; font-weight: 800; color: {color}; margin: 0; text-shadow: 0 2px 4px rgba(0,0,0,0.1);">{score}</div>
668
+ <div style="font-size: 1.25rem; color: #15803d; margin: 0.5rem 0; text-transform: uppercase; letter-spacing: 0.1em; font-weight: 700;">{grade}</div>
669
+ <div style="font-size: 1rem; color: #15803d; margin: 0; text-transform: uppercase; letter-spacing: 0.05em; font-weight: 500;">Ultra Supreme Intelligence Score</div>
670
  </div>
671
  '''
672
 
673
  return prompt, info, score_html
674
 
675
  except Exception as e:
676
+ logger.error(f"Ultra supreme wrapper error: {e}")
677
  return "❌ Processing failed", f"Error: {str(e)}", '<div style="text-align: center; color: red;">Error</div>'
678
 
679
def clear_outputs():
    """Reset the UI output widgets and release cached memory.

    Returns:
        tuple: Empty prompt text, empty info text, and the placeholder HTML
        for the score panel.
    """
    # Collect Python-level garbage first, then drop the CUDA allocator cache
    # when a GPU is present.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    placeholder_html = '<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Ultra Supreme Score</div></div>'
    return "", "", placeholder_html
684
 
685
  def create_interface():
686
  css = """
687
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&display=swap');
688
 
689
  .gradio-container {
690
+ max-width: 1600px !important;
691
  margin: 0 auto !important;
692
  font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
693
  background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%) !important;
 
695
 
696
  .main-header {
697
  text-align: center;
698
+ padding: 3rem 0 4rem 0;
699
+ background: linear-gradient(135deg, #0c0a09 0%, #1c1917 30%, #292524 60%, #44403c 100%);
700
  color: white;
701
+ margin: -2rem -2rem 3rem -2rem;
702
+ border-radius: 0 0 32px 32px;
703
+ box-shadow: 0 20px 50px -10px rgba(0, 0, 0, 0.25);
704
+ position: relative;
705
+ overflow: hidden;
706
+ }
707
+
708
+ .main-header::before {
709
+ content: '';
710
+ position: absolute;
711
+ top: 0;
712
+ left: 0;
713
+ right: 0;
714
+ bottom: 0;
715
+ background: linear-gradient(45deg, rgba(59, 130, 246, 0.1) 0%, rgba(147, 51, 234, 0.1) 50%, rgba(236, 72, 153, 0.1) 100%);
716
+ z-index: 1;
717
  }
718
 
719
  .main-title {
720
+ font-size: 4rem !important;
721
+ font-weight: 900 !important;
722
+ margin: 0 0 1rem 0 !important;
723
+ letter-spacing: -0.05em !important;
724
+ background: linear-gradient(135deg, #60a5fa 0%, #3b82f6 25%, #8b5cf6 50%, #a855f7 75%, #ec4899 100%);
725
  -webkit-background-clip: text;
726
  -webkit-text-fill-color: transparent;
727
  background-clip: text;
728
+ position: relative;
729
+ z-index: 2;
730
  }
731
 
732
  .subtitle {
733
+ font-size: 1.5rem !important;
734
+ font-weight: 500 !important;
735
+ opacity: 0.95 !important;
736
  margin: 0 !important;
737
+ position: relative;
738
+ z-index: 2;
739
  }
740
 
741
  .prompt-output {
742
  font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', monospace !important;
743
+ font-size: 15px !important;
744
+ line-height: 1.8 !important;
745
  background: linear-gradient(135deg, #ffffff 0%, #f8fafc 100%) !important;
746
+ border: 2px solid #e2e8f0 !important;
747
+ border-radius: 20px !important;
748
+ padding: 2.5rem !important;
749
+ box-shadow: 0 20px 50px -10px rgba(0, 0, 0, 0.1) !important;
750
+ transition: all 0.3s ease !important;
751
+ }
752
+
753
+ .prompt-output:hover {
754
+ box-shadow: 0 25px 60px -5px rgba(0, 0, 0, 0.15) !important;
755
+ transform: translateY(-2px) !important;
756
  }
757
  """
758
 
759
  with gr.Blocks(
760
  theme=gr.themes.Soft(),
761
+ title="πŸš€ Ultra Supreme Flux Optimizer",
762
  css=css
763
  ) as interface:
764
 
765
  gr.HTML("""
766
  <div class="main-header">
767
+ <div class="main-title">πŸš€ ULTRA SUPREME FLUX OPTIMIZER</div>
768
+ <div class="subtitle">Maximum Absolute Intelligence β€’ Triple CLIP Analysis β€’ Zero Compromise β€’ Research Supremacy</div>
769
  </div>
770
  """)
771
 
772
  with gr.Row():
773
  with gr.Column(scale=1):
774
+ gr.Markdown("## 🧠 Ultra Supreme Analysis Engine")
775
 
776
  image_input = gr.Image(
777
+ label="Upload image for MAXIMUM intelligence analysis",
778
  type="pil",
779
+ height=500
780
  )
781
 
782
  analyze_btn = gr.Button(
783
+ "πŸš€ ULTRA SUPREME ANALYSIS",
784
  variant="primary",
785
  size="lg"
786
  )
787
 
788
  gr.Markdown("""
789
+ ### πŸ”¬ Maximum Absolute Intelligence
790
 
791
+ **πŸš€ Triple CLIP Interrogation:**
792
+ β€’ Fast analysis for broad contextual mapping
793
+ β€’ Classic analysis for detailed feature extraction
794
+ β€’ Best analysis for maximum depth intelligence
795
 
796
+ **🧠 Ultra Deep Feature Extraction:**
797
+ β€’ Micro-age detection with confidence scoring
798
+ β€’ Cultural/religious context with semantic analysis
799
+ β€’ Facial micro-features and expression mapping
800
+ β€’ Emotional state and micro-expression detection
801
+ β€’ Environmental lighting and atmospheric analysis
802
+ β€’ Body language and pose interpretation
803
+ β€’ Technical photography optimization
804
 
805
+ **⚑ Absolute Maximum Intelligence** - No configuration, no limits, no compromise.
806
  """)
807
 
808
  with gr.Column(scale=1):
809
+ gr.Markdown("## ⚑ Ultra Supreme Result")
810
 
811
  prompt_output = gr.Textbox(
812
+ label="πŸš€ Ultra Supreme Optimized Flux Prompt",
813
+ placeholder="Upload an image to witness absolute maximum intelligence analysis...",
814
+ lines=12,
815
+ max_lines=20,
816
  elem_classes=["prompt-output"],
817
  show_copy_button=True
818
  )
819
 
820
  score_output = gr.HTML(
821
+ value='<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Ultra Supreme Score</div></div>'
822
  )
823
 
824
  info_output = gr.Markdown(value="")
825
 
826
+ clear_btn = gr.Button("πŸ—‘οΈ Clear Ultra Analysis", size="sm")
827
 
828
  gr.Markdown("""
829
  ---
830
+ ### πŸ† Ultra Supreme Research Foundation
831
 
832
+ This system represents the **absolute pinnacle** of image analysis and Flux prompt optimization. Using triple CLIP interrogation,
833
+ ultra-deep feature extraction, cultural context awareness, and emotional intelligence mapping, it achieves maximum possible
834
+ understanding and applies research-validated Flux rules with supreme intelligence.
835
 
836
+ **πŸ”¬ Pariente AI Research Laboratory** β€’ **πŸš€ Ultra Supreme