Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -11,7 +11,6 @@ from datetime import datetime
 import gc
 import re
 
-# Suppress warnings
 warnings.filterwarnings("ignore", category=FutureWarning)
 warnings.filterwarnings("ignore", category=UserWarning)
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
@@ -29,185 +28,265 @@ def get_device():
 
 DEVICE = get_device()
 
-class …
+class DeepFluxAnalyzer:
     """
-
-    Implements structured prompt generation following validated rules
+    Deep analysis engine that understands image content and applies Flux rules intelligently
     """
 
     def __init__(self):
         self.forbidden_elements = ["++", "weights", "white background [en dev]"]
 
-
-        self.…
-            "…
-            "…
-
-
-        self.lighting_types = [
-            "golden hour", "studio lighting", "dramatic lighting",
-            "ambient lighting", "natural light", "soft lighting",
-            "rim lighting", "volumetric lighting"
-        ]
-
-        self.technical_specs = [
-            "Shot on Phase One", "f/2.8 aperture", "50mm lens",
-            "85mm lens", "35mm lens", "professional photography",
-            "medium format", "high resolution"
-        ]
-
-        self.materials = [
-            "metallic", "glass", "chrome", "leather", "fabric",
-            "wood", "concrete", "steel", "ceramic"
-        ]
-
-    def extract_subject(self, base_prompt):
-        """Extract main subject from CLIP analysis"""
-        words = base_prompt.lower().split()
-
-        # Common subjects to identify
-        subjects = [
-            "car", "vehicle", "automobile", "person", "man", "woman",
-            "building", "house", "landscape", "mountain", "tree",
-            "flower", "animal", "dog", "cat", "bird"
-        ]
-
-        for word in words:
-            if word in subjects:
-                return word
-
-        # Fallback to first noun-like word
-        return words[0] if words else "subject"
-
-    def detect_setting(self, base_prompt):
-        """Detect environmental context"""
-        prompt_lower = base_prompt.lower()
-
-        settings = {
-            "studio": ["studio", "backdrop", "seamless"],
-            "outdoor": ["outdoor", "outside", "landscape", "nature"],
-            "urban": ["city", "street", "urban", "building"],
-            "coastal": ["beach", "ocean", "coast", "sea"],
-            "indoor": ["room", "interior", "inside", "home"]
+        # Deep vocabulary for intelligent analysis
+        self.age_descriptors = {
+            "young": ["young", "youthful", "fresh-faced"],
+            "middle": ["middle-aged", "mature"],
+            "elderly": ["elderly", "aged", "distinguished", "weathered"]
         }
 
-        …
-        …
-        …
-        …
-        …
+        self.facial_features = {
+            "beard": ["bearded", "with a full beard", "with facial hair", "with a silver beard", "with a gray beard"],
+            "glasses": ["wearing glasses", "with wire-frame glasses", "with spectacles", "with eyeglasses"],
+            "eyes": ["intense gaze", "piercing eyes", "contemplative expression", "focused stare"]
+        }
+
+        self.clothing_religious = {
+            "hat": ["black hat", "traditional hat", "religious headwear", "Orthodox hat"],
+            "clothing": ["traditional clothing", "religious attire", "formal wear", "dark clothing"]
+        }
+
+        self.settings_detailed = {
+            "indoor": ["indoor setting", "interior space", "indoor environment"],
+            "outdoor": ["outdoor setting", "natural environment", "exterior location"],
+            "studio": ["studio setting", "controlled environment", "professional backdrop"]
+        }
+
+        self.lighting_advanced = {
+            "portrait": ["dramatic portrait lighting", "studio portrait lighting", "professional portrait setup"],
+            "natural": ["natural lighting", "window light", "ambient illumination"],
+            "dramatic": ["dramatic lighting", "high contrast lighting", "chiaroscuro lighting"]
+        }
+
+        self.technical_professional = {
+            "portrait_lens": ["85mm lens", "135mm lens", "medium telephoto"],
+            "standard_lens": ["50mm lens", "35mm lens", "standard focal length"],
+            "aperture": ["f/1.4 aperture", "f/2.8 aperture", "f/4 aperture"],
+            "camera": ["Shot on Phase One XF", "Shot on Hasselblad", "Shot on Canon EOS R5"]
+        }
 
-    def …
-        """…
+    def analyze_clip_deeply(self, clip_result):
+        """Extract detailed information from CLIP analysis"""
+        clip_lower = clip_result.lower()
+        analysis = {
+            "subjects": [],
+            "age": None,
+            "features": [],
+            "clothing": [],
+            "setting": None,
+            "mood": None,
+            "composition": None
+        }
 
-        # …
-        …
-        …
-        …
+        # Subject and age detection
+        if any(word in clip_lower for word in ["man", "person", "male"]):
+            if any(word in clip_lower for word in ["old", "elderly", "aged", "gray", "grey", "silver"]):
+                analysis["subjects"].append("elderly man")
+                analysis["age"] = "elderly"
+            elif any(word in clip_lower for word in ["young", "youth", "boy"]):
+                analysis["subjects"].append("young man")
+                analysis["age"] = "young"
+            else:
+                analysis["subjects"].append("man")
+                analysis["age"] = "middle"
 
-        …
-        …
-        …
+        if any(word in clip_lower for word in ["woman", "female", "lady"]):
+            if any(word in clip_lower for word in ["old", "elderly", "aged"]):
+                analysis["subjects"].append("elderly woman")
+                analysis["age"] = "elderly"
+            else:
+                analysis["subjects"].append("woman")
 
-        # …
+        # Facial features detection
+        if any(word in clip_lower for word in ["beard", "facial hair", "mustache"]):
+            if any(word in clip_lower for word in ["gray", "grey", "silver", "white"]):
+                analysis["features"].append("silver beard")
+            else:
+                analysis["features"].append("beard")
+
+        if any(word in clip_lower for word in ["glasses", "spectacles", "eyeglasses"]):
+            analysis["features"].append("glasses")
+
+        # Clothing and accessories
+        if any(word in clip_lower for word in ["hat", "cap", "headwear"]):
+            analysis["clothing"].append("hat")
+
+        if any(word in clip_lower for word in ["suit", "formal", "dress", "shirt"]):
+            analysis["clothing"].append("formal wear")
+
+        # Setting detection
+        if any(word in clip_lower for word in ["indoor", "inside", "interior", "room"]):
+            analysis["setting"] = "indoor"
+        elif any(word in clip_lower for word in ["outdoor", "outside", "landscape", "street"]):
+            analysis["setting"] = "outdoor"
+        elif any(word in clip_lower for word in ["studio", "backdrop"]):
+            analysis["setting"] = "studio"
+
+        # Mood and composition
+        if any(word in clip_lower for word in ["portrait", "headshot", "face", "close-up"]):
+            analysis["composition"] = "portrait"
+        elif any(word in clip_lower for word in ["sitting", "seated", "chair"]):
+            analysis["composition"] = "seated"
+        elif any(word in clip_lower for word in ["standing", "upright"]):
+            analysis["composition"] = "standing"
+
+        return analysis
+
+    def build_flux_prompt(self, analysis, clip_base):
+        """Build optimized Flux prompt using deep analysis"""
         components = []
 
-        # 1. Article
-        …
+        # 1. Article (intelligent selection)
+        if analysis["subjects"]:
+            subject = analysis["subjects"][0]
+            article = "An" if subject[0] in 'aeiou' else "A"
+        else:
+            article = "A"
         components.append(article)
 
-        # 2. Descriptive adjectives (…
-        adjectives = […
-        …
+        # 2. Descriptive adjectives (context-aware)
+        adjectives = []
+        if analysis["age"] == "elderly":
+            adjectives.extend(["distinguished", "weathered"])
+        elif analysis["age"] == "young":
+            adjectives.extend(["young", "fresh-faced"])
+        else:
+            adjectives.extend(["professional", "elegant"])
 
-        # 3…
-        components.…
+        # Add up to 2-3 adjectives as per Flux rules
+        components.extend(adjectives[:2])
 
-        # …
-        if "…
-        …
+        # 3. Main subject (enhanced with details)
+        if analysis["subjects"]:
+            main_subject = analysis["subjects"][0]
+            # Add religious/cultural context if detected
+            if "hat" in analysis["clothing"] and "beard" in [f.split()[0] for f in analysis["features"]]:
+                main_subject = "Orthodox Jewish " + main_subject
         else:
-            …
-        components.append(…
+            main_subject = "subject"
+        components.append(main_subject)
+
+        # 4. Features integration (intelligent placement)
+        feature_descriptions = []
+        if "glasses" in analysis["features"]:
+            feature_descriptions.append("with distinctive wire-frame glasses")
+        if any("beard" in f for f in analysis["features"]):
+            if "silver beard" in analysis["features"]:
+                feature_descriptions.append("with a distinguished silver beard")
+            else:
+                feature_descriptions.append("with a full beard")
 
-        …
-        context_map = {
-            "studio": "in a professional studio setting",
-            "outdoor": "in a natural outdoor environment",
-            "urban": "on an urban street",
-            "coastal": "along a dramatic coastline",
-            "indoor": "in an elegant interior space"
-        }
-        components.append(context_map.get(setting, "in a carefully composed scene"))
+        if feature_descriptions:
+            components.extend(feature_descriptions)
+
+        # 5. Clothing and accessories
+        clothing_desc = []
+        if "hat" in analysis["clothing"]:
+            clothing_desc.append("wearing a traditional black hat")
+        if "formal wear" in analysis["clothing"]:
+            clothing_desc.append("in formal attire")
+
+        if clothing_desc:
+            components.extend(clothing_desc)
+
+        # 6. Verb/Action (based on composition analysis)
+        if analysis["composition"] == "seated":
+            action = "seated contemplatively"
+        elif analysis["composition"] == "standing":
+            action = "standing with dignity"
+        else:
+            action = "positioned thoughtfully"
+        components.append(action)
 
-        # …
-        …
+        # 7. Context/Location (enhanced setting)
+        setting_map = {
+            "indoor": "in an intimate indoor setting",
+            "outdoor": "in a natural outdoor environment",
+            "studio": "in a professional studio environment"
+        }
 
-        # 7. …
-        …
-        …
+        if analysis["setting"]:
+            context = setting_map.get(analysis["setting"], "in a carefully composed environment")
+        else:
+            context = "in a thoughtfully arranged scene"
+        components.append(context)
 
-        …
-        …
+        # 8. Environmental details (lighting-aware)
+        if analysis["composition"] == "portrait":
+            env_detail = "with dramatic portrait lighting that emphasizes facial features and texture"
+        else:
+            env_detail = "captured with sophisticated atmospheric lighting"
+        components.append(env_detail)
 
-        # …
-        …
-
-        # …
-        if …
-        …
-        elif style_preference == "commercial":
-            quality = "commercial photography quality"
+        # 9. Technical specifications (composition-appropriate)
+        if analysis["composition"] == "portrait":
+            tech_spec = "Shot on Phase One XF, 85mm lens, f/2.8 aperture"
         else:
-            …
-
-        components.append(quality)
+            tech_spec = "Shot on Phase One, 50mm lens, f/4 aperture"
+        components.append(tech_spec)
 
-        # …
+        # 10. Quality marker (always professional)
+        components.append("professional photography")
+
+        # Join with proper punctuation
         prompt = ", ".join(components)
 
-        # …
-        prompt = …
+        # Clean up and optimize
+        prompt = re.sub(r'\s+', ' ', prompt)  # Remove extra spaces
+        prompt = prompt.replace(", ,", ",")  # Remove double commas
 
         return prompt
 
-    def …
-        """Calculate …
+    def calculate_intelligence_score(self, prompt, analysis):
+        """Calculate how well the prompt reflects intelligent analysis"""
        score = 0
 
-        # Structure …
-        if prompt.startswith(("A", "An"…
+        # Structure compliance (Flux rules 1-10)
+        if prompt.startswith(("A", "An")):
+            score += 10
+
+        # Feature recognition accuracy
+        if len(analysis["features"]) > 0:
            score += 15
 
-        # …
-        if …
-            score += …
+        # Context understanding
+        if analysis["setting"]:
+            score += 15
 
-        # …
-        if …
+        # Subject detail depth
+        if len(analysis["subjects"]) > 0:
            score += 15
 
-        # …
-        if …
+        # Technical specs presence
+        if "Phase One" in prompt and "lens" in prompt:
            score += 15
 
-        # …
-        if "…
+        # Lighting specification
+        if "lighting" in prompt:
            score += 10
 
-        # …
-        …
-        …
-        …
-        …
-        …
+        # Composition awareness
+        if analysis["composition"]:
+            score += 10
+
+        # Forbidden elements check
+        if not any(forbidden in prompt for forbidden in self.forbidden_elements):
+            score += 10
 
        return min(score, 100)
 
 class FluxPromptOptimizer:
     def __init__(self):
         self.interrogator = None
-        self.…
+        self.analyzer = DeepFluxAnalyzer()
         self.usage_count = 0
         self.device = DEVICE
         self.is_initialized = False
@@ -251,7 +330,6 @@ class FluxPromptOptimizer:
         if image.mode != 'RGB':
             image = image.convert('RGB')
 
-        # Optimize image size for processing
         max_size = 768 if self.device != "cpu" else 512
         if image.size[0] > max_size or image.size[1] > max_size:
             image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
@@ -259,7 +337,7 @@ class FluxPromptOptimizer:
         return image
 
     @spaces.GPU
-    def generate_optimized_prompt(self, image…
+    def generate_optimized_prompt(self, image):
        try:
            if not self.is_initialized:
                if not self.initialize_model():
@@ -276,22 +354,17 @@ class FluxPromptOptimizer:
 
            start_time = datetime.now()
 
-            # Get …
-            …
-            …
-            …
-            …
-                    base_prompt = self.interrogator.interrogate_classic(image)
-                else:
-                    base_prompt = self.interrogator.interrogate(image)
-            except Exception as e:
-                base_prompt = self.interrogator.interrogate_fast(image)
+            # Get comprehensive CLIP analysis
+            clip_result = self.interrogator.interrogate(image)
+
+            # Deep analysis of the CLIP result
+            deep_analysis = self.analyzer.analyze_clip_deeply(clip_result)
 
-            # …
-            optimized_prompt = self.…
+            # Build optimized Flux prompt
+            optimized_prompt = self.analyzer.build_flux_prompt(deep_analysis, clip_result)
 
-            # Calculate …
-            score = self.…
+            # Calculate intelligence score
+            score = self.analyzer.calculate_intelligence_score(optimized_prompt, deep_analysis)
 
            end_time = datetime.now()
            duration = (end_time - start_time).total_seconds()
@@ -302,18 +375,26 @@ class FluxPromptOptimizer:
            else:
                torch.cuda.empty_cache()
 
-            # Generate analysis info
+            # Generate detailed analysis info
            gpu_status = "⚡ ZeroGPU" if torch.cuda.is_available() else "💻 CPU"
 
-            …
+            features_detected = ", ".join(deep_analysis["features"]) if deep_analysis["features"] else "None"
+            subjects_detected = ", ".join(deep_analysis["subjects"]) if deep_analysis["subjects"] else "Generic"
+
+            analysis_info = f"""**Deep Analysis Complete**
 
-**Processing:** {gpu_status} • {duration:.1f}s
-**…
-**Optimization Score:** {score}/100
+**Processing:** {gpu_status} • {duration:.1f}s
+**Intelligence Score:** {score}/100
 **Generation:** #{self.usage_count}
 
-**…
-**…
+**Detected Elements:**
+• **Subjects:** {subjects_detected}
+• **Features:** {features_detected}
+• **Setting:** {deep_analysis["setting"] or "Unspecified"}
+• **Composition:** {deep_analysis["composition"] or "Standard"}
+
+**CLIP Base:** {clip_result[:80]}...
+**Flux Enhancement:** Applied deep analysis with Pariente AI rules"""
 
            return optimized_prompt, analysis_info, score
 
@@ -323,17 +404,17 @@ class FluxPromptOptimizer:
 
 optimizer = FluxPromptOptimizer()
 
-def process_image_wrapper(image…
-    """…
+def process_image_wrapper(image):
+    """Simplified wrapper - no unnecessary options"""
    try:
-        prompt, info, score = optimizer.generate_optimized_prompt(image…
+        prompt, info, score = optimizer.generate_optimized_prompt(image)
 
        # Create score HTML
        color = "#22c55e" if score >= 80 else "#f59e0b" if score >= 60 else "#ef4444"
        score_html = f'''
        <div style="text-align: center; padding: 1rem; background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%); border: 2px solid {color}; border-radius: 12px; margin: 1rem 0;">
            <div style="font-size: 2rem; font-weight: 700; color: {color}; margin: 0;">{score}</div>
-            <div style="font-size: 0.875rem; color: #15803d; margin: 0; text-transform: uppercase; letter-spacing: 0.05em;">…
+            <div style="font-size: 0.875rem; color: #15803d; margin: 0; text-transform: uppercase; letter-spacing: 0.05em;">Intelligence Score</div>
        </div>
        '''
 
@@ -347,10 +428,9 @@ def clear_outputs():
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
-    return "", "", '<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">…
+    return "", "", '<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Intelligence Score</div></div>'
 
 def create_interface():
-    # Professional CSS with elegant typography
    css = """
    @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
 
@@ -409,76 +489,73 @@ def create_interface():
        gr.HTML("""
        <div class="main-header">
            <div class="main-title">⚡ Flux Prompt Optimizer</div>
-            <div class="subtitle">…
+            <div class="subtitle">Deep AI analysis • Intelligent prompt generation • Research-based optimization</div>
        </div>
        """)
 
        with gr.Row():
            with gr.Column(scale=1):
-                gr.Markdown("## 📷 Image…
+                gr.Markdown("## 📷 Image Analysis")
 
                image_input = gr.Image(
                    label="Upload your image",
                    type="pil",
-                    height=…
-                )
-
-                gr.Markdown("## ⚙️ Settings")
-
-                style_selector = gr.Dropdown(
-                    choices=["professional", "cinematic", "commercial", "artistic"],
-                    value="professional",
-                    label="Photography Style"
-                )
-
-                mode_selector = gr.Dropdown(
-                    choices=["fast", "classic", "best"],
-                    value="best",
-                    label="Analysis Mode"
+                    height=400
                )
 
                optimize_btn = gr.Button(
-                    "…
+                    "🧠 Analyze & Optimize",
                    variant="primary",
                    size="lg"
                )
+
+                gr.Markdown("""
+                ### Deep Analysis Engine
+
+                This system performs comprehensive image analysis:
+
+                • **Subject Recognition** - Identifies people, objects, context
+                • **Feature Detection** - Facial features, clothing, accessories
+                • **Composition Analysis** - Lighting, setting, mood
+                • **Flux Optimization** - Applies research-validated rules
+
+                No options needed - the AI decides what's optimal.
+                """)
 
            with gr.Column(scale=1):
-                gr.Markdown("##…
+                gr.Markdown("## 🎯 Optimized Result")
 
                prompt_output = gr.Textbox(
-                    label="…
-                    placeholder="…
-                    lines=…
-                    max_lines=…
+                    label="Flux-Optimized Prompt",
+                    placeholder="Upload an image to see the intelligent analysis and optimization...",
+                    lines=8,
+                    max_lines=12,
                    elem_classes=["prompt-output"],
                    show_copy_button=True
                )
 
                score_output = gr.HTML(
-                    value='<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">…
+                    value='<div style="text-align: center; padding: 1rem;"><div style="font-size: 2rem; color: #ccc;">--</div><div style="font-size: 0.875rem; color: #999;">Intelligence Score</div></div>'
                )
 
                info_output = gr.Markdown(value="")
 
-
-                clear_btn = gr.Button("🗑️ Clear", size="sm")
+                clear_btn = gr.Button("🗑️ Clear", size="sm")
 
        gr.Markdown("""
        ---
-        ### 🔬 Research Foundation
+        ### 🔬 Pariente AI Research Foundation
 
-
-        The …
-        specifically calibrated for Flux architecture.
+        This optimizer implements deep computer vision analysis combined with validated Flux prompt engineering rules.
+        The system intelligently recognizes image content and applies structured optimization without requiring user configuration.
 
-        **…
+        **Research-based • Intelligence-driven • Zero configuration needed**
        """)
 
-        # …
+        # Simple event handlers
        optimize_btn.click(
            fn=process_image_wrapper,
-            inputs=[image_input…
+            inputs=[image_input],
            outputs=[prompt_output, info_output, score_output]
        )
 
@@ -490,7 +567,7 @@ def create_interface():
    return interface
 
 if __name__ == "__main__":
-    logger.info("🚀 Starting Flux Prompt Optimizer")
+    logger.info("🚀 Starting Deep Flux Prompt Optimizer")
    interface = create_interface()
    interface.launch(
        server_name="0.0.0.0",
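
For reference, a minimal sketch of how the new `DeepFluxAnalyzer` pipeline chains together outside the Gradio app. The `from app import …` line and the sample caption are assumptions for illustration only (the commit itself only wires the class into `FluxPromptOptimizer`), and importing `app` requires its dependencies (torch, spaces, clip-interrogator, gradio) to be installed:

```python
# Hypothetical standalone use of the class added in this commit.
# Assumes app.py is importable as a module; the caption below is an
# illustrative stand-in for real clip-interrogator output.
from app import DeepFluxAnalyzer

analyzer = DeepFluxAnalyzer()

clip_caption = ("an elderly man with a gray beard and glasses, "
                "wearing a black hat, sitting in a room")

# Step 1: keyword-driven content analysis of the CLIP caption.
analysis = analyzer.analyze_clip_deeply(clip_caption)
# For this caption: subjects=['elderly man'], features=['silver beard', 'glasses'],
# clothing=['hat'], setting='indoor', composition='seated'

# Step 2: assemble the structured Flux prompt from the analysis.
prompt = analyzer.build_flux_prompt(analysis, clip_caption)

# Step 3: rubric-based score (article, features, setting, subject depth,
# technical specs, lighting, composition, forbidden-element check).
score = analyzer.calculate_intelligence_score(prompt, analysis)

print(prompt)
print(f"Intelligence score: {score}/100")
```

One detail worth noting in the new code: `"silver beard".split()[0]` is `"silver"`, so the Orthodox-context branch in `build_flux_prompt` only fires when the features list contains a bare `"beard"`, not the silver variant detected above.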