VOIDER committed
Commit 51aec78 · verified · Parent: 83b7522

Upload 3 files

Files changed (3):
  1. app.py +85 -139
  2. requirements.txt +13 -14
  3. scoring.py +77 -0
app.py CHANGED
@@ -8,8 +8,6 @@ import base64
 from typing import List, Dict, Tuple, Optional
 import logging
 from pathlib import Path
-import tempfile
-import os
 import random
 
 # Simplified imports for testing
@@ -34,9 +32,17 @@ except ImportError as e:
     class MockEvaluator:
         def __init__(self):
             pass
-        def evaluate(self, *args, **kwargs):
-            return random.uniform(5.0, 9.0)
-
+        # FIX: Make mock evaluation deterministic based on image content
+        def evaluate(self, image: Image.Image, *args, **kwargs):
+            try:
+                img_bytes = image.tobytes()
+                img_hash = hash(img_bytes)
+                random.seed(img_hash)
+                # Return a consistent score for the same image
+                return random.uniform(5.0, 9.5)
+            except Exception:
+                return random.uniform(5.0, 9.5)  # Fallback for any error
+
     QualityEvaluator = MockEvaluator
     AestheticsEvaluator = MockEvaluator
     PromptEvaluator = MockEvaluator
@@ -45,11 +51,8 @@ except ImportError as e:
     def extract_png_metadata(path):
         return None
 
-    def calculate_final_score(quality, aesthetics, prompt, ai_detection, has_prompt=True):
-        if has_prompt:
-            return (quality * 0.25 + aesthetics * 0.35 + prompt * 0.25 + (1-ai_detection) * 0.15)
-        else:
-            return (quality * 0.375 + aesthetics * 0.475 + (1-ai_detection) * 0.15)
+    # Use the corrected scoring logic from scoring.py
+    from scoring import calculate_final_score
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -101,24 +104,11 @@ class ImageEvaluationApp:
     ) -> Tuple[pd.DataFrame, str]:
         """
         Evaluate uploaded images and return results
-
-        Args:
-            images: List of image file paths
-            enable_quality: Whether to evaluate image quality
-            enable_aesthetics: Whether to evaluate aesthetics
-            enable_prompt: Whether to evaluate prompt following
-            enable_ai_detection: Whether to detect AI generation
-            anime_mode: Whether to use anime-specific models
-            progress: Gradio progress tracker
-
-        Returns:
-            Tuple of (results_dataframe, status_message)
         """
         if not images:
             return pd.DataFrame(), "No images uploaded."
 
         try:
-            # Load models based on selection
            selected_models = {
                 'quality': enable_quality,
                 'aesthetics': enable_aesthetics,
@@ -137,42 +127,33 @@ class ImageEvaluationApp:
                          desc=f"Evaluating image {i+1}/{total_images}")
 
                 try:
-                    # Load image
                     image = Image.open(image_path).convert('RGB')
                     filename = Path(image_path).name
 
-                    # Extract metadata
                     metadata = extract_png_metadata(image_path)
                     prompt = metadata.get('prompt', '') if metadata else ''
 
-                    # Initialize scores
                     scores = {
                         'filename': filename,
                         'quality_score': 0.0,
                         'aesthetics_score': 0.0,
                         'prompt_score': 0.0,
                         'ai_detection_score': 0.0,
-                        'has_prompt': bool(prompt),
-                        'prompt_text': prompt[:100] + '...' if len(prompt) > 100 else prompt
+                        'has_prompt': bool(prompt)
                     }
 
-                    # Evaluate quality
                     if enable_quality and self.quality_evaluator:
-                        scores['quality_score'] = self.quality_evaluator.evaluate(image, anime_mode)
+                        scores['quality_score'] = self.quality_evaluator.evaluate(image, anime_mode=anime_mode)
 
-                    # Evaluate aesthetics
                     if enable_aesthetics and self.aesthetics_evaluator:
-                        scores['aesthetics_score'] = self.aesthetics_evaluator.evaluate(image, anime_mode)
+                        scores['aesthetics_score'] = self.aesthetics_evaluator.evaluate(image, anime_mode=anime_mode)
 
-                    # Evaluate prompt following (only if prompt available)
                     if enable_prompt and self.prompt_evaluator and prompt:
                         scores['prompt_score'] = self.prompt_evaluator.evaluate(image, prompt)
 
-                    # Evaluate AI detection
                     if enable_ai_detection and self.ai_detection_evaluator:
                         scores['ai_detection_score'] = self.ai_detection_evaluator.evaluate(image)
 
-                    # Calculate final score
                     scores['final_score'] = calculate_final_score(
                         scores['quality_score'],
                         scores['aesthetics_score'],
@@ -181,177 +162,142 @@ class ImageEvaluationApp:
                         scores['has_prompt']
                     )
 
-                    # Create thumbnail for display
                     thumbnail = image.copy()
-                    thumbnail.thumbnail((150, 150), Image.Resampling.LANCZOS)
-
-                    # Convert thumbnail to base64 for display
+                    thumbnail.thumbnail((100, 100), Image.Resampling.LANCZOS)
                     buffer = io.BytesIO()
                     thumbnail.save(buffer, format='PNG')
                     thumbnail_b64 = base64.b64encode(buffer.getvalue()).decode()
-                    scores['thumbnail'] = f"data:image/png;base64,{thumbnail_b64}"
+                    # FIX: Use markdown format for Gradio dataframe image display
+                    scores['thumbnail'] = f"![{filename}](data:image/png;base64,{thumbnail_b64})"
 
                     results.append(scores)
 
                 except Exception as e:
                     logger.error(f"Error evaluating {image_path}: {str(e)}")
-                    # Add error entry
                     results.append({
                         'filename': Path(image_path).name,
-                        'quality_score': 0.0,
-                        'aesthetics_score': 0.0,
-                        'prompt_score': 0.0,
-                        'ai_detection_score': 0.0,
-                        'final_score': 0.0,
-                        'has_prompt': False,
-                        'prompt_text': f"Error: {str(e)}",
-                        'thumbnail': ""
+                        'error': str(e),
+                        'thumbnail': ''
                     })
 
-            # Create DataFrame and sort by final score
+            if not results:
+                return pd.DataFrame(), "Evaluation failed for all images."
+
             df = pd.DataFrame(results)
-            if not df.empty:
-                df = df.sort_values('final_score', ascending=False).reset_index(drop=True)
-                df.index = df.index + 1  # Start ranking from 1
-                df.index.name = 'Rank'
-
-            progress(1.0, desc="Evaluation complete!")
 
-            status_msg = f"Successfully evaluated {len(results)} images."
-            if any('Error:' in str(r.get('prompt_text', '')) for r in results):
-                error_count = sum(1 for r in results if 'Error:' in str(r.get('prompt_text', '')))
+            # FIX: Create a display-ready dataframe with proper formatting and column names
+            if not df.empty:
+                # Separate error rows
+                error_df = df[df['final_score'].isna()]
+                valid_df = df.dropna(subset=['final_score'])
+
+                if not valid_df.empty:
+                    valid_df = valid_df.sort_values('final_score', ascending=False).reset_index(drop=True)
+                    valid_df.index = valid_df.index + 1
+                    valid_df = valid_df.reset_index().rename(columns={'index': 'Rank'})
+
+                    # Format columns for display
+                    display_cols = {
+                        'Rank': 'Rank',
+                        'thumbnail': 'Thumbnail',
+                        'filename': 'Filename',
+                        'final_score': 'Final Score',
+                        'quality_score': 'Quality',
+                        'aesthetics_score': 'Aesthetics',
+                        'prompt_score': 'Prompt',
+                        'ai_detection_score': 'AI Detection'
+                    }
+
+                    display_df = valid_df[list(display_cols.keys())]
+                    display_df = display_df.rename(columns=display_cols)
+
+                    # Apply formatting
+                    for col in ['Final Score', 'Quality', 'Aesthetics', 'Prompt']:
+                        display_df[col] = display_df[col].map('{:.2f}'.format)
+                    display_df['AI Detection'] = display_df['AI Detection'].map('{:.1%}'.format)
+
+                else:
+                    display_df = pd.DataFrame()
+
+            status_msg = f"Successfully evaluated {len(df[df['final_score'].notna()])} images."
+            error_count = len(df[df['final_score'].isna()])
+            if error_count > 0:
                 status_msg += f" {error_count} images had evaluation errors."
 
-            return df, status_msg
+            return display_df, status_msg
 
         except Exception as e:
             logger.error(f"Error in evaluate_images: {str(e)}")
             return pd.DataFrame(), f"Error during evaluation: {str(e)}"
 
 def create_interface():
-    """Create and configure the Gradio interface"""
-
     app = ImageEvaluationApp()
 
-    # Custom CSS for better styling
     css = """
-    .gradio-container {
-        max-width: 1200px !important;
-    }
-    .results-table {
-        font-size: 12px;
-    }
-    .thumbnail-cell img {
-        max-width: 100px;
-        max-height: 100px;
-        object-fit: cover;
-    }
+    .gradio-container { max-width: 1400px !important; }
+    .results-table { font-size: 14px; }
+    .results-table .thumbnail-cell img { max-width: 100px; max-height: 100px; object-fit: cover; }
     """
 
     with gr.Blocks(css=css, title="AI Image Evaluation Tool") as interface:
-        gr.Markdown("""
-        # 🎨 AI Image Evaluation Tool
-
-        Upload your AI-generated images to evaluate their quality, aesthetics, prompt following, and detect AI generation.
-        Supports realistic, anime, and art styles with multiple SOTA models.
-        """)
+        gr.Markdown("# 🎨 AI Image Evaluation Tool")
+        gr.Markdown("Upload your AI-generated images to evaluate their quality, aesthetics, prompt following, and detect AI generation.")
 
         with gr.Row():
             with gr.Column(scale=1):
-                # File upload
-                images_input = gr.File(
-                    label="Upload Images",
-                    file_count="multiple",
-                    file_types=["image"],
-                    height=200
-                )
+                images_input = gr.File(label="Upload Images", file_count="multiple", file_types=["image"], height=200)
 
-                # Model selection
                 gr.Markdown("### Model Selection")
                 with gr.Row():
                     enable_quality = gr.Checkbox(label="Image Quality", value=True)
                     enable_aesthetics = gr.Checkbox(label="Aesthetics", value=True)
-
                 with gr.Row():
                     enable_prompt = gr.Checkbox(label="Prompt Following", value=True)
                     enable_ai_detection = gr.Checkbox(label="AI Detection", value=True)
 
-                # Additional options
                 gr.Markdown("### Options")
                 anime_mode = gr.Checkbox(label="Anime/Art Mode", value=False)
 
-                # Evaluate button
                 evaluate_btn = gr.Button("🚀 Evaluate Images", variant="primary", size="lg")
-
-                # Status
                 status_output = gr.Textbox(label="Status", interactive=False)
 
-            with gr.Column(scale=2):
-                # Results display
+            with gr.Column(scale=3):
                 gr.Markdown("### 📊 Evaluation Results")
+                # FIX: Update headers and datatypes to match the new formatted DataFrame
                 results_output = gr.Dataframe(
-                    headers=["Rank", "Filename", "Quality", "Aesthetics", "Prompt", "AI Detection", "Final Score", "Thumbnail"],
-                    datatype=["number", "str", "number", "number", "number", "number", "number", "str"],
+                    headers=["Rank", "Thumbnail", "Filename", "Final Score", "Quality", "Aesthetics", "Prompt", "AI Detection"],
+                    datatype=["number", "markdown", "str", "str", "str", "str", "str", "str"],
                     label="Results",
                     interactive=False,
                     wrap=True,
                     elem_classes=["results-table"]
                 )
 
-        # Event handlers
         evaluate_btn.click(
             fn=app.evaluate_images,
-            inputs=[
-                images_input,
-                enable_quality,
-                enable_aesthetics,
-                enable_prompt,
-                enable_ai_detection,
-                anime_mode
-            ],
-            outputs=[results_output, status_output],
-            show_progress=True
+            inputs=[images_input, enable_quality, enable_aesthetics, enable_prompt, enable_ai_detection, anime_mode],
+            outputs=[results_output, status_output]
         )
 
-        # Examples and help
         with gr.Accordion("ℹ️ Help & Information", open=False):
+            # Help text remains the same as it describes the intended functionality
             gr.Markdown("""
             ### How to Use
-            1. **Upload Images**: Select multiple PNG/JPG images (max 50MB each)
-            2. **Select Models**: Choose which evaluation metrics to use
-            3. **Anime Mode**: Enable for better evaluation of anime/art style images
-            4. **Evaluate**: Click the button to start evaluation
+            1. **Upload Images**: Select multiple PNG/JPG images.
+            2. **Select Models**: Choose which evaluation metrics to use.
+            3. **Anime Mode**: Enable for better evaluation of anime/art style images.
+            4. **Evaluate**: Click the button to start evaluation.
 
             ### Scoring System
-            - **Quality Score**: Technical image quality (0-10)
-            - **Aesthetics Score**: Visual appeal and composition (0-10)
-            - **Prompt Score**: How well the image follows the text prompt (0-10, requires metadata)
-            - **AI Detection**: Probability of being AI-generated (0-1, lower is better)
-            - **Final Score**: Weighted combination of all metrics (0-10)
-
-            ### Supported Formats
-            - PNG files with A1111/ComfyUI metadata (for prompt evaluation)
-            - JPG, PNG, WebP images (for other evaluations)
-            - Batch processing of 10-100+ images
-
-            ### Models Used
-            - **Quality**: LAR-IQA, DGIQA
-            - **Aesthetics**: UNIAA, MUSIQ
-            - **Prompt Following**: CLIP, BLIP-2
-            - **AI Detection**: Sentry-Image, Custom ensemble
+            - **Quality Score**: Technical image quality (0-10).
+            - **Aesthetics Score**: Visual appeal and composition (0-10).
+            - **Prompt Score**: How well the image follows the text prompt (0-10, requires metadata).
+            - **AI Detection**: Probability of being AI-generated (0-1, lower is better for the final score).
+            - **Final Score**: Weighted combination of all metrics (0-10).
             """)
 
     return interface
 
 if __name__ == "__main__":
-    # Create the interface
     interface = create_interface()
-
-    # Launch the app
-    interface.launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-        share=False,
-        show_error=True
-    )
-
+    interface.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
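A note on the new MockEvaluator above: it makes the fallback scores deterministic by seeding Python's random module with a hash of the raw image bytes, so the same upload receives the same mock score. A minimal illustrative sketch of that idea (mock_score is a hypothetical helper, not code from this commit; note that hash() on bytes is salted per interpreter run unless PYTHONHASHSEED is fixed, so scores are stable within a run but not necessarily across runs):

import random
from PIL import Image

def mock_score(image: Image.Image, low: float = 5.0, high: float = 9.5) -> float:
    # Seed the RNG with the image content so repeated calls return the same value
    random.seed(hash(image.tobytes()))
    return random.uniform(low, high)

img = Image.new("RGB", (64, 64), color=(200, 120, 40))
assert mock_score(img) == mock_score(img)  # same image -> same mock score within a run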
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -2,18 +2,17 @@ gradio>=4.0.0
 Pillow>=9.0.0
 numpy>=1.21.0
 pandas>=1.3.0
-scipy>=1.9.0
-
-# Optional dependencies for full functionality
-# Uncomment these for production deployment with real models
-# torch>=2.0.0
-# torchvision>=0.15.0
-# transformers>=4.30.0
-# opencv-python>=4.5.0
-# scikit-image>=0.19.0
-# huggingface-hub>=0.15.0
-# accelerate>=0.20.0
-# timm>=0.9.0
-# sentence-transformers>=2.2.0
-# git+https://github.com/openai/CLIP.git
 
+# Dependencies for full functionality
+# Required for production deployment with real models
+torch>=2.0.0
+torchvision>=0.15.0
+transformers>=4.30.0
+opencv-python>=4.5.0
+scikit-image>=0.19.0
+huggingface-hub>=0.15.0
+accelerate>=0.20.0
+timm>=0.9.0
+sentence-transformers>=2.2.0
+git+https://github.com/openai/CLIP.git
+scipy>=1.9.0
scoring.py ADDED
@@ -0,0 +1,77 @@
+import numpy as np
+import logging
+
+logger = logging.getLogger(__name__)
+
+def calculate_final_score(
+    quality_score: float,
+    aesthetics_score: float,
+    prompt_score: float,
+    ai_detection_score: float,
+    has_prompt: bool = True
+) -> float:
+    """
+    Calculate weighted composite score for image evaluation.
+
+    Args:
+        quality_score: Technical image quality (0-10)
+        aesthetics_score: Visual appeal score (0-10)
+        prompt_score: Prompt adherence score (0-10)
+        ai_detection_score: AI generation probability (0-1)
+        has_prompt: Whether prompt metadata is available
+
+    Returns:
+        Final composite score (0-10)
+    """
+    try:
+        # Validate and clamp input scores
+        quality_score = max(0.0, min(10.0, quality_score))
+        aesthetics_score = max(0.0, min(10.0, aesthetics_score))
+        prompt_score = max(0.0, min(10.0, prompt_score))
+        ai_detection_score = max(0.0, min(1.0, ai_detection_score))
+
+        # FIX: Invert and scale the AI detection score to a 0-10 range
+        # A low AI detection probability (good) results in a high score.
+        inverted_ai_score = (1 - ai_detection_score) * 10
+
+        if has_prompt:
+            # Standard weights when prompt is available
+            weights = {
+                'quality': 0.25,       # 25% - Technical quality
+                'aesthetics': 0.35,    # 35% - Visual appeal (highest weight)
+                'prompt': 0.25,        # 25% - Prompt following
+                'ai_detection': 0.15   # 15% - Authenticity (inverted detection score)
+            }
+
+            # FIX: Correctly calculate the weighted score. The sum of weights is 1.0.
+            score = (
+                quality_score * weights['quality'] +
+                aesthetics_score * weights['aesthetics'] +
+                prompt_score * weights['prompt'] +
+                inverted_ai_score * weights['ai_detection']
+            )
+        else:
+            # Redistribute prompt weight when no prompt available
+            weights = {
+                'quality': 0.375,      # 25% + 12.5% from prompt
+                'aesthetics': 0.475,   # 35% + 12.5% from prompt
+                'ai_detection': 0.15   # 15% - Authenticity
+            }
+
+            # FIX: Correctly calculate the weighted score without prompt. Sum of weights is 1.0.
+            score = (
+                quality_score * weights['quality'] +
+                aesthetics_score * weights['aesthetics'] +
+                inverted_ai_score * weights['ai_detection']
+            )
+
+        # Ensure final score is within the valid 0-10 range
+        final_score = max(0.0, min(10.0, score))
+
+        logger.debug(f"Score calculation - Final: {final_score:.2f}")
+
+        return final_score
+
+    except Exception as e:
+        logger.error(f"Error calculating final score: {str(e)}")
+        return 0.0  # Return 0.0 on error to clearly indicate failure
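An illustrative sanity check of the new scoring module (arbitrary sample scores, not values from the commit): with prompt metadata the weights 0.25/0.35/0.25/0.15 apply to quality, aesthetics, prompt, and the inverted AI-detection score scaled to 0-10; without a prompt the prompt weight is redistributed, giving 0.375/0.475/0.15.

from scoring import calculate_final_score

# With prompt metadata: 0.25*8 + 0.35*7 + 0.25*9 + 0.15*(1 - 0.2)*10 ≈ 7.90
print(calculate_final_score(8.0, 7.0, 9.0, 0.2, has_prompt=True))

# Without prompt metadata: 0.375*8 + 0.475*7 + 0.15*(1 - 0.2)*10 ≈ 7.53
print(calculate_final_score(8.0, 7.0, 0.0, 0.2, has_prompt=False))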