VOIDER committed on
Commit 024c6f2 · verified · 1 Parent(s): e84a5b4

Update app.py

Files changed (1)
  1. app.py +435 -458
app.py CHANGED
@@ -1,541 +1,518 @@
  import os
- import io
- import tempfile
- import shutil # Kept for potential future use, but not actively used for now.

  import cv2
  import numpy as np
- import pandas as pd
  import torch
  import onnxruntime as rt
  from PIL import Image
  import gradio as gr
  from transformers import pipeline
  from huggingface_hub import hf_hub_download

- # Assuming aesthetic_predictor_v2_5.py is in the same directory or Python path.
- # If it's not available, the AestheticPredictorV25 model will fail to load.
- # For this example, a mock will be used if the real import fails.
- try:
-     from aesthetic_predictor_v2_5 import convert_v2_5_from_siglip
- except ImportError:
-     print("Warning: aesthetic_predictor_v2_5.py not found. Using a mock for AestheticPredictorV25.")
-     def convert_v2_5_from_siglip(low_cpu_mem_usage=True, trust_remote_code=True):
-         # This is a mock.
-         mock_model_output = torch.randn(1, 1) # Represents logits for a single image
-
-         class MockModel(torch.nn.Module):
-             def __init__(self):
-                 super().__init__()
-                 self.dummy_param = torch.nn.Parameter(torch.empty(0)) # To have a device property
-
-             def forward(self, pixel_values):
-                 # Return something that has .logits
-                 # Batch size from pixel_values
-                 batch_size = pixel_values.size(0)
-                 # Create a namedtuple or simple class to mimic HuggingFace output object with .logits
-                 class Output:
-                     pass
-                 output = Output()
-                 output.logits = torch.randn(batch_size, 1).to(self.dummy_param.device)
-                 return output
-
-             def to(self, device_or_dtype): # Simplified .to()
-                 if isinstance(device_or_dtype, torch.dtype):
-                     # In a real scenario, handle dtype conversion
-                     pass
-                 elif isinstance(device_or_dtype, str) or isinstance(device_or_dtype, torch.device):
-                     self.dummy_param = torch.nn.Parameter(torch.empty(0, device=device_or_dtype)) # Move dummy param to device
-                 return self

-             def cuda(self): # Mock .cuda()
-                 return self.to(torch.device('cuda'))


-         mock_model_instance = MockModel()

-         # Mock preprocessor that returns a dict with "pixel_values"
-         mock_preprocessor = lambda images, return_tensors: {"pixel_values": torch.randn(len(images) if isinstance(images, list) else 1, 3, 224, 224)}
-         return mock_model_instance, mock_preprocessor
-
- # --- Configuration ---
- DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
- DTYPE_WAIFU = torch.float32 # Specific dtype for WaifuScorer's MLP
- CACHE_DIR = None # Set to a path string to use a specific Hugging Face cache directory, e.g., "./hf_cache"
-
- # --- Model Definitions ---
-
- class MLP(torch.nn.Module):
-     """Custom MLP for WaifuScorer."""
-     def __init__(self, input_size: int, batch_norm: bool = True):
-         super().__init__()
-         self.input_size = input_size
-         self.layers = torch.nn.Sequential(
-             torch.nn.Linear(self.input_size, 2048), torch.nn.ReLU(),
-             torch.nn.BatchNorm1d(2048) if batch_norm else torch.nn.Identity(), torch.nn.Dropout(0.3),
-             torch.nn.Linear(2048, 512), torch.nn.ReLU(),
-             torch.nn.BatchNorm1d(512) if batch_norm else torch.nn.Identity(), torch.nn.Dropout(0.3),
-             torch.nn.Linear(512, 256), torch.nn.ReLU(),
-             torch.nn.BatchNorm1d(256) if batch_norm else torch.nn.Identity(), torch.nn.Dropout(0.2),
-             torch.nn.Linear(256, 128), torch.nn.ReLU(),
-             torch.nn.BatchNorm1d(128) if batch_norm else torch.nn.Identity(), torch.nn.Dropout(0.1),
-             torch.nn.Linear(128, 32), torch.nn.ReLU(),
-             torch.nn.Linear(32, 1)
          )
-     def forward(self, x: torch.Tensor) -> torch.Tensor: return self.layers(x)
-
- class BaseImageScorer:
-     """Abstract base class for image scorers."""
-     def __init__(self, model_key: str, model_display_name: str, device: str = DEVICE, verbose: bool = False):
-         self.model_key = model_key
-         self.model_display_name = model_display_name
-         self.device = device
-         self.verbose = verbose
-         self.model = None
-         self.preprocessor = None
-         self._load_model()
-
-     def _load_model(self): raise NotImplementedError
-     def predict(self, images: list[Image.Image]) -> list[float | None]: raise NotImplementedError
-
-     def __call__(self, images: list[Image.Image]) -> list[float | None]:
-         if not self.model:
-             if self.verbose: print(f"{self.model_display_name} model not loaded.")
              return [None] * len(images)
-
-         rgb_images = [img.convert("RGB") if img.mode != "RGB" else img for img in images]
-         return self.predict(rgb_images)

- class WaifuScorerModel(BaseImageScorer):
      def _load_model(self):
          try:
              import clip
-             model_hf_path = "Eugeoter/waifu-scorer-v3/model.pth" # Default path

-             repo_id, filename = os.path.split(model_hf_path)
-             actual_model_path = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=CACHE_DIR)
-             if self.verbose: print(f"Loading WaifuScorer MLP from: {actual_model_path}")
-
-             self.mlp = MLP(input_size=768) # ViT-L/14 embedding size
-             if actual_model_path.endswith(".safetensors"):
-                 from safetensors.torch import load_file
-                 state_dict = load_file(actual_model_path, device=self.device)
-             else:
-                 state_dict = torch.load(actual_model_path, map_location=self.device)
              self.mlp.load_state_dict(state_dict)
              self.mlp.to(self.device).eval()
-
-             if self.verbose: print("Loading CLIP model ViT-L/14 for WaifuScorer.")
-             self.model, self.preprocessor = clip.load("ViT-L/14", device=self.device) # self.model is CLIP model
-             self.model.eval()
-         except ImportError:
-             if self.verbose: print("CLIP library not found. WaifuScorer will not be available.")
          except Exception as e:
-             if self.verbose: print(f"Error loading WaifuScorer ({self.model_display_name}): {e}")
-
      @torch.no_grad()
-     def predict(self, images: list[Image.Image]) -> list[float | None]:
-         if not self.model or not self.mlp: return [None] * len(images)
-
-         original_n = len(images)
-         processed_images = list(images)
-         if original_n == 1: processed_images.append(images[0]) # Duplicate for single image batch

          try:
-             image_tensors = torch.cat([self.preprocessor(img).unsqueeze(0) for img in processed_images]).to(self.device)
-             image_features = self.model.encode_image(image_tensors)
-             norm = image_features.norm(p=2, dim=-1, keepdim=True)
-             norm[norm == 0] = 1e-6 # Avoid division by zero, use small epsilon
-             im_emb = (image_features / norm).to(device=self.device, dtype=DTYPE_WAIFU)

-             predictions = self.mlp(im_emb)
              scores = predictions.clamp(0, 10).cpu().numpy().flatten().tolist()
-             return scores[:original_n]
          except Exception as e:
-             if self.verbose: print(f"Error during {self.model_display_name} prediction: {e}")
-             return [None] * original_n

- class AestheticPredictorV25(BaseImageScorer):
-     def _load_model(self):
-         try:
-             if self.verbose: print(f"Loading {self.model_display_name}...")
-             self.model, self.preprocessor = convert_v2_5_from_siglip(low_cpu_mem_usage=True, trust_remote_code=True)
-             # Model's .to() method should handle dtype (e.g. bfloat16) and device.
-             self.model = self.model.to(self.device)
-             if self.device == 'cuda' and torch.cuda.is_available() and hasattr(self.model, 'to'): # some models might need explicit dtype
-                 self.model = self.model.to(torch.bfloat16)
-             self.model.eval()
-         except Exception as e:
-             if self.verbose: print(f"Error loading {self.model_display_name}: {e}")

      @torch.no_grad()
-     def predict(self, images: list[Image.Image]) -> list[float | None]:
-         if not self.model or not self.preprocessor: return [None] * len(images)
          try:
-             inputs = self.preprocessor(images=images, return_tensors="pt")
-             pixel_values = inputs["pixel_values"].to(self.model.dummy_param.device if hasattr(self.model, 'dummy_param') else self.device) # Use model's device
-             if self.device == 'cuda' and torch.cuda.is_available() and pixel_values.dtype != torch.bfloat16 : # Match dtype if model changed it
-                 pixel_values = pixel_values.to(torch.bfloat16)
-
-             output = self.model(pixel_values)
-             scores_tensor = output.logits if hasattr(output, 'logits') else output
-             scores = scores_tensor.squeeze().float().cpu().numpy()

-             scores_list = [float(np.round(np.clip(s, 0.0, 10.0), 4)) for s in np.atleast_1d(scores)]
-             return scores_list
          except Exception as e:
-             if self.verbose: print(f"Error during {self.model_display_name} prediction: {e}")
              return [None] * len(images)

- class AnimeAestheticONNX(BaseImageScorer):
-     def _load_model(self):
-         try:
-             if self.verbose: print(f"Loading {self.model_display_name} (ONNX)...")
-             model_path = hf_hub_download(repo_id="skytnt/anime-aesthetic", filename="model.onnx", cache_dir=CACHE_DIR)
-             providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if self.device == 'cuda' else ['CPUExecutionProvider']
-             valid_providers = [p for p in providers if p in rt.get_available_providers()] or ['CPUExecutionProvider']
-             self.model = rt.InferenceSession(model_path, providers=valid_providers)
-             if self.verbose: print(f"{self.model_display_name} loaded with providers: {self.model.get_providers()}")
-         except Exception as e:
-             if self.verbose: print(f"Error loading {self.model_display_name}: {e}")

-     def _preprocess_image(self, img: Image.Image) -> np.ndarray:
-         img_np = np.array(img).astype(np.float32) / 255.0
-         s = 768
-         h, w = img_np.shape[:2]
-         r = min(s/h, s/w)
-         new_h, new_w = int(h*r), int(w*r)
-
-         resized = cv2.resize(img_np, (new_w, new_h), interpolation=cv2.INTER_AREA if r < 1 else cv2.INTER_LANCZOS4)
-
-         canvas = np.zeros((s, s, 3), dtype=np.float32) # Fill with black
-         pad_h, pad_w = (s - new_h) // 2, (s - new_w) // 2
-         canvas[pad_h:pad_h+new_h, pad_w:pad_w+new_w] = resized
-         return np.transpose(canvas, (2, 0, 1))[np.newaxis, :]
-
-     def predict(self, images: list[Image.Image]) -> list[float | None]:
-         if not self.model: return [None] * len(images)
          scores = []
          for img in images:
              try:
-                 input_tensor = self._preprocess_image(img)
-                 pred = self.model.run(None, {"img": input_tensor})[0].item()
-                 scores.append(float(np.clip(pred * 10.0, 0.0, 10.0)))
              except Exception as e:
-                 if self.verbose: print(f"Error predicting with {self.model_display_name} for one image: {e}")
                  scores.append(None)
          return scores
-
- class AestheticShadowPipeline(BaseImageScorer):
-     def _load_model(self):
-         try:
-             if self.verbose: print(f"Loading {self.model_display_name} pipeline...")
-             pipeline_device = 0 if self.device == 'cuda' else -1
-             self.model = pipeline("image-classification", model="NeoChen1024/aesthetic-shadow-v2-backup", device=pipeline_device)
-         except Exception as e:
-             if self.verbose: print(f"Error loading {self.model_display_name}: {e}")
-
-     def predict(self, images: list[Image.Image]) -> list[float | None]:
-         if not self.model: return [None] * len(images)
-         scores = []
-         try:
-             pipeline_results = self.model(images, top_k=None) # Assuming pipeline handles batching
-
-             # Ensure consistent output structure from pipeline (List[List[Dict]] vs List[Dict])
-             if images and pipeline_results and not isinstance(pipeline_results[0], list):
-                 pipeline_results = [pipeline_results]
-
-             for res_set in pipeline_results:
-                 try:
-                     hq_score_dict = next(p for p in res_set if p['label'] == 'hq')
-                     scores.append(float(np.clip(hq_score_dict['score'] * 10.0, 0.0, 10.0)))
-                 except (StopIteration, TypeError, KeyError): scores.append(None)
-         except Exception as e:
-             if self.verbose: print(f"Error during {self.model_display_name} prediction: {e}")
-             return [None] * len(images) # All None if batch fails
-         return scores
-
- # --- Model Management ---
- MODEL_REGISTRY = {
-     "aesthetic_shadow": {"class": AestheticShadowPipeline, "name": "Aesthetic Shadow"},
-     "waifu_scorer": {"class": WaifuScorerModel, "name": "Waifu Scorer"},
-     "aesthetic_predictor_v2_5": {"class": AestheticPredictorV25, "name": "Aesthetic V2.5"},
-     "anime_aesthetic": {"class": AnimeAestheticONNX, "name": "Anime Score"},
- }
- LOADED_MODELS = {} # Populated at startup
-
- def initialize_models(verbose_loading=False):
-     print(f"Using device: {DEVICE}")
-     print("Initializing models...")
-     for key, config in MODEL_REGISTRY.items():
-         LOADED_MODELS[key] = config["class"](key, config['name'], device=DEVICE, verbose=verbose_loading)
-     print("Model initialization complete.")
-
- # --- Core Logic ---
- @torch.no_grad()
- def auto_tune_batch_size(images: list[Image.Image], selected_model_keys: list[str],
-                          initial_bs: int = 1, max_bs_limit: int = 64, verbose: bool = False) -> int:
-     if not images or not selected_model_keys: return initial_bs
-     if verbose: print("Auto-tuning batch size...")

-     test_image = images[0]
-     active_models = [LOADED_MODELS[key] for key in selected_model_keys if key in LOADED_MODELS and LOADED_MODELS[key].model]
-     if not active_models: return initial_bs

-     bs = initial_bs
-     optimal_bs = initial_bs
-     while bs <= len(images) and bs <= max_bs_limit:
-         try:
-             batch_test_images = [test_image] * bs
-             for model in active_models:
-                 if verbose: print(f" Testing {model.model_display_name} with batch size {bs}")
-                 model.predict(batch_test_images)
-             if DEVICE == 'cuda': torch.cuda.empty_cache()
-
-             optimal_bs = bs
-             if bs == max_bs_limit: break
-             bs = min(bs * 2, max_bs_limit) # Try next power of 2 or max_bs_limit
-         except Exception as e: # Typically OOM or other runtime errors
-             if verbose: print(f" Failed at batch size {bs} ({type(e).__name__}). Optimal so far: {optimal_bs}. Error: {str(e)[:100]}")
-             break
-     if verbose: print(f"Auto-tuned batch size: {optimal_bs}")
-     return max(1, optimal_bs)

- async def evaluate_images_core(
-     pil_images: list[Image.Image], file_names: list[str],
-     selected_model_keys: list[str], batch_size: int,
-     progress_tracker: gr.Progress
- ) -> tuple[pd.DataFrame, list[str]]:

-     logs = []
-     num_images = len(pil_images)
-     if num_images == 0: return pd.DataFrame(), ["No images to process."]
-
-     # Initialize results_data: list of dicts, one per image
-     results_data = [{'File Name': fn, 'Thumbnail': img.copy().resize((150,150)), 'Final Score': np.nan}
-                     for fn, img in zip(file_names, pil_images)]
-     for r_dict in results_data: # Initialize all model score columns to NaN
-         for cfg in MODEL_REGISTRY.values(): r_dict[cfg['name']] = np.nan
-
-     progress_tracker(0, desc="Starting evaluation...")
-     total_models_to_run = len(selected_model_keys)
-
-     for model_idx, model_key in enumerate(selected_model_keys):
-         model = LOADED_MODELS.get(model_key)
-         if not model or not model.model:
-             logs.append(f"Skipping {MODEL_REGISTRY[model_key]['name']} (not loaded).")
-             continue
-
-         model_name = model.model_display_name
-         logs.append(f"Processing with {model_name}...")

-         current_img_offset = 0
-         for batch_start_idx in range(0, num_images, batch_size):
-             # Progress: (current_model_idx + fraction_of_current_model_done) / total_models_to_run
-             model_progress_fraction = (batch_start_idx / num_images)
-             overall_progress = (model_idx + model_progress_fraction) / total_models_to_run
-             progress_tracker(overall_progress, desc=f"{model_name} (Batch {batch_start_idx//batch_size + 1})")
-
-             batch_images = pil_images[batch_start_idx : batch_start_idx + batch_size]
              try:
-                 scores = model(batch_images) # Use __call__
-                 for i, score in enumerate(scores):
-                     results_data[current_img_offset + i][model_name] = score if score is not None else np.nan
              except Exception as e:
-                 logs.append(f"Error with {model_name} on batch: {e}")
-             current_img_offset += len(batch_images)
-         logs.append(f"Finished with {model_name}.")
-
-     # Calculate Final Scores
-     for i in range(num_images):
-         img_scores = [results_data[i][MODEL_REGISTRY[mk]['name']] for mk in selected_model_keys
-                       if pd.notna(results_data[i].get(MODEL_REGISTRY[mk]['name']))]
-         if img_scores:
-             results_data[i]['Final Score'] = float(np.clip(np.mean(img_scores), 0.0, 10.0))
-
-     df = pd.DataFrame(results_data)
-     # Define column order: Thumbnail, File Name, then model scores, then Final Score
-     ordered_cols = ['Thumbnail', 'File Name'] + \
-                    [MODEL_REGISTRY[k]['name'] for k in MODEL_REGISTRY.keys() if MODEL_REGISTRY[k]['name'] in df.columns] + \
-                    ['Final Score']
-     df = df[[col for col in ordered_cols if col in df.columns]] # Ensure all columns exist

-     logs.append("Evaluation complete.")
-     progress_tracker(1.0, desc="Evaluation complete.")
-     return df, logs

- def results_df_to_csv_bytes(df: pd.DataFrame, selected_model_display_names: list[str]) -> bytes | None:
-     if df.empty: return None
-
-     cols_for_csv = ['File Name', 'Final Score'] + \
-                    [name for name in selected_model_display_names if name in df.columns and name not in cols_for_csv]

-     df_csv = df[cols_for_csv].copy()
-     for col in df_csv.select_dtypes(include=['float']).columns: # Format float scores
-         df_csv[col] = df_csv[col].apply(lambda x: f"{x:.4f}" if pd.notnull(x) else "N/A")

-     s_io = io.StringIO()
-     df_csv.to_csv(s_io, index=False)
-     return s_io.getvalue().encode('utf-8')
-
- # --- Gradio Interface ---
- def create_gradio_interface():
-     model_name_choices = [config['name'] for config in MODEL_REGISTRY.values()]
-
-     # Define column structure for DataFrame
-     initial_df_cols = ['Thumbnail', 'File Name'] + model_name_choices + ['Final Score']
-     initial_datatypes = ['image', 'str'] + ['number'] * (len(model_name_choices) + 1)
-
-     with gr.Blocks(theme=gr.themes.Glass()) as demo:
-         gr.Markdown("## ✨ Comprehensive Image Evaluation Tool ✨")

-         # For storing results DataFrame between interactions
-         results_state = gr.State(pd.DataFrame(columns=initial_df_cols))
-
          with gr.Row():
-             with gr.Column(scale=1, min_width=300):
-                 gr.Markdown("#### Controls")
-                 files_input = gr.Files(label="Upload Images", file_count="multiple", type="filepath")
-                 models_checkbox_group = gr.CheckboxGroup(choices=model_name_choices, value=model_name_choices, label="Select Models")

-                 with gr.Accordion("Batch Settings", open=False):
-                     auto_batch_toggle = gr.Checkbox(label="Auto-detect Batch Size", value=True)
-                     manual_batch_input = gr.Number(label="Manual Batch Size", value=4, minimum=1, step=1, interactive=False) # Interactive based on toggle

-                 evaluate_button = gr.Button("🚀 Evaluate Images", variant="primary")
                  with gr.Row():
-                     clear_button = gr.Button("🧹 Clear")
-                     download_button = gr.Button("💾 Download CSV")

-                 # Hidden component for file download functionality
-                 csv_file_output = gr.File(label="Download CSV File", visible=False)

-             with gr.Column(scale=3, min_width=600):
-                 gr.Markdown("#### Results")
-                 # Using gr.Slider for progress display
-                 progress_slider = gr.Slider(label="Progress", minimum=0, maximum=1, value=0, interactive=False)

-                 results_dataframe = gr.DataFrame(
-                     label="Evaluation Scores",
-                     headers=initial_df_cols,
-                     datatype=initial_datatypes,
-                     interactive=True, # Enables native sorting by clicking headers
-                     height=500,
                      wrap=True
                  )
-                 logs_textbox = gr.Textbox(label="Process Logs", lines=5, max_lines=10, interactive=False)
-
-         # --- Callbacks ---
-         def map_display_names_to_keys(display_names: list[str]) -> list[str]:
-             return [key for key, cfg in MODEL_REGISTRY.items() if cfg['name'] in display_names]
-
-         async def run_evaluation(uploaded_files, selected_model_names, auto_batch, manual_batch,
-                                  current_results_df, progress=gr.Progress(track_tqdm=True)):
-             if not uploaded_files:
-                 return {
-                     results_state: current_results_df, logs_textbox: "No files uploaded. Please upload images first.",
-                     progress_slider: gr.update(value=0, label="Progress")
-                 }
-
-             yield {logs_textbox: "Loading images...", progress_slider: gr.update(value=0.01, label="Loading images...")}

-             pil_images, file_names = [], []
-             for f_obj in uploaded_files:
-                 try:
-                     pil_images.append(Image.open(f_obj.name).convert("RGB")) # f_obj.name is path for type="filepath"
-                     file_names.append(os.path.basename(f_obj.name))
-                 except Exception as e:
-                     print(f"Error loading image {f_obj.name}: {e}") # Log to console

-             if not pil_images:
-                 return {logs_textbox: "No valid images could be loaded.", progress_slider: gr.update(value=0, label="Error")}
-
-             selected_keys = map_display_names_to_keys(selected_model_names)

-             batch_size_to_use = manual_batch
-             if auto_batch:
-                 yield {logs_textbox: "Auto-tuning batch size...", progress_slider: gr.update(value=0.1, label="Auto-tuning...")}
-                 batch_size_to_use = auto_tune_batch_size(pil_images, selected_keys, verbose=True)
-                 yield {manual_batch_input: gr.update(value=batch_size_to_use)} # Update UI with detected size

-             yield {logs_textbox: f"Starting evaluation with batch size {batch_size_to_use}...",
-                    progress_slider: gr.update(value=0.15, label=f"Evaluating (Batch: {batch_size_to_use})...")}
-
-             df_new_results, log_messages = await evaluate_images_core(
-                 pil_images, file_names, selected_keys, batch_size_to_use, progress
              )

-             # Sort by 'Final Score' descending by default before display
-             if not df_new_results.empty and 'Final Score' in df_new_results.columns:
-                 df_new_results = df_new_results.sort_values(by='Final Score', ascending=False, na_position='last')
-
-             return {
-                 results_state: df_new_results, results_dataframe: df_new_results,
-                 logs_textbox: "\n".join(log_messages),
-                 progress_slider: gr.update(value=1.0, label="Evaluation Complete")
-             }
-
-         def clear_all_outputs():
-             empty_df = pd.DataFrame(columns=initial_df_cols)
-             return {
-                 results_state: empty_df, results_dataframe: empty_df,
-                 files_input: None, logs_textbox: "Outputs cleared.",
-                 progress_slider: gr.update(value=0, label="Progress")
-             }
-
-         def download_csv_file(current_df, selected_names):
-             if current_df.empty:
-                 gr.Warning("No results available to download.")
-                 return None

-             csv_data = results_df_to_csv_bytes(current_df, selected_names)
-             if csv_data:
-                 with tempfile.NamedTemporaryFile(delete=False, suffix=".csv", mode='wb') as tmp_f:
-                     tmp_f.write(csv_data)
-                 gr.Info("CSV file prepared for download.")
-                 return tmp_f.name
-             gr.Error("Failed to generate CSV.")
-             return None

-         def update_final_scores_on_model_select(selected_model_names, current_df):
-             if current_df.empty: return current_df

-             df_updated = current_df.copy()
-             selected_keys = map_display_names_to_keys(selected_model_names)
-
-             for i, row in df_updated.iterrows():
-                 img_scores = [row[MODEL_REGISTRY[mk]['name']] for mk in selected_keys
-                               if pd.notna(row.get(MODEL_REGISTRY[mk]['name']))]
-                 if img_scores:
-                     df_updated.loc[i, 'Final Score'] = float(np.clip(np.mean(img_scores), 0.0, 10.0))
-                 else:
-                     df_updated.loc[i, 'Final Score'] = np.nan

-             if 'Final Score' in df_updated.columns: # Re-sort
-                 df_updated = df_updated.sort_values(by='Final Score', ascending=False, na_position='last')
-
-             return {results_state: df_updated, results_dataframe: df_updated}
-
-         auto_batch_toggle.change(lambda x: gr.update(interactive=not x), inputs=auto_batch_toggle, outputs=manual_batch_input)

-         evaluate_button.click(
-             fn=run_evaluation,
-             inputs=[files_input, models_checkbox_group, auto_batch_toggle, manual_batch_input, results_state],
-             outputs=[results_state, results_dataframe, logs_textbox, manual_batch_input, progress_slider]
          )
-         clear_button.click(fn=clear_all_outputs, outputs=[results_state, results_dataframe, files_input, logs_textbox, progress_slider])
-         download_button.click(fn=download_csv_file, inputs=[results_state, models_checkbox_group], outputs=csv_file_output)
-         models_checkbox_group.change(
-             fn=update_final_scores_on_model_select,
-             inputs=[models_checkbox_group, results_state],
-             outputs=[results_state, results_dataframe]
          )
-
-         # Initial load state for the DataFrame UI component
-         demo.load(lambda: pd.DataFrame(columns=initial_df_cols), outputs=[results_dataframe])

      return demo

  if __name__ == "__main__":
-     initialize_models(verbose_loading=True) # Load models once at startup
-     gradio_app = create_gradio_interface()
-     gradio_app.queue().launch(debug=False) # Enable queue for async ops, debug=True for more logs
  import os
+ import asyncio
+ from typing import List, Dict, Optional, Tuple, Any
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ import logging

  import cv2
  import numpy as np
  import torch
  import onnxruntime as rt
  from PIL import Image
  import gradio as gr
  from transformers import pipeline
  from huggingface_hub import hf_hub_download
+ import pandas as pd

+ # Configure logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)

+ # Import aesthetic predictor function
+ from aesthetic_predictor_v2_5 import convert_v2_5_from_siglip


+ @dataclass
+ class EvaluationResult:
+     """Data class for storing image evaluation results"""
+     file_name: str
+     image_path: str
+     scores: Dict[str, Optional[float]] = field(default_factory=dict)
+     final_score: Optional[float] = None
+
+     def calculate_final_score(self, selected_models: List[str]) -> None:
+         """Calculate the average score from selected models"""
+         valid_scores = [
+             score for model, score in self.scores.items()
+             if model in selected_models and score is not None
+         ]
+         self.final_score = np.mean(valid_scores) if valid_scores else None
+
+
+ class BaseModel:
+     """Base class for all evaluation models"""
+     def __init__(self, name: str):
+         self.name = name
+         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

+     async def evaluate_batch(self, images: List[Image.Image]) -> List[Optional[float]]:
+         """Evaluate a batch of images"""
+         raise NotImplementedError
+
+
+ class AestheticShadowModel(BaseModel):
+     """Aesthetic Shadow V2 model implementation"""
+     def __init__(self):
+         super().__init__("Aesthetic Shadow")
+         logger.info(f"Loading {self.name} model...")
+         self.model = pipeline(
+             "image-classification",
+             model="NeoChen1024/aesthetic-shadow-v2-backup",
+             device=0 if self.device == 'cuda' else -1
          )
+
+     async def evaluate_batch(self, images: List[Image.Image]) -> List[Optional[float]]:
+         try:
+             results = self.model(images)
+             scores = []
+             for result in results:
+                 hq_score = next((p['score'] for p in result if p['label'] == 'hq'), 0)
+                 scores.append(float(np.clip(hq_score * 10.0, 0.0, 10.0)))
+             return scores
+         except Exception as e:
+             logger.error(f"Error in {self.name}: {e}")
              return [None] * len(images)

+
+ class WaifuScorerModel(BaseModel):
+     """Waifu Scorer V3 model implementation"""
+     def __init__(self):
+         super().__init__("Waifu Scorer")
+         logger.info(f"Loading {self.name} model...")
+         self._load_model()
+
      def _load_model(self):
          try:
              import clip

+             # Load MLP model
+             self.mlp = self._create_mlp()
+             model_path = hf_hub_download("Eugeoter/waifu-scorer-v3", "model.pth")
+             state_dict = torch.load(model_path, map_location=self.device)
              self.mlp.load_state_dict(state_dict)
              self.mlp.to(self.device).eval()
+
+             # Load CLIP model
+             self.clip_model, self.preprocess = clip.load("ViT-L/14", device=self.device)
+             self.available = True
          except Exception as e:
+             logger.error(f"Failed to load {self.name}: {e}")
+             self.available = False
+
+     def _create_mlp(self) -> torch.nn.Module:
+         """Create the MLP architecture"""
+         return torch.nn.Sequential(
+             torch.nn.Linear(768, 2048),
+             torch.nn.ReLU(),
+             torch.nn.BatchNorm1d(2048),
+             torch.nn.Dropout(0.3),
+             torch.nn.Linear(2048, 512),
+             torch.nn.ReLU(),
+             torch.nn.BatchNorm1d(512),
+             torch.nn.Dropout(0.3),
+             torch.nn.Linear(512, 256),
+             torch.nn.ReLU(),
+             torch.nn.BatchNorm1d(256),
+             torch.nn.Dropout(0.2),
+             torch.nn.Linear(256, 128),
+             torch.nn.ReLU(),
+             torch.nn.BatchNorm1d(128),
+             torch.nn.Dropout(0.1),
+             torch.nn.Linear(128, 32),
+             torch.nn.ReLU(),
+             torch.nn.Linear(32, 1)
+         )
+
      @torch.no_grad()
+     async def evaluate_batch(self, images: List[Image.Image]) -> List[Optional[float]]:
+         if not self.available:
+             return [None] * len(images)

          try:
+             # Process images
+             image_tensors = torch.cat([self.preprocess(img).unsqueeze(0) for img in images])
+             image_tensors = image_tensors.to(self.device)
+
+             # Extract features and predict
+             features = self.clip_model.encode_image(image_tensors)
+             features = features / features.norm(dim=-1, keepdim=True)
+             predictions = self.mlp(features)

              scores = predictions.clamp(0, 10).cpu().numpy().flatten().tolist()
+             return scores
          except Exception as e:
+             logger.error(f"Error in {self.name}: {e}")
+             return [None] * len(images)


+ class AestheticPredictorV25Model(BaseModel):
+     """Aesthetic Predictor V2.5 model implementation"""
+     def __init__(self):
+         super().__init__("Aesthetic V2.5")
+         logger.info(f"Loading {self.name} model...")
+         self.model, self.preprocessor = convert_v2_5_from_siglip(
+             low_cpu_mem_usage=True,
+             trust_remote_code=True,
+         )
+         if self.device == 'cuda':
+             self.model = self.model.to(torch.bfloat16).cuda()
+
      @torch.no_grad()
+     async def evaluate_batch(self, images: List[Image.Image]) -> List[Optional[float]]:
          try:
+             images_rgb = [img.convert("RGB") for img in images]
+             pixel_values = self.preprocessor(images=images_rgb, return_tensors="pt").pixel_values
+
+             if self.device == 'cuda':
+                 pixel_values = pixel_values.to(torch.bfloat16).cuda()

+             scores = self.model(pixel_values).logits.squeeze().float().cpu().numpy()
+             if scores.ndim == 0:
+                 scores = np.array([scores])
+
+             return [float(np.clip(s, 0.0, 10.0)) for s in scores]
          except Exception as e:
+             logger.error(f"Error in {self.name}: {e}")
              return [None] * len(images)


+ class AnimeAestheticModel(BaseModel):
+     """Anime Aesthetic model implementation"""
+     def __init__(self):
+         super().__init__("Anime Score")
+         logger.info(f"Loading {self.name} model...")
+         model_path = hf_hub_download(repo_id="skytnt/anime-aesthetic", filename="model.onnx")
+         self.session = rt.InferenceSession(model_path, providers=['CPUExecutionProvider'])
+
+     async def evaluate_batch(self, images: List[Image.Image]) -> List[Optional[float]]:
          scores = []
          for img in images:
              try:
+                 score = self._process_single_image(img)
+                 scores.append(float(np.clip(score * 10.0, 0.0, 10.0)))
              except Exception as e:
+                 logger.error(f"Error in {self.name} for single image: {e}")
                  scores.append(None)
          return scores

+     def _process_single_image(self, img: Image.Image) -> float:
+         """Process a single image through the model"""
+         img_np = np.array(img).astype(np.float32) / 255.0
+         size = 768
+         h, w = img_np.shape[:2]
+
+         # Calculate new dimensions
+         if h > w:
+             new_h, new_w = size, int(size * w / h)
+         else:
+             new_h, new_w = int(size * h / w), size
+
+         # Resize and center
+         resized = cv2.resize(img_np, (new_w, new_h))
+         canvas = np.zeros((size, size, 3), dtype=np.float32)
+         pad_h = (size - new_h) // 2
+         pad_w = (size - new_w) // 2
+         canvas[pad_h:pad_h+new_h, pad_w:pad_w+new_w] = resized
+
+         # Prepare input
+         input_tensor = np.transpose(canvas, (2, 0, 1))[np.newaxis, :]
+         return self.session.run(None, {"img": input_tensor})[0].item()


+ class ImageEvaluator:
+     """Main class for managing image evaluation"""
+     def __init__(self):
+         self.models: Dict[str, BaseModel] = {}
+         self._initialize_models()
+         self.results: List[EvaluationResult] = []
+
+     def _initialize_models(self):
+         """Initialize all evaluation models"""
+         model_classes = [
+             ("aesthetic_shadow", AestheticShadowModel),
+             ("waifu_scorer", WaifuScorerModel),
+             ("aesthetic_predictor_v2_5", AestheticPredictorV25Model),
+             ("anime_aesthetic", AnimeAestheticModel),
+         ]
+
+         for key, model_class in model_classes:
+             try:
+                 self.models[key] = model_class()
+                 logger.info(f"Successfully loaded {key}")
+             except Exception as e:
+                 logger.error(f"Failed to load {key}: {e}")

+     async def evaluate_images(
+         self,
+         file_paths: List[str],
+         selected_models: List[str],
+         batch_size: int = 8,
+         progress_callback = None
+     ) -> Tuple[List[EvaluationResult], List[str]]:
+         """Evaluate images with selected models"""
+         logs = []
+         results = []

+         # Load images
+         images = []
+         valid_paths = []
+         for path in file_paths:
              try:
+                 img = Image.open(path).convert("RGB")
+                 images.append(img)
+                 valid_paths.append(path)
              except Exception as e:
+                 logs.append(f"Failed to load {Path(path).name}: {e}")
+
+         if not images:
+             logs.append("No valid images to process")
+             return results, logs
+
+         logs.append(f"Loaded {len(images)} images")
+
+         # Process in batches
+         total_batches = (len(images) + batch_size - 1) // batch_size
+
+         for batch_idx in range(0, len(images), batch_size):
+             batch_images = images[batch_idx:batch_idx + batch_size]
+             batch_paths = valid_paths[batch_idx:batch_idx + batch_size]
+
+             # Evaluate with each selected model
+             batch_results = {}
+             for model_key in selected_models:
+                 if model_key in self.models:
+                     scores = await self.models[model_key].evaluate_batch(batch_images)
+                     batch_results[model_key] = scores
+                     logs.append(f"Processed batch {batch_idx//batch_size + 1}/{total_batches} with {self.models[model_key].name}")
+
+             # Create results
+             for i, (path, img) in enumerate(zip(batch_paths, batch_images)):
+                 result = EvaluationResult(
+                     file_name=Path(path).name,
+                     image_path=path
+                 )
+
+                 for model_key in selected_models:
+                     if model_key in batch_results:
+                         result.scores[model_key] = batch_results[model_key][i]
+
+                 result.calculate_final_score(selected_models)
+                 results.append(result)
+
+             # Update progress
+             if progress_callback:
+                 progress = (batch_idx + batch_size) / len(images) * 100
+                 progress_callback(min(progress, 100))
+
+         self.results = results
+         return results, logs
+
+     def get_results_dataframe(self, selected_models: List[str]) -> pd.DataFrame:
+         """Convert results to pandas DataFrame"""
+         if not self.results:
+             return pd.DataFrame()
+
+         data = []
+         for result in self.results:
+             row = {
+                 'File Name': result.file_name,
+                 'Image': result.image_path,
+             }
+
+             # Add model scores
+             for model_key in selected_models:
+                 if model_key in self.models:
+                     score = result.scores.get(model_key)
+                     row[self.models[model_key].name] = f"{score:.4f}" if score is not None else "N/A"
+
+             row['Final Score'] = f"{result.final_score:.4f}" if result.final_score is not None else "N/A"
+             data.append(row)
+
+         return pd.DataFrame(data)


+ def create_interface():
+     """Create the Gradio interface"""
+     evaluator = ImageEvaluator()

+     # Model options for checkbox
+     model_options = [
+         ("Aesthetic Shadow", "aesthetic_shadow"),
+         ("Waifu Scorer", "waifu_scorer"),
+         ("Aesthetic V2.5", "aesthetic_predictor_v2_5"),
+         ("Anime Score", "anime_aesthetic")
+     ]

+     with gr.Blocks(theme=gr.themes.Soft(), title="Image Evaluation Tool") as demo:
+         gr.Markdown("""
+         # 🎨 Advanced Image Evaluation Tool
+
+         Evaluate images using state-of-the-art aesthetic and quality prediction models.
+         Upload your images and select the models you want to use for evaluation.
+         """)

          with gr.Row():
+             with gr.Column(scale=1):
+                 input_files = gr.File(
+                     label="Upload Images",
+                     file_count="multiple",
+                     file_types=["image"]
+                 )

+                 model_checkboxes = gr.CheckboxGroup(
+                     choices=[label for label, _ in model_options],
+                     value=[label for label, _ in model_options],
+                     label="Select Models",
+                     info="Choose which models to use for evaluation"
+                 )

                  with gr.Row():
+                     batch_size = gr.Slider(
+                         minimum=1,
+                         maximum=64,
+                         value=8,
+                         step=1,
+                         label="Batch Size",
+                         info="Number of images to process at once"
+                     )

+                 with gr.Row():
+                     evaluate_btn = gr.Button("🚀 Evaluate Images", variant="primary", scale=2)
+                     clear_btn = gr.Button("🗑️ Clear", variant="secondary", scale=1)

+             with gr.Column(scale=2):
+                 progress = gr.Progress()
+                 logs = gr.Textbox(
+                     label="Processing Logs",
+                     lines=10,
+                     max_lines=10,
+                     autoscroll=True
+                 )

+                 results_df = gr.Dataframe(
+                     label="Evaluation Results",
+                     interactive=False,
                      wrap=True
                  )
+
+                 download_btn = gr.Button("📥 Download Results (CSV)", variant="secondary")
+                 download_file = gr.File(visible=False)
+
+         # State for storing results
+         results_state = gr.State([])
+
+         async def process_images(files, selected_model_labels, batch_size, progress=gr.Progress()):
+             """Process uploaded images"""
+             if not files:
+                 return "Please upload images first", pd.DataFrame(), []

+             # Convert labels to keys
+             selected_models = [key for label, key in model_options if label in selected_model_labels]

+             # Get file paths
+             file_paths = [f.name for f in files]

+             # Progress callback
+             def update_progress(value):
+                 progress(value / 100, desc=f"Processing images... {value:.0f}%")

+             # Evaluate images
+             results, logs = await evaluator.evaluate_images(
+                 file_paths,
+                 selected_models,
+                 batch_size,
+                 update_progress
              )

+             # Create DataFrame
+             df = evaluator.get_results_dataframe(selected_models)

+             # Format logs
+             log_text = "\n".join(logs[-10:]) # Show last 10 logs
+
+             return log_text, df, results

+         def update_results_on_model_change(selected_model_labels, results):
+             """Update results when model selection changes"""
+             if not results:
+                 return pd.DataFrame()

+             # Convert labels to keys
+             selected_models = [key for label, key in model_options if label in selected_model_labels]

+             # Recalculate final scores
+             for result in results:
+                 result.calculate_final_score(selected_models)
+
+             # Update evaluator results
+             evaluator.results = results
+
+             # Create updated DataFrame
+             return evaluator.get_results_dataframe(selected_models)
+
+         def clear_interface():
+             """Clear all results"""
+             return "", pd.DataFrame(), [], None
+
+         def prepare_download(selected_model_labels, results):
+             """Prepare CSV file for download"""
+             if not results:
+                 return None
+
+             # Convert labels to keys
+             selected_models = [key for label, key in model_options if label in selected_model_labels]
+
+             # Get DataFrame
+             df = evaluator.get_results_dataframe(selected_models)
+
+             # Save to temporary file
+             import tempfile
+             with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as f:
+                 df.to_csv(f, index=False)
+                 return f.name
+
+         # Event handlers
+         evaluate_btn.click(
+             fn=process_images,
+             inputs=[input_files, model_checkboxes, batch_size],
+             outputs=[logs, results_df, results_state]
+         )

+         model_checkboxes.change(
+             fn=update_results_on_model_change,
+             inputs=[model_checkboxes, results_state],
+             outputs=[results_df]
          )
+
+         clear_btn.click(
+             fn=clear_interface,
+             outputs=[logs, results_df, results_state, download_file]
          )
+
+         download_btn.click(
+             fn=prepare_download,
+             inputs=[model_checkboxes, results_state],
+             outputs=[download_file]
+         )
+
+         gr.Markdown("""
+         ### 📝 Notes
+         - **Model Selection**: Choose which models to use for evaluation. Final score is the average of selected models.
+         - **Batch Size**: Adjust based on your GPU memory. Larger batches process faster.
+         - **Results Table**: Click column headers to sort. The table updates automatically when models are selected/deselected.
+         - **Download**: Export results as CSV for further analysis.
+
+         ### 🎯 Score Interpretation
+         - **7-10**: High quality/aesthetic appeal
+         - **5-7**: Medium quality
+         - **0-5**: Lower quality
+         """)
+
      return demo

+
  if __name__ == "__main__":
+     # Create and launch the interface
+     demo = create_interface()
+     demo.queue().launch()
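
A quick way to sanity-check the refactored module outside the Gradio UI is to drive ImageEvaluator directly. The sketch below is illustrative only and not part of the commit: it assumes the new file is saved as app.py and importable, that the model weights can still be downloaded from the Hub, and that test1.jpg/test2.jpg are hypothetical local images.

import asyncio

from app import ImageEvaluator  # assumes the committed file is saved as app.py

async def main():
    evaluator = ImageEvaluator()  # loads all four models; load failures are logged and skipped
    # "anime_aesthetic" is the ONNX, CPU-only scorer, so it is the cheapest to smoke-test
    results, logs = await evaluator.evaluate_images(
        file_paths=["test1.jpg", "test2.jpg"],  # hypothetical local images
        selected_models=["anime_aesthetic"],
        batch_size=2,
    )
    print("\n".join(logs))
    # Same table the UI renders: per-model scores plus the averaged Final Score
    print(evaluator.get_results_dataframe(["anime_aesthetic"]))

asyncio.run(main())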