davidberenstein1957 committed
Commit 85288ef · 1 parent: b3af0e1

Enhance SSIM and overall quality plots with dynamic y-axis scaling to emphasize differences. Add area fills for better visualization and update hover templates for improved data presentation. Introduce quality variation indicators in overall quality assessment. Refactor layout for improved user experience and add comprehensive usage guide with metric explanations.
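
In rough terms, the dynamic y-axis scaling zooms in whenever a metric barely varies across frames, so small differences stay visible. A minimal sketch distilled from the diff below; `dynamic_y_range` is an illustrative helper, while the committed code inlines this logic in each plot (thresholds 0.05/0.02 for SSIM, 0.08/0.04 for the overall-quality score):

```python
def dynamic_y_range(values, zoom_threshold=0.05, min_padding=0.02):
    """Pick a y-axis range that emphasizes differences in a 0-1 metric series."""
    lo, hi = min(values), max(values)
    spread = hi - lo
    if spread < zoom_threshold:
        # Very little variation: zoom in around the center so tiny differences
        # show, padding by at least `min_padding` (or 2x the actual spread).
        center = (lo + hi) / 2
        padding = max(min_padding, spread * 2)
        return max(0.0, center - padding), min(1.0, center + padding)
    # Larger variation: show the full curve with 15% padding.
    padding = spread * 0.15
    return max(0.0, lo - padding), min(1.0, hi + padding)
```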

Files changed (1)
  1. app.py +447 -115
app.py CHANGED
@@ -364,7 +364,42 @@ class FrameMetrics:
364
  # 1. SSIM Plot
365
  ssim_frames, ssim_values = get_valid_data("ssim")
366
  if ssim_values:
367
  fig_ssim = go.Figure()
368
  fig_ssim.add_trace(
369
  go.Scatter(
370
  x=ssim_frames,
@@ -372,8 +407,12 @@ class FrameMetrics:
372
  mode="lines+markers",
373
  name="SSIM",
374
  line=dict(color="blue", width=3),
375
- marker=dict(size=6),
376
- hovertemplate="<b>Frame %{x}</b><br>SSIM: %{y:.4f}<extra></extra>",
377
  )
378
  )
379
 
@@ -391,9 +430,18 @@ class FrameMetrics:
391
  plot_bgcolor="rgba(0,0,0,0)",
392
  paper_bgcolor="rgba(0,0,0,0)",
393
  showlegend=False,
394
  )
395
- fig_ssim.update_xaxes(title_text="Frame")
396
- fig_ssim.update_yaxes(title_text="SSIM", range=[0, 1.05])
397
  plots["ssim"] = fig_ssim
398
 
399
  # 2. PSNR Plot
@@ -426,9 +474,17 @@ class FrameMetrics:
426
  plot_bgcolor="rgba(0,0,0,0)",
427
  paper_bgcolor="rgba(0,0,0,0)",
428
  showlegend=False,
429
  )
430
- fig_psnr.update_xaxes(title_text="Frame")
431
- fig_psnr.update_yaxes(title_text="PSNR (dB)")
432
  plots["psnr"] = fig_psnr
433
 
434
  # 3. MSE Plot
@@ -461,9 +517,15 @@ class FrameMetrics:
461
  plot_bgcolor="rgba(0,0,0,0)",
462
  paper_bgcolor="rgba(0,0,0,0)",
463
  showlegend=False,
464
  )
465
- fig_mse.update_xaxes(title_text="Frame")
466
- fig_mse.update_yaxes(title_text="MSE")
467
  plots["mse"] = fig_mse
468
 
469
  # 4. pHash Plot
@@ -496,9 +558,17 @@ class FrameMetrics:
496
  plot_bgcolor="rgba(0,0,0,0)",
497
  paper_bgcolor="rgba(0,0,0,0)",
498
  showlegend=False,
499
  )
500
- fig_phash.update_xaxes(title_text="Frame")
501
- fig_phash.update_yaxes(title_text="pHash Similarity")
502
  plots["phash"] = fig_phash
503
 
504
  # 5. Color Histogram Correlation Plot
@@ -531,9 +601,17 @@ class FrameMetrics:
531
  plot_bgcolor="rgba(0,0,0,0)",
532
  paper_bgcolor="rgba(0,0,0,0)",
533
  showlegend=False,
534
  )
535
- fig_hist.update_xaxes(title_text="Frame")
536
- fig_hist.update_yaxes(title_text="Color Histogram Correlation")
537
  plots["color_hist"] = fig_hist
538
 
539
  # 6. Sharpness Comparison Plot
@@ -586,9 +664,17 @@ class FrameMetrics:
586
  legend=dict(
587
  orientation="h", yanchor="bottom", y=1.02, xanchor="center", x=0.5
588
  ),
589
  )
590
- fig_sharp.update_xaxes(title_text="Frame")
591
- fig_sharp.update_yaxes(title_text="Sharpness")
592
  plots["sharpness"] = fig_sharp
593
 
594
  # 7. Overall Quality Score Plot (Combination of metrics)
@@ -645,7 +731,42 @@ class FrameMetrics:
645
  ]
646
  overall_quality.append(sum(frame_scores) / len(frame_scores))
647
 
648
  fig_overall = go.Figure()
649
  fig_overall.add_trace(
650
  go.Scatter(
651
  x=common_frames,
@@ -653,14 +774,41 @@ class FrameMetrics:
653
  mode="lines+markers",
654
  name="Overall Quality",
655
  line=dict(color="gold", width=4),
656
- marker=dict(size=8),
657
- hovertemplate="<b>Frame %{x}</b><br>Overall Quality: %{y:.3f}<br><i>Combined from: "
658
  + ", ".join(component_names)
659
  + "</i><extra></extra>",
660
  fill="tonexty",
661
  )
662
  )
663
 
664
  if current_frame is not None:
665
  fig_overall.add_vline(
666
  x=current_frame,
@@ -675,10 +823,19 @@ class FrameMetrics:
675
  plot_bgcolor="rgba(0,0,0,0)",
676
  paper_bgcolor="rgba(0,0,0,0)",
677
  showlegend=False,
678
  )
679
- fig_overall.update_xaxes(title_text="Frame")
680
  fig_overall.update_yaxes(
681
- title_text="Overall Quality Score", range=[0, 1.05]
682
  )
683
  plots["overall"] = fig_overall
684
 
@@ -1091,15 +1248,65 @@ class VideoFrameComparator:
1091
  # Add overall assessment with formula explanation
1092
  if avg_quality >= 0.9:
1093
  overall = "✨ Excellent Overall"
1094
  elif avg_quality >= 0.8:
1095
  overall = "✅ Good Overall"
1096
  elif avg_quality >= 0.6:
1097
  overall = "⚠️ Fair Overall"
1098
  else:
1099
  overall = "❌ Poor Overall"
1100
-
1101
- info += f"\n🎯 Overall Quality: {avg_quality:.3f} ({overall})"
1102
- info += f"\n Formula: Average of {' + '.join(metric_contributions)} = {avg_quality:.3f}"
1103
 
1104
  return info
1105
 
@@ -1134,12 +1341,20 @@ def load_examples_from_json(json_file_path="data.json"):
1134
  # OpenCV will handle the validation during actual loading
1135
  valid_videos.append(video_path)
1136
  print(f"Added video URL: {video_path}")
1137
- elif os.path.exists(video_path):
1138
- # For local files, check existence
1139
- valid_videos.append(video_path)
1140
- print(f"Added local video file: {video_path}")
1141
  else:
1142
- print(f"Warning: Local video file not found: {video_path}")
1143
 
1144
  # Add to examples if we have valid videos
1145
  if len(valid_videos) == 2:
@@ -1198,15 +1413,107 @@ def get_all_videos_from_json(json_file_path="data.json"):
1198
  def create_app():
1199
  comparator = VideoFrameComparator()
1200
  example_pairs = load_examples_from_json()
1201
  all_videos = get_all_videos_from_json()
1202
 
1203
  with gr.Blocks(
1204
  title="Frame Arena - Video Frame Comparator",
1205
  # theme=gr.themes.Soft(),
1206
  ) as app:
1207
  gr.Markdown("""
1208
  # 🎬 Frame Arena: Frame by frame comparisons of any videos
1209
 
  - Upload videos in common formats with the same number of frames (MP4, AVI, MOV, etc.) or use URLs
1211
  - **7 Quality Metrics**: SSIM, PSNR, MSE, pHash, Color Histogram, Sharpness + Overall Quality
1212
  - **Individual Visualization**: Each metric gets its own dedicated plot
@@ -1249,14 +1556,15 @@ def create_app():
1249
  type="filepath",
1250
  )
1251
 
1252
- # Add examples if available (this auto-populates inputs when clicked)
1253
  if example_pairs:
1254
  gr.Markdown("### 📁 Example Video Comparisons")
1255
- gr.Examples(
1256
  examples=example_pairs,
1257
  inputs=[video1_input, video2_input],
1258
  label="Click any example to load video pairs:",
1259
  examples_per_page=10,
 
  )
1261
 
1262
  load_btn = gr.Button("🔄 Load Videos", variant="primary", size="lg")
@@ -1285,21 +1593,14 @@ def create_app():
1285
  step=1,
1286
  value=0,
1287
  label="Frame Number",
1288
- interactive=False,
1289
  )
1290
 
1291
  # Comprehensive metrics visualization (initially hidden)
1292
  metrics_section = gr.Row(visible=False)
1293
  with metrics_section:
1294
  with gr.Column():
1295
- # Frame info moved above the plot
1296
- frame_info = gr.Textbox(
1297
- label="Frame Information & Metrics",
1298
- interactive=False,
1299
- value="",
1300
- lines=3,
1301
- )
1302
- gr.Markdown("### 📊 Individual Metric Analysis")
1303
 
1304
  # Overall quality plot
1305
  with gr.Row():
@@ -1308,6 +1609,14 @@ def create_app():
1308
  show_label=True,
1309
  )
1310
 
 
1311
  # Individual metric plots
1312
  with gr.Row():
1313
  ssim_plot = gr.Plot(label="SSIM", show_label=True)
@@ -1321,14 +1630,81 @@ def create_app():
1321
  color_plot = gr.Plot(label="Color Histogram", show_label=True)
1322
  sharpness_plot = gr.Plot(label="Sharpness", show_label=True)
1323
 
1324
- # Status and frame info (moved below plots, initially hidden)
1325
- info_section = gr.Row(visible=False)
1326
- with info_section:
1327
- with gr.Column():
1328
- status_output = gr.Textbox(label="Status", interactive=False, lines=8)
1329
 
1330
  # Event handlers
1331
  def load_videos_handler(video1, video2):
1332
  status, max_frames, frame1, frame2, info, plots = comparator.load_videos(
1333
  video1, video2
1334
  )
@@ -1369,7 +1745,7 @@ def create_app():
1369
  overall_fig, # overall_plot
1370
  gr.Row(visible=videos_loaded), # frame_controls
1371
  gr.Row(visible=videos_loaded), # frame_display
1372
- gr.Row(visible=videos_loaded), # metrics_section
1373
  gr.Row(visible=videos_loaded), # info_section
1374
  )
1375
 
@@ -1385,7 +1761,6 @@ def create_app():
1385
  None,
1386
  None,
1387
  None,
1388
- None,
1389
  )
1390
 
1391
  frame1, frame2 = comparator.get_frames_at_index(frame_index)
@@ -1416,10 +1791,15 @@ def create_app():
1416
 
1417
  # Auto-load when examples populate the inputs
1418
  def auto_load_when_examples_change(video1, video2):
1419
  # Only auto-load if both inputs are provided (from examples)
1420
  if video1 and video2:
1421
  return load_videos_handler(video1, video2)
1422
  # If only one or no videos, return default empty state
1423
  return (
1424
  "Please upload videos or select an example", # status_output
1425
  gr.Slider(
@@ -1427,7 +1807,7 @@ def create_app():
1427
  ), # frame_slider
1428
  None, # frame1_output
1429
  None, # frame2_output
1430
- "", # frame_info (now in metrics_section)
1431
  None, # ssim_plot
1432
  None, # psnr_plot
1433
  None, # mse_plot
@@ -1441,9 +1821,14 @@ def create_app():
1441
  gr.Row(visible=False), # info_section
1442
  )
1443
 
1444
- # Connect events
1445
- load_btn.click(
1446
- fn=load_videos_handler,
1447
  inputs=[video1_input, video2_input],
1448
  outputs=[
1449
  status_output,
@@ -1465,9 +1850,8 @@ def create_app():
1465
  ],
1466
  )
1467
 
1468
- # Auto-load when both video inputs change (triggered by examples)
1469
- video1_input.change(
1470
- fn=auto_load_when_examples_change,
1471
  inputs=[video1_input, video2_input],
1472
  outputs=[
1473
  status_output,
@@ -1489,8 +1873,13 @@ def create_app():
1489
  ],
1490
  )
1491
 
1492
- video2_input.change(
1493
- fn=auto_load_when_examples_change,
1494
  inputs=[video1_input, video2_input],
1495
  outputs=[
1496
  status_output,
@@ -1529,73 +1918,16 @@ def create_app():
1529
  ],
1530
  )
1531
 
1532
- # Add comprehensive usage guide
1533
- with gr.Accordion("📖 Usage Guide & Metrics Reference", open=False):
1534
- with gr.Accordion("📊 Metrics Explained", open=False):
1535
- gr.Markdown("""
1536
- - **SSIM**: Structural Similarity (1.0 = identical structure, 0.0 = completely different)
1537
- - **PSNR**: Peak Signal-to-Noise Ratio in dB (higher = better quality, less noise)
1538
- - **MSE**: Mean Squared Error (lower = more similar pixel values)
1539
- - **pHash**: Perceptual Hash similarity (1.0 = visually identical)
1540
- - **Color Histogram**: Color distribution correlation (1.0 = identical color patterns)
1541
- - **Sharpness**: Laplacian variance per video (higher = sharper/more detailed images)
1542
- - **Overall Quality**: Combined metric averaging SSIM, normalized PSNR, and pHash (when available)
1543
- """)
1544
-
1545
- with gr.Accordion(
1546
- "🎯 Quality Assessment Scale (Research-Based Thresholds)", open=False
1547
- ):
1548
- gr.Markdown("""
1549
- **SSIM Scale** (based on human perception studies):
1550
- - 🟢 **Excellent (≥0.9)**: Visually indistinguishable differences
1551
- - 🔵 **Good (≥0.8)**: Minor visible differences, still high quality
1552
- - 🟡 **Fair (≥0.6)**: Noticeable differences, acceptable quality
1553
- - 🔴 **Poor (<0.6)**: Significant visible artifacts and differences
1554
-
1555
- **PSNR Scale** (standard video quality benchmarks):
1556
- - 🟢 **Excellent (≥40dB)**: Professional broadcast quality
1557
- - 🔵 **Good (≥30dB)**: High consumer video quality
1558
- - 🟡 **Fair (≥20dB)**: Acceptable for web streaming
1559
- - 🔴 **Poor (<20dB)**: Low quality with visible compression artifacts
1560
-
1561
- **MSE Scale** (pixel difference thresholds):
1562
- - 🟢 **Very Similar (≤50)**: Minimal pixel-level differences
1563
- - 🔵 **Similar (≤100)**: Small differences, good quality preservation
1564
- - 🟡 **Moderately Different (≤200)**: Noticeable but acceptable differences
1565
- - 🔴 **Very Different (>200)**: Significant pixel-level changes
1566
- """)
1567
-
1568
- with gr.Accordion("🔍 Understanding Comparisons", open=False):
1569
- gr.Markdown("""
1570
- **Comparison Analysis**: Shows how similar/different the videos are
1571
- - Most metrics indicate **similarity** - not which video "wins"
1572
- - Higher SSIM/PSNR/pHash/Color = more similar videos
1573
- - Lower MSE = more similar videos
1574
-
1575
- **Individual Quality**: Shows the quality of each video separately
1576
- - Sharpness comparison shows which video has more detail
1577
- - Significance levels: 🔴 MAJOR (>20%), 🟡 MODERATE (10-20%), 🔵 MINOR (5-10%), 🟢 NEGLIGIBLE (<5%)
1578
-
1579
- **Overall Quality**: Combines multiple metrics to provide a single similarity score
1580
- - **Formula**: Average of [SSIM + normalized_PSNR + pHash]
1581
- - **PSNR Normalization**: PSNR values are divided by 50dB and capped at 1.0
1582
- - **Range**: 0.0 to 1.0 (higher = more similar videos overall)
1583
- - **Purpose**: Provides a single metric when you need one overall assessment
1584
- - **Limitation**: Different metrics may disagree; check individual metrics for details
1585
- """)
1586
-
1587
- with gr.Accordion("📁 Configuration", open=False):
1588
- gr.Markdown(
1589
- f"{'Loaded ' + str(len(example_pairs)) + ' example comparisons from data.json' if example_pairs else 'No examples found in data.json'}<br>"
1590
- f"{'Available videos: ' + str(len(all_videos)) + ' files' if all_videos else ''}"
1591
- )
1592
-
1593
  return app
1594
 
1595
 
1596
  def main():
1597
  app = create_app()
1598
- app.launch(server_name="0.0.0.0", server_port=7860, share=False, debug=True)
 
1600
 
1601
  if __name__ == "__main__":
 
364
  # 1. SSIM Plot
365
  ssim_frames, ssim_values = get_valid_data("ssim")
366
  if ssim_values:
367
+ # Calculate dynamic y-axis range for SSIM to highlight differences
368
+ min_ssim = min(ssim_values)
369
+ max_ssim = max(ssim_values)
370
+ ssim_range = max_ssim - min_ssim
371
+
372
+ # If there's very little variation, zoom in to show differences
373
+ if ssim_range < 0.05:
374
+ # For small variations, zoom in to show differences better
375
+ center = (min_ssim + max_ssim) / 2
376
+ padding = max(
377
+ 0.02, ssim_range * 2
378
+ ) # At least 0.02 range or 2x actual range
379
+ y_min = max(0, center - padding)
380
+ y_max = min(1, center + padding)
381
+ else:
382
+ # For larger variations, add some padding
383
+ padding = ssim_range * 0.15 # 15% padding
384
+ y_min = max(0, min_ssim - padding)
385
+ y_max = min(1, max_ssim + padding)
386
+
387
  fig_ssim = go.Figure()
388
+
389
+ # Add area fill to emphasize the curve
390
+ fig_ssim.add_trace(
391
+ go.Scatter(
392
+ x=ssim_frames,
393
+ y=[y_min] * len(ssim_frames),
394
+ mode="lines",
395
+ line=dict(
396
+ color="rgba(0,0,255,0)"
397
+ ), # Transparent line for area base
398
+ showlegend=False,
399
+ hoverinfo="skip",
400
+ )
401
+ )
402
+
403
  fig_ssim.add_trace(
404
  go.Scatter(
405
  x=ssim_frames,
 
407
  mode="lines+markers",
408
  name="SSIM",
409
  line=dict(color="blue", width=3),
410
+ marker=dict(
411
+ size=6, color="blue", line=dict(color="darkblue", width=1)
412
+ ),
413
+ hovertemplate="<b>Frame %{x}</b><br>SSIM: %{y:.5f}<extra></extra>",
414
+ fill="tonexty",
415
+ fillcolor="rgba(0,0,255,0.1)", # Light blue fill
416
  )
417
  )
418
 
 
430
  plot_bgcolor="rgba(0,0,0,0)",
431
  paper_bgcolor="rgba(0,0,0,0)",
432
  showlegend=False,
433
+ dragmode=False,
434
+ hovermode="x unified",
435
+ )
436
+ fig_ssim.update_xaxes(
437
+ title_text="Frame", gridcolor="rgba(128,128,128,0.4)", fixedrange=True
438
+ )
439
+ fig_ssim.update_yaxes(
440
+ title_text="SSIM",
441
+ range=[y_min, y_max],
442
+ gridcolor="rgba(128,128,128,0.4)",
443
+ fixedrange=True,
444
  )
 
 
445
  plots["ssim"] = fig_ssim
446
 
447
  # 2. PSNR Plot
 
474
  plot_bgcolor="rgba(0,0,0,0)",
475
  paper_bgcolor="rgba(0,0,0,0)",
476
  showlegend=False,
477
+ dragmode=False,
478
+ hovermode="x unified",
479
+ )
480
+ fig_psnr.update_xaxes(
481
+ title_text="Frame", gridcolor="rgba(128,128,128,0.4)", fixedrange=True
482
+ )
483
+ fig_psnr.update_yaxes(
484
+ title_text="PSNR (dB)",
485
+ gridcolor="rgba(128,128,128,0.4)",
486
+ fixedrange=True,
487
  )
 
 
488
  plots["psnr"] = fig_psnr
489
 
490
  # 3. MSE Plot
 
517
  plot_bgcolor="rgba(0,0,0,0)",
518
  paper_bgcolor="rgba(0,0,0,0)",
519
  showlegend=False,
520
+ dragmode=False,
521
+ hovermode="x unified",
522
+ )
523
+ fig_mse.update_xaxes(
524
+ title_text="Frame", gridcolor="rgba(128,128,128,0.4)", fixedrange=True
525
+ )
526
+ fig_mse.update_yaxes(
527
+ title_text="MSE", gridcolor="rgba(128,128,128,0.4)", fixedrange=True
528
  )
 
 
529
  plots["mse"] = fig_mse
530
 
531
  # 4. pHash Plot
 
558
  plot_bgcolor="rgba(0,0,0,0)",
559
  paper_bgcolor="rgba(0,0,0,0)",
560
  showlegend=False,
561
+ dragmode=False,
562
+ hovermode="x unified",
563
+ )
564
+ fig_phash.update_xaxes(
565
+ title_text="Frame", gridcolor="rgba(128,128,128,0.4)", fixedrange=True
566
+ )
567
+ fig_phash.update_yaxes(
568
+ title_text="pHash Similarity",
569
+ gridcolor="rgba(128,128,128,0.4)",
570
+ fixedrange=True,
571
  )
 
 
572
  plots["phash"] = fig_phash
573
 
574
  # 5. Color Histogram Correlation Plot
 
601
  plot_bgcolor="rgba(0,0,0,0)",
602
  paper_bgcolor="rgba(0,0,0,0)",
603
  showlegend=False,
604
+ dragmode=False,
605
+ hovermode="x unified",
606
+ )
607
+ fig_hist.update_xaxes(
608
+ title_text="Frame", gridcolor="rgba(128,128,128,0.4)", fixedrange=True
609
+ )
610
+ fig_hist.update_yaxes(
611
+ title_text="Color Histogram Correlation",
612
+ gridcolor="rgba(128,128,128,0.4)",
613
+ fixedrange=True,
614
  )
 
 
615
  plots["color_hist"] = fig_hist
616
 
617
  # 6. Sharpness Comparison Plot
 
664
  legend=dict(
665
  orientation="h", yanchor="bottom", y=1.02, xanchor="center", x=0.5
666
  ),
667
+ dragmode=False,
668
+ hovermode="x unified",
669
+ )
670
+ fig_sharp.update_xaxes(
671
+ title_text="Frame", gridcolor="rgba(128,128,128,0.4)", fixedrange=True
672
+ )
673
+ fig_sharp.update_yaxes(
674
+ title_text="Sharpness",
675
+ gridcolor="rgba(128,128,128,0.4)",
676
+ fixedrange=True,
677
  )
 
 
678
  plots["sharpness"] = fig_sharp
679
 
680
  # 7. Overall Quality Score Plot (Combination of metrics)
 
731
  ]
732
  overall_quality.append(sum(frame_scores) / len(frame_scores))
733
 
734
+ # Calculate dynamic y-axis range to emphasize differences
735
+ min_quality = min(overall_quality)
736
+ max_quality = max(overall_quality)
737
+ quality_range = max_quality - min_quality
738
+
739
+ # If there's very little variation, use a smaller range to emphasize small differences
740
+ if quality_range < 0.08:
741
+ # For small variations, zoom in to show differences better
742
+ center = (min_quality + max_quality) / 2
743
+ padding = max(
744
+ 0.04, quality_range * 2
745
+ ) # At least 0.04 range or 2x the actual range
746
+ y_min = max(0, center - padding)
747
+ y_max = min(1, center + padding)
748
+ else:
749
+ # For larger variations, add some padding
750
+ padding = quality_range * 0.15 # 15% padding
751
+ y_min = max(0, min_quality - padding)
752
+ y_max = min(1, max_quality + padding)
753
+
754
  fig_overall = go.Figure()
755
+
756
+ # Add area fill to emphasize the quality curve
757
+ fig_overall.add_trace(
758
+ go.Scatter(
759
+ x=common_frames,
760
+ y=[y_min] * len(common_frames),
761
+ mode="lines",
762
+ line=dict(
763
+ color="rgba(255,215,0,0)"
764
+ ), # Transparent line for area base
765
+ showlegend=False,
766
+ hoverinfo="skip",
767
+ )
768
+ )
769
+
770
  fig_overall.add_trace(
771
  go.Scatter(
772
  x=common_frames,
 
774
  mode="lines+markers",
775
  name="Overall Quality",
776
  line=dict(color="gold", width=4),
777
+ marker=dict(
778
+ size=8, color="gold", line=dict(color="orange", width=2)
779
+ ),
780
+ hovertemplate="<b>Frame %{x}</b><br>Overall Quality: %{y:.5f}<br><i>Combined from: "
781
  + ", ".join(component_names)
782
  + "</i><extra></extra>",
783
  fill="tonexty",
784
+ fillcolor="rgba(255,215,0,0.15)", # Semi-transparent gold fill
785
  )
786
  )
787
 
788
+ # Add quality threshold indicators if there are significant variations
789
+ if (
790
+ quality_range > 0.03
791
+ ): # Show thresholds if there's meaningful variation
792
+ # Add reference lines for quality levels within the visible range
793
+ if y_min <= 0.9 <= y_max:
794
+ fig_overall.add_hline(
795
+ y=0.9,
796
+ line_dash="dot",
797
+ line_color="green",
798
+ line_width=1,
799
+ annotation_text="Excellent (0.9)",
800
+ annotation_position="right",
801
+ )
802
+ if y_min <= 0.8 <= y_max:
803
+ fig_overall.add_hline(
804
+ y=0.8,
805
+ line_dash="dot",
806
+ line_color="blue",
807
+ line_width=1,
808
+ annotation_text="Good (0.8)",
809
+ annotation_position="right",
810
+ )
811
+
812
  if current_frame is not None:
813
  fig_overall.add_vline(
814
  x=current_frame,
 
823
  plot_bgcolor="rgba(0,0,0,0)",
824
  paper_bgcolor="rgba(0,0,0,0)",
825
  showlegend=False,
826
+ dragmode=False,
827
+ hovermode="x unified",
828
+ )
829
+ fig_overall.update_xaxes(
830
+ title_text="Frame",
831
+ gridcolor="rgba(128,128,128,0.4)",
832
+ fixedrange=True,
833
  )
 
834
  fig_overall.update_yaxes(
835
+ title_text="Overall Quality Score",
836
+ range=[y_min, y_max],
837
+ gridcolor="rgba(128,128,128,0.4)",
838
+ fixedrange=True,
839
  )
840
  plots["overall"] = fig_overall
841
 
 
1248
  # Add overall assessment with formula explanation
1249
  if avg_quality >= 0.9:
1250
  overall = "✨ Excellent Overall"
1251
+ quality_indicator = "🟢"
1252
  elif avg_quality >= 0.8:
1253
  overall = "✅ Good Overall"
1254
+ quality_indicator = "🔵"
1255
  elif avg_quality >= 0.6:
1256
  overall = "⚠️ Fair Overall"
1257
+ quality_indicator = "🟡"
1258
  else:
1259
  overall = "❌ Poor Overall"
1260
+ quality_indicator = "🔴"
1261
+
1262
+ # Calculate quality variation across all frames to show differences
1263
+ quality_variation = ""
1264
+ if self.computed_metrics and len(self.computed_metrics) > 1:
1265
+ # Calculate overall quality for all frames to show variation
1266
+ all_quality_scores = []
1267
+ for metric in self.computed_metrics:
1268
+ frame_quality = 0
1269
+ frame_quality_count = 0
1270
+
1271
+ if metric.get("ssim") is not None:
1272
+ frame_quality += metric["ssim"]
1273
+ frame_quality_count += 1
1274
+ if metric.get("psnr") is not None:
1275
+ frame_quality += min(metric["psnr"] / 50, 1.0)
1276
+ frame_quality_count += 1
1277
+ if metric.get("phash") is not None:
1278
+ frame_quality += metric["phash"]
1279
+ frame_quality_count += 1
1280
+
1281
+ if frame_quality_count > 0:
1282
+ all_quality_scores.append(
1283
+ frame_quality / frame_quality_count
1284
+ )
1285
+
1286
+ if len(all_quality_scores) > 1:
1287
+ min_qual = min(all_quality_scores)
1288
+ max_qual = max(all_quality_scores)
1289
+ variation = max_qual - min_qual
1290
+
1291
+ if variation > 0.08:
1292
+ quality_variation = (
1293
+ f" | 📊 High Variation (Δ{variation:.4f})"
1294
+ )
1295
+ elif variation > 0.04:
1296
+ quality_variation = (
1297
+ f" | 📊 Moderate Variation (Δ{variation:.4f})"
1298
+ )
1299
+ elif variation > 0.02:
1300
+ quality_variation = (
1301
+ f" | 📊 Low Variation (Δ{variation:.4f})"
1302
+ )
1303
+ else:
1304
+ quality_variation = (
1305
+ f" | 📊 Stable Quality (Δ{variation:.4f})"
1306
+ )
1307
+
1308
+ info += f"\n🎯 Overall Quality: {quality_indicator} {avg_quality:.5f} ({overall}){quality_variation}"
1309
+ info += f"\n 💡 Formula: Average of {' + '.join(metric_contributions)} = {avg_quality:.5f}"
1310
 
1311
  return info
1312
 
 
1341
  # OpenCV will handle the validation during actual loading
1342
  valid_videos.append(video_path)
1343
  print(f"Added video URL: {video_path}")
1344
  else:
1345
+ # Convert to absolute path for local files
1346
+ abs_path = os.path.abspath(video_path)
1347
+ if os.path.exists(abs_path):
1348
+ valid_videos.append(abs_path)
1349
+ print(f"Added local video file: {abs_path}")
1350
+ elif os.path.exists(video_path):
1351
+ # Try relative path as fallback
1352
+ valid_videos.append(video_path)
1353
+ print(f"Added local video file: {video_path}")
1354
+ else:
1355
+ print(
1356
+ f"Warning: Local video file not found: {video_path} (abs: {abs_path})"
1357
+ )
1358
 
1359
  # Add to examples if we have valid videos
1360
  if len(valid_videos) == 2:
 
1413
  def create_app():
1414
  comparator = VideoFrameComparator()
1415
  example_pairs = load_examples_from_json()
1416
+ print(f"DEBUG: Loaded {len(example_pairs)} example pairs")
1417
+ for i, pair in enumerate(example_pairs):
1418
+ print(f" Example {i + 1}: {pair}")
1419
  all_videos = get_all_videos_from_json()
1420
 
1421
  with gr.Blocks(
1422
  title="Frame Arena - Video Frame Comparator",
1423
  # theme=gr.themes.Soft(),
1424
+ css="""
1425
+ /* Ensure plots adapt to theme */
1426
+ .plotly .main-svg {
1427
+ color: var(--body-text-color, #000) !important;
1428
+ }
1429
+ /* Grid visibility for both themes */
1430
+ .plotly .gridlayer .xgrid, .plotly .gridlayer .ygrid {
1431
+ stroke-opacity: 0.4 !important;
1432
+ }
1433
+ /* Axis text color adaptation */
1434
+ .plotly .xtick text, .plotly .ytick text {
1435
+ fill: var(--body-text-color, #000) !important;
1436
+ }
1437
+ /* Axis title color adaptation - multiple selectors for better coverage */
1438
+ .plotly .g-xtitle, .plotly .g-ytitle,
1439
+ .plotly .xtitle, .plotly .ytitle,
1440
+ .plotly text[class*="xtitle"], .plotly text[class*="ytitle"],
1441
+ .plotly .infolayer .g-xtitle, .plotly .infolayer .g-ytitle {
1442
+ fill: var(--body-text-color, #000) !important;
1443
+ }
1444
+ /* Additional axis title selectors */
1445
+ .plotly .subplot .xtitle, .plotly .subplot .ytitle,
1446
+ .plotly .cartesianlayer .xtitle, .plotly .cartesianlayer .ytitle {
1447
+ fill: var(--body-text-color, #000) !important;
1448
+ }
1449
+ /* SVG text elements in plots */
1450
+ .plotly svg text {
1451
+ fill: var(--body-text-color, #000) !important;
1452
+ }
1453
+ /* Legend text color */
1454
+ .plotly .legendtext, .plotly .legend text {
1455
+ fill: var(--body-text-color, #000) !important;
1456
+ }
1457
+ /* Hover label adaptation */
1458
+ .plotly .hoverlayer .hovertext, .plotly .hovertext {
1459
+ fill: var(--body-text-color, #000) !important;
1460
+ color: var(--body-text-color, #000) !important;
1461
+ }
1462
+ /* Annotation text */
1463
+ .plotly .annotation-text, .plotly .annotation {
1464
+ fill: var(--body-text-color, #000) !important;
1465
+ }
1466
+ /* Disable plot interactions except hover */
1467
+ .plotly .modebar {
1468
+ display: none !important;
1469
+ }
1470
+ .plotly .plot-container .plotly {
1471
+ pointer-events: none !important;
1472
+ }
1473
+ .plotly .plot-container .plotly .hoverlayer {
1474
+ pointer-events: auto !important;
1475
+ }
1476
+ .plotly .plot-container .plotly .hovertext {
1477
+ pointer-events: auto !important;
1478
+ }
1479
+ """,
1480
+ # js="""
1481
+ # function updatePlotColors() {
1482
+ # // Get current theme color
1483
+ # const bodyStyle = getComputedStyle(document.body);
1484
+ # const textColor = bodyStyle.getPropertyValue('--body-text-color') ||
1485
+ # bodyStyle.color ||
1486
+ # (bodyStyle.backgroundColor === 'rgb(255, 255, 255)' ? '#000000' : '#ffffff');
1487
+ # // Update all plot text elements
1488
+ # document.querySelectorAll('.plotly svg text').forEach(text => {
1489
+ # text.setAttribute('fill', textColor);
1490
+ # });
1491
+ # }
1492
+ # // Update colors on load and theme change
1493
+ # window.addEventListener('load', updatePlotColors);
1494
+ # // Watch for theme changes
1495
+ # const observer = new MutationObserver(updatePlotColors);
1496
+ # observer.observe(document.body, {
1497
+ # attributes: true,
1498
+ # attributeFilter: ['class', 'style']
1499
+ # });
1500
+ # // Also watch for CSS variable changes
1501
+ # if (window.CSS && CSS.supports('color', 'var(--body-text-color)')) {
1502
+ # const style = document.createElement('style');
1503
+ # style.textContent = `
1504
+ # .plotly svg text {
1505
+ # fill: var(--body-text-color, currentColor) !important;
1506
+ # }
1507
+ # `;
1508
+ # document.head.appendChild(style);
1509
+ # }
1510
+ # """,
1511
  ) as app:
1512
  gr.Markdown("""
1513
  # 🎬 Frame Arena: Frame by frame comparisons of any videos
1514
 
1515
+ > 🎉 This tool has been created to celebrate our Wan 2.2 [text-to-video](https://replicate.com/wan-video/wan-2.2-t2v-480p-fast) and [image-to-video](https://replicate.com/wan-video/wan-2.2-i2v-a14b) endpoints on Replicate. Want to know more? Check out [our blog](https://www.wan22.com/blog/video-optimization-on-replicate)!
1516
+
1517
  - Upload videos in common formats with the same number of frames (MP4, AVI, MOV, etc.) or use URLs
1518
  - **7 Quality Metrics**: SSIM, PSNR, MSE, pHash, Color Histogram, Sharpness + Overall Quality
1519
  - **Individual Visualization**: Each metric gets its own dedicated plot
 
1556
  type="filepath",
1557
  )
1558
 
1559
+ # Add examples at the top for better UX
1560
  if example_pairs:
1561
  gr.Markdown("### 📁 Example Video Comparisons")
1562
+ examples_component_top = gr.Examples(
1563
  examples=example_pairs,
1564
  inputs=[video1_input, video2_input],
1565
  label="Click any example to load video pairs:",
1566
  examples_per_page=10,
1567
+ run_on_click=False, # We'll handle this manually
1568
  )
1569
 
1570
  load_btn = gr.Button("🔄 Load Videos", variant="primary", size="lg")
 
1593
  step=1,
1594
  value=0,
1595
  label="Frame Number",
1596
+ interactive=True,
1597
  )
1598
 
1599
  # Comprehensive metrics visualization (initially hidden)
1600
  metrics_section = gr.Row(visible=False)
1601
  with metrics_section:
1602
  with gr.Column():
1603
+ gr.Markdown("### 📊 Metric Analysis")
1604
 
1605
  # Overall quality plot
1606
  with gr.Row():
 
1609
  show_label=True,
1610
  )
1611
 
1612
+ # Frame info moved below overall quality plot
1613
+ frame_info = gr.Textbox(
1614
+ label="Frame Information & Metrics",
1615
+ interactive=False,
1616
+ value="",
1617
+ lines=3,
1618
+ )
1619
+
1620
  # Individual metric plots
1621
  with gr.Row():
1622
  ssim_plot = gr.Plot(label="SSIM", show_label=True)
 
1630
  color_plot = gr.Plot(label="Color Histogram", show_label=True)
1631
  sharpness_plot = gr.Plot(label="Sharpness", show_label=True)
1632
 
1633
+ # Add comprehensive usage guide
1634
+ with gr.Accordion("📖 Usage Guide & Metrics Reference", open=False):
1635
+ with gr.Row() as info_section:
1636
+ with gr.Column():
1637
+ status_output = gr.Textbox(
1638
+ label="Status", interactive=False, lines=8
1639
+ )
1640
+ with gr.Column():
1641
+ gr.Markdown("""
1642
+ ### 📊 Metrics Explained
1643
+ - **SSIM**: Structural Similarity (1.0 = identical structure, 0.0 = completely different)
1644
+ - **PSNR**: Peak Signal-to-Noise Ratio in dB (higher = better quality, less noise)
1645
+ - **MSE**: Mean Squared Error (lower = more similar pixel values)
1646
+ - **pHash**: Perceptual Hash similarity (1.0 = visually identical)
1647
+ - **Color Histogram**: Color distribution correlation (1.0 = identical color patterns)
1648
+ - **Sharpness**: Laplacian variance per video (higher = sharper/more detailed images)
1649
+ - **Overall Quality**: Combined metric averaging SSIM, normalized PSNR, and pHash (when available)
1650
+ """)
1651
+
1652
+ with gr.Row():
1653
+ with gr.Column():
1654
+ gr.Markdown("""
1655
+ ### 🎯 Quality Assessment Scale (Research-Based Thresholds)
1656
+ **SSIM Scale** (based on human perception studies):
1657
+ - 🟢 **Excellent (≥0.9)**: Visually indistinguishable differences
1658
+ - 🔵 **Good (≥0.8)**: Minor visible differences, still high quality
1659
+ - 🟡 **Fair (≥0.6)**: Noticeable differences, acceptable quality
1660
+ - 🔴 **Poor (<0.6)**: Significant visible artifacts and differences
1661
+
1662
+ **PSNR Scale** (standard video quality benchmarks):
1663
+ - 🟢 **Excellent (≥40dB)**: Professional broadcast quality
1664
+ - 🔵 **Good (≥30dB)**: High consumer video quality
1665
+ - 🟡 **Fair (≥20dB)**: Acceptable for web streaming
1666
+ - 🔴 **Poor (<20dB)**: Low quality with visible compression artifacts
1667
+
1668
+ **MSE Scale** (pixel difference thresholds):
1669
+ - 🟢 **Very Similar (≤50)**: Minimal pixel-level differences
1670
+ - 🔵 **Similar (≤100)**: Small differences, good quality preservation
1671
+ - 🟡 **Moderately Different (≤200)**: Noticeable but acceptable differences
1672
+ - 🔴 **Very Different (>200)**: Significant pixel-level changes
1673
+ """)
1674
+ with gr.Column():
1675
+ gr.Markdown("""
1676
+ ### 🔍 Understanding Comparisons
1677
+ **Comparison Analysis**: Shows how similar/different the videos are
1678
+ - Most metrics indicate **similarity** - not which video "wins"
1679
+ - Higher SSIM/PSNR/pHash/Color = more similar videos
1680
+ - Lower MSE = more similar videos
1681
+
1682
+ **Individual Quality**: Shows the quality of each video separately
1683
+ - Sharpness comparison shows which video has more detail
1684
+ - Significance levels: 🔴 MAJOR (>20%), 🟡 MODERATE (10-20%), 🔵 MINOR (5-10%), 🟢 NEGLIGIBLE (<5%)
1685
+
1686
+ **Overall Quality**: Combines multiple metrics to provide a single similarity score
1687
+ - **Formula**: Average of [SSIM + normalized_PSNR + pHash]
1688
+ - **PSNR Normalization**: PSNR values are divided by 50dB and capped at 1.0
1689
+ - **Range**: 0.0 to 1.0 (higher = more similar videos overall)
1690
+ - **Purpose**: Provides a single metric when you need one overall assessment
1691
+ - **Limitation**: Different metrics may disagree; check individual metrics for details
1692
+ """)
1693
+
1694
+ # Connect examples to auto-loading
1695
+ if example_pairs:
1696
+ # Use a manual approach to handle examples click
1697
+ def examples_manual_handler(video1, video2):
1698
+ print("DEBUG: Examples clicked manually!")
1699
+ return load_videos_handler(video1, video2)
1700
+
1701
+ # Since we can't directly attach to examples, we'll use the change events
1702
 
1703
  # Event handlers
1704
  def load_videos_handler(video1, video2):
1705
+ print(
1706
+ f"DEBUG: load_videos_handler called with video1={video1}, video2={video2}"
1707
+ )
1708
  status, max_frames, frame1, frame2, info, plots = comparator.load_videos(
1709
  video1, video2
1710
  )
 
1745
  overall_fig, # overall_plot
1746
  gr.Row(visible=videos_loaded), # frame_controls
1747
  gr.Row(visible=videos_loaded), # frame_display
1748
+ gr.Row(visible=videos_loaded and plots is not None), # metrics_section
1749
  gr.Row(visible=videos_loaded), # info_section
1750
  )
1751
 
 
1761
  None,
1762
  None,
1763
  None,
 
1764
  )
1765
 
1766
  frame1, frame2 = comparator.get_frames_at_index(frame_index)
 
1791
 
1792
  # Auto-load when examples populate the inputs
1793
  def auto_load_when_examples_change(video1, video2):
1794
+ print(
1795
+ f"DEBUG: auto_load_when_examples_change called with video1={video1}, video2={video2}"
1796
+ )
1797
  # Only auto-load if both inputs are provided (from examples)
1798
  if video1 and video2:
1799
+ print("DEBUG: Both videos present, calling load_videos_handler")
1800
  return load_videos_handler(video1, video2)
1801
  # If only one or no videos, return default empty state
1802
+ print("DEBUG: Not both videos present, returning default state")
1803
  return (
1804
  "Please upload videos or select an example", # status_output
1805
  gr.Slider(
 
1807
  ), # frame_slider
1808
  None, # frame1_output
1809
  None, # frame2_output
1810
+ "", # frame_info
1811
  None, # ssim_plot
1812
  None, # psnr_plot
1813
  None, # mse_plot
 
1821
  gr.Row(visible=False), # info_section
1822
  )
1823
 
1824
+ # Enhanced auto-load function with more debug info
1825
+ def enhanced_auto_load(video1, video2):
1826
+ print(f"DEBUG: Input change detected! video1={video1}, video2={video2}")
1827
+ return auto_load_when_examples_change(video1, video2)
1828
+
1829
+ # Auto-load when both video inputs change (triggered by examples)
1830
+ video1_input.change(
1831
+ fn=enhanced_auto_load,
1832
  inputs=[video1_input, video2_input],
1833
  outputs=[
1834
  status_output,
 
1850
  ],
1851
  )
1852
 
1853
+ video2_input.change(
1854
+ fn=enhanced_auto_load,
 
1855
  inputs=[video1_input, video2_input],
1856
  outputs=[
1857
  status_output,
 
1873
  ],
1874
  )
1875
 
1876
+ # Manual load button event handler with debug
1877
+ def debug_load_videos_handler(video1, video2):
1878
+ print(f"DEBUG: Load button clicked! video1={video1}, video2={video2}")
1879
+ return load_videos_handler(video1, video2)
1880
+
1881
+ load_btn.click(
1882
+ fn=debug_load_videos_handler,
1883
  inputs=[video1_input, video2_input],
1884
  outputs=[
1885
  status_output,
 
1918
  ],
1919
  )
1920
 
1921
  return app
1922
 
1923
 
1924
  def main():
1925
  app = create_app()
1926
+ app.launch(
1927
+ server_name="0.0.0.0",
1928
+ server_port=7860,
1929
+ share=False,
1930
+ )
1931
 
1932
 
1933
  if __name__ == "__main__":
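
For reference, the overall-quality score that drives the gold plot, the quality-variation indicator, and the usage-guide formula is the mean of whichever similarity components are available for a frame, with PSNR normalized to [0, 1]. A minimal sketch, assuming a `metric` dict shaped like the entries in `self.computed_metrics`:

```python
def overall_quality(metric):
    """Average of SSIM, PSNR normalized by 50 dB (capped at 1.0), and pHash,
    using whichever of the three are present for this frame."""
    components = []
    if metric.get("ssim") is not None:
        components.append(metric["ssim"])
    if metric.get("psnr") is not None:
        components.append(min(metric["psnr"] / 50, 1.0))
    if metric.get("phash") is not None:
        components.append(metric["phash"])
    return sum(components) / len(components) if components else None
```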