davidberenstein1957 committed on
Commit 9d3e9dc · Parent: 90f50e4

Refactor video quality analysis to create individual metric plots instead of a combined dashboard. Update README to reflect new project name "Frame Arena" and enhance description of features and functionality.

Files changed (2)
  1. README.md +15 -3
  2. app.py +528 -383
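At a glance, the refactor replaces the single `make_subplots` dashboard with a dictionary of per-metric Plotly figures that the Gradio UI renders in separate `gr.Plot` components. The following is a condensed, hypothetical sketch of that pattern, not the app's actual API (the helper name `build_metric_plots` and the demo data are illustrative):

```python
import plotly.graph_objects as go


def build_metric_plots(metrics_list, current_frame=0):
    """Return one small Plotly figure per metric, keyed by metric name."""
    frames = [m["frame_index"] for m in metrics_list]
    plots = {}
    for name, color, y_title in [
        ("ssim", "blue", "SSIM"),
        ("psnr", "green", "PSNR (dB)"),
        ("mse", "red", "MSE"),
    ]:
        # Keep only the frames where this metric was actually computed
        valid = [(f, m[name]) for f, m in zip(frames, metrics_list) if m.get(name) is not None]
        if not valid:
            continue
        x, y = zip(*valid)
        fig = go.Figure(
            go.Scatter(x=x, y=y, mode="lines+markers", line=dict(color=color, width=3))
        )
        # Dashed red marker for the frame currently shown in the UI
        fig.add_vline(x=current_frame, line_dash="dash", line_color="red", line_width=2)
        fig.update_layout(height=300, showlegend=False)
        fig.update_xaxes(title_text="Frame")
        fig.update_yaxes(title_text=y_title)
        plots[name] = fig
    return plots


# Tiny demo with fabricated metrics for three frame pairs
demo = [{"frame_index": i, "ssim": 0.90 + 0.01 * i, "psnr": 35.0 + i, "mse": 60.0 - i} for i in range(3)]
print(sorted(build_metric_plots(demo, current_frame=1)))  # ['mse', 'psnr', 'ssim']
```

Each figure in the returned dictionary is then wired to its own `gr.Plot` output in the event handlers, which is where most of the +528/-383 line churn in `app.py` comes from.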
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-title: FrameLens
-emoji: 👀
+title: Frame Arena
+emoji: 🎬
 colorFrom: purple
 colorTo: yellow
 sdk: gradio
@@ -9,7 +9,19 @@ app_file: app.py
 pinned: true
 python_version: 3.12
 license: apache-2.0
-short_description: Tool for frame-by-frame video or image metric comparison
+short_description: Frame-by-frame video comparison tool with comprehensive quality metrics
 ---
 
+# Frame Arena: Frame-by-frame comparison of any two videos
+
+A powerful tool for comparing videos frame by frame and side by side using comprehensive quality metrics, including SSIM, PSNR, MSE, pHash, Color Histogram Correlation, Sharpness Analysis, and an Overall Quality score that combines multiple metrics.
+
+### 💡 How Frame Arena Works:
+
+- Upload videos in common formats (MP4, AVI, MOV, etc.) or use URLs
+- **7 Quality Metrics**: SSIM, PSNR, MSE, pHash, Color Histogram, Sharpness + Overall Quality
+- **Individual Visualization**: Each metric gets its own dedicated plot
+- **Real-time Analysis**: Navigate frames with live metric updates
+- **Smart Comparisons**: Understand differences between videos per metric
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
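The metrics listed in the README are standard image-quality computations over pairs of aligned frames. As a rough sketch, not the app's exact implementation (the function name `compare_frames`, the 32-bin histograms, and the `scipy.ndimage` Laplacian are assumptions), using the same libraries the `app.py` diff imports:

```python
import numpy as np
import imagehash
from PIL import Image
from scipy.ndimage import laplace
from scipy.stats import pearsonr
from skimage.metrics import (
    mean_squared_error,
    peak_signal_noise_ratio,
    structural_similarity,
)


def compare_frames(frame1: np.ndarray, frame2: np.ndarray) -> dict:
    """Compare two aligned RGB frames (uint8 HxWx3 arrays) and return per-pair metrics."""
    gray1 = frame1.mean(axis=2)
    gray2 = frame2.mean(axis=2)

    # Similarity / error metrics: higher SSIM and PSNR, lower MSE = more similar
    ssim = structural_similarity(gray1, gray2, data_range=255)
    psnr = peak_signal_noise_ratio(frame1, frame2, data_range=255)
    mse = mean_squared_error(frame1, frame2)

    # Perceptual hash similarity: 1.0 means identical 64-bit pHashes
    h1 = imagehash.phash(Image.fromarray(frame1))
    h2 = imagehash.phash(Image.fromarray(frame2))
    phash_sim = 1.0 - (h1 - h2) / h1.hash.size

    # Color histogram correlation: Pearson r over concatenated per-channel histograms
    hist1 = np.concatenate([np.histogram(frame1[..., c], bins=32, range=(0, 255))[0] for c in range(3)])
    hist2 = np.concatenate([np.histogram(frame2[..., c], bins=32, range=(0, 255))[0] for c in range(3)])
    color_hist_corr, _ = pearsonr(hist1, hist2)

    # Per-video sharpness: variance of the Laplacian (higher = sharper)
    sharpness1 = laplace(gray1).var()
    sharpness2 = laplace(gray2).var()

    return {
        "ssim": ssim, "psnr": psnr, "mse": mse, "phash": phash_sim,
        "color_hist_corr": color_hist_corr, "sharpness1": sharpness1, "sharpness2": sharpness2,
    }


# Example with two synthetic frames that differ by mild noise
rng = np.random.default_rng(0)
a = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
b = np.clip(a.astype(int) + rng.integers(-5, 6, size=a.shape), 0, 255).astype(np.uint8)
print({k: round(float(v), 3) for k, v in compare_frames(a, b).items()})
```

Higher SSIM, PSNR, pHash, and color correlation mean the two videos are more similar at that frame; MSE runs the other way, and the two sharpness values describe each video individually rather than the pair.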
app.py CHANGED
@@ -7,7 +7,6 @@ import imagehash
7
  import numpy as np
8
  import plotly.graph_objects as go
9
  from PIL import Image
10
- from plotly.subplots import make_subplots
11
  from scipy.stats import pearsonr
12
  from skimage.metrics import mean_squared_error as mse_skimage
13
  from skimage.metrics import peak_signal_noise_ratio as psnr_skimage
@@ -343,44 +342,14 @@ class FrameMetrics:
343
 
344
  return summary
345
 
346
- def create_modern_plot(self, metrics_list, current_frame=0):
347
- """Create a comprehensive multi-metric visualization with shared hover"""
348
  if not metrics_list:
349
  return None
350
 
351
- # Extract frame indices and metric values
352
  frame_indices = [m["frame_index"] for m in metrics_list]
353
 
354
- # Create 3x2 subplots with quality overview at the top
355
- fig = make_subplots(
356
- rows=3,
357
- cols=2,
358
- subplot_titles=(
359
- "Quality Overview (Combined Score)",
360
- "", # Empty title for merged cell
361
- "SSIM",
362
- "PSNR vs MSE",
363
- "Perceptual Hash vs Color Histogram",
364
- "Individual Sharpness (Video 1 vs Video 2)",
365
- ),
366
- specs=[
367
- [
368
- {"colspan": 2, "secondary_y": False},
369
- None,
370
- ], # Row 1: Quality Overview (single axis)
371
- [
372
- {"secondary_y": False},
373
- {"secondary_y": True},
374
- ], # Row 2: SSIM (single axis), PSNR vs MSE
375
- [
376
- {"secondary_y": True},
377
- {"secondary_y": True},
378
- ], # Row 3: pHash vs Color, Individual Sharpness
379
- ],
380
- vertical_spacing=0.12,
381
- horizontal_spacing=0.1,
382
- )
383
-
384
  # Helper function to get valid data
385
  def get_valid_data(metric_name):
386
  values = [m.get(metric_name) for m in metrics_list]
@@ -389,293 +358,335 @@ class FrameMetrics:
389
  valid_frames = [frame_indices[i] for i in valid_indices]
390
  return valid_frames, valid_values
391
 
392
- # Plot 1: Quality Overview - Combined Score Only (row 1, full width)
393
- ssim_frames, ssim_values = get_valid_data("ssim")
394
- psnr_frames, psnr_values = get_valid_data("psnr")
395
-
396
- # Show only combined quality score
397
- if ssim_values and psnr_values and len(ssim_values) == len(psnr_values):
398
- # Normalize metrics to 0-1 scale for comparison
399
- ssim_norm = np.array(ssim_values)
400
- psnr_norm = np.clip(np.array(psnr_values) / 50, 0, 1)
401
- quality_score = (ssim_norm + psnr_norm) / 2
402
-
403
- fig.add_trace(
404
- go.Scatter(
405
- x=ssim_frames,
406
- y=quality_score,
407
- mode="lines+markers",
408
- name="Quality Score ↑",
409
- line=dict(color="gold", width=4),
410
- marker=dict(size=8),
411
- hovertemplate="<b>Frame %{x}</b><br>Quality Score: %{y:.3f}<extra></extra>",
412
- fill="tonexty",
413
- ),
414
- row=1,
415
- col=1,
416
- )
417
 
418
- # Plot 2: SSIM (row 2, col 1)
 
419
  if ssim_values:
420
- fig.add_trace(
 
421
  go.Scatter(
422
  x=ssim_frames,
423
  y=ssim_values,
424
  mode="lines+markers",
425
- name="SSIM ↑",
426
  line=dict(color="blue", width=3),
427
  marker=dict(size=6),
428
  hovertemplate="<b>Frame %{x}</b><br>SSIM: %{y:.4f}<extra></extra>",
429
- ),
430
- row=2,
431
- col=1,
432
  )
433
 
434
- # Get pHash data for later use
435
- phash_frames, phash_values = get_valid_data("phash")
 
 
 
 
 
436
 
437
- # Plot 3: PSNR vs MSE (row 2, col 2) - keep as is since already shows individual metrics
 
 
438
  if psnr_values:
439
- fig.add_trace(
 
440
  go.Scatter(
441
  x=psnr_frames,
442
  y=psnr_values,
443
  mode="lines+markers",
444
- name="PSNR ↑",
445
- line=dict(color="green", width=2),
 
446
  hovertemplate="<b>Frame %{x}</b><br>PSNR: %{y:.2f} dB<extra></extra>",
447
- ),
448
- row=2,
449
- col=2,
 
 
450
  )
 
 
 
451
 
 
452
  mse_frames, mse_values = get_valid_data("mse")
453
  if mse_values:
454
- fig.add_trace(
 
455
  go.Scatter(
456
  x=mse_frames,
457
  y=mse_values,
458
  mode="lines+markers",
459
- name="MSE ↓",
460
- line=dict(color="red", width=2),
 
461
  hovertemplate="<b>Frame %{x}</b><br>MSE: %{y:.2f}<extra></extra>",
462
- yaxis="y6",
463
- ),
464
- row=2,
465
- col=2,
466
- secondary_y=True,
 
 
467
  )
 
 
 
468
 
469
- # Plot 4: Perceptual Hash vs Color Histogram (row 3, col 1) - keep as is
 
470
  if phash_values:
471
- fig.add_trace(
 
472
  go.Scatter(
473
  x=phash_frames,
474
  y=phash_values,
475
  mode="lines+markers",
476
- name="pHash ↑",
477
- line=dict(color="purple", width=2),
 
478
  hovertemplate="<b>Frame %{x}</b><br>pHash: %{y:.4f}<extra></extra>",
479
- ),
480
- row=3,
481
- col=1,
 
 
482
  )
 
 
 
483
 
 
484
  hist_frames, hist_values = get_valid_data("color_hist_corr")
485
  if hist_values:
486
- fig.add_trace(
 
487
  go.Scatter(
488
  x=hist_frames,
489
  y=hist_values,
490
  mode="lines+markers",
491
- name="Color Hist ↑",
492
- line=dict(color="orange", width=2),
493
- hovertemplate="<b>Frame %{x}</b><br>Hist Corr: %{y:.4f}<extra></extra>",
494
- yaxis="y8",
495
- ),
496
- row=3,
497
- col=1,
498
- secondary_y=True,
499
  )
500
 
501
- # Plot 5: Individual Sharpness - Video 1 vs Video 2 (row 3, col 2)
502
- sharp1_frames, sharp1_values = get_valid_data("sharpness1")
503
- sharp2_frames, sharp2_values = get_valid_data("sharpness2")
504
-
505
- if sharp1_values:
506
- fig.add_trace(
507
- go.Scatter(
508
- x=sharp1_frames,
509
- y=sharp1_values,
510
- mode="lines+markers",
511
- name="Video 1 Sharpness ↑",
512
- line=dict(color="darkgreen", width=2),
513
- hovertemplate="<b>Frame %{x}</b><br>Video 1 Sharpness: %{y:.1f}<extra></extra>",
514
- ),
515
- row=3,
516
- col=2,
517
- )
518
 
519
- if sharp2_values:
520
- fig.add_trace(
521
- go.Scatter(
522
- x=sharp2_frames,
523
- y=sharp2_values,
524
- mode="lines+markers",
525
- name="Video 2 Sharpness ↑",
526
- line=dict(color="darkblue", width=2),
527
- hovertemplate="<b>Frame %{x}</b><br>Video 2 Sharpness: %{y:.1f}<extra></extra>",
528
- yaxis="y10",
529
- ),
530
- row=3,
531
- col=2,
532
- secondary_y=True,
533
  )
 
 
 
534
 
535
- # Add current frame marker to all plots
536
- if current_frame is not None:
537
- # Add vertical line to each subplot to show current frame
538
- # Subplot (1,1): Quality Overview (full width)
539
- fig.add_vline(
540
- x=current_frame,
541
- line_dash="dash",
542
- line_color="red",
543
- line_width=2,
544
- row=1,
545
- col=1,
546
- )
547
 
548
- # Subplot (2,1): Similarity Metrics
549
- fig.add_vline(
550
- x=current_frame,
551
- line_dash="dash",
552
- line_color="red",
553
- line_width=2,
554
- row=2,
555
- col=1,
556
- )
 
 
 
 
 
 
557
 
558
- # Subplot (2,2): PSNR vs MSE
559
- fig.add_vline(
560
- x=current_frame,
561
- line_dash="dash",
562
- line_color="red",
563
- line_width=2,
564
- row=2,
565
- col=2,
566
- )
 
 
 
567
 
568
- # Subplot (3,1): pHash vs Color Histogram
569
- fig.add_vline(
570
- x=current_frame,
571
- line_dash="dash",
572
- line_color="red",
573
- line_width=2,
574
- row=3,
575
- col=1,
576
- )
577
 
578
- # Subplot (3,2): Individual Sharpness
579
- fig.add_vline(
580
- x=current_frame,
581
- line_dash="dash",
582
- line_color="red",
583
- line_width=2,
584
- row=3,
585
- col=2,
 
586
  )
 
 
 
587
 
588
- # Update layout with shared hover mode and other improvements
589
- fig.update_layout(
590
- height=900,
591
- showlegend=True,
592
- hovermode="x unified", # Shared hover pointer across subplots
593
- dragmode=False,
594
- title={
595
- "text": "πŸ“Š Multi-Metric Video Quality Analysis Dashboard",
596
- "x": 0.5,
597
- "xanchor": "center",
598
- "font": {"size": 16},
599
- },
600
- legend={
601
- "orientation": "h",
602
- "yanchor": "bottom",
603
- "y": 1.02,
604
- "xanchor": "center",
605
- "x": 0.5,
606
- "font": {"size": 10},
607
- },
608
- margin=dict(t=100, b=50, l=50, r=50),
609
- plot_bgcolor="rgba(0,0,0,0)",
610
- paper_bgcolor="rgba(0,0,0,0)",
611
- )
612
-
613
- # Update axes labels and ranges with improved configuration
614
- fig.update_xaxes(title_text="Frame", fixedrange=True)
615
-
616
- # Quality Overview axis (row 1, col 1) - focused range to emphasize differences
617
- quality_values = []
618
  if ssim_values and psnr_values and len(ssim_values) == len(psnr_values):
619
- ssim_norm = np.array(ssim_values)
620
- psnr_norm = np.clip(np.array(psnr_values) / 50, 0, 1)
621
- quality_values = (ssim_norm + psnr_norm) / 2
622
-
623
- if len(quality_values) > 0:
624
- # Use dynamic range based on data with some padding for better visualization
625
- min_qual = float(np.min(quality_values))
626
- max_qual = float(np.max(quality_values))
627
- range_padding = (max_qual - min_qual) * 0.1 # 10% padding
628
- y_min = max(0, min_qual - range_padding)
629
- y_max = min(1, max_qual + range_padding)
630
- # Ensure minimum range for visibility
631
- if (y_max - y_min) < 0.1:
632
- center = (y_max + y_min) / 2
633
- y_min = max(0, center - 0.05)
634
- y_max = min(1, center + 0.05)
635
- else:
636
- # Fallback range
637
- y_min, y_max = 0.5, 1.0
638
-
639
- fig.update_yaxes(
640
- title_text="Quality Score",
641
- row=1,
642
- col=1,
643
- fixedrange=True,
644
- range=[y_min, y_max],
645
- )
646
-
647
- # SSIM axis (row 2, col 1)
648
- fig.update_yaxes(
649
- title_text="SSIM", row=2, col=1, fixedrange=True, range=[0, 1.05]
650
- )
651
-
652
- # PSNR vs MSE axes (row 2, col 2)
653
- fig.update_yaxes(title_text="PSNR (dB)", row=2, col=2, fixedrange=True)
654
- fig.update_yaxes(
655
- title_text="MSE", row=2, col=2, secondary_y=True, fixedrange=True
656
- )
 
 
657
 
658
- # pHash vs Color Histogram axes (row 3, col 1)
659
- fig.update_yaxes(title_text="pHash Similarity", row=3, col=1, fixedrange=True)
660
- fig.update_yaxes(
661
- title_text="Histogram Correlation",
662
- row=3,
663
- col=1,
664
- secondary_y=True,
665
- fixedrange=True,
666
- )
 
 
667
 
668
- # Individual Sharpness axes (row 3, col 2)
669
- fig.update_yaxes(title_text="Video 1 Sharpness", row=3, col=2, fixedrange=True)
670
- fig.update_yaxes(
671
- title_text="Video 2 Sharpness",
672
- row=3,
673
- col=2,
674
- secondary_y=True,
675
- fixedrange=True,
676
- )
677
 
678
- return fig
 
 
679
 
680
 
681
  class VideoFrameComparator:
@@ -778,7 +789,7 @@ class VideoFrameComparator:
778
 
779
  # Compute metrics if both videos are present and not in data.json
780
  metrics_info = ""
781
- metrics_plot = None
782
 
783
  if (
784
  video1_path
@@ -819,7 +830,7 @@ class VideoFrameComparator:
819
  metrics_info += f"Valid Frames: {self.metrics_summary['valid_frames']}/{self.metrics_summary['total_frames']}"
820
 
821
  # Generate initial plot
822
- metrics_plot = self.frame_metrics.create_modern_plot(
823
  self.computed_metrics, 0
824
  )
825
  else:
@@ -854,7 +865,7 @@ class VideoFrameComparator:
854
  frame1,
855
  frame2,
856
  self.get_current_frame_info(0),
857
- metrics_plot,
858
  )
859
 
860
  def get_frames_at_index(self, frame_index):
@@ -918,97 +929,103 @@ class VideoFrameComparator:
918
  quality = "🟑 Fair"
919
  else:
920
  quality = "πŸ”΄ Poor"
921
- comparison_metrics.append(f"SSIM: {ssim_val:.4f} ↑ ({quality})")
 
 
922
 
923
  # PSNR with quality indicator
924
  if metrics.get("psnr") is not None:
925
  psnr_val = metrics["psnr"]
926
  if psnr_val >= 40:
927
- psnr_quality = "🟒"
928
  elif psnr_val >= 30:
929
- psnr_quality = "πŸ”΅"
930
  elif psnr_val >= 20:
931
- psnr_quality = "🟑"
932
  else:
933
- psnr_quality = "πŸ”΄"
934
- comparison_metrics.append(f"PSNR: {psnr_val:.1f}dB ↑ {psnr_quality}")
 
 
935
 
936
  # MSE with quality indicator (lower is better)
937
  if metrics.get("mse") is not None:
938
  mse_val = metrics["mse"]
939
  if mse_val <= 50:
940
- mse_quality = "🟒"
941
  elif mse_val <= 100:
942
- mse_quality = "πŸ”΅"
943
  elif mse_val <= 200:
944
- mse_quality = "🟑"
945
  else:
946
- mse_quality = "πŸ”΄"
947
- comparison_metrics.append(f"MSE: {mse_val:.1f} ↓ {mse_quality}")
948
 
949
  # pHash with quality indicator
950
  if metrics.get("phash") is not None:
951
  phash_val = metrics["phash"]
952
  if phash_val >= 0.95:
953
- phash_quality = "🟒"
954
  elif phash_val >= 0.9:
955
- phash_quality = "πŸ”΅"
956
  elif phash_val >= 0.8:
957
- phash_quality = "🟑"
958
  else:
959
- phash_quality = "πŸ”΄"
960
- comparison_metrics.append(f"pHash: {phash_val:.3f} ↑ {phash_quality}")
 
 
961
 
962
  # Color Histogram Correlation
963
  if metrics.get("color_hist_corr") is not None:
964
  color_val = metrics["color_hist_corr"]
965
  if color_val >= 0.9:
966
- color_quality = "🟒"
967
  elif color_val >= 0.8:
968
- color_quality = "πŸ”΅"
969
  elif color_val >= 0.6:
970
- color_quality = "🟑"
971
  else:
972
- color_quality = "πŸ”΄"
973
- comparison_metrics.append(f"Color: {color_val:.3f} ↑ {color_quality}")
974
 
975
  # Add comparison metrics to info
976
  if comparison_metrics:
977
- info += " | " + " | ".join(comparison_metrics)
978
 
979
- # === INDIVIDUAL IMAGE METRICS ===
980
  individual_metrics = []
981
 
982
  # Individual Sharpness for each video
983
  if metrics.get("sharpness1") is not None:
984
  sharp1 = metrics["sharpness1"]
985
  if sharp1 >= 200:
986
- sharp1_quality = "🟒"
987
  elif sharp1 >= 100:
988
- sharp1_quality = "πŸ”΅"
989
  elif sharp1 >= 50:
990
- sharp1_quality = "🟑"
991
  else:
992
- sharp1_quality = "πŸ”΄"
993
  individual_metrics.append(
994
- f"V1 Sharpness: {sharp1:.0f} ↑ {sharp1_quality}"
995
  )
996
 
997
  if metrics.get("sharpness2") is not None:
998
  sharp2 = metrics["sharpness2"]
999
  if sharp2 >= 200:
1000
- sharp2_quality = "🟒"
1001
  elif sharp2 >= 100:
1002
- sharp2_quality = "πŸ”΅"
1003
  elif sharp2 >= 50:
1004
- sharp2_quality = "🟑"
1005
  else:
1006
- sharp2_quality = "πŸ”΄"
1007
  individual_metrics.append(
1008
- f"V2 Sharpness: {sharp2:.0f} ↑ {sharp2_quality}"
1009
  )
1010
 
1011
- # Sharpness comparison and winner
1012
  if (
1013
  metrics.get("sharpness1") is not None
1014
  and metrics.get("sharpness2") is not None
@@ -1016,77 +1033,80 @@ class VideoFrameComparator:
1016
  sharp1 = metrics["sharpness1"]
1017
  sharp2 = metrics["sharpness2"]
1018
 
1019
- # Determine winner
1020
- if sharp1 > sharp2:
1021
- winner = "V1"
1022
- winner_emoji = "πŸ†"
1023
- elif sharp2 > sharp1:
1024
- winner = "V2"
1025
- winner_emoji = "πŸ†"
1026
- else:
1027
- winner = "Tie"
1028
- winner_emoji = "βš–οΈ"
1029
-
1030
  diff_pct = abs(sharp1 - sharp2) / max(sharp1, sharp2) * 100
1031
 
1032
- # Add significance
1033
  if diff_pct > 20:
1034
- significance = "Major"
1035
  elif diff_pct > 10:
1036
- significance = "Moderate"
1037
  elif diff_pct > 5:
1038
- significance = "Minor"
1039
  else:
1040
- significance = "Negligible"
1041
 
1042
- individual_metrics.append(
1043
- f"Sharpness Winner: {winner_emoji}{winner} ({significance})"
1044
- )
 
 
 
 
 
 
1045
 
1046
  # Add individual metrics to info
1047
  if individual_metrics:
1048
- info += "\nπŸ“Š Individual: " + " | ".join(individual_metrics)
1049
 
1050
  # === OVERALL QUALITY ASSESSMENT ===
 
1051
  quality_score = 0
1052
  quality_count = 0
 
1053
 
1054
- # Calculate overall quality score
1055
  if metrics.get("ssim") is not None:
1056
  quality_score += metrics["ssim"]
1057
  quality_count += 1
 
1058
 
 
1059
  if metrics.get("psnr") is not None:
1060
- # Normalize PSNR to 0-1 scale (assume 50dB max)
1061
  psnr_norm = min(metrics["psnr"] / 50, 1.0)
1062
  quality_score += psnr_norm
1063
  quality_count += 1
 
1064
 
 
1065
  if metrics.get("phash") is not None:
1066
  quality_score += metrics["phash"]
1067
  quality_count += 1
 
1068
 
1069
  if quality_count > 0:
1070
  avg_quality = quality_score / quality_count
1071
 
1072
- # Add overall assessment
1073
  if avg_quality >= 0.9:
1074
- overall = "✨ Excellent Match"
1075
  elif avg_quality >= 0.8:
1076
- overall = "βœ… Good Match"
1077
  elif avg_quality >= 0.6:
1078
- overall = "⚠️ Fair Match"
1079
  else:
1080
- overall = "❌ Poor Match"
1081
 
1082
- info += f"\n🎯 Overall: {overall}"
 
1083
 
1084
  return info
1085
 
1086
  def get_updated_plot(self, frame_index):
1087
  """Get updated plot with current frame highlighted"""
1088
  if self.computed_metrics:
1089
- return self.frame_metrics.create_modern_plot(
1090
  self.computed_metrics, int(frame_index)
1091
  )
1092
  return None
@@ -1181,16 +1201,19 @@ def create_app():
1181
  all_videos = get_all_videos_from_json()
1182
 
1183
  with gr.Blocks(
1184
- title="FrameLens - Video Frame Comparator",
1185
  # theme=gr.themes.Soft(),
1186
  ) as app:
1187
  gr.Markdown("""
1188
- # 🎬 FrameLens - Professional Video Quality Analysis
1189
 
1190
- Upload two videos and compare them using comprehensive quality metrics.
1191
- Perfect for analyzing compression effects, processing artifacts, and visual quality assessment.
 
 
 
1192
 
1193
- **✨ Features**: SSIM, PSNR, MSE, pHash, Color Histogram & Sharpness Analysis!
1194
  """)
1195
 
1196
  with gr.Row():
@@ -1276,11 +1299,27 @@ def create_app():
1276
  value="",
1277
  lines=3,
1278
  )
1279
- gr.Markdown("### πŸ“Š Comprehensive Metrics Analysis")
1280
- metrics_plot = gr.Plot(
1281
- label="Multi-Metric Quality Analysis",
1282
- show_label=False,
1283
- )
 
 
 
1284
 
1285
  # Status and frame info (moved below plots, initially hidden)
1286
  info_section = gr.Row(visible=False)
@@ -1290,7 +1329,7 @@ def create_app():
1290
 
1291
  # Event handlers
1292
  def load_videos_handler(video1, video2):
1293
- status, max_frames, frame1, frame2, info, plot = comparator.load_videos(
1294
  video1, video2
1295
  )
1296
 
@@ -1306,13 +1345,28 @@ def create_app():
1306
  # Show/hide sections based on whether videos were loaded successfully
1307
  videos_loaded = max_frames > 0
1308
 
 
 
1309
  return (
1310
  status, # status_output
1311
  slider_update, # frame_slider
1312
  frame1, # frame1_output
1313
  frame2, # frame2_output
1314
  info, # frame_info
1315
- plot, # metrics_plot
 
 
 
 
 
 
1316
  gr.Row(visible=videos_loaded), # frame_controls
1317
  gr.Row(visible=videos_loaded), # frame_display
1318
  gr.Row(visible=videos_loaded), # metrics_section
@@ -1321,13 +1375,44 @@ def create_app():
1321
 
1322
  def update_frames(frame_index):
1323
  if comparator.max_frames == 0:
1324
- return None, None, "No videos loaded", None
 
 
 
1325
 
1326
  frame1, frame2 = comparator.get_frames_at_index(frame_index)
1327
  info = comparator.get_current_frame_info(frame_index)
1328
- plot = comparator.get_updated_plot(frame_index)
1329
 
1330
- return frame1, frame2, info, plot
 
 
1331
 
1332
  # Auto-load when examples populate the inputs
1333
  def auto_load_when_examples_change(video1, video2):
@@ -1343,7 +1428,13 @@ def create_app():
1343
  None, # frame1_output
1344
  None, # frame2_output
1345
  "", # frame_info (now in metrics_section)
1346
- None, # metrics_plot
 
 
 
 
 
 
1347
  gr.Row(visible=False), # frame_controls
1348
  gr.Row(visible=False), # frame_display
1349
  gr.Row(visible=False), # metrics_section
@@ -1360,7 +1451,13 @@ def create_app():
1360
  frame1_output,
1361
  frame2_output,
1362
  frame_info,
1363
- metrics_plot,
 
 
 
 
 
 
1364
  frame_controls,
1365
  frame_display,
1366
  metrics_section,
@@ -1378,7 +1475,13 @@ def create_app():
1378
  frame1_output,
1379
  frame2_output,
1380
  frame_info,
1381
- metrics_plot,
 
 
 
 
 
 
1382
  frame_controls,
1383
  frame_display,
1384
  metrics_section,
@@ -1395,7 +1498,13 @@ def create_app():
1395
  frame1_output,
1396
  frame2_output,
1397
  frame_info,
1398
- metrics_plot,
 
 
 
 
 
 
1399
  frame_controls,
1400
  frame_display,
1401
  metrics_section,
@@ -1406,44 +1515,80 @@ def create_app():
1406
  frame_slider.change(
1407
  fn=update_frames,
1408
  inputs=[frame_slider],
1409
- outputs=[frame1_output, frame2_output, frame_info, metrics_plot],
 
 
1410
  )
1411
 
1412
  # Add comprehensive usage guide
1413
- gr.Markdown(f"""
1414
- ### πŸ’‘ Professional Features:
1415
- - Upload videos in common formats (MP4, AVI, MOV, etc.) or use URLs
1416
- - **6 Quality Metrics**: SSIM, PSNR, MSE, pHash, Color Histogram, Sharpness
1417
- - **Comprehensive Visualization**: 6-panel analysis dashboard
1418
- - **Real-time Analysis**: Navigate frames with live metric updates
1419
- - **Smart Comparisons**: See which video performs better per metric
1420
- - **Correlation Analysis**: Understand relationships between metrics
1421
- {"- Click examples above for instant analysis!" if example_pairs else ""}
1422
-
1423
- ### πŸ“Š Metrics Explained (with Directionality):
1424
- - **SSIM** ↑: Structural Similarity (1.0 = identical, 0.0 = completely different)
1425
- - **PSNR** ↑: Peak Signal-to-Noise Ratio in dB (higher = better quality)
1426
- - **MSE** ↓: Mean Squared Error (lower = more similar)
1427
- - **pHash** ↑: Perceptual Hash similarity (1.0 = visually identical)
1428
- - **Color Histogram** ↑: Color distribution correlation (1.0 = identical colors)
1429
- - **Sharpness** ↑: Laplacian variance (higher = sharper images)
1430
-
1431
- ### 🎯 Quality Assessment Scale:
1432
- - 🟒 **Excellent**: SSIM β‰₯ 0.9, PSNR β‰₯ 40dB, MSE ≀ 50
1433
- - πŸ”΅ **Good**: SSIM β‰₯ 0.8, PSNR β‰₯ 30dB, MSE ≀ 100
1434
- - 🟑 **Fair**: SSIM β‰₯ 0.6, PSNR β‰₯ 20dB, MSE ≀ 200
1435
- - πŸ”΄ **Poor**: Below fair thresholds
1436
-
1437
- ### πŸ† Comparison Indicators:
1438
- - **V1/V2 Winner**: Shows which video performs better per metric
1439
- - **Significance**: Major (>20%), Moderate (10-20%), Minor (5-10%), Negligible (<5%)
1440
- - **Overall Match**: Combined quality assessment across all metrics
1441
- - **Arrows**: ↑ = Higher is Better, ↓ = Lower is Better
1442
-
1443
- ### πŸ“ Configuration:
1444
- {f"Loaded {len(example_pairs)} example comparisons from data.json" if example_pairs else "No examples found in data.json"}
1445
- {f"Available videos: {len(all_videos)} files" if all_videos else ""}
1446
- """)
 
 
1447
 
1448
  return app
1449
 
 
7
  import numpy as np
8
  import plotly.graph_objects as go
9
  from PIL import Image
 
10
  from scipy.stats import pearsonr
11
  from skimage.metrics import mean_squared_error as mse_skimage
12
  from skimage.metrics import peak_signal_noise_ratio as psnr_skimage
 
342
 
343
  return summary
344
 
345
+ def create_individual_metric_plots(self, metrics_list, current_frame=0):
346
+ """Create individual plots for each metric with frame on x-axis"""
347
  if not metrics_list:
348
  return None
349
 
350
+ # Extract frame indices
351
  frame_indices = [m["frame_index"] for m in metrics_list]
352
 
 
 
 
353
  # Helper function to get valid data
354
  def get_valid_data(metric_name):
355
  values = [m.get(metric_name) for m in metrics_list]
 
358
  valid_frames = [frame_indices[i] for i in valid_indices]
359
  return valid_frames, valid_values
360
 
361
+ # Create individual plots for each metric
362
+ plots = {}
 
 
 
363
 
364
+ # 1. SSIM Plot
365
+ ssim_frames, ssim_values = get_valid_data("ssim")
366
  if ssim_values:
367
+ fig_ssim = go.Figure()
368
+ fig_ssim.add_trace(
369
  go.Scatter(
370
  x=ssim_frames,
371
  y=ssim_values,
372
  mode="lines+markers",
373
+ name="SSIM",
374
  line=dict(color="blue", width=3),
375
  marker=dict(size=6),
376
  hovertemplate="<b>Frame %{x}</b><br>SSIM: %{y:.4f}<extra></extra>",
377
+ )
 
 
378
  )
379
 
380
+ if current_frame is not None:
381
+ fig_ssim.add_vline(
382
+ x=current_frame,
383
+ line_dash="dash",
384
+ line_color="red",
385
+ line_width=2,
386
+ )
387
 
388
+ fig_ssim.update_layout(
389
+ height=300,
390
+ margin=dict(t=20, b=40, l=60, r=20),
391
+ plot_bgcolor="rgba(0,0,0,0)",
392
+ paper_bgcolor="rgba(0,0,0,0)",
393
+ showlegend=False,
394
+ )
395
+ fig_ssim.update_xaxes(title_text="Frame")
396
+ fig_ssim.update_yaxes(title_text="SSIM", range=[0, 1.05])
397
+ plots["ssim"] = fig_ssim
398
+
399
+ # 2. PSNR Plot
400
+ psnr_frames, psnr_values = get_valid_data("psnr")
401
  if psnr_values:
402
+ fig_psnr = go.Figure()
403
+ fig_psnr.add_trace(
404
  go.Scatter(
405
  x=psnr_frames,
406
  y=psnr_values,
407
  mode="lines+markers",
408
+ name="PSNR",
409
+ line=dict(color="green", width=3),
410
+ marker=dict(size=6),
411
  hovertemplate="<b>Frame %{x}</b><br>PSNR: %{y:.2f} dB<extra></extra>",
412
+ )
413
+ )
414
+
415
+ if current_frame is not None:
416
+ fig_psnr.add_vline(
417
+ x=current_frame,
418
+ line_dash="dash",
419
+ line_color="red",
420
+ line_width=2,
421
+ )
422
+
423
+ fig_psnr.update_layout(
424
+ height=300,
425
+ margin=dict(t=20, b=40, l=60, r=20),
426
+ plot_bgcolor="rgba(0,0,0,0)",
427
+ paper_bgcolor="rgba(0,0,0,0)",
428
+ showlegend=False,
429
  )
430
+ fig_psnr.update_xaxes(title_text="Frame")
431
+ fig_psnr.update_yaxes(title_text="PSNR (dB)")
432
+ plots["psnr"] = fig_psnr
433
 
434
+ # 3. MSE Plot
435
  mse_frames, mse_values = get_valid_data("mse")
436
  if mse_values:
437
+ fig_mse = go.Figure()
438
+ fig_mse.add_trace(
439
  go.Scatter(
440
  x=mse_frames,
441
  y=mse_values,
442
  mode="lines+markers",
443
+ name="MSE",
444
+ line=dict(color="red", width=3),
445
+ marker=dict(size=6),
446
  hovertemplate="<b>Frame %{x}</b><br>MSE: %{y:.2f}<extra></extra>",
447
+ )
448
+ )
449
+
450
+ if current_frame is not None:
451
+ fig_mse.add_vline(
452
+ x=current_frame,
453
+ line_dash="dash",
454
+ line_color="red",
455
+ line_width=2,
456
+ )
457
+
458
+ fig_mse.update_layout(
459
+ height=300,
460
+ margin=dict(t=20, b=40, l=60, r=20),
461
+ plot_bgcolor="rgba(0,0,0,0)",
462
+ paper_bgcolor="rgba(0,0,0,0)",
463
+ showlegend=False,
464
  )
465
+ fig_mse.update_xaxes(title_text="Frame")
466
+ fig_mse.update_yaxes(title_text="MSE")
467
+ plots["mse"] = fig_mse
468
 
469
+ # 4. pHash Plot
470
+ phash_frames, phash_values = get_valid_data("phash")
471
  if phash_values:
472
+ fig_phash = go.Figure()
473
+ fig_phash.add_trace(
474
  go.Scatter(
475
  x=phash_frames,
476
  y=phash_values,
477
  mode="lines+markers",
478
+ name="pHash",
479
+ line=dict(color="purple", width=3),
480
+ marker=dict(size=6),
481
  hovertemplate="<b>Frame %{x}</b><br>pHash: %{y:.4f}<extra></extra>",
482
+ )
483
+ )
484
+
485
+ if current_frame is not None:
486
+ fig_phash.add_vline(
487
+ x=current_frame,
488
+ line_dash="dash",
489
+ line_color="red",
490
+ line_width=2,
491
+ )
492
+
493
+ fig_phash.update_layout(
494
+ height=300,
495
+ margin=dict(t=20, b=40, l=60, r=20),
496
+ plot_bgcolor="rgba(0,0,0,0)",
497
+ paper_bgcolor="rgba(0,0,0,0)",
498
+ showlegend=False,
499
  )
500
+ fig_phash.update_xaxes(title_text="Frame")
501
+ fig_phash.update_yaxes(title_text="pHash Similarity")
502
+ plots["phash"] = fig_phash
503
 
504
+ # 5. Color Histogram Correlation Plot
505
  hist_frames, hist_values = get_valid_data("color_hist_corr")
506
  if hist_values:
507
+ fig_hist = go.Figure()
508
+ fig_hist.add_trace(
509
  go.Scatter(
510
  x=hist_frames,
511
  y=hist_values,
512
  mode="lines+markers",
513
+ name="Color Histogram",
514
+ line=dict(color="orange", width=3),
515
+ marker=dict(size=6),
516
+ hovertemplate="<b>Frame %{x}</b><br>Color Histogram: %{y:.4f}<extra></extra>",
517
+ )
 
 
 
518
  )
519
 
520
+ if current_frame is not None:
521
+ fig_hist.add_vline(
522
+ x=current_frame,
523
+ line_dash="dash",
524
+ line_color="red",
525
+ line_width=2,
526
+ )
 
 
 
 
 
 
 
 
 
 
527
 
528
+ fig_hist.update_layout(
529
+ height=300,
530
+ margin=dict(t=20, b=40, l=60, r=20),
531
+ plot_bgcolor="rgba(0,0,0,0)",
532
+ paper_bgcolor="rgba(0,0,0,0)",
533
+ showlegend=False,
 
 
 
 
 
 
 
 
534
  )
535
+ fig_hist.update_xaxes(title_text="Frame")
536
+ fig_hist.update_yaxes(title_text="Color Histogram Correlation")
537
+ plots["color_hist"] = fig_hist
538
 
539
+ # 6. Sharpness Comparison Plot
540
+ sharp1_frames, sharp1_values = get_valid_data("sharpness1")
541
+ sharp2_frames, sharp2_values = get_valid_data("sharpness2")
 
 
 
 
 
 
 
 
 
542
 
543
+ if sharp1_values or sharp2_values:
544
+ fig_sharp = go.Figure()
545
+
546
+ if sharp1_values:
547
+ fig_sharp.add_trace(
548
+ go.Scatter(
549
+ x=sharp1_frames,
550
+ y=sharp1_values,
551
+ mode="lines+markers",
552
+ name="Video 1",
553
+ line=dict(color="darkgreen", width=3),
554
+ marker=dict(size=6),
555
+ hovertemplate="<b>Frame %{x}</b><br>Video 1 Sharpness: %{y:.1f}<extra></extra>",
556
+ )
557
+ )
558
 
559
+ if sharp2_values:
560
+ fig_sharp.add_trace(
561
+ go.Scatter(
562
+ x=sharp2_frames,
563
+ y=sharp2_values,
564
+ mode="lines+markers",
565
+ name="Video 2",
566
+ line=dict(color="darkblue", width=3),
567
+ marker=dict(size=6),
568
+ hovertemplate="<b>Frame %{x}</b><br>Video 2 Sharpness: %{y:.1f}<extra></extra>",
569
+ )
570
+ )
571
 
572
+ if current_frame is not None:
573
+ fig_sharp.add_vline(
574
+ x=current_frame,
575
+ line_dash="dash",
576
+ line_color="red",
577
+ line_width=2,
578
+ )
 
 
579
 
580
+ fig_sharp.update_layout(
581
+ height=300,
582
+ margin=dict(t=20, b=40, l=60, r=20),
583
+ plot_bgcolor="rgba(0,0,0,0)",
584
+ paper_bgcolor="rgba(0,0,0,0)",
585
+ showlegend=True,
586
+ legend=dict(
587
+ orientation="h", yanchor="bottom", y=1.02, xanchor="center", x=0.5
588
+ ),
589
  )
590
+ fig_sharp.update_xaxes(title_text="Frame")
591
+ fig_sharp.update_yaxes(title_text="Sharpness")
592
+ plots["sharpness"] = fig_sharp
593
 
594
+ # 7. Overall Quality Score Plot (Combination of metrics)
595
+ # Calculate overall quality score by combining normalized metrics
 
 
 
596
  if ssim_values and psnr_values and len(ssim_values) == len(psnr_values):
597
+ # Get data for metrics that contribute to overall score
598
+ phash_frames_overall, phash_values_overall = get_valid_data("phash")
599
+
600
+ # Ensure we have the same frames for all metrics
601
+ common_frames = set(ssim_frames) & set(psnr_frames)
602
+ if phash_values_overall:
603
+ common_frames = common_frames & set(phash_frames_overall)
604
+
605
+ common_frames = sorted(list(common_frames))
606
+
607
+ if common_frames:
608
+ # Extract values for common frames
609
+ ssim_common = [
610
+ ssim_values[ssim_frames.index(f)]
611
+ for f in common_frames
612
+ if f in ssim_frames
613
+ ]
614
+ psnr_common = [
615
+ psnr_values[psnr_frames.index(f)]
616
+ for f in common_frames
617
+ if f in psnr_frames
618
+ ]
619
+
620
+ # Normalize PSNR to 0-1 scale (typical range 0-50dB)
621
+ psnr_normalized = [min(p / 50.0, 1.0) for p in psnr_common]
622
+
623
+ # Start with SSIM and normalized PSNR
624
+ quality_components = [ssim_common, psnr_normalized]
625
+ component_names = ["SSIM", "PSNR"]
626
+
627
+ # Add pHash if available
628
+ if phash_values_overall:
629
+ phash_common = [
630
+ phash_values_overall[phash_frames_overall.index(f)]
631
+ for f in common_frames
632
+ if f in phash_frames_overall
633
+ ]
634
+ if len(phash_common) == len(ssim_common):
635
+ quality_components.append(phash_common)
636
+ component_names.append("pHash")
637
+
638
+ # Calculate average across all components
639
+ overall_quality = []
640
+ for i in range(len(common_frames)):
641
+ frame_scores = [
642
+ component[i]
643
+ for component in quality_components
644
+ if i < len(component)
645
+ ]
646
+ overall_quality.append(sum(frame_scores) / len(frame_scores))
647
+
648
+ fig_overall = go.Figure()
649
+ fig_overall.add_trace(
650
+ go.Scatter(
651
+ x=common_frames,
652
+ y=overall_quality,
653
+ mode="lines+markers",
654
+ name="Overall Quality",
655
+ line=dict(color="gold", width=4),
656
+ marker=dict(size=8),
657
+ hovertemplate="<b>Frame %{x}</b><br>Overall Quality: %{y:.3f}<br><i>Combined from: "
658
+ + ", ".join(component_names)
659
+ + "</i><extra></extra>",
660
+ fill="tonexty",
661
+ )
662
+ )
663
 
664
+ if current_frame is not None:
665
+ fig_overall.add_vline(
666
+ x=current_frame,
667
+ line_dash="dash",
668
+ line_color="red",
669
+ line_width=2,
670
+ )
671
+
672
+ fig_overall.update_layout(
673
+ height=300,
674
+ margin=dict(t=20, b=40, l=60, r=20),
675
+ plot_bgcolor="rgba(0,0,0,0)",
676
+ paper_bgcolor="rgba(0,0,0,0)",
677
+ showlegend=False,
678
+ )
679
+ fig_overall.update_xaxes(title_text="Frame")
680
+ fig_overall.update_yaxes(
681
+ title_text="Overall Quality Score", range=[0, 1.05]
682
+ )
683
+ plots["overall"] = fig_overall
684
 
685
+ return plots
 
 
 
 
 
 
 
 
686
 
687
+ def create_modern_plot(self, metrics_list, current_frame=0):
688
+ """Create individual metric plots instead of combined dashboard"""
689
+ return self.create_individual_metric_plots(metrics_list, current_frame)
690
 
691
 
692
  class VideoFrameComparator:
 
789
 
790
  # Compute metrics if both videos are present and not in data.json
791
  metrics_info = ""
792
+ plots = None
793
 
794
  if (
795
  video1_path
 
830
  metrics_info += f"Valid Frames: {self.metrics_summary['valid_frames']}/{self.metrics_summary['total_frames']}"
831
 
832
  # Generate initial plot
833
+ plots = self.frame_metrics.create_individual_metric_plots(
834
  self.computed_metrics, 0
835
  )
836
  else:
 
865
  frame1,
866
  frame2,
867
  self.get_current_frame_info(0),
868
+ plots,
869
  )
870
 
871
  def get_frames_at_index(self, frame_index):
 
929
  quality = "🟑 Fair"
930
  else:
931
  quality = "πŸ”΄ Poor"
932
+ comparison_metrics.append(
933
+ f"SSIM: {ssim_val:.4f} ({quality} similarity)"
934
+ )
935
 
936
  # PSNR with quality indicator
937
  if metrics.get("psnr") is not None:
938
  psnr_val = metrics["psnr"]
939
  if psnr_val >= 40:
940
+ psnr_quality = "🟒 Excellent"
941
  elif psnr_val >= 30:
942
+ psnr_quality = "πŸ”΅ Good"
943
  elif psnr_val >= 20:
944
+ psnr_quality = "🟑 Fair"
945
  else:
946
+ psnr_quality = "πŸ”΄ Poor"
947
+ comparison_metrics.append(
948
+ f"PSNR: {psnr_val:.1f}dB ({psnr_quality} signal quality)"
949
+ )
950
 
951
  # MSE with quality indicator (lower is better)
952
  if metrics.get("mse") is not None:
953
  mse_val = metrics["mse"]
954
  if mse_val <= 50:
955
+ mse_quality = "🟒 Very Similar"
956
  elif mse_val <= 100:
957
+ mse_quality = "πŸ”΅ Similar"
958
  elif mse_val <= 200:
959
+ mse_quality = "🟑 Moderately Different"
960
  else:
961
+ mse_quality = "πŸ”΄ Very Different"
962
+ comparison_metrics.append(f"MSE: {mse_val:.1f} ({mse_quality})")
963
 
964
  # pHash with quality indicator
965
  if metrics.get("phash") is not None:
966
  phash_val = metrics["phash"]
967
  if phash_val >= 0.95:
968
+ phash_quality = "🟒 Nearly Identical"
969
  elif phash_val >= 0.9:
970
+ phash_quality = "πŸ”΅ Very Similar"
971
  elif phash_val >= 0.8:
972
+ phash_quality = "🟑 Somewhat Similar"
973
  else:
974
+ phash_quality = "πŸ”΄ Different"
975
+ comparison_metrics.append(
976
+ f"pHash: {phash_val:.3f} ({phash_quality} perceptually)"
977
+ )
978
 
979
  # Color Histogram Correlation
980
  if metrics.get("color_hist_corr") is not None:
981
  color_val = metrics["color_hist_corr"]
982
  if color_val >= 0.9:
983
+ color_quality = "🟒 Very Similar Colors"
984
  elif color_val >= 0.8:
985
+ color_quality = "πŸ”΅ Similar Colors"
986
  elif color_val >= 0.6:
987
+ color_quality = "🟑 Moderate Color Diff"
988
  else:
989
+ color_quality = "πŸ”΄ Different Colors"
990
+ comparison_metrics.append(f"Color: {color_val:.3f} ({color_quality})")
991
 
992
  # Add comparison metrics to info
993
  if comparison_metrics:
994
+ info += "\nπŸ“Š Comparison Analysis: " + " | ".join(comparison_metrics)
995
 
996
+ # === INDIVIDUAL VIDEO QUALITY ===
997
  individual_metrics = []
998
 
999
  # Individual Sharpness for each video
1000
  if metrics.get("sharpness1") is not None:
1001
  sharp1 = metrics["sharpness1"]
1002
  if sharp1 >= 200:
1003
+ sharp1_quality = "🟒 Sharp"
1004
  elif sharp1 >= 100:
1005
+ sharp1_quality = "πŸ”΅ Moderate"
1006
  elif sharp1 >= 50:
1007
+ sharp1_quality = "🟑 Soft"
1008
  else:
1009
+ sharp1_quality = "πŸ”΄ Blurry"
1010
  individual_metrics.append(
1011
+ f"V1 Sharpness: {sharp1:.0f} ({sharp1_quality})"
1012
  )
1013
 
1014
  if metrics.get("sharpness2") is not None:
1015
  sharp2 = metrics["sharpness2"]
1016
  if sharp2 >= 200:
1017
+ sharp2_quality = "🟒 Sharp"
1018
  elif sharp2 >= 100:
1019
+ sharp2_quality = "πŸ”΅ Moderate"
1020
  elif sharp2 >= 50:
1021
+ sharp2_quality = "🟑 Soft"
1022
  else:
1023
+ sharp2_quality = "πŸ”΄ Blurry"
1024
  individual_metrics.append(
1025
+ f"V2 Sharpness: {sharp2:.0f} ({sharp2_quality})"
1026
  )
1027
 
1028
+ # Sharpness comparison
1029
  if (
1030
  metrics.get("sharpness1") is not None
1031
  and metrics.get("sharpness2") is not None
 
1033
  sharp1 = metrics["sharpness1"]
1034
  sharp2 = metrics["sharpness2"]
1035
 
1036
+ # Calculate difference percentage
 
 
 
 
 
 
 
 
 
 
1037
  diff_pct = abs(sharp1 - sharp2) / max(sharp1, sharp2) * 100
1038
 
1039
+ # Determine significance with clearer labels
1040
  if diff_pct > 20:
1041
+ significance = "πŸ”΄ MAJOR difference"
1042
  elif diff_pct > 10:
1043
+ significance = "🟑 MODERATE difference"
1044
  elif diff_pct > 5:
1045
+ significance = "πŸ”΅ MINOR difference"
1046
  else:
1047
+ significance = "🟒 NEGLIGIBLE difference"
1048
 
1049
+ # Determine which is sharper
1050
+ if sharp1 > sharp2:
1051
+ comparison = "V1 is sharper"
1052
+ elif sharp2 > sharp1:
1053
+ comparison = "V2 is sharper"
1054
+ else:
1055
+ comparison = "Equal sharpness"
1056
+
1057
+ individual_metrics.append(f"Sharpness: {comparison} ({significance})")
1058
 
1059
  # Add individual metrics to info
1060
  if individual_metrics:
1061
+ info += "\n🎯 Individual Quality: " + " | ".join(individual_metrics)
1062
 
1063
  # === OVERALL QUALITY ASSESSMENT ===
1064
+ # Calculate combined quality score from multiple metrics
1065
  quality_score = 0
1066
  quality_count = 0
1067
+ metric_contributions = []
1068
 
1069
+ # SSIM contribution
1070
  if metrics.get("ssim") is not None:
1071
  quality_score += metrics["ssim"]
1072
  quality_count += 1
1073
+ metric_contributions.append(f"SSIM({metrics['ssim']:.3f})")
1074
 
1075
+ # PSNR contribution (normalized to 0-1 scale)
1076
  if metrics.get("psnr") is not None:
 
1077
  psnr_norm = min(metrics["psnr"] / 50, 1.0)
1078
  quality_score += psnr_norm
1079
  quality_count += 1
1080
+ metric_contributions.append(f"PSNR({psnr_norm:.3f})")
1081
 
1082
+ # pHash contribution
1083
  if metrics.get("phash") is not None:
1084
  quality_score += metrics["phash"]
1085
  quality_count += 1
1086
+ metric_contributions.append(f"pHash({metrics['phash']:.3f})")
1087
 
1088
  if quality_count > 0:
1089
  avg_quality = quality_score / quality_count
1090
 
1091
+ # Add overall assessment with formula explanation
1092
  if avg_quality >= 0.9:
1093
+ overall = "✨ Excellent Overall"
1094
  elif avg_quality >= 0.8:
1095
+ overall = "βœ… Good Overall"
1096
  elif avg_quality >= 0.6:
1097
+ overall = "⚠️ Fair Overall"
1098
  else:
1099
+ overall = "❌ Poor Overall"
1100
 
1101
+ info += f"\n🎯 Overall Quality: {avg_quality:.3f} ({overall})"
1102
+ info += f"\n Formula: Average of {' + '.join(metric_contributions)} = {avg_quality:.3f}"
1103
 
1104
  return info
1105
 
1106
  def get_updated_plot(self, frame_index):
1107
  """Get updated plot with current frame highlighted"""
1108
  if self.computed_metrics:
1109
+ return self.frame_metrics.create_individual_metric_plots(
1110
  self.computed_metrics, int(frame_index)
1111
  )
1112
  return None
 
1201
  all_videos = get_all_videos_from_json()
1202
 
1203
  with gr.Blocks(
1204
+ title="Frame Arena - Video Frame Comparator",
1205
  # theme=gr.themes.Soft(),
1206
  ) as app:
1207
  gr.Markdown("""
1208
+ # 🎬 Frame Arena: Frame-by-frame comparison of any two videos
1209
 
1210
+ - Upload videos in common formats (MP4, AVI, MOV, etc.) with the same number of frames, or use URLs
1211
+ - **7 Quality Metrics**: SSIM, PSNR, MSE, pHash, Color Histogram, Sharpness + Overall Quality
1212
+ - **Individual Visualization**: Each metric gets its own dedicated plot
1213
+ - **Real-time Analysis**: Navigate frames with live metric updates
1214
+ - **Smart Comparisons**: Understand differences between videos per metric
1215
 
1216
+ **Perfect for**: Analyzing compression effects and processing artifacts, assessing visual quality, and comparing compression algorithms.
1217
  """)
1218
 
1219
  with gr.Row():
 
1299
  value="",
1300
  lines=3,
1301
  )
1302
+ gr.Markdown("### πŸ“Š Individual Metric Analysis")
1303
+
1304
+ # Overall quality plot
1305
+ with gr.Row():
1306
+ overall_plot = gr.Plot(
1307
+ label="Overall Quality (Combined Metric [SSIM + normalized_PSNR + pHash])",
1308
+ show_label=True,
1309
+ )
1310
+
1311
+ # Individual metric plots
1312
+ with gr.Row():
1313
+ ssim_plot = gr.Plot(label="SSIM", show_label=True)
1314
+ psnr_plot = gr.Plot(label="PSNR", show_label=True)
1315
+
1316
+ with gr.Row():
1317
+ mse_plot = gr.Plot(label="MSE", show_label=True)
1318
+ phash_plot = gr.Plot(label="pHash", show_label=True)
1319
+
1320
+ with gr.Row():
1321
+ color_plot = gr.Plot(label="Color Histogram", show_label=True)
1322
+ sharpness_plot = gr.Plot(label="Sharpness", show_label=True)
1323
 
1324
  # Status and frame info (moved below plots, initially hidden)
1325
  info_section = gr.Row(visible=False)
 
1329
 
1330
  # Event handlers
1331
  def load_videos_handler(video1, video2):
1332
+ status, max_frames, frame1, frame2, info, plots = comparator.load_videos(
1333
  video1, video2
1334
  )
1335
 
 
1345
  # Show/hide sections based on whether videos were loaded successfully
1346
  videos_loaded = max_frames > 0
1347
 
1348
+ # Extract individual plots from the plots dictionary
1349
+ ssim_fig = plots.get("ssim") if plots else None
1350
+ psnr_fig = plots.get("psnr") if plots else None
1351
+ mse_fig = plots.get("mse") if plots else None
1352
+ phash_fig = plots.get("phash") if plots else None
1353
+ color_fig = plots.get("color_hist") if plots else None
1354
+ sharpness_fig = plots.get("sharpness") if plots else None
1355
+ overall_fig = plots.get("overall") if plots else None
1356
+
1357
  return (
1358
  status, # status_output
1359
  slider_update, # frame_slider
1360
  frame1, # frame1_output
1361
  frame2, # frame2_output
1362
  info, # frame_info
1363
+ ssim_fig, # ssim_plot
1364
+ psnr_fig, # psnr_plot
1365
+ mse_fig, # mse_plot
1366
+ phash_fig, # phash_plot
1367
+ color_fig, # color_plot
1368
+ sharpness_fig, # sharpness_plot
1369
+ overall_fig, # overall_plot
1370
  gr.Row(visible=videos_loaded), # frame_controls
1371
  gr.Row(visible=videos_loaded), # frame_display
1372
  gr.Row(visible=videos_loaded), # metrics_section
 
1375
 
1376
  def update_frames(frame_index):
1377
  if comparator.max_frames == 0:
1378
+ return (
1379
+ None,
1380
+ None,
1381
+ "No videos loaded",
1382
+ None,
1383
+ None,
1384
+ None,
1385
+ None,
1386
+ None,
1387
+ None,
1388
+ None,
1389
+ )
1390
 
1391
  frame1, frame2 = comparator.get_frames_at_index(frame_index)
1392
  info = comparator.get_current_frame_info(frame_index)
1393
+ plots = comparator.get_updated_plot(frame_index)
1394
 
1395
+ # Extract individual plots from the plots dictionary
1396
+ ssim_fig = plots.get("ssim") if plots else None
1397
+ psnr_fig = plots.get("psnr") if plots else None
1398
+ mse_fig = plots.get("mse") if plots else None
1399
+ phash_fig = plots.get("phash") if plots else None
1400
+ color_fig = plots.get("color_hist") if plots else None
1401
+ sharpness_fig = plots.get("sharpness") if plots else None
1402
+ overall_fig = plots.get("overall") if plots else None
1403
+
1404
+ return (
1405
+ frame1,
1406
+ frame2,
1407
+ info,
1408
+ ssim_fig,
1409
+ psnr_fig,
1410
+ mse_fig,
1411
+ phash_fig,
1412
+ color_fig,
1413
+ sharpness_fig,
1414
+ overall_fig,
1415
+ )
1416
 
1417
  # Auto-load when examples populate the inputs
1418
  def auto_load_when_examples_change(video1, video2):
 
1428
  None, # frame1_output
1429
  None, # frame2_output
1430
  "", # frame_info (now in metrics_section)
1431
+ None, # ssim_plot
1432
+ None, # psnr_plot
1433
+ None, # mse_plot
1434
+ None, # phash_plot
1435
+ None, # color_plot
1436
+ None, # sharpness_plot
1437
+ None, # overall_plot
1438
  gr.Row(visible=False), # frame_controls
1439
  gr.Row(visible=False), # frame_display
1440
  gr.Row(visible=False), # metrics_section
 
1451
  frame1_output,
1452
  frame2_output,
1453
  frame_info,
1454
+ ssim_plot,
1455
+ psnr_plot,
1456
+ mse_plot,
1457
+ phash_plot,
1458
+ color_plot,
1459
+ sharpness_plot,
1460
+ overall_plot,
1461
  frame_controls,
1462
  frame_display,
1463
  metrics_section,
 
1475
  frame1_output,
1476
  frame2_output,
1477
  frame_info,
1478
+ ssim_plot,
1479
+ psnr_plot,
1480
+ mse_plot,
1481
+ phash_plot,
1482
+ color_plot,
1483
+ sharpness_plot,
1484
+ overall_plot,
1485
  frame_controls,
1486
  frame_display,
1487
  metrics_section,
 
1498
  frame1_output,
1499
  frame2_output,
1500
  frame_info,
1501
+ ssim_plot,
1502
+ psnr_plot,
1503
+ mse_plot,
1504
+ phash_plot,
1505
+ color_plot,
1506
+ sharpness_plot,
1507
+ overall_plot,
1508
  frame_controls,
1509
  frame_display,
1510
  metrics_section,
 
1515
  frame_slider.change(
1516
  fn=update_frames,
1517
  inputs=[frame_slider],
1518
+ outputs=[
1519
+ frame1_output,
1520
+ frame2_output,
1521
+ frame_info,
1522
+ ssim_plot,
1523
+ psnr_plot,
1524
+ mse_plot,
1525
+ phash_plot,
1526
+ color_plot,
1527
+ sharpness_plot,
1528
+ overall_plot,
1529
+ ],
1530
  )
1531
 
1532
  # Add comprehensive usage guide
1533
+ with gr.Accordion("πŸ“– Usage Guide & Metrics Reference", open=False):
1534
+ with gr.Accordion("πŸ“Š Metrics Explained", open=False):
1535
+ gr.Markdown("""
1536
+ - **SSIM**: Structural Similarity (1.0 = identical structure, 0.0 = completely different)
1537
+ - **PSNR**: Peak Signal-to-Noise Ratio in dB (higher = better quality, less noise)
1538
+ - **MSE**: Mean Squared Error (lower = more similar pixel values)
1539
+ - **pHash**: Perceptual Hash similarity (1.0 = visually identical)
1540
+ - **Color Histogram**: Color distribution correlation (1.0 = identical color patterns)
1541
+ - **Sharpness**: Laplacian variance per video (higher = sharper/more detailed images)
1542
+ - **Overall Quality**: Combined metric averaging SSIM, normalized PSNR, and pHash (when available)
1543
+ """)
1544
+
1545
+ with gr.Accordion(
1546
+ "🎯 Quality Assessment Scale (Research-Based Thresholds)", open=False
1547
+ ):
1548
+ gr.Markdown("""
1549
+ **SSIM Scale** (based on human perception studies):
1550
+ - 🟒 **Excellent (β‰₯0.9)**: Visually indistinguishable differences
1551
+ - πŸ”΅ **Good (β‰₯0.8)**: Minor visible differences, still high quality
1552
+ - 🟑 **Fair (β‰₯0.6)**: Noticeable differences, acceptable quality
1553
+ - πŸ”΄ **Poor (<0.6)**: Significant visible artifacts and differences
1554
+
1555
+ **PSNR Scale** (standard video quality benchmarks):
1556
+ - 🟒 **Excellent (β‰₯40dB)**: Professional broadcast quality
1557
+ - πŸ”΅ **Good (β‰₯30dB)**: High consumer video quality
1558
+ - 🟑 **Fair (β‰₯20dB)**: Acceptable for web streaming
1559
+ - πŸ”΄ **Poor (<20dB)**: Low quality with visible compression artifacts
1560
+
1561
+ **MSE Scale** (pixel difference thresholds):
1562
+ - 🟒 **Very Similar (≀50)**: Minimal pixel-level differences
1563
+ - πŸ”΅ **Similar (≀100)**: Small differences, good quality preservation
1564
+ - 🟑 **Moderately Different (≀200)**: Noticeable but acceptable differences
1565
+ - πŸ”΄ **Very Different (>200)**: Significant pixel-level changes
1566
+ """)
1567
+
1568
+ with gr.Accordion("πŸ” Understanding Comparisons", open=False):
1569
+ gr.Markdown("""
1570
+ **Comparison Analysis**: Shows how similar/different the videos are
1571
+ - Most metrics indicate **similarity** - not which video "wins"
1572
+ - Higher SSIM/PSNR/pHash/Color = more similar videos
1573
+ - Lower MSE = more similar videos
1574
+
1575
+ **Individual Quality**: Shows the quality of each video separately
1576
+ - Sharpness comparison shows which video has more detail
1577
+ - Significance levels: πŸ”΄ MAJOR (>20%), 🟑 MODERATE (10-20%), πŸ”΅ MINOR (5-10%), 🟒 NEGLIGIBLE (<5%)
1578
+
1579
+ **Overall Quality**: Combines multiple metrics to provide a single similarity score
1580
+ - **Formula**: Average of [SSIM + normalized_PSNR + pHash]
1581
+ - **PSNR Normalization**: PSNR values are divided by 50dB and capped at 1.0
1582
+ - **Range**: 0.0 to 1.0 (higher = more similar videos overall)
1583
+ - **Purpose**: Provides a single metric when you need one overall assessment
1584
+ - **Limitation**: Different metrics may disagree; check individual metrics for details
1585
+ """)
1586
+
1587
+ with gr.Accordion("πŸ“ Configuration", open=False):
1588
+ gr.Markdown(
1589
+ f"{'Loaded ' + str(len(example_pairs)) + ' example comparisons from data.json' if example_pairs else 'No examples found in data.json'}<br>"
1590
+ f"{'Available videos: ' + str(len(all_videos)) + ' files' if all_videos else ''}"
1591
+ )
1592
 
1593
  return app
1594
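For reference, the Overall Quality score described in the new usage guide ("Average of [SSIM + normalized_PSNR + pHash]", with PSNR divided by 50 dB and capped at 1.0) reduces to a few lines of arithmetic. A minimal sketch with an illustrative worked example (the function name and the numbers are not from the app):

```python
def overall_quality(ssim: float, psnr_db: float, phash: float | None = None) -> float:
    """Average SSIM, PSNR normalized by 50 dB (capped at 1.0), and pHash when available."""
    components = [ssim, min(psnr_db / 50.0, 1.0)]
    if phash is not None:
        components.append(phash)
    return sum(components) / len(components)


# Worked example: SSIM 0.92, PSNR 34 dB, pHash 0.95
# -> (0.92 + 0.68 + 0.95) / 3 = 0.85, which the guide's scale labels "✅ Good Overall" (0.8-0.9)
print(round(overall_quality(0.92, 34.0, 0.95), 3))  # 0.85
```

Because the score averages similarity-style metrics, it says how alike the two videos are overall, not which one looks better; the per-metric plots remain the place to see where they diverge.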