davidberenstein1957 committed
Commit a1181d4 · 1 Parent(s): 7bbbbf5

Refactor PSNR normalization to use min-max scaling for improved accuracy. Remove unused quality threshold indicators and enhance the layout of the usage guide with clearer metric explanations. Implement debouncing in the auto-load function to prevent redundant processing of the same video pair.
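
For readers who want the normalization change at a glance: below is a minimal, self-contained sketch of the min-max scaling this commit switches to (illustrative helper name and sample values, not the app's FrameMetrics code). The previous behaviour divided each PSNR value by a fixed 50 dB and clamped at 1.0; min-max scaling instead spreads the per-frame values of the current comparison across the full 0-1 range.

# Sketch of the min-max scaling adopted in this commit (illustrative, standalone).
def normalize_min_max(values):
    """Scale a metric series to [0, 1]; empty input -> [], constant input -> all 0.0."""
    if not values:
        return []
    lo, hi = min(values), max(values)
    if hi == lo:
        return [0.0 for _ in values]
    return [(v - lo) / (hi - lo) for v in values]

psnr_per_frame = [28.4, 31.2, 35.9, 30.1]   # hypothetical per-frame PSNR values in dB
print(normalize_min_max(psnr_per_frame))     # approx. [0.0, 0.373, 1.0, 0.227]

Note that min-max scaling is relative to each clip: the normalized PSNR now reflects variation within the comparison rather than an absolute quality level.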

Files changed (1):
  1. app.py (+77, -89)

app.py:
@@ -703,8 +703,18 @@ class FrameMetrics:
             if f in psnr_frames
         ]

-        # Normalize PSNR to 0-1 scale (typical range 0-50dB)
-        psnr_normalized = [min(p / 50.0, 1.0) for p in psnr_common]
+        # Normalize PSNR to 0-1 scale using min-max normalization
+        if psnr_common:
+            psnr_min = min(psnr_common)
+            psnr_max = max(psnr_common)
+            if psnr_max > psnr_min:
+                psnr_normalized = [
+                    (p - psnr_min) / (psnr_max - psnr_min) for p in psnr_common
+                ]
+            else:
+                psnr_normalized = [0.0 for _ in psnr_common]
+        else:
+            psnr_normalized = []

         # Start with SSIM and normalized PSNR
         quality_components = [ssim_common, psnr_normalized]
@@ -786,28 +796,6 @@ class FrameMetrics:
         )

         # Add quality threshold indicators if there are significant variations
-        if (
-            quality_range > 0.03
-        ):  # Show thresholds if there's meaningful variation
-            # Add reference lines for quality levels within the visible range
-            if y_min <= 0.9 <= y_max:
-                fig_overall.add_hline(
-                    y=0.9,
-                    line_dash="dot",
-                    line_color="green",
-                    line_width=1,
-                    annotation_text="Excellent (0.9)",
-                    annotation_position="right",
-                )
-            if y_min <= 0.8 <= y_max:
-                fig_overall.add_hline(
-                    y=0.8,
-                    line_dash="dot",
-                    line_color="blue",
-                    line_width=1,
-                    annotation_text="Good (0.8)",
-                    annotation_position="right",
-                )

         if current_frame is not None:
             fig_overall.add_vline(
@@ -919,7 +907,7 @@ class VideoFrameComparator:
                 return True

             return False
-        except:
+        except Exception:
             return False

     def load_videos(self, video1_path, video2_path):
@@ -1416,7 +1404,6 @@ def create_app():
     print(f"DEBUG: Loaded {len(example_pairs)} example pairs")
     for i, pair in enumerate(example_pairs):
         print(f" Example {i + 1}: {pair}")
-    all_videos = get_all_videos_from_json()

     with gr.Blocks(
         title="Frame Arena - Video Frame Comparator",
@@ -1478,37 +1465,6 @@ def create_app():
             pointer-events: auto !important;
         }
         """,
-        # js="""
-        # function updatePlotColors() {
-        #     // Get current theme color
-        #     const bodyStyle = getComputedStyle(document.body);
-        #     const textColor = bodyStyle.getPropertyValue('--body-text-color') ||
-        #         bodyStyle.color ||
-        #         (bodyStyle.backgroundColor === 'rgb(255, 255, 255)' ? '#000000' : '#ffffff');
-        #     // Update all plot text elements
-        #     document.querySelectorAll('.plotly svg text').forEach(text => {
-        #         text.setAttribute('fill', textColor);
-        #     });
-        # }
-        # // Update colors on load and theme change
-        # window.addEventListener('load', updatePlotColors);
-        # // Watch for theme changes
-        # const observer = new MutationObserver(updatePlotColors);
-        # observer.observe(document.body, {
-        #     attributes: true,
-        #     attributeFilter: ['class', 'style']
-        # });
-        # // Also watch for CSS variable changes
-        # if (window.CSS && CSS.supports('color', 'var(--body-text-color)')) {
-        #     const style = document.createElement('style');
-        #     style.textContent = `
-        #         .plotly svg text {
-        #             fill: var(--body-text-color, currentColor) !important;
-        #         }
-        #     `;
-        #     document.head.appendChild(style);
-        # }
-        # """,
     ) as app:
         gr.Markdown("""
         # 🎬 Frame Arena: Frame by frame comparisons of any videos
@@ -1560,7 +1516,7 @@ def create_app():
         # Add examples at the top for better UX
         if example_pairs:
             gr.Markdown("### 📁 Example Video Comparisons")
-            examples_component_top = gr.Examples(
+            gr.Examples(
                 examples=example_pairs,
                 inputs=[video1_input, video2_input],
                 label="Click any example to load video pairs:",
@@ -1618,28 +1574,11 @@ def create_app():
                         lines=3,
                     )

-                    # Individual metric plots
-                    with gr.Row():
-                        ssim_plot = gr.Plot(label="SSIM", show_label=True)
-                        psnr_plot = gr.Plot(label="PSNR", show_label=True)
-
-                    with gr.Row():
-                        mse_plot = gr.Plot(label="MSE", show_label=True)
-                        phash_plot = gr.Plot(label="pHash", show_label=True)
-
-                    with gr.Row():
-                        color_plot = gr.Plot(label="Color Histogram", show_label=True)
-                        sharpness_plot = gr.Plot(label="Sharpness", show_label=True)
-
-                    # Add comprehensive usage guide
-                    with gr.Accordion("📖 Usage Guide & Metrics Reference", open=False):
-                        with gr.Row() as info_section:
-                            with gr.Column():
-                                status_output = gr.Textbox(
-                                    label="Status", interactive=False, lines=8
-                                )
-                            with gr.Column():
-                                gr.Markdown("""
+            # Add comprehensive usage guide underneath frame information & metrics
+            with gr.Accordion("📖 Usage Guide & Metrics Reference", open=False):
+                with gr.Row():
+                    with gr.Column():
+                        gr.Markdown("""
                 ### 📊 Metrics Explained
                 - **SSIM**: Structural Similarity (1.0 = identical structure, 0.0 = completely different)
                 - **PSNR**: Peak Signal-to-Noise Ratio in dB (higher = better quality, less noise)
@@ -1648,11 +1587,15 @@ def create_app():
                 - **Color Histogram**: Color distribution correlation (1.0 = identical color patterns)
                 - **Sharpness**: Laplacian variance per video (higher = sharper/more detailed images)
                 - **Overall Quality**: Combined metric averaging SSIM, normalized PSNR, and pHash (when available)
-                                """)
+                        """)
+                    with gr.Column() as info_section:
+                        status_output = gr.Textbox(
+                            label="Status", interactive=False, lines=16
+                        )

-                        with gr.Row():
-                            with gr.Column():
-                                gr.Markdown("""
+                with gr.Row():
+                    with gr.Column():
+                        gr.Markdown("""
                 ### 🎯 Quality Assessment Scale (Research-Based Thresholds)
                 **SSIM Scale** (based on human perception studies):
                 - 🟢 **Excellent (≥0.9)**: Visually indistinguishable differences
@@ -1671,9 +1614,9 @@ def create_app():
                 - 🔵 **Similar (≤100)**: Small differences, good quality preservation
                 - 🟡 **Moderately Different (≤200)**: Noticeable but acceptable differences
                 - 🔴 **Very Different (>200)**: Significant pixel-level changes
-                                """)
-                            with gr.Column():
-                                gr.Markdown("""
+                        """)
+                    with gr.Column():
+                        gr.Markdown("""
                 ### 🔍 Understanding Comparisons
                 **Comparison Analysis**: Shows how similar/different the videos are
                 - Most metrics indicate **similarity** - not which video "wins"
@@ -1690,7 +1633,20 @@ def create_app():
                 - **Range**: 0.0 to 1.0 (higher = more similar videos overall)
                 - **Purpose**: Provides a single metric when you need one overall assessment
                 - **Limitation**: Different metrics may disagree; check individual metrics for details
-                                """)
+                        """)
+
+            # Individual metric plots
+            with gr.Row():
+                ssim_plot = gr.Plot(label="SSIM", show_label=True)
+                psnr_plot = gr.Plot(label="PSNR", show_label=True)
+
+            with gr.Row():
+                mse_plot = gr.Plot(label="MSE", show_label=True)
+                phash_plot = gr.Plot(label="pHash", show_label=True)
+
+            with gr.Row():
+                color_plot = gr.Plot(label="Color Histogram", show_label=True)
+                sharpness_plot = gr.Plot(label="Sharpness", show_label=True)

         # Connect examples to auto-loading
         if example_pairs:
@@ -1822,9 +1778,41 @@ def create_app():
                 gr.Row(visible=False),  # info_section
             )

-        # Enhanced auto-load function with more debug info
+        # Enhanced auto-load function with debouncing to prevent multiple rapid calls
+        last_processed_pair = {"video1": None, "video2": None}
+
         def enhanced_auto_load(video1, video2):
             print(f"DEBUG: Input change detected! video1={video1}, video2={video2}")
+
+            # Simple debouncing: skip if same video pair was just processed
+            if (
+                last_processed_pair["video1"] == video1
+                and last_processed_pair["video2"] == video2
+            ):
+                print("DEBUG: Same video pair already processed, skipping...")
+                # Return current state without recomputing
+                return (
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                    gr.update(),
+                )
+
+            last_processed_pair["video1"] = video1
+            last_processed_pair["video2"] = video2
+
             return auto_load_when_examples_change(video1, video2)

         # Auto-load when both video inputs change (triggered by examples)
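
The debouncing in the last hunk is a "remember the last processed pair" guard rather than a timer. A condensed sketch of the same pattern, with hypothetical handler names (the real function returns a 16-tuple of gr.update() placeholders matching its Gradio outputs):

import gradio as gr  # the app already imports this

# Condensed sketch of the debounce guard above (names other than
# auto_load_when_examples_change are illustrative, not the app's exact code).
_last_pair = {"video1": None, "video2": None}

def on_pair_change(video1, video2):
    # Skip recomputation when the change event fires again for the same pair.
    if _last_pair["video1"] == video1 and _last_pair["video2"] == video2:
        return tuple(gr.update() for _ in range(16))  # leave every output unchanged
    _last_pair["video1"], _last_pair["video2"] = video1, video2
    return auto_load_when_examples_change(video1, video2)

Since last_processed_pair is created in create_app() scope, the guard is shared by all sessions of a running app instance; it suppresses back-to-back duplicate change events rather than acting as per-user state.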