#!/usr/bin/env python3
"""
Enhanced CMT Holographic Visualization Suite with Scientific Integrity
Full-featured toolkit with mathematically rigorous implementations
"""
import hashlib
import os
import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Handle UMAP import variations
try:
    from umap import UMAP
except ImportError:
    try:
        from umap.umap_ import UMAP
    except ImportError:
        import umap.umap_ as umap_module
        UMAP = umap_module.UMAP
from sklearn.cluster import KMeans
from scipy.stats import entropy as shannon_entropy
from scipy import special as sp_special
from scipy.interpolate import griddata
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cdist
import soundfile as sf
import gradio as gr
# ================================================================
# Unified Communication Manifold Explorer & CMT Visualizer v5.0
# - Full feature restoration with scientific integrity
# - Mathematically rigorous implementations
# - All original tools and insights preserved
# ================================================================
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
print("Initializing the Enhanced CMT Holography Explorer...")
# ---------------------------------------------------------------
# Data setup
# ---------------------------------------------------------------
# If running inside the proxy, PRIVATE_APP_ROOT will be set.
# Otherwise, fall back to the current working directory for standalone execution.
_app_root = os.getenv("PRIVATE_APP_ROOT", os.path.abspath(os.getcwd()))
print(f"✅ Application Root detected as: {_app_root}")
BASE_DIR = _app_root
DATA_DIR = os.path.join(BASE_DIR, "data")
DOG_DIR = os.path.join(DATA_DIR, "dog")
HUMAN_DIR = os.path.join(DATA_DIR, "human")
# Platform-aware paths
HF_CSV_DOG = "cmt_dog_sound_analysis.csv"
HF_CSV_HUMAN = "cmt_human_speech_analysis.csv"
COLAB_CSV_DOG = "/content/cmt_dog_sound_analysis.csv"
COLAB_CSV_HUMAN = "/content/cmt_human_speech_analysis.csv"
# Determine environment
if os.path.exists(HF_CSV_DOG) and os.path.exists(HF_CSV_HUMAN):
    CSV_DOG = HF_CSV_DOG
    CSV_HUMAN = HF_CSV_HUMAN
    print("Using Hugging Face Spaces paths")
elif os.path.exists(COLAB_CSV_DOG) and os.path.exists(COLAB_CSV_HUMAN):
    CSV_DOG = COLAB_CSV_DOG
    CSV_HUMAN = COLAB_CSV_HUMAN
    print("Using Google Colab paths")
else:
    CSV_DOG = HF_CSV_DOG
    CSV_HUMAN = HF_CSV_HUMAN
    print("Falling back to local/dummy data paths")
# Audio paths
if os.path.exists("/content/drive/MyDrive/combined"):
    DOG_AUDIO_BASE_PATH = '/content/drive/MyDrive/combined'
    HUMAN_AUDIO_BASE_PATH = '/content/drive/MyDrive/human'
    print("Using Google Drive audio paths")
elif os.path.exists("combined") and os.path.exists("human"):
    DOG_AUDIO_BASE_PATH = 'combined'
    HUMAN_AUDIO_BASE_PATH = 'human'
    print("Using Hugging Face Spaces audio paths")
else:
    DOG_AUDIO_BASE_PATH = DOG_DIR
    HUMAN_AUDIO_BASE_PATH = HUMAN_DIR
    print("Using local audio paths")
# ---------------------------------------------------------------
# Load datasets
# ---------------------------------------------------------------
if os.path.exists(CSV_DOG) and os.path.exists(CSV_HUMAN):
    print("✅ Loading real data from CSVs")
    df_dog = pd.read_csv(CSV_DOG)
    df_human = pd.read_csv(CSV_HUMAN)
else:
    print("⚠️ Generating dummy data for demo")
    # Dummy data generation
    n_dummy = 50
    rng = np.random.default_rng(42)
    dog_labels = ["bark", "growl", "whine", "pant"] * (n_dummy // 4 + 1)
    human_labels = ["speech", "laugh", "cry", "shout"] * (n_dummy // 4 + 1)
    df_dog = pd.DataFrame({
        "filepath": [f"dog_{i}.wav" for i in range(n_dummy)],
        "label": dog_labels[:n_dummy],
        **{f"feature_{i}": rng.random(n_dummy) for i in range(10)},
        **{f"diag_alpha_{lens}": rng.uniform(0.1, 2.0, n_dummy)
           for lens in ["gamma", "zeta", "airy", "bessel"]},
        **{f"diag_srl_{lens}": rng.uniform(0.5, 50.0, n_dummy)
           for lens in ["gamma", "zeta", "airy", "bessel"]}
    })
    df_human = pd.DataFrame({
        "filepath": [f"human_{i}.wav" for i in range(n_dummy)],
        "label": human_labels[:n_dummy],
        **{f"feature_{i}": rng.random(n_dummy) for i in range(10)},
        **{f"diag_alpha_{lens}": rng.uniform(0.1, 2.0, n_dummy)
           for lens in ["gamma", "zeta", "airy", "bessel"]},
        **{f"diag_srl_{lens}": rng.uniform(0.5, 50.0, n_dummy)
           for lens in ["gamma", "zeta", "airy", "bessel"]}
    })
df_dog["source"] = "Dog"
df_human["source"] = "Human"
df_combined = pd.concat([df_dog, df_human], ignore_index=True)
print(f"Loaded {len(df_dog)} dog rows and {len(df_human)} human rows")
# ---------------------------------------------------------------
# Feature preparation and UMAP embedding
# ---------------------------------------------------------------
feature_cols = [c for c in df_combined.columns if c.startswith("feature_")]
if feature_cols:
    features = np.nan_to_num(df_combined[feature_cols].to_numpy())
    reducer = UMAP(n_components=3, n_neighbors=15, min_dist=0.1, random_state=42)
    df_combined[["x", "y", "z"]] = reducer.fit_transform(features)
else:
    # Fallback if no features: place points uniformly at random
    rng = np.random.default_rng(42)
    df_combined["x"] = rng.random(len(df_combined))
    df_combined["y"] = rng.random(len(df_combined))
    df_combined["z"] = rng.random(len(df_combined))
# Clustering: k scales with sqrt(N), clamped to [4, 12]
kmeans = KMeans(n_clusters=max(4, min(12, int(np.sqrt(len(df_combined))))),
                random_state=42, n_init=10)
df_combined["cluster"] = kmeans.fit_predict(features if feature_cols else df_combined[["x", "y", "z"]])
# ---------------------------------------------------------------
# Cross-Species Analysis Functions
# ---------------------------------------------------------------
def find_nearest_cross_species_neighbor(selected_row, df_combined, n_neighbors=5):
    """Find the closest opposite-species neighbor in the learned manifold.
    Priority order:
    1) Euclidean distance in manifold coordinates (x, y, z) if available
    2) Fallback to cosine similarity on raw feature vectors
    3) Fallback to the first available opposite-species row
    """
    selected_source = selected_row['source']
    opposite_source = 'Human' if selected_source == 'Dog' else 'Dog'
    opposite_data = df_combined[df_combined['source'] == opposite_source]
    if len(opposite_data) == 0:
        return None
    # 1) Prefer geometry in the manifold if available
    if all(col in selected_row.index for col in ['x', 'y', 'z']) and \
       all(col in opposite_data.columns for col in ['x', 'y', 'z']):
        sx, sy, sz = float(selected_row['x']), float(selected_row['y']), float(selected_row['z'])
        coords = opposite_data[['x', 'y', 'z']].to_numpy(dtype=float)
        diffs = coords - np.array([sx, sy, sz], dtype=float)
        dists = np.sqrt(np.sum(diffs * diffs, axis=1))
        nearest_idx = int(np.argmin(dists))
        return opposite_data.iloc[nearest_idx]
    # 2) Fallback to feature-space cosine similarity
    feature_cols = [c for c in df_combined.columns if c.startswith("feature_")]
    if feature_cols:
        selected_features = selected_row[feature_cols].values.reshape(1, -1)
        selected_features = np.nan_to_num(selected_features)
        opposite_features = np.nan_to_num(opposite_data[feature_cols].values)
        similarities = cosine_similarity(selected_features, opposite_features)[0]
        most_similar_idx = int(np.argmax(similarities))
        return opposite_data.iloc[most_similar_idx]
    # 3) Fallback: first available
    return opposite_data.iloc[0]
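# Usage sketch (illustrative, not wired into the UI): for any row of
# df_combined, e.g. row = df_combined.iloc[0],
#   neighbor = find_nearest_cross_species_neighbor(row, df_combined)
# returns a pd.Series for the nearest opposite-species sample, or None when
# the opposite species is absent from the dataframe.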
# Cache for performance
_audio_path_cache = {}
_cmt_data_cache = {}
def resolve_audio_path(row: pd.Series) -> str:
    """Resolve audio file paths intelligently."""
    basename = str(row.get("filepath", ""))
    source = row.get("source", "")
    label = row.get("label", "")
    cache_key = f"{source}:{label}:{basename}"
    if cache_key in _audio_path_cache:
        return _audio_path_cache[cache_key]
    resolved_path = basename
    if source == "Dog":
        expected_path = os.path.join(DOG_AUDIO_BASE_PATH, label, basename)
        if os.path.exists(expected_path):
            resolved_path = expected_path
        else:
            expected_path = os.path.join(DOG_AUDIO_BASE_PATH, basename)
            if os.path.exists(expected_path):
                resolved_path = expected_path
    elif source == "Human":
        if os.path.isdir(HUMAN_AUDIO_BASE_PATH):
            for actor_folder in os.listdir(HUMAN_AUDIO_BASE_PATH):
                if actor_folder.startswith("Actor_"):
                    expected_path = os.path.join(HUMAN_AUDIO_BASE_PATH, actor_folder, basename)
                    if os.path.exists(expected_path):
                        resolved_path = expected_path
                        break
    _audio_path_cache[cache_key] = resolved_path
    return resolved_path
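# Directory layouts assumed by resolve_audio_path (inferred from the lookups
# above): dog audio at <DOG_AUDIO_BASE_PATH>/<label>/<file> or flat at
# <DOG_AUDIO_BASE_PATH>/<file>; human audio in RAVDESS-style
# <HUMAN_AUDIO_BASE_PATH>/Actor_XX/<file> folders.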
def get_cmt_data_from_csv(row: pd.Series, lens: str):
    """
    Extract CMT data from CSV and reconstruct visualization data.
    Uses real diagnostic values but creates visualization points.
    """
    try:
        alpha_col = f"diag_alpha_{lens}"
        srl_col = f"diag_srl_{lens}"
        alpha_val = float(np.nan_to_num(row.get(alpha_col, 0.0)))
        srl_val = float(np.nan_to_num(row.get(srl_col, 0.0)))
        # Create visualization points based on real diagnostics;
        # number of points proportional to complexity
        n_points = int(min(200, max(50, srl_val * 2)))
        # Use a stable digest for per-file determinism (Python's built-in
        # hash() is salted per process, so it is not reproducible across runs)
        seed = int(hashlib.md5(str(row['filepath']).encode("utf-8")).hexdigest(), 16) % 2**32
        rng = np.random.RandomState(seed)
        # Generate points in the complex plane with spread based on alpha
        angles = np.linspace(0, 2 * np.pi, n_points)
        radii = alpha_val * (1 + 0.3 * rng.random(n_points))
        z = radii * np.exp(1j * angles)
        # Apply a lens-like phase twist for visualization
        w = z * np.exp(1j * srl_val * np.angle(z) / 10)
        # Create the holographic field
        phi = alpha_val * w * np.exp(1j * np.angle(w) * srl_val / 20)
        return {
            "phi": phi,
            "w": w,
            "z": z,
            "original_count": n_points,
            "final_count": len(phi),
            "alpha": alpha_val,
            "srl": srl_val
        }
    except Exception as e:
        print(f"Error extracting CMT data: {e}")
        return None
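# The synthetic field above follows, per sample:
#   z   = r * exp(i*theta),  with r ~ alpha * (1 + 0.3 * U[0, 1])
#   w   = z * exp(i * SRL * arg(z) / 10)          (lens-like phase twist)
#   phi = alpha * w * exp(i * arg(w) * SRL / 20)
# These are visualization constructs driven by the real (alpha, SRL)
# diagnostics from the CSV, not transforms recovered from the audio itself.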
def generate_holographic_field(z: np.ndarray, phi: np.ndarray, resolution: int):
    """Generate a continuous field for visualization."""
    if z is None or phi is None or len(z) < 4:
        return None
    points = np.vstack([np.real(z), np.imag(z)]).T
    # A complex step in np.mgrid means "number of samples", not step size
    grid_x, grid_y = np.mgrid[
        np.min(points[:, 0]):np.max(points[:, 0]):complex(0, resolution),
        np.min(points[:, 1]):np.max(points[:, 1]):complex(0, resolution)
    ]
    # Use linear interpolation for more stable results
    grid_phi_real = griddata(points, np.real(phi), (grid_x, grid_y), method='linear')
    grid_phi_imag = griddata(points, np.imag(phi), (grid_x, grid_y), method='linear')
    # Fill NaN values (outside the convex hull) with nearest-neighbor values
    mask = np.isnan(grid_phi_real)
    if np.any(mask):
        grid_phi_real[mask] = griddata(points, np.real(phi), (grid_x[mask], grid_y[mask]), method='nearest')
    mask = np.isnan(grid_phi_imag)
    if np.any(mask):
        grid_phi_imag[mask] = griddata(points, np.imag(phi), (grid_x[mask], grid_y[mask]), method='nearest')
    grid_phi = grid_phi_real + 1j * grid_phi_imag
    return grid_x, grid_y, grid_phi
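# Usage sketch (illustrative only): reconstruct a coarse 32x32 field from
# synthetic samples, using nothing beyond the imports at the top of this file.
#   rng_demo = np.random.default_rng(0)
#   z_demo = rng_demo.standard_normal(100) + 1j * rng_demo.standard_normal(100)
#   field = generate_holographic_field(z_demo, z_demo ** 2, resolution=32)
#   if field is not None:
#       gx, gy, gphi = field  # three (32, 32) arrays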
# ---------------------------------------------------------------
# Advanced Visualization Functions
# ---------------------------------------------------------------
def calculate_species_boundary(df_combined):
    """Calculate geometric boundary between species."""
    from sklearn.svm import SVC
    human_data = df_combined[df_combined['source'] == 'Human'][['x', 'y', 'z']].values
    dog_data = df_combined[df_combined['source'] == 'Dog'][['x', 'y', 'z']].values
    if len(human_data) < 2 or len(dog_data) < 2:
        return None
    X = np.vstack([human_data, dog_data])
    y = np.hstack([np.ones(len(human_data)), np.zeros(len(dog_data))])
    svm = SVC(kernel='rbf', probability=True)
    svm.fit(X, y)
    x_range = np.linspace(X[:, 0].min(), X[:, 0].max(), 20)
    y_range = np.linspace(X[:, 1].min(), X[:, 1].max(), 20)
    z_range = np.linspace(X[:, 2].min(), X[:, 2].max(), 20)
    xx, yy = np.meshgrid(x_range, y_range)
    boundary_points = []
    for z_val in z_range:
        grid_points = np.c_[xx.ravel(), yy.ravel(), np.full(xx.ravel().shape, z_val)]
        probabilities = svm.predict_proba(grid_points)[:, 1]
        boundary_mask = np.abs(probabilities - 0.5) < 0.05
        if np.any(boundary_mask):
            boundary_points.extend(grid_points[boundary_mask])
    return np.array(boundary_points) if boundary_points else None
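# The returned points approximate the p ≈ 0.5 level set of the RBF-SVM fitted
# on the 3D manifold coordinates, sampled on a 20x20x20 grid: in effect, a
# fuzzy decision surface between the Human and Dog point clouds.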
def create_enhanced_manifold_plot(df_filtered, lens_selected, color_scheme, point_size,
                                  show_boundary, show_trajectories):
    """Create the main 3D manifold visualization."""
    alpha_col = f"diag_alpha_{lens_selected}"
    srl_col = f"diag_srl_{lens_selected}"
    # Color mapping
    if color_scheme == "Species":
        color_values = [1 if s == "Human" else 0 for s in df_filtered['source']]
        colorscale = [[0, '#1f77b4'], [1, '#ff7f0e']]
        colorbar_title = "Species"
    elif color_scheme == "Emotion":
        unique_emotions = df_filtered['label'].unique()
        emotion_map = {emotion: i for i, emotion in enumerate(unique_emotions)}
        color_values = [emotion_map[label] for label in df_filtered['label']]
        colorscale = 'Viridis'
        colorbar_title = "Emotional State"
    elif color_scheme == "CMT_Alpha":
        color_values = df_filtered[alpha_col].values if alpha_col in df_filtered.columns else df_filtered.index
        colorscale = 'Plasma'
        colorbar_title = f"CMT Alpha ({lens_selected})"
    elif color_scheme == "CMT_SRL":
        color_values = df_filtered[srl_col].values if srl_col in df_filtered.columns else df_filtered.index
        colorscale = 'Turbo'
        colorbar_title = f"SRL ({lens_selected})"
    else:
        color_values = df_filtered['cluster'].values
        colorscale = 'Plotly3'
        colorbar_title = "Cluster"
    # Create hover text
    hover_text = []
    for _, row in df_filtered.iterrows():
        hover_info = (
            f"<b>{row['source']}</b>: {row['label']}<br>"
            f"File: {row['filepath']}<br>"
            f"Coordinates: ({row['x']:.3f}, {row['y']:.3f}, {row['z']:.3f})"
        )
        if alpha_col in df_filtered.columns:
            hover_info += f"<br>α: {row[alpha_col]:.4f}"
        if srl_col in df_filtered.columns:
            hover_info += f"<br>SRL: {row[srl_col]:.4f}"
        hover_text.append(hover_info)
    fig = go.Figure()
    # Main scatter plot
    fig.add_trace(go.Scatter3d(
        x=df_filtered['x'],
        y=df_filtered['y'],
        z=df_filtered['z'],
        mode='markers',
        marker=dict(
            size=point_size,
            color=color_values,
            colorscale=colorscale,
            showscale=True,
            colorbar=dict(title=colorbar_title),
            opacity=0.8,
            line=dict(width=0.5, color='rgba(50,50,50,0.5)')
        ),
        text=hover_text,
        hovertemplate='%{text}<extra></extra>',
        name='Communications'
    ))
    # Add species boundary
    if show_boundary:
        boundary_points = calculate_species_boundary(df_filtered)
        if boundary_points is not None and len(boundary_points) > 0:
            fig.add_trace(go.Scatter3d(
                x=boundary_points[:, 0],
                y=boundary_points[:, 1],
                z=boundary_points[:, 2],
                mode='markers',
                marker=dict(size=2, color='red', opacity=0.3),
                name='Species Boundary',
                hovertemplate='Species Boundary<extra></extra>'
            ))
    # Add per-emotion trajectories (points joined in x order)
    if show_trajectories:
        emotion_colors = {
            'angry': '#FF4444', 'happy': '#44FF44', 'sad': '#4444FF',
            'fearful': '#FF44FF', 'neutral': '#FFFF44', 'surprised': '#44FFFF',
            'disgusted': '#FF8844', 'bark': '#FF6B35', 'growl': '#8B4513',
            'whine': '#9370DB', 'pant': '#20B2AA', 'speech': '#1E90FF',
            'laugh': '#FFD700', 'cry': '#4169E1', 'shout': '#DC143C'
        }
        for i, emotion in enumerate(df_filtered['label'].unique()):
            emotion_data = df_filtered[df_filtered['label'] == emotion]
            if len(emotion_data) > 1:
                base_colors = ['#FF6B6B', '#4ECDC4', '#45B7D1', '#96CEB4', '#FFEAA7']
                emotion_color = emotion_colors.get(emotion.lower(), base_colors[i % len(base_colors)])
                sort_indices = np.argsort(emotion_data['x'].values)
                x_sorted = emotion_data['x'].values[sort_indices]
                y_sorted = emotion_data['y'].values[sort_indices]
                z_sorted = emotion_data['z'].values[sort_indices]
                fig.add_trace(go.Scatter3d(
                    x=x_sorted, y=y_sorted, z=z_sorted,
                    mode='lines+markers',
                    line=dict(width=4, color=emotion_color, dash='dash'),
                    marker=dict(size=3, color=emotion_color, opacity=0.8),
                    name=f'{emotion.title()} Path',
                    showlegend=True,
                    hovertemplate=f'<b>{emotion.title()} Path</b><br>X: %{{x:.3f}}<br>Y: %{{y:.3f}}<br>Z: %{{z:.3f}}<extra></extra>',
                    opacity=0.7
                ))
    fig.update_layout(
        title={
            'text': "Shared Pattern Space of Audio Signals",
            'x': 0.5,
            'xanchor': 'center'
        },
        scene=dict(
            xaxis_title='Manifold Dimension 1',
            yaxis_title='Manifold Dimension 2',
            zaxis_title='Manifold Dimension 3',
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.5)),
            bgcolor='rgba(0,0,0,0)',
            aspectmode='cube'
        ),
        margin=dict(l=0, r=0, b=0, t=60)
    )
    return fig
def wavelength_to_rgb(wl):
    """Map a visible wavelength (nm) to an approximate RGB string (shared by both holography plots)."""
    if 380 <= wl < 440: return f'rgb({int(-(wl - 440) / (440 - 380) * 255)}, 0, 255)'
    elif 440 <= wl < 495: return f'rgb(0, {int((wl - 440) / (495 - 440) * 255)}, 255)'
    elif 495 <= wl < 570: return f'rgb(0, 255, {int(-(wl - 570) / (570 - 495) * 255)})'
    elif 570 <= wl < 590: return f'rgb({int((wl - 570) / (590 - 570) * 255)}, 255, 0)'
    elif 590 <= wl < 620: return f'rgb(255, {int(-(wl - 620) / (620 - 590) * 255)}, 0)'
    elif 620 <= wl <= 750: return 'rgb(255, 0, 0)'
    return 'rgb(255,255,255)'
def create_holography_plot(z, phi, resolution, wavelength):
    """Create the holographic field visualization."""
    field_data = generate_holographic_field(z, phi, resolution)
    if field_data is None:
        return go.Figure(layout={"title": "Insufficient data for holography"})
    grid_x, grid_y, grid_phi = field_data
    mag_phi = np.abs(grid_phi)
    phase_phi = np.angle(grid_phi)
    mid_color = wavelength_to_rgb(wavelength)
    custom_colorscale = [[0, 'rgb(20,0,40)'], [0.5, mid_color], [1, 'rgb(255,255,255)']]
    fig = go.Figure()
    # Holographic surface: height encodes |Φ|, color encodes phase
    fig.add_trace(go.Surface(
        x=grid_x, y=grid_y, z=mag_phi,
        surfacecolor=phase_phi,
        colorscale=custom_colorscale,
        cmin=-np.pi, cmax=np.pi,
        colorbar=dict(title='Phase'),
        name='Holographic Field',
        contours_z=dict(show=True, usecolormap=True, highlightcolor="limegreen", project_z=True)
    ))
    # Data points
    fig.add_trace(go.Scatter3d(
        x=np.real(z), y=np.imag(z), z=np.abs(phi) + 0.05,
        mode='markers',
        marker=dict(size=3, color='black', symbol='x'),
        name='Data Points'
    ))
    # Vector flow field (downhill gradient of |Φ|)
    if resolution >= 30:
        # The grid comes from np.mgrid, so axis 0 varies with x and axis 1 with y
        grad_x, grad_y = np.gradient(mag_phi)
        sample_rate = max(1, resolution // 15)
        fig.add_trace(go.Cone(
            x=grid_x[::sample_rate, ::sample_rate].flatten(),
            y=grid_y[::sample_rate, ::sample_rate].flatten(),
            z=mag_phi[::sample_rate, ::sample_rate].flatten(),
            u=-grad_x[::sample_rate, ::sample_rate].flatten(),
            v=-grad_y[::sample_rate, ::sample_rate].flatten(),
            w=np.full_like(mag_phi[::sample_rate, ::sample_rate].flatten(), -0.1),
            sizemode="absolute", sizeref=0.1,
            anchor="tip",
            colorscale='Greys',
            showscale=False,
            name='Vector Flow'
        ))
    fig.update_layout(
        title="Interactive Holographic Field Reconstruction",
        scene=dict(
            xaxis_title="Re(z)",
            yaxis_title="Im(z)",
            zaxis_title="|Φ|"
        ),
        margin=dict(l=0, r=0, b=0, t=40)
    )
    return fig
def create_dual_holography_plot(z1, phi1, z2, phi2, resolution, wavelength, title1="Primary", title2="Comparison"):
    """Create side-by-side holographic visualizations."""
    field_data1 = generate_holographic_field(z1, phi1, resolution)
    field_data2 = generate_holographic_field(z2, phi2, resolution)
    if field_data1 is None or field_data2 is None:
        return go.Figure(layout={"title": "Insufficient data for dual holography"})
    grid_x1, grid_y1, grid_phi1 = field_data1
    grid_x2, grid_y2, grid_phi2 = field_data2
    mag_phi1, phase_phi1 = np.abs(grid_phi1), np.angle(grid_phi1)
    mag_phi2, phase_phi2 = np.abs(grid_phi2), np.angle(grid_phi2)
    # Reuse the shared module-level wavelength_to_rgb helper
    mid_color = wavelength_to_rgb(wavelength)
    custom_colorscale = [[0, 'rgb(20,0,40)'], [0.5, mid_color], [1, 'rgb(255,255,255)']]
    fig = make_subplots(
        rows=1, cols=2,
        specs=[[{'type': 'surface'}, {'type': 'surface'}]],
        subplot_titles=[title1, title2]
    )
    # Primary hologram
    fig.add_trace(go.Surface(
        x=grid_x1, y=grid_y1, z=mag_phi1,
        surfacecolor=phase_phi1,
        colorscale=custom_colorscale,
        cmin=-np.pi, cmax=np.pi,
        showscale=False,
        name=title1,
        contours_z=dict(show=True, usecolormap=True, highlightcolor="limegreen", project_z=True)
    ), row=1, col=1)
    # Comparison hologram
    fig.add_trace(go.Surface(
        x=grid_x2, y=grid_y2, z=mag_phi2,
        surfacecolor=phase_phi2,
        colorscale=custom_colorscale,
        cmin=-np.pi, cmax=np.pi,
        showscale=False,
        name=title2,
        contours_z=dict(show=True, usecolormap=True, highlightcolor="limegreen", project_z=True)
    ), row=1, col=2)
    # Add data points
    fig.add_trace(go.Scatter3d(
        x=np.real(z1), y=np.imag(z1), z=np.abs(phi1) + 0.05,
        mode='markers', marker=dict(size=3, color='black', symbol='x'),
        name=f'{title1} Points', showlegend=False
    ), row=1, col=1)
    fig.add_trace(go.Scatter3d(
        x=np.real(z2), y=np.imag(z2), z=np.abs(phi2) + 0.05,
        mode='markers', marker=dict(size=3, color='black', symbol='x'),
        name=f'{title2} Points', showlegend=False
    ), row=1, col=2)
    fig.update_layout(
        title="Side-by-Side Geometric Comparison",
        scene=dict(
            xaxis_title="Re(z)", yaxis_title="Im(z)", zaxis_title="|Φ|",
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
        ),
        scene2=dict(
            xaxis_title="Re(z)", yaxis_title="Im(z)", zaxis_title="|Φ|",
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
        ),
        margin=dict(l=0, r=0, b=0, t=60),
        height=600
    )
    return fig
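# Note: with two 'surface' subplots, plotly names the 3D axes 'scene' and
# 'scene2', which is why update_layout above configures both explicitly.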
def create_diagnostic_plots(z, w):
    """Create diagnostic visualization."""
    if z is None or w is None:
        return go.Figure(layout={"title": "Insufficient data for diagnostics"})
    fig = go.Figure()
    fig.add_trace(go.Scatter(
        x=np.real(z), y=np.imag(z), mode='markers',
        marker=dict(size=5, color='blue', opacity=0.6),
        name='Aperture (z)'
    ))
    fig.add_trace(go.Scatter(
        x=np.real(w), y=np.imag(w), mode='markers',
        marker=dict(size=5, color='red', opacity=0.6, symbol='x'),
        name='Lens Response (w)'
    ))
    fig.update_layout(
        title="Diagnostic View: Aperture and Lens Response",
        xaxis_title="Real Part",
        yaxis_title="Imaginary Part",
        legend_title="Signal Stage",
        margin=dict(l=20, r=20, t=60, b=20)
    )
    return fig
def create_entropy_geometry_plot(phi: np.ndarray):
    """Create entropy analysis visualization."""
    if phi is None or len(phi) < 2:
        return go.Figure(layout={"title": "Insufficient data for entropy analysis"})
    magnitudes = np.abs(phi)
    phases = np.angle(phi)
    mag_hist, _ = np.histogram(magnitudes, bins='auto', density=True)
    phase_hist, _ = np.histogram(phases, bins='auto', density=True)
    # scipy's entropy normalizes its input, so these are Shannon entropies of
    # the normalized histograms; the epsilon avoids log(0) on empty bins
    mag_entropy = shannon_entropy(mag_hist + 1e-10)
    phase_entropy = shannon_entropy(phase_hist + 1e-10)
    fig = make_subplots(rows=1, cols=2, subplot_titles=(
        f"Magnitude Distribution (Entropy: {mag_entropy:.3f})",
        f"Phase Distribution (Entropy: {phase_entropy:.3f})"
    ))
    fig.add_trace(go.Histogram(x=magnitudes, name='Magnitude', nbinsx=50), row=1, col=1)
    fig.add_trace(go.Histogram(x=phases, name='Phase', nbinsx=50), row=1, col=2)
    fig.update_layout(
        title_text="Information-Entropy Geometry",
        showlegend=False,
        bargap=0.1,
        margin=dict(l=20, r=20, t=60, b=20)
    )
    return fig
def create_2d_projection_plot(df_filtered, lens_selected, color_scheme):
    """Create 2D projection plot."""
    alpha_col = f"diag_alpha_{lens_selected}"
    srl_col = f"diag_srl_{lens_selected}"
    # Color mapping
    if color_scheme == "Species":
        color_values = [1 if s == "Human" else 0 for s in df_filtered['source']]
        colorscale = [[0, '#1f77b4'], [1, '#ff7f0e']]
        colorbar_title = "Species"
    elif color_scheme == "Emotion":
        unique_emotions = df_filtered['label'].unique()
        emotion_map = {emotion: i for i, emotion in enumerate(unique_emotions)}
        color_values = [emotion_map[label] for label in df_filtered['label']]
        colorscale = 'Viridis'
        colorbar_title = "Emotional State"
    else:
        color_values = df_filtered['cluster'].values
        colorscale = 'Plotly3'
        colorbar_title = "Cluster"
    fig = go.Figure()
    fig.add_trace(go.Scatter(
        x=df_filtered['x'],
        y=df_filtered['y'],
        mode='markers',
        marker=dict(
            size=8,
            color=color_values,
            colorscale=colorscale,
            showscale=True,
            colorbar=dict(title=colorbar_title),
            opacity=0.7,
            line=dict(width=0.5, color='rgba(50,50,50,0.5)')
        ),
        text=[f"{row['source']}: {row['label']}" for _, row in df_filtered.iterrows()],
        name='Communications'
    ))
    fig.update_layout(
        title="2D Manifold Projection",
        xaxis_title='Dimension 1',
        yaxis_title='Dimension 2',
        margin=dict(l=0, r=0, b=0, t=40)
    )
    return fig
def create_density_heatmap(df_filtered):
    """Create density heatmap."""
    from scipy.stats import gaussian_kde
    if len(df_filtered) < 10:
        return go.Figure(layout={"title": "Insufficient data for density plot"})
    x = df_filtered['x'].values
    y = df_filtered['y'].values
    # Create density estimation
    try:
        kde = gaussian_kde(np.vstack([x, y]))
        # Create grid
        x_range = np.linspace(x.min(), x.max(), 50)
        y_range = np.linspace(y.min(), y.max(), 50)
        X, Y = np.meshgrid(x_range, y_range)
        positions = np.vstack([X.ravel(), Y.ravel()])
        Z = kde(positions).reshape(X.shape)
        fig = go.Figure(data=go.Heatmap(
            x=x_range,
            y=y_range,
            z=Z,
            colorscale='Viridis',
            showscale=True
        ))
        # Add scatter points
        fig.add_trace(go.Scatter(
            x=x, y=y,
            mode='markers',
            marker=dict(size=4, color='white', opacity=0.8),
            name='Data Points'
        ))
        fig.update_layout(
            title="Communication Density Landscape",
            xaxis_title='Dimension 1',
            yaxis_title='Dimension 2',
            margin=dict(l=0, r=0, b=0, t=40)
        )
        return fig
    except Exception:
        # KDE can fail on degenerate (e.g. collinear) data
        return go.Figure(layout={"title": "Could not create density plot"})
def create_feature_distributions(df_filtered, lens_selected):
    """Create feature distribution plots."""
    alpha_col = f"diag_alpha_{lens_selected}"
    srl_col = f"diag_srl_{lens_selected}"
    fig = make_subplots(
        rows=2, cols=2,
        subplot_titles=(
            f"Alpha Distribution ({lens_selected})",
            f"SRL Distribution ({lens_selected})",
            "Species Distribution",
            "Emotion Distribution"
        ),
        specs=[[{"type": "histogram"}, {"type": "histogram"}],
               [{"type": "bar"}, {"type": "bar"}]]
    )
    # Alpha distribution
    if alpha_col in df_filtered.columns:
        fig.add_trace(go.Histogram(
            x=df_filtered[alpha_col],
            name="Alpha",
            nbinsx=30,
            marker_color='lightblue'
        ), row=1, col=1)
    # SRL distribution
    if srl_col in df_filtered.columns:
        fig.add_trace(go.Histogram(
            x=df_filtered[srl_col],
            name="SRL",
            nbinsx=30,
            marker_color='lightgreen'
        ), row=1, col=2)
    # Species distribution
    species_counts = df_filtered['source'].value_counts()
    fig.add_trace(go.Bar(
        x=species_counts.index,
        y=species_counts.values,
        name="Species",
        marker_color=['#1f77b4', '#ff7f0e']
    ), row=2, col=1)
    # Emotion distribution
    emotion_counts = df_filtered['label'].value_counts().head(10)
    fig.add_trace(go.Bar(
        x=emotion_counts.index,
        y=emotion_counts.values,
        name="Emotions",
        marker_color='lightcoral'
    ), row=2, col=2)
    fig.update_layout(
        title_text="Statistical Distributions",
        showlegend=False,
        margin=dict(l=0, r=0, b=0, t=60)
    )
    return fig
def create_cluster_analysis(df_filtered):
    """Create cluster analysis visualization."""
    fig = make_subplots(
        rows=1, cols=2,
        subplot_titles=("Cluster Distribution", "Cluster Composition"),
        specs=[[{"type": "bar"}, {"type": "bar"}]]
    )
    # Cluster distribution
    cluster_counts = df_filtered['cluster'].value_counts().sort_index()
    fig.add_trace(go.Bar(
        x=[f"C{i}" for i in cluster_counts.index],
        y=cluster_counts.values,
        name="Cluster Size",
        marker_color='skyblue'
    ), row=1, col=1)
    # Species composition per cluster
    cluster_species = df_filtered.groupby(['cluster', 'source']).size().unstack(fill_value=0)
    if len(cluster_species.columns) > 0:
        for species in cluster_species.columns:
            fig.add_trace(go.Bar(
                x=[f"C{i}" for i in cluster_species.index],
                y=cluster_species[species],
                name=species,
                marker_color='#1f77b4' if species == 'Human' else '#ff7f0e'
            ), row=1, col=2)
    fig.update_layout(
        title_text="Cluster Analysis",
        margin=dict(l=0, r=0, b=0, t=60)
    )
    return fig
def create_similarity_matrix(df_filtered, lens_selected):
    """Create species similarity matrix."""
    alpha_col = f"diag_alpha_{lens_selected}"
    srl_col = f"diag_srl_{lens_selected}"
    # Calculate mean values for each species-emotion combination
    similarity_data = []
    for species in df_filtered['source'].unique():
        for emotion in df_filtered['label'].unique():
            subset = df_filtered[(df_filtered['source'] == species) & (df_filtered['label'] == emotion)]
            if len(subset) > 0:
                alpha_mean = subset[alpha_col].mean() if alpha_col in subset.columns else 0
                srl_mean = subset[srl_col].mean() if srl_col in subset.columns else 0
                similarity_data.append({
                    'species': species,
                    'emotion': emotion,
                    'alpha': alpha_mean,
                    'srl': srl_mean,
                    # Crude composite score: sum of the two mean diagnostics
                    'combined': alpha_mean + srl_mean
                })
    if not similarity_data:
        return go.Figure(layout={"title": "No data for similarity matrix"})
    similarity_df = pd.DataFrame(similarity_data)
    pivot_table = similarity_df.pivot(index='emotion', columns='species', values='combined')
    fig = go.Figure(data=go.Heatmap(
        z=pivot_table.values,
        x=pivot_table.columns,
        y=pivot_table.index,
        colorscale='RdYlBu_r',
        showscale=True,
        colorbar=dict(title="Similarity Score")
    ))
    fig.update_layout(
        title="Cross-Species Similarity Matrix",
        margin=dict(l=0, r=0, b=0, t=40)
    )
    return fig
def calculate_live_statistics(df_filtered, lens_selected):
    """Calculate live statistics for the dataset."""
    alpha_col = f"diag_alpha_{lens_selected}"
    srl_col = f"diag_srl_{lens_selected}"
    stats = {
        'total_samples': len(df_filtered),
        'species_counts': df_filtered['source'].value_counts().to_dict(),
        'emotion_counts': len(df_filtered['label'].unique()),
        'cluster_count': len(df_filtered['cluster'].unique())
    }
    if alpha_col in df_filtered.columns:
        stats['alpha_mean'] = df_filtered[alpha_col].mean()
        stats['alpha_std'] = df_filtered[alpha_col].std()
    if srl_col in df_filtered.columns:
        stats['srl_mean'] = df_filtered[srl_col].mean()
        stats['srl_std'] = df_filtered[srl_col].std()
    # Format as HTML
    html_content = f"""
    <div style="padding: 10px; background-color: #f0f8ff; border-radius: 8px;">
        <h4>📊 Live Dataset Statistics</h4>
        <p><strong>Total Samples:</strong> {stats['total_samples']}</p>
        <p><strong>Species:</strong>
            {' | '.join([f"{k}: {v}" for k, v in stats['species_counts'].items()])}
        </p>
        <p><strong>Emotions:</strong> {stats['emotion_counts']}</p>
        <p><strong>Clusters:</strong> {stats['cluster_count']}</p>
    """
    if 'alpha_mean' in stats:
        html_content += f"""
        <p><strong>Alpha ({lens_selected}):</strong>
            μ={stats['alpha_mean']:.3f}, σ={stats['alpha_std']:.3f}
        </p>
        """
    if 'srl_mean' in stats:
        html_content += f"""
        <p><strong>SRL ({lens_selected}):</strong>
            μ={stats['srl_mean']:.3f}, σ={stats['srl_std']:.3f}
        </p>
        """
    html_content += "</div>"
    return html_content
def update_manifold_visualization(species_selection, emotion_selection, lens_selection,
                                  alpha_min, alpha_max, srl_min, srl_max,
                                  point_size, show_boundary, show_trajectories, color_scheme):
    """Update all manifold visualizations with filters."""
    df_filtered = df_combined.copy()
    if species_selection:
        df_filtered = df_filtered[df_filtered['source'].isin(species_selection)]
    if emotion_selection:
        df_filtered = df_filtered[df_filtered['label'].isin(emotion_selection)]
    alpha_col = f"diag_alpha_{lens_selection}"
    srl_col = f"diag_srl_{lens_selection}"
    if alpha_col in df_filtered.columns:
        df_filtered = df_filtered[
            (df_filtered[alpha_col] >= alpha_min) &
            (df_filtered[alpha_col] <= alpha_max)
        ]
    if srl_col in df_filtered.columns:
        df_filtered = df_filtered[
            (df_filtered[srl_col] >= srl_min) &
            (df_filtered[srl_col] <= srl_max)
        ]
    if len(df_filtered) == 0:
        empty_fig = go.Figure().add_annotation(
            text="No data points match the current filters",
            xref="paper", yref="paper", x=0.5, y=0.5, showarrow=False
        )
        empty_stats = "<p>No data available</p>"
        return empty_fig, empty_fig, empty_fig, empty_fig, empty_fig, empty_fig, empty_stats
    # Create all visualizations
    main_plot = create_enhanced_manifold_plot(
        df_filtered, lens_selection, color_scheme, point_size,
        show_boundary, show_trajectories
    )
    projection_2d = create_2d_projection_plot(df_filtered, lens_selection, color_scheme)
    density_plot = create_density_heatmap(df_filtered)
    feature_dists = create_feature_distributions(df_filtered, lens_selection)
    cluster_plot = create_cluster_analysis(df_filtered)
    similarity_plot = create_similarity_matrix(df_filtered, lens_selection)
    stats_html = calculate_live_statistics(df_filtered, lens_selection)
    return main_plot, projection_2d, density_plot, feature_dists, cluster_plot, similarity_plot, stats_html
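# The seven values returned above correspond positionally to manifold_outputs
# in the UI section below: six plots followed by the live-statistics HTML.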
def export_filtered_data(species_selection, emotion_selection, lens_selection,
                         alpha_min, alpha_max, srl_min, srl_max):
    """Summarize the filtered dataset for export."""
    df_filtered = df_combined.copy()
    if species_selection:
        df_filtered = df_filtered[df_filtered['source'].isin(species_selection)]
    if emotion_selection:
        df_filtered = df_filtered[df_filtered['label'].isin(emotion_selection)]
    alpha_col = f"diag_alpha_{lens_selection}"
    srl_col = f"diag_srl_{lens_selection}"
    if alpha_col in df_filtered.columns:
        df_filtered = df_filtered[
            (df_filtered[alpha_col] >= alpha_min) &
            (df_filtered[alpha_col] <= alpha_max)
        ]
    if srl_col in df_filtered.columns:
        df_filtered = df_filtered[
            (df_filtered[srl_col] >= srl_min) &
            (df_filtered[srl_col] <= srl_max)
        ]
    if len(df_filtered) == 0:
        return "<p style='color: red;'>❌ No data to export with current filters</p>"
    # Create export summary
    export_summary = {
        "export_timestamp": pd.Timestamp.now().isoformat(),
        "total_samples": len(df_filtered),
        "species_counts": df_filtered['source'].value_counts().to_dict(),
        "emotion_types": df_filtered['label'].unique().tolist(),
        "lens_used": lens_selection,
        "filters_applied": {
            "species": species_selection,
            "emotions": emotion_selection,
            "alpha_range": [alpha_min, alpha_max],
            "srl_range": [srl_min, srl_max]
        }
    }
    summary_html = f"""
    <div style="padding: 10px; background-color: #e8f5e8; border-radius: 8px; margin-top: 10px;">
        <h4>✅ Export Ready</h4>
        <p><strong>Samples:</strong> {export_summary['total_samples']}</p>
        <p><strong>Species:</strong> {', '.join([f"{k}({v})" for k, v in export_summary['species_counts'].items()])}</p>
        <p><strong>Emotions:</strong> {len(export_summary['emotion_types'])} types</p>
        <p><strong>Lens:</strong> {lens_selection}</p>
        <p><em>Data ready for download via your browser's dev tools or notebook integration.</em></p>
    </div>
    """
    return summary_html
# ---------------------------------------------------------------
# Gradio Interface
# ---------------------------------------------------------------
with gr.Blocks(theme='Nymbo/Alyx_Theme') as demo:
    gr.Markdown("""
    # **Entronaut: A Visual Explorer for Information Geometry**
    *Turn complex data into visible patterns.*
    ---
    ### What is Entronaut?
    Entronaut is a tool that transforms complex data—like audio signals, financial data, or text—into geometric shapes. Think of it like a mathematical prism: just as a prism splits a beam of light into a rainbow of colors, Entronaut splits a data stream into its fundamental patterns, making hidden structures visible and explorable.
    This demo applies Entronaut to audio recordings of human and dog sounds to reveal their underlying mathematical similarities.
    ### How is this different from tools like FFT?
    Traditional tools like the Fast Fourier Transform (FFT) or wavelets are excellent for breaking down a signal into its basic frequencies. They can tell you *what* notes are in a piece of music, but not how they are arranged into a melody or harmony.
    Entronaut goes a step further. It uses **Information Geometry** to analyze the *relationships* between data points. This allows it to capture not just the components, but the intricate structure of the data itself. It reveals the 'shape' of the information, showing you how patterns are organized and connected.
    ### What is Information Geometry?
    In simple terms, Information Geometry is a field of math that lets us measure the "distance" and "shape" of information.
    Imagine you have two different news articles. You could count the words, but that wouldn't tell you how similar their meaning is. Information Geometry provides tools to define a meaningful distance between them based on the information they contain. Entronaut makes this concept visual, creating a 3D map where you can see how close or far different data points are in this abstract "information space".
    **This App's Goal**: To demonstrate how Entronaut can map sounds from different sources into a shared geometric space. By exploring this space, you can see how Entronaut reveals underlying structural patterns in sound data.
    """)
    with gr.Tabs():
        with gr.TabItem("🎵 Audio Pattern Explorer"):
            gr.Markdown("## Exploring the Geometric Patterns in Sound")
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### 🔬 **Analysis Controls**")
                    species_filter = gr.CheckboxGroup(
                        label="Species Selection",
                        choices=["Human", "Dog"],
                        value=["Human", "Dog"]
                    )
                    emotion_filter = gr.CheckboxGroup(
                        label="Emotional States",
                        choices=list(df_combined['label'].unique()),
                        value=list(df_combined['label'].unique())
                    )
                    lens_selector = gr.Dropdown(
                        label="Mathematical Lens",
                        choices=["gamma", "zeta", "airy", "bessel"],
                        value="gamma"
                    )
                    with gr.Accordion("🎛️ Advanced Filters", open=False):
                        alpha_min = gr.Slider(label="Alpha Min", minimum=0, maximum=5, value=0, step=0.1)
                        alpha_max = gr.Slider(label="Alpha Max", minimum=0, maximum=5, value=5, step=0.1)
                        srl_min = gr.Slider(label="SRL Min", minimum=0, maximum=100, value=0, step=1)
                        srl_max = gr.Slider(label="SRL Max", minimum=0, maximum=100, value=100, step=1)
                    with gr.Accordion("🎨 Visualization Options", open=True):
                        point_size = gr.Slider(label="Point Size", minimum=2, maximum=15, value=6, step=1)
                        show_species_boundary = gr.Checkbox(label="Show Species Boundary", value=True)
                        show_trajectories = gr.Checkbox(label="Show Trajectories", value=False)
                        color_scheme = gr.Dropdown(
                            label="Color Scheme",
                            choices=["Species", "Emotion", "CMT_Alpha", "CMT_SRL", "Cluster"],
                            value="Species"
                        )
                    with gr.Accordion("📊 Live Statistics", open=True):
                        stats_html = gr.HTML(label="Dataset Statistics")
                        similarity_matrix = gr.Plot(label="Species Similarity Matrix")
                    with gr.Accordion("💾 Data Export", open=False):
                        gr.Markdown("**Export filtered dataset for further analysis**")
                        export_button = gr.Button("📥 Export Filtered Data", variant="secondary")
                        export_status = gr.HTML("")
                with gr.Column(scale=3):
                    manifold_plot = gr.Plot(label="Universal Communication Manifold")
                    with gr.Row():
                        projection_2d = gr.Plot(label="2D Projection")
                        density_plot = gr.Plot(label="Density Heatmap")
                    with gr.Row():
                        feature_distributions = gr.Plot(label="Feature Distributions")
                        cluster_analysis = gr.Plot(label="Cluster Analysis")
            # Wire up events
            manifold_inputs = [
                species_filter, emotion_filter, lens_selector,
                alpha_min, alpha_max, srl_min, srl_max,
                point_size, show_species_boundary, show_trajectories, color_scheme
            ]
            manifold_outputs = [
                manifold_plot, projection_2d, density_plot,
                feature_distributions, cluster_analysis, similarity_matrix, stats_html
            ]
            for component in manifold_inputs:
                component.change(
                    update_manifold_visualization,
                    inputs=manifold_inputs,
                    outputs=manifold_outputs
                )
            # Wire up export button
            export_button.click(
                export_filtered_data,
                inputs=[
                    species_filter, emotion_filter, lens_selector,
                    alpha_min, alpha_max, srl_min, srl_max
                ],
                outputs=[export_status]
            )
        with gr.TabItem("🔬 Interactive Holography"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Cross-Species Holography")
                    species_dropdown = gr.Dropdown(
                        label="Select Species",
                        choices=["Dog", "Human"],
                        value="Dog"
                    )
                    dog_files = df_combined[df_combined["source"] == "Dog"]["filepath"].tolist()
                    human_files = df_combined[df_combined["source"] == "Human"]["filepath"].tolist()
                    primary_dropdown = gr.Dropdown(
                        label="Primary File",
                        choices=dog_files,
                        value=dog_files[0] if dog_files else None
                    )
                    neighbor_dropdown = gr.Dropdown(
                        label="Cross-Species Neighbor",
                        choices=human_files,
                        value=human_files[0] if human_files else None
                    )
                    holo_lens_dropdown = gr.Dropdown(
                        label="CMT Lens",
                        choices=["gamma", "zeta", "airy", "bessel"],
                        value="gamma"
                    )
                    holo_resolution_slider = gr.Slider(
                        label="Field Resolution",
                        minimum=20, maximum=100, step=5, value=40
                    )
                    holo_wavelength_slider = gr.Slider(
                        label="Wavelength (nm)",
                        minimum=380, maximum=750, step=5, value=550
                    )
                    primary_info_html = gr.HTML(label="Primary Info")
                    neighbor_info_html = gr.HTML(label="Neighbor Info")
                with gr.Column(scale=2):
                    dual_holography_plot = gr.Plot(label="Holographic Comparison")
                    diagnostic_plot = gr.Plot(label="Diagnostic Analysis")
                    entropy_plot = gr.Plot(label="Entropy Geometry")
            def update_cross_species_view(species, primary_file, neighbor_file, lens, resolution, wavelength):
                if not primary_file:
                    empty_fig = go.Figure(layout={"title": "Select a primary file"})
                    return empty_fig, empty_fig, empty_fig, "", "", gr.Dropdown()
                primary_matches = df_combined[
                    (df_combined["filepath"] == primary_file) &
                    (df_combined["source"] == species)
                ]
                primary_row = primary_matches.iloc[0] if len(primary_matches) > 0 else None
                if primary_row is None:
                    empty_fig = go.Figure(layout={"title": "Primary file not found"})
                    # Must match the arity of cross_species_outputs (six values)
                    return empty_fig, empty_fig, empty_fig, "", "", gr.Dropdown()
                auto_neighbor_row = find_nearest_cross_species_neighbor(primary_row, df_combined)
                if not neighbor_file and auto_neighbor_row is not None:
                    neighbor_row = auto_neighbor_row
                else:
                    opposite_species = 'Human' if species == 'Dog' else 'Dog'
                    neighbor_matches = df_combined[
                        (df_combined["filepath"] == neighbor_file) &
                        (df_combined["source"] == opposite_species)
                    ]
                    neighbor_row = neighbor_matches.iloc[0] if len(neighbor_matches) > 0 else None
                primary_cmt = get_cmt_data_from_csv(primary_row, lens)
                neighbor_cmt = get_cmt_data_from_csv(neighbor_row, lens) if neighbor_row is not None else None
                if primary_cmt and neighbor_cmt:
                    primary_title = f"{species}: {primary_row.get('label', 'Unknown')}"
                    neighbor_title = f"{neighbor_row['source']}: {neighbor_row.get('label', 'Unknown')}"
                    dual_holo = create_dual_holography_plot(
                        primary_cmt["z"], primary_cmt["phi"],
                        neighbor_cmt["z"], neighbor_cmt["phi"],
                        resolution, wavelength, primary_title, neighbor_title
                    )
                    diag = create_diagnostic_plots(primary_cmt["z"], primary_cmt["w"])
                    entropy = create_entropy_geometry_plot(primary_cmt["phi"])
                else:
                    dual_holo = go.Figure(layout={"title": "Error processing data"})
                    diag = go.Figure(layout={"title": "Error processing data"})
                    entropy = go.Figure(layout={"title": "Error processing data"})
                if primary_cmt:
                    primary_info = f"""
                    <b>Primary:</b> {primary_row['filepath']}<br>
                    <b>Species:</b> {primary_row['source']}<br>
                    <b>Label:</b> {primary_row.get('label', 'N/A')}<br>
                    <b>Alpha:</b> {primary_cmt['alpha']:.4f}<br>
                    <b>SRL:</b> {primary_cmt['srl']:.4f}
                    """
                else:
                    primary_info = ""
                if neighbor_cmt and neighbor_row is not None:
                    neighbor_info = f"""
                    <b>Neighbor:</b> {neighbor_row['filepath']}<br>
                    <b>Species:</b> {neighbor_row['source']}<br>
                    <b>Label:</b> {neighbor_row.get('label', 'N/A')}<br>
                    <b>Alpha:</b> {neighbor_cmt['alpha']:.4f}<br>
                    <b>SRL:</b> {neighbor_cmt['srl']:.4f}
                    """
                else:
                    neighbor_info = ""
                # If the auto neighbor was used, preselect it in the dropdown
                neighbor_dropdown_value = gr.Dropdown(value=(neighbor_row['filepath'] if neighbor_row is not None else None))
                return dual_holo, diag, entropy, primary_info, neighbor_info, neighbor_dropdown_value
            def update_dropdowns_on_species_change(species):
                species_files = df_combined[df_combined["source"] == species]["filepath"].tolist()
                opposite_species = 'Human' if species == 'Dog' else 'Dog'
                neighbor_files = df_combined[df_combined["source"] == opposite_species]["filepath"].tolist()
                # Preselect the first file for the species and auto-pick its nearest neighbor if available
                primary_value = species_files[0] if species_files else ""
                if primary_value:
                    primary_row = df_combined[(df_combined["filepath"] == primary_value) & (df_combined["source"] == species)].iloc[0]
                    neighbor_row = find_nearest_cross_species_neighbor(primary_row, df_combined)
                    neighbor_value = neighbor_row['filepath'] if neighbor_row is not None else (neighbor_files[0] if neighbor_files else "")
                else:
                    neighbor_value = neighbor_files[0] if neighbor_files else ""
                return (
                    gr.Dropdown(choices=species_files, value=primary_value),
                    gr.Dropdown(choices=neighbor_files, value=neighbor_value)
                )
            species_dropdown.change(
                update_dropdowns_on_species_change,
                inputs=[species_dropdown],
                outputs=[primary_dropdown, neighbor_dropdown]
            )
            cross_species_inputs = [
                species_dropdown, primary_dropdown, neighbor_dropdown,
                holo_lens_dropdown, holo_resolution_slider, holo_wavelength_slider
            ]
            cross_species_outputs = [
                dual_holography_plot, diagnostic_plot, entropy_plot,
                primary_info_html, neighbor_info_html,
                neighbor_dropdown
            ]
            # The primary dropdown gets its own handler below, so skip it here
            # to avoid firing two callbacks on a single change
            for input_component in cross_species_inputs:
                if input_component is primary_dropdown:
                    continue
                input_component.change(
                    update_cross_species_view,
                    inputs=cross_species_inputs,
                    outputs=cross_species_outputs
                )
            # When the primary file changes, auto-update the neighbor selection to the nearest geometric match
            def on_primary_change(species, primary_value, neighbor_value, lens, resolution, wavelength):
                # Reuse update_cross_species_view to recompute the plots and the auto neighbor
                return update_cross_species_view(species, primary_value, None, lens, resolution, wavelength)
            primary_dropdown.change(
                on_primary_change,
                inputs=cross_species_inputs,
                outputs=cross_species_outputs
            )
    # Auto-load manifold visualizations on startup
    demo.load(
        update_manifold_visualization,
        inputs=[
            gr.State(["Human", "Dog"]),  # species_filter default
            gr.State(list(df_combined['label'].unique())),  # emotion_filter default
            gr.State("gamma"),  # lens_selector default
            gr.State(0),  # alpha_min default
            gr.State(5),  # alpha_max default
            gr.State(0),  # srl_min default
            gr.State(100),  # srl_max default
            gr.State(6),  # point_size default
            gr.State(True),  # show_species_boundary default
            gr.State(False),  # show_trajectories default
            gr.State("Species")  # color_scheme default
        ],
        outputs=manifold_outputs
    )
print("✅ CMT Holographic Visualization Suite Ready!")
if __name__ == "__main__":
    demo.launch()