Create app.py
app.py
ADDED
@@ -0,0 +1,748 @@
import os
import warnings

import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from umap import UMAP
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import entropy as shannon_entropy
from scipy import special as sp_special
from scipy.interpolate import griddata
import soundfile as sf
import gradio as gr

# ================================================================
# Unified Communication Manifold Explorer & CMT Visualizer v4.0
# - Adds side-by-side comparison capabilities from the HTML draft
# - Implements cross-species neighbor finding for grammar mapping
# - Separates human and dog audio with automatic pairing
# - Enhances dual visualization for comparative analysis
# - Adds an Interactive Holography tab for full field reconstruction
# - Interpolates the continuous CMT state-space (Φ field)
# - Visualizes topology, vector flow, and phase interference
# - Adds informational-entropy-geometry visualization
# - Prioritizes specific Colab paths for data loading
# ================================================================
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)

print("Initializing the Interactive CMT Holography Explorer...")
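
# Runtime dependencies (an assumption from the imports above; nothing is
# pinned in this file): gradio, plotly, umap-learn, scikit-learn, scipy,
# numpy, pandas, soundfile.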

# ---------------------------------------------------------------
# Data setup
# ---------------------------------------------------------------
# Paths for local execution (used for dummy data generation fallback)
BASE_DIR = os.path.abspath(os.getcwd())
DATA_DIR = os.path.join(BASE_DIR, "data")
DOG_DIR = os.path.join(DATA_DIR, "dog")
HUMAN_DIR = os.path.join(DATA_DIR, "human")

# Explicit paths for the Colab environment
CSV_DOG = "/content/cmt_dog_sound_analysis.csv"
CSV_HUMAN = "/content/cmt_human_speech_analysis.csv"

# These are for creating dummy audio files if needed
os.makedirs(DOG_DIR, exist_ok=True)
os.makedirs(os.path.join(HUMAN_DIR, "Actor_01"), exist_ok=True)

# --- Audio Data Configuration (must match your data source locations) ---
DOG_AUDIO_BASE_PATH = '/content/drive/MyDrive/combined'
HUMAN_AUDIO_BASE_PATH = '/content/drive/MyDrive/human'


# ---------------------------------------------------------------
# Cross-Species Analysis Functions
# ---------------------------------------------------------------
def find_nearest_cross_species_neighbor(selected_row, df_combined, n_neighbors=5):
    """
    Finds the closest neighbor from the opposite species using feature similarity.
    This enables cross-species pattern mapping for grammar development.
    """
    selected_source = selected_row['source']
    opposite_source = 'Human' if selected_source == 'Dog' else 'Dog'

    # Get feature columns for the similarity calculation
    feature_cols = [c for c in df_combined.columns if c.startswith("feature_")]

    if not feature_cols:
        # Fall back to any numeric columns if no feature columns exist
        numeric_cols = df_combined.select_dtypes(include=[np.number]).columns
        feature_cols = [c for c in numeric_cols if c not in ['x', 'y', 'z', 'cluster']]

    if not feature_cols:
        # Arbitrary selection (first row) if no suitable features are found
        opposite_species_data = df_combined[df_combined['source'] == opposite_source]
        if len(opposite_species_data) > 0:
            return opposite_species_data.iloc[0]
        return None

    # Extract features for the selected row
    selected_features = selected_row[feature_cols].values.reshape(1, -1)
    selected_features = np.nan_to_num(selected_features)

    # Get all rows from the opposite species
    opposite_species_data = df_combined[df_combined['source'] == opposite_source]
    if len(opposite_species_data) == 0:
        return None

    # Extract features for the opposite species
    opposite_features = opposite_species_data[feature_cols].values
    opposite_features = np.nan_to_num(opposite_features)

    # Cosine similarity works well for high-dimensional feature spaces
    similarities = cosine_similarity(selected_features, opposite_features)[0]

    # Find the index of the most similar neighbor
    most_similar_idx = np.argmax(similarities)
    nearest_neighbor = opposite_species_data.iloc[most_similar_idx]

    return nearest_neighbor

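# Minimal usage sketch (illustrative; assumes df_combined, built below, with a
# 'source' column and feature_* columns):
#   row = df_combined[df_combined['source'] == 'Dog'].iloc[0]
#   neighbor = find_nearest_cross_species_neighbor(row, df_combined)
#   print(neighbor['filepath'], neighbor['label'])
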
# ---------------------------------------------------------------
# Load datasets (Colab-first paths)
# ---------------------------------------------------------------
if os.path.exists(CSV_DOG) and os.path.exists(CSV_HUMAN):
    print(f"Found existing data files. Loading from:\n- {CSV_DOG}\n- {CSV_HUMAN}")
    df_dog = pd.read_csv(CSV_DOG)
    df_human = pd.read_csv(CSV_HUMAN)
    print("Successfully loaded data from the specified paths.")
else:
    print("Could not find one or both CSV files. Generating and using in-memory dummy data.")

    # This section is for DUMMY DATA GENERATION ONLY.
    # It runs if the primary CSVs are not found and does NOT write CSV files.
    n_dummy_items_per_category = 50

    rng = np.random.default_rng(42)
    # Tile the label lists to exactly n items so every column has the same length
    dog_labels = (["bark", "growl", "whine", "pant"] * (n_dummy_items_per_category // 4 + 1))[:n_dummy_items_per_category]
    human_labels = (["speech", "laugh", "cry", "shout"] * (n_dummy_items_per_category // 4 + 1))[:n_dummy_items_per_category]
    dog_rows = {
        "feature_1": rng.random(n_dummy_items_per_category),
        "feature_2": rng.random(n_dummy_items_per_category),
        "feature_3": rng.random(n_dummy_items_per_category),
        "label": dog_labels,
        "filepath": [f"dog_{i}.wav" for i in range(n_dummy_items_per_category)],
        "diag_srl_gamma": rng.uniform(0.5, 5.0, n_dummy_items_per_category),
        "diag_alpha_gamma": rng.uniform(0.1, 2.0, n_dummy_items_per_category),
        "zeta_curvature": rng.uniform(-1, 1, n_dummy_items_per_category),
        "torsion_index": rng.uniform(0, 1, n_dummy_items_per_category),
    }
    human_rows = {
        "feature_1": rng.random(n_dummy_items_per_category),
        "feature_2": rng.random(n_dummy_items_per_category),
        "feature_3": rng.random(n_dummy_items_per_category),
        "label": human_labels,
        "filepath": [f"human_{i}.wav" for i in range(n_dummy_items_per_category)],
        "diag_srl_gamma": rng.uniform(0.5, 5.0, n_dummy_items_per_category),
        "diag_alpha_gamma": rng.uniform(0.1, 2.0, n_dummy_items_per_category),
        "zeta_curvature": rng.uniform(-1, 1, n_dummy_items_per_category),
        "torsion_index": rng.uniform(0, 1, n_dummy_items_per_category),
    }

    df_dog = pd.DataFrame(dog_rows)
    df_human = pd.DataFrame(human_rows)

    # Dummy audio files are still written so the UI has something to play
    sr = 22050
    dur = 2.0
    t = np.linspace(0, dur, int(sr * dur), endpoint=False)
    for i in range(n_dummy_items_per_category):
        tone_freq = 220 + 20 * (i % 5)
        audio = 0.1 * np.sin(2 * np.pi * tone_freq * t) + 0.02 * rng.standard_normal(t.shape)
        audio = audio / (np.max(np.abs(audio)) + 1e-9)
        dog_label = dog_labels[i]
        dog_label_dir = os.path.join(DOG_DIR, dog_label)
        os.makedirs(dog_label_dir, exist_ok=True)
        sf.write(os.path.join(dog_label_dir, f"dog_{i}.wav"), audio, sr)
        sf.write(os.path.join(HUMAN_DIR, "Actor_01", f"human_{i}.wav"), audio, sr)

print(f"Loaded {len(df_dog)} dog rows and {len(df_human)} human rows.")
df_dog["source"], df_human["source"] = "Dog", "Human"
df_combined = pd.concat([df_dog, df_human], ignore_index=True)

# ---------------------------------------------------------------
# Expanded CMT implementation
# ---------------------------------------------------------------
class ExpandedCMT:
    def __init__(self):
        self.c1, self.c2 = 0.587 + 1.223j, -0.994 + 0.0j
        # A large but finite number representing the pole at z=1 for zeta
        self.ZETA_POLE_REGULARIZATION = 1e6 - 1e6j
        self.lens_library = {
            "gamma": sp_special.gamma,
            "zeta": self._regularized_zeta,  # robust zeta that handles the pole
            "airy": lambda z: sp_special.airy(z)[0],
            "bessel": lambda z: sp_special.jv(0, z),
        }

    def _regularized_zeta(self, z: np.ndarray) -> np.ndarray:
        """
        A wrapper around scipy's zeta function to handle the pole at z=1.
        Note: scipy.special.zeta is defined for real arguments, so this lens
        assumes the input values can be treated as real.
        """
        # Work on a copy to avoid modifying the original array
        z_out = np.copy(z).astype(np.complex128)

        # Points where the real part is close to 1 and the imaginary part close to 0
        pole_condition = np.isclose(np.real(z), 1.0) & np.isclose(np.imag(z), 0.0)

        # Apply the standard zeta function to the non-pole points
        non_pole_points = ~pole_condition
        z_out[non_pole_points] = sp_special.zeta(z[non_pole_points], 1)

        # Substitute the regularization constant at the pole points
        z_out[pole_condition] = self.ZETA_POLE_REGULARIZATION

        return z_out

    def _robust_normalize(self, signal: np.ndarray) -> np.ndarray:
        if signal.size == 0:
            return signal
        Q1, Q3 = np.percentile(signal, [25, 75])
        IQR = Q3 - Q1
        if IQR < 1e-9:
            median = np.median(signal)
            mad = np.median(np.abs(signal - median))
            return np.zeros_like(signal) if mad < 1e-9 else (signal - median) / (mad + 1e-9)
        lower, upper = Q1 - 1.5 * IQR, Q3 + 1.5 * IQR
        clipped = np.clip(signal, lower, upper)
        s_min, s_max = np.min(clipped), np.max(clipped)
        return np.zeros_like(signal) if s_max == s_min else 2.0 * (clipped - s_min) / (s_max - s_min) - 1.0

    def _encode(self, signal: np.ndarray) -> np.ndarray:
        N = len(signal)
        if N == 0:
            return signal.astype(np.complex128)
        i = np.arange(N)
        theta = 2.0 * np.pi * i / N
        f_k, A_k = np.array([271, 341, 491]), np.array([0.033, 0.050, 0.100])
        phi = np.sum(A_k[:, None] * np.sin(2.0 * np.pi * f_k[:, None] * i / N), axis=0)
        Theta = theta + phi
        exp_iTheta = np.exp(1j * Theta)
        g, m = signal * exp_iTheta, np.abs(signal) * exp_iTheta
        return 0.5 * g + 0.5 * m

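    # In effect, _encode maps sample s_i to z_i = 0.5 * (s_i + |s_i|) * exp(j*Theta_i),
    # where Theta_i = 2*pi*i/N + sum_k A_k*sin(2*pi*f_k*i/N) is a linear phase
    # ramp perturbed by three fixed sinusoids (f_k = 271, 341, 491).
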
    def _apply_lens(self, encoded_signal: np.ndarray, lens_type: str):
        lens_fn = self.lens_library.get(lens_type)
        if not lens_fn:
            raise ValueError(f"Lens '{lens_type}' not found.")
        with np.errstate(all="ignore"):
            w = lens_fn(encoded_signal)
            phi_trajectory = self.c1 * np.angle(w) + self.c2 * np.abs(encoded_signal)
        finite_mask = np.isfinite(phi_trajectory)
        phi_finite = phi_trajectory[finite_mask]
        return phi_finite, w[finite_mask], encoded_signal[finite_mask], len(encoded_signal), len(phi_finite)
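
# End-to-end sketch of the pipeline above (illustrative; `y` is any 1-D float
# signal, e.g. mono audio samples):
#   cmt = ExpandedCMT()
#   encoded = cmt._encode(cmt._robust_normalize(y))
#   phi, w, z, n_in, n_kept = cmt._apply_lens(encoded, "gamma")
#   # phi: Φ trajectory, w: lens response, z: encoded aperture (finite points only)
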
# ---------------------------------------------------------------
# Feature preparation and UMAP embedding
# ---------------------------------------------------------------
feature_cols = [c for c in df_combined.columns if c.startswith("feature_")]
features = np.nan_to_num(df_combined[feature_cols].to_numpy())
reducer = UMAP(n_components=3, n_neighbors=15, min_dist=0.1, random_state=42)
df_combined[["x", "y", "z"]] = reducer.fit_transform(features)
kmeans = KMeans(n_clusters=max(4, min(12, int(np.sqrt(len(df_combined))))), random_state=42, n_init=10)
df_combined["cluster"] = kmeans.fit_predict(features)
df_combined["chaos_score"] = np.log1p(df_combined.get("diag_srl_gamma", 0)) / (df_combined.get("diag_alpha_gamma", 1) + 1e-2)

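# chaos_score = log(1 + diag_srl_gamma) / (diag_alpha_gamma + 0.01); higher SRL
# gamma raises the score, higher alpha gamma damps it, and the 0.01 term
# guards against division by zero.
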
# ---------------------------------------------------------------
# Core Visualization and Analysis Functions
# ---------------------------------------------------------------
def resolve_audio_path(row: pd.Series) -> str:
    """
    Reconstructs the full path to an audio file based on the layout
    used by the data generation scripts.
    """
    basename = str(row.get("filepath", ""))
    source = row.get("source", "")
    label = row.get("label", "")

    # For "Dog" data the structure is: {base_path}/{label}/{filename}
    if source == "Dog":
        expected_path = os.path.join(DOG_AUDIO_BASE_PATH, label, basename)
        if os.path.exists(expected_path):
            return expected_path

    # For "Human" data, search all "Actor_XX" subfolders
    elif source == "Human":
        if os.path.isdir(HUMAN_AUDIO_BASE_PATH):
            for actor_folder in os.listdir(HUMAN_AUDIO_BASE_PATH):
                if actor_folder.startswith("Actor_"):
                    expected_path = os.path.join(HUMAN_AUDIO_BASE_PATH, actor_folder, basename)
                    if os.path.exists(expected_path):
                        return expected_path

    # Fallback for dummy data and other cases
    if os.path.exists(basename):
        return basename

    # If all else fails, return the original basename so the caller
    # fails with a clear message
    return basename

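# Example (hypothetical paths): a Dog row with label "bark" and filepath
# "dog_3.wav" resolves to '/content/drive/MyDrive/combined/bark/dog_3.wav'
# whenever that file exists.
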
def get_cmt_data(filepath: str, lens: str):
    try:
        y, _ = sf.read(filepath)
        if y.ndim > 1:
            y = np.mean(y, axis=1)  # mix down to mono
    except Exception as e:
        print(f"Error reading audio file {filepath}: {e}")
        return None

    cmt = ExpandedCMT()
    normalized = cmt._robust_normalize(y)
    encoded = cmt._encode(normalized)

    # _apply_lens also returns diagnostic sample counts
    phi, w, z, original_count, final_count = cmt._apply_lens(encoded, lens)

    return {
        "phi": phi, "w": w, "z": z,
        "original_count": original_count,
        "final_count": final_count
    }

def generate_holographic_field(z: np.ndarray, phi: np.ndarray, resolution: int):
    if z is None or phi is None or len(z) < 4:
        return None

    points = np.vstack([np.real(z), np.imag(z)]).T
    grid_x, grid_y = np.mgrid[
        np.min(points[:, 0]):np.max(points[:, 0]):complex(0, resolution),
        np.min(points[:, 1]):np.max(points[:, 1]):complex(0, resolution)
    ]

    grid_phi_real = griddata(points, np.real(phi), (grid_x, grid_y), method='cubic')
    grid_phi_imag = griddata(points, np.imag(phi), (grid_x, grid_y), method='cubic')

    grid_phi = np.nan_to_num(grid_phi_real + 1j * grid_phi_imag)

    return grid_x, grid_y, grid_phi

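# Note: cubic griddata returns NaN outside the convex hull of the sample
# points; np.nan_to_num zeroes those cells, so the field is flat there.
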
def wavelength_to_rgb(wl):
    """Rough mapping from a visible wavelength (nm) to an RGB color string."""
    if 380 <= wl < 440:
        return f'rgb({int(-(wl - 440) / (440 - 380) * 255)}, 0, 255)'   # violet
    elif 440 <= wl < 495:
        return f'rgb(0, {int((wl - 440) / (495 - 440) * 255)}, 255)'    # blue
    elif 495 <= wl < 570:
        return f'rgb(0, 255, {int(-(wl - 570) / (570 - 495) * 255)})'   # green
    elif 570 <= wl < 590:
        return f'rgb({int((wl - 570) / (590 - 570) * 255)}, 255, 0)'    # yellow
    elif 590 <= wl < 620:
        return f'rgb(255, {int(-(wl - 620) / (620 - 590) * 255)}, 0)'   # orange
    elif 620 <= wl <= 750:
        return 'rgb(255, 0, 0)'                                         # red
    return 'rgb(255,255,255)'

def create_holography_plot(z, phi, resolution, wavelength):
    field_data = generate_holographic_field(z, phi, resolution)
    if field_data is None:
        return go.Figure(layout={"title": "Not enough data for holography"})

    grid_x, grid_y, grid_phi = field_data
    mag_phi = np.abs(grid_phi)
    phase_phi = np.angle(grid_phi)

    # Map the illumination wavelength to the middle of the colorscale
    mid_color = wavelength_to_rgb(wavelength)
    custom_colorscale = [[0, 'rgb(20,0,40)'], [0.5, mid_color], [1, 'rgb(255,255,255)']]

    fig = go.Figure()
    # 1. The holographic surface (topology + phase interference)
    fig.add_trace(go.Surface(
        x=grid_x, y=grid_y, z=mag_phi,
        surfacecolor=phase_phi,
        colorscale=custom_colorscale,
        cmin=-np.pi, cmax=np.pi,
        colorbar=dict(title='Φ Phase'),
        name='Holographic Field',
        contours_z=dict(show=True, usecolormap=True, highlightcolor="limegreen", project_z=True, highlightwidth=10)
    ))
    # 2. The original data points projected onto the surface
    fig.add_trace(go.Scatter3d(
        x=np.real(z), y=np.imag(z), z=np.abs(phi) + 0.05,  # slight offset above the surface
        mode='markers',
        marker=dict(size=3, color='black', symbol='x'),
        name='Data Points'
    ))
    # 3. The vector flow field (cones point down the magnitude gradient)
    grad_y, grad_x = np.gradient(mag_phi)
    fig.add_trace(go.Cone(
        x=grid_x.flatten(), y=grid_y.flatten(), z=mag_phi.flatten(),
        u=-grad_x.flatten(), v=-grad_y.flatten(), w=np.full_like(mag_phi.flatten(), -0.1),
        sizemode="absolute", sizeref=0.1,
        anchor="tip",
        colorscale='Greys',
        showscale=False,
        name='Vector Flow'
    ))
    fig.update_layout(
        title="Interactive Holographic Field Reconstruction",
        scene=dict(
            xaxis_title="Re(z) - Encoded Signal",
            yaxis_title="Im(z) - Encoded Signal",
            zaxis_title="|Φ| - Field Magnitude"
        ),
        margin=dict(l=0, r=0, b=0, t=40)
    )
    return fig

def create_diagnostic_plots(z, w):
    """Creates a 2D plot showing the aperture (z) and lens response (w)."""
    if z is None or w is None:
        return go.Figure(layout={"title": "Not enough data for diagnostic plots"})

    fig = go.Figure()

    # Aperture (encoded signal)
    fig.add_trace(go.Scatter(
        x=np.real(z), y=np.imag(z), mode='markers',
        marker=dict(size=5, color='blue', opacity=0.6),
        name='Aperture (z)'
    ))

    # Lens response
    fig.add_trace(go.Scatter(
        x=np.real(w), y=np.imag(w), mode='markers',
        marker=dict(size=5, color='red', opacity=0.6, symbol='x'),
        name='Lens Response (w)'
    ))

    fig.update_layout(
        title="Diagnostic View: Aperture and Lens Response",
        xaxis_title="Real Part",
        yaxis_title="Imaginary Part",
        legend_title="Signal Stage",
        margin=dict(l=20, r=20, t=60, b=20)
    )
    return fig

def create_dual_holography_plot(z1, phi1, z2, phi2, resolution, wavelength, title1="Primary", title2="Comparison"):
    """Creates side-by-side holographic visualizations for comparison."""
    field_data1 = generate_holographic_field(z1, phi1, resolution)
    field_data2 = generate_holographic_field(z2, phi2, resolution)

    if field_data1 is None or field_data2 is None:
        return go.Figure(layout={"title": "Insufficient data for dual holography"})

    grid_x1, grid_y1, grid_phi1 = field_data1
    grid_x2, grid_y2, grid_phi2 = field_data2

    mag_phi1, phase_phi1 = np.abs(grid_phi1), np.angle(grid_phi1)
    mag_phi2, phase_phi2 = np.abs(grid_phi2), np.angle(grid_phi2)

    # Shared colorscale derived from the illumination wavelength (see wavelength_to_rgb above)
    mid_color = wavelength_to_rgb(wavelength)
    custom_colorscale = [[0, 'rgb(20,0,40)'], [0.5, mid_color], [1, 'rgb(255,255,255)']]

    fig = make_subplots(
        rows=1, cols=2,
        specs=[[{'type': 'scene'}, {'type': 'scene'}]],
        subplot_titles=[title1, title2]
    )

    # Left plot (primary)
    fig.add_trace(go.Surface(
        x=grid_x1, y=grid_y1, z=mag_phi1,
        surfacecolor=phase_phi1,
        colorscale=custom_colorscale,
        cmin=-np.pi, cmax=np.pi,
        showscale=False,
        name=title1,
        contours_z=dict(show=True, usecolormap=True, highlightcolor="limegreen", project_z=True)
    ), row=1, col=1)

    # Right plot (comparison)
    fig.add_trace(go.Surface(
        x=grid_x2, y=grid_y2, z=mag_phi2,
        surfacecolor=phase_phi2,
        colorscale=custom_colorscale,
        cmin=-np.pi, cmax=np.pi,
        showscale=False,
        name=title2,
        contours_z=dict(show=True, usecolormap=True, highlightcolor="limegreen", project_z=True)
    ), row=1, col=2)

    # Add the raw data points above each surface
    if z1 is not None and phi1 is not None:
        fig.add_trace(go.Scatter3d(
            x=np.real(z1), y=np.imag(z1), z=np.abs(phi1) + 0.05,
            mode='markers', marker=dict(size=3, color='black', symbol='x'),
            name=f'{title1} Points', showlegend=False
        ), row=1, col=1)

    if z2 is not None and phi2 is not None:
        fig.add_trace(go.Scatter3d(
            x=np.real(z2), y=np.imag(z2), z=np.abs(phi2) + 0.05,
            mode='markers', marker=dict(size=3, color='black', symbol='x'),
            name=f'{title2} Points', showlegend=False
        ), row=1, col=2)

    fig.update_layout(
        title="Side-by-Side Cross-Species Holographic Comparison",
        scene=dict(
            xaxis_title="Re(z)", yaxis_title="Im(z)", zaxis_title="|Φ|",
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
        ),
        scene2=dict(
            xaxis_title="Re(z)", yaxis_title="Im(z)", zaxis_title="|Φ|",
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
        ),
        margin=dict(l=0, r=0, b=0, t=60),
        height=600
    )
    return fig

def create_dual_diagnostic_plots(z1, w1, z2, w2, title1="Primary", title2="Comparison"):
    """Creates side-by-side diagnostic plots for cross-species comparison."""
    fig = make_subplots(
        rows=1, cols=2,
        subplot_titles=[f"{title1}: Aperture & Lens Response", f"{title2}: Aperture & Lens Response"]
    )

    if z1 is not None and w1 is not None:
        # Primary aperture and response
        fig.add_trace(go.Scatter(
            x=np.real(z1), y=np.imag(z1), mode='markers',
            marker=dict(size=5, color='blue', opacity=0.6),
            name=f'{title1} Aperture', showlegend=True
        ), row=1, col=1)

        fig.add_trace(go.Scatter(
            x=np.real(w1), y=np.imag(w1), mode='markers',
            marker=dict(size=5, color='red', opacity=0.6, symbol='x'),
            name=f'{title1} Response', showlegend=True
        ), row=1, col=1)

    if z2 is not None and w2 is not None:
        # Comparison aperture and response
        fig.add_trace(go.Scatter(
            x=np.real(z2), y=np.imag(z2), mode='markers',
            marker=dict(size=5, color='darkblue', opacity=0.6),
            name=f'{title2} Aperture', showlegend=True
        ), row=1, col=2)

        fig.add_trace(go.Scatter(
            x=np.real(w2), y=np.imag(w2), mode='markers',
            marker=dict(size=5, color='darkred', opacity=0.6, symbol='x'),
            name=f'{title2} Response', showlegend=True
        ), row=1, col=2)

    fig.update_layout(
        title="Cross-Species Diagnostic Comparison",
        height=400,
        margin=dict(l=20, r=20, t=60, b=20)
    )
    fig.update_xaxes(title_text="Real Part", row=1, col=1)
    fig.update_yaxes(title_text="Imaginary Part", row=1, col=1)
    fig.update_xaxes(title_text="Real Part", row=1, col=2)
    fig.update_yaxes(title_text="Imaginary Part", row=1, col=2)

    return fig


def create_entropy_geometry_plot(phi: np.ndarray):
    """Creates a plot showing magnitude/phase distributions and their entropy."""
    if phi is None or len(phi) < 2:
        return go.Figure(layout={"title": "Not enough data for entropy analysis"})

    magnitudes = np.abs(phi)
    phases = np.angle(phi)

    # Compute the Shannon entropy of each distribution
    mag_hist, _ = np.histogram(magnitudes, bins='auto', density=True)
    phase_hist, _ = np.histogram(phases, bins='auto', density=True)
    mag_entropy = shannon_entropy(mag_hist)
    phase_entropy = shannon_entropy(phase_hist)
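    # scipy.stats.entropy normalizes its input to a probability distribution,
    # so the raw histogram densities above are valid input.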

    fig = make_subplots(rows=1, cols=2, subplot_titles=(
        f"Magnitude Distribution (Entropy: {mag_entropy:.3f})",
        f"Phase Distribution (Entropy: {phase_entropy:.3f})"
    ))

    fig.add_trace(go.Histogram(x=magnitudes, name='Magnitude', nbinsx=50), row=1, col=1)
    fig.add_trace(go.Histogram(x=phases, name='Phase', nbinsx=50), row=1, col=2)

    fig.update_layout(
        title_text="Informational-Entropy Geometry",
        showlegend=False,
        bargap=0.1,
        margin=dict(l=20, r=20, t=60, b=20)
    )
    fig.update_xaxes(title_text="|Φ|", row=1, col=1)
    fig.update_yaxes(title_text="Count", row=1, col=1)
    fig.update_xaxes(title_text="angle(Φ)", row=1, col=2)
    fig.update_yaxes(title_text="Count", row=1, col=2)

    return fig

# ---------------------------------------------------------------
# Gradio UI
# ---------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan")) as demo:
    gr.Markdown("# Exhaustive CMT Explorer for Interspecies Communication v4.0")
    file_choices = df_combined["filepath"].astype(str).tolist()
    default_primary = file_choices[0] if file_choices else ""

    with gr.Tabs():
        with gr.TabItem("Unified Manifold"):
            gr.Plot(value=lambda: go.Figure(data=[go.Scatter3d(
                x=df_combined["x"], y=df_combined["y"], z=df_combined["z"],
                mode="markers",
                marker=dict(color=df_combined["cluster"], size=5, colorscale="Viridis",
                            showscale=True, colorbar={"title": "Cluster ID"}),
                text=df_combined.apply(lambda r: f"{r['source']}: {r.get('label', '')}<br>File: {r['filepath']}", axis=1),
                hoverinfo="text"
            )], layout=dict(title="Communication Manifold (UMAP Projection)")), label="UMAP Manifold")

        with gr.TabItem("Interactive Holography"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Cross-Species Holography Controls")

                    # Species selection and automatic pairing
                    species_dropdown = gr.Dropdown(
                        label="Select Species",
                        choices=["Dog", "Human"],
                        value="Dog"
                    )

                    # Primary file selection (filtered by species)
                    primary_dropdown = gr.Dropdown(
                        label="Primary Audio File",
                        choices=[],
                        value=""
                    )

                    # Automatically found neighbor (from the opposite species)
                    neighbor_dropdown = gr.Dropdown(
                        label="Auto-Found Cross-Species Neighbor",
                        choices=[],
                        value="",
                        interactive=True  # allow manual override
                    )

                    holo_lens_dropdown = gr.Dropdown(label="CMT Lens", choices=["gamma", "zeta", "airy", "bessel"], value="gamma")
                    holo_resolution_slider = gr.Slider(label="Field Resolution", minimum=20, maximum=100, step=5, value=40)
                    holo_wavelength_slider = gr.Slider(label="Illumination Wavelength (nm)", minimum=380, maximum=750, step=5, value=550)

                    # Information panels
                    primary_info_html = gr.HTML(label="Primary Audio Info")
                    neighbor_info_html = gr.HTML(label="Neighbor Audio Info")

                    # Audio players
                    primary_audio_out = gr.Audio(label="Primary Audio")
                    neighbor_audio_out = gr.Audio(label="Neighbor Audio")

                with gr.Column(scale=2):
                    dual_holography_plot = gr.Plot(label="Side-by-Side Holographic Comparison")
                    dual_diagnostic_plot = gr.Plot(label="Cross-Species Diagnostic Comparison")

            def update_file_choices(species):
                """Updates the primary file dropdown based on the selected species."""
                species_files = df_combined[df_combined["source"] == species]["filepath"].astype(str).tolist()
                # gr.update works across Gradio versions where gr.Dropdown.update was removed
                return gr.update(choices=species_files, value=species_files[0] if species_files else "")

            def update_cross_species_view(species, primary_file, neighbor_file, lens, resolution, wavelength):
                if not primary_file:
                    empty_fig = go.Figure(layout={"title": "Please select a primary file."})
                    return empty_fig, empty_fig, "", "", None, None, gr.update()

                # Get the primary row
                primary_mask = (df_combined["filepath"] == primary_file) & (df_combined["source"] == species)
                primary_matches = df_combined[primary_mask]
                primary_row = primary_matches.iloc[0] if len(primary_matches) > 0 else None

                if primary_row is None:
                    empty_fig = go.Figure(layout={"title": "Primary file not found."})
                    return empty_fig, empty_fig, "", "", None, None, gr.update()

                # Find a cross-species neighbor if one was not manually selected
                opposite_species = 'Human' if species == 'Dog' else 'Dog'
                if not neighbor_file:
                    neighbor_row = find_nearest_cross_species_neighbor(primary_row, df_combined)
                    if neighbor_row is not None:
                        neighbor_file = neighbor_row['filepath']
                else:
                    # Use the manually selected neighbor
                    neighbor_mask = (df_combined["filepath"] == neighbor_file) & (df_combined["source"] == opposite_species)
                    neighbor_matches = df_combined[neighbor_mask]
                    neighbor_row = neighbor_matches.iloc[0] if len(neighbor_matches) > 0 else None

                # Get CMT data for both files
                primary_fp = resolve_audio_path(primary_row)
                primary_cmt = get_cmt_data(primary_fp, lens)

                neighbor_fp = None
                neighbor_cmt = None
                if neighbor_row is not None:
                    neighbor_fp = resolve_audio_path(neighbor_row)
                    neighbor_cmt = get_cmt_data(neighbor_fp, lens)

                # Create the visualizations
                if primary_cmt and neighbor_cmt:
                    primary_title = f"{species}: {primary_row.get('label', 'Unknown')}"
                    neighbor_title = f"{neighbor_row['source']}: {neighbor_row.get('label', 'Unknown')}"

                    dual_holo_fig = create_dual_holography_plot(
                        primary_cmt["z"], primary_cmt["phi"],
                        neighbor_cmt["z"], neighbor_cmt["phi"],
                        resolution, wavelength, primary_title, neighbor_title
                    )

                    dual_diag_fig = create_dual_diagnostic_plots(
                        primary_cmt["z"], primary_cmt["w"],
                        neighbor_cmt["z"], neighbor_cmt["w"],
                        primary_title, neighbor_title
                    )
                else:
                    dual_holo_fig = go.Figure(layout={"title": "Error processing audio files"})
                    dual_diag_fig = go.Figure(layout={"title": "Error processing audio files"})

                # Build the info strings
                primary_info = f"""
                <b>Primary:</b> {primary_row['filepath']}<br>
                <b>Species:</b> {primary_row['source']}<br>
                <b>Label:</b> {primary_row.get('label', 'N/A')}<br>
                <b>Data Points:</b> {primary_cmt['final_count'] if primary_cmt else 0} / {primary_cmt['original_count'] if primary_cmt else 0}
                """

                neighbor_info = ""
                if neighbor_row is not None:
                    neighbor_info = f"""
                    <b>Neighbor:</b> {neighbor_row['filepath']}<br>
                    <b>Species:</b> {neighbor_row['source']}<br>
                    <b>Label:</b> {neighbor_row.get('label', 'N/A')}<br>
                    <b>Data Points:</b> {neighbor_cmt['final_count'] if neighbor_cmt else 0} / {neighbor_cmt['original_count'] if neighbor_cmt else 0}
                    """

                # Refresh the neighbor dropdown choices for the opposite species
                neighbor_choices = df_combined[df_combined["source"] == opposite_species]["filepath"].astype(str).tolist()

                # Audio file paths; use explicit `is not None` checks, since
                # truth-testing a pandas Series raises a ValueError
                primary_audio = primary_fp if primary_fp and os.path.exists(primary_fp) else None
                neighbor_audio = neighbor_fp if neighbor_fp and os.path.exists(neighbor_fp) else None

                return (dual_holo_fig, dual_diag_fig, primary_info, neighbor_info,
                        primary_audio, neighbor_audio,
                        gr.update(choices=neighbor_choices, value=neighbor_file if neighbor_row is not None else ""))

            # Event handlers
            species_dropdown.change(
                update_file_choices,
                inputs=[species_dropdown],
                outputs=[primary_dropdown]
            )

            cross_species_inputs = [species_dropdown, primary_dropdown, neighbor_dropdown,
                                    holo_lens_dropdown, holo_resolution_slider, holo_wavelength_slider]
            cross_species_outputs = [dual_holography_plot, dual_diagnostic_plot,
                                     primary_info_html, neighbor_info_html,
                                     primary_audio_out, neighbor_audio_out, neighbor_dropdown]

            # Any control change re-renders the comparison view
            for component in cross_species_inputs:
                component.change(update_cross_species_view,
                                 inputs=cross_species_inputs,
                                 outputs=cross_species_outputs)

            # Initialize on load
            demo.load(lambda: update_file_choices("Dog"), outputs=[primary_dropdown])
            demo.load(update_cross_species_view,
                      inputs=cross_species_inputs,
                      outputs=cross_species_outputs)

if __name__ == "__main__":
    demo.launch(share=True, debug=True)
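
# Note: on Hugging Face Spaces the app is served by the platform itself, so
# share=True mainly matters when running this script locally or in Colab.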