Create voice_analysis.py
voice_analysis.py +40 -0
voice_analysis.py
ADDED
@@ -0,0 +1,40 @@
import torch
import numpy as np
from speechbrain.pretrained import EncoderClassifier
from pydub import AudioSegment
from sklearn.cluster import DBSCAN
import librosa

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
classifier = EncoderClassifier.from_hparams(
    source="speechbrain/spkrec-ecapa-voxceleb",
    savedir="pretrained_models/spkrec-ecapa-voxceleb",
    run_opts={"device": device},
)

def extract_voice_embedding(audio_segment):
    """Compute a speaker embedding for one pydub AudioSegment."""
    signal = np.array(audio_segment.get_array_of_samples())
    signal = signal.astype(np.float32) / 32768.0  # Normalize 16-bit PCM to [-1, 1]
    embedding = classifier.encode_batch(torch.tensor(signal).unsqueeze(0))
    return embedding.squeeze().cpu().numpy()

def process_audio(audio_path, segment_duration=1000):
    """Split the audio into fixed-length segments (in ms) and embed each one."""
    audio = AudioSegment.from_file(audio_path)
    segments = [audio[i:i + segment_duration] for i in range(0, len(audio), segment_duration)]
    embeddings = [extract_voice_embedding(segment) for segment in segments]
    return embeddings

def cluster_voices(embeddings):
    """Group segment embeddings by speaker using DBSCAN with cosine distance."""
    if len(embeddings) < 2:
        print("Not enough voice segments for clustering. Assigning all to one cluster.")
        return np.zeros(len(embeddings), dtype=int)

    X = np.stack(embeddings)
    dbscan = DBSCAN(eps=0.3, min_samples=5, metric='cosine')
    clusters = dbscan.fit_predict(X)

    # DBSCAN labels outliers as -1; if every segment is noise, fall back to one cluster.
    if np.all(clusters == -1):
        print("DBSCAN assigned all to noise. Considering as one cluster.")
        return np.zeros(len(embeddings), dtype=int)

    return clusters

def get_most_frequent_voice(embeddings, clusters):
    """Return the embeddings belonging to the largest cluster (the dominant speaker)."""
    largest_cluster = max(set(clusters), key=list(clusters).count)
    return [emb for emb, cluster in zip(embeddings, clusters) if cluster == largest_cluster]
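For context, a minimal driver showing how these functions chain together might look like the sketch below. It is not part of the committed file: the "meeting.wav" path is an assumed example, and the filtering of DBSCAN's -1 noise label before picking the dominant speaker is an illustrative precaution, since get_most_frequent_voice as written can select the noise label if noise points outnumber every real cluster.

# Hypothetical usage sketch (not part of the commit).
embeddings = process_audio("meeting.wav")  # "meeting.wav" is an assumed example path
clusters = cluster_voices(embeddings)

# Drop noise-labeled segments (-1) so the largest *real* cluster wins.
kept = [(e, c) for e, c in zip(embeddings, clusters) if c != -1]
if kept:
    kept_embeddings, kept_clusters = zip(*kept)
    dominant = get_most_frequent_voice(list(kept_embeddings), list(kept_clusters))
    print(f"{len(dominant)} of {len(embeddings)} segments belong to the dominant speaker")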