import moviepy.editor as mp
import numpy as np
import torch
import torchaudio
from pyannote.audio import Pipeline
from pyannote.core import Segment
def extract_audio_from_video(video_path):
    """Extract the audio track of a video to a .wav file next to it."""
    video = mp.VideoFileClip(video_path)
    audio_path = video_path.rsplit('.', 1)[0] + '.wav'
    video.audio.write_audiofile(audio_path)
    return audio_path
def diarize_speakers(audio_path):
    """Run pyannote speaker diarization on the extracted audio."""
    # The pretrained pipeline requires a Hugging Face access token.
    pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization@2.1",
                                        use_auth_token="YOUR_HF_TOKEN")
    diarization = pipeline(audio_path)
    return diarization
def get_speaker_embeddings(audio_path, diarization, model):
    """Compute one speaker embedding per diarization turn."""
    waveform, sample_rate = torchaudio.load(audio_path)
    embeddings = []
    for turn, _, speaker in diarization.itertracks(yield_label=True):
        start = int(turn.start * sample_rate)
        end = int(turn.end * sample_rate)
        segment = waveform[:, start:end]
        if segment.shape[1] == 0:
            continue
        with torch.no_grad():
            embedding = model({"waveform": segment, "sample_rate": sample_rate})
        # Normalise to a flat numpy vector, whether the model returns a tensor or an array.
        if torch.is_tensor(embedding):
            embedding = embedding.squeeze().cpu().numpy()
        else:
            embedding = np.asarray(embedding).squeeze()
        embeddings.append({"time": turn.start, "embedding": embedding, "speaker": speaker})
    return embeddings
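
# One possible way to obtain a compatible `model` is pyannote's Inference wrapper,
# which accepts the same {"waveform", "sample_rate"} in-memory dict used above.
# This helper is a hedged sketch, not part of the original listing: the
# "pyannote/embedding" checkpoint and the token value are assumptions to adapt.
def load_embedding_model(auth_token="YOUR_HF_TOKEN"):
    from pyannote.audio import Inference, Model
    model = Model.from_pretrained("pyannote/embedding", use_auth_token=auth_token)
    # window="whole" returns a single embedding (numpy array) per segment.
    return Inference(model, window="whole")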
def align_voice_embeddings(voice_embeddings, frame_count, fps):
    """Assign to every video frame the embedding of the most recent speaker turn."""
    aligned_embeddings = []
    current_embedding_index = 0
    for frame in range(frame_count):
        frame_time = frame / fps
        # Advance to the latest turn that starts at or before this frame's timestamp.
        while (current_embedding_index < len(voice_embeddings) - 1 and
               voice_embeddings[current_embedding_index + 1]["time"] <= frame_time):
            current_embedding_index += 1
        aligned_embeddings.append(voice_embeddings[current_embedding_index]["embedding"])
    return np.array(aligned_embeddings)
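
# Minimal end-to-end sketch tying the helpers together. The input path, the
# load_embedding_model() helper, and the fps/frame-count derivation are
# assumptions for illustration, not part of the original code.
if __name__ == "__main__":
    video_path = "input.mp4"  # placeholder path
    audio_path = extract_audio_from_video(video_path)
    diarization = diarize_speakers(audio_path)
    embedding_model = load_embedding_model()
    voice_embeddings = get_speaker_embeddings(audio_path, diarization, embedding_model)

    # Derive per-frame timing from the source video (moviepy exposes fps and duration).
    clip = mp.VideoFileClip(video_path)
    frame_count = int(clip.fps * clip.duration)
    aligned = align_voice_embeddings(voice_embeddings, frame_count, clip.fps)
    print(aligned.shape)  # (frame_count, embedding_dimension)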