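"""Pronunciation evaluation endpoints for the speech API.

Manages per-phrase reference recordings and scores a user's pronunciation by
transcribing both the uploaded audio and the stored reference clips with the
ASR model exposed by ``translator``, then comparing the transcriptions with
difflib-based similarity.

Illustrative wiring only (the Flask app, route paths, and 16 kHz sample rate
below are assumptions, not part of this module)::

    from flask import Flask, request

    app = Flask(__name__)
    REFERENCE_DIR = init_reference_audio("./reference_audios", "./output")

    @app.route("/upload_reference", methods=["POST"])
    def upload_reference():
        return handle_upload_reference(request, REFERENCE_DIR, 16000)

    @app.route("/evaluate", methods=["POST"])
    def evaluate():
        return handle_evaluation_request(request, REFERENCE_DIR, "./output", 16000)
"""
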
import os
import glob
import logging
import traceback
import tempfile
import shutil
from difflib import SequenceMatcher
from concurrent.futures import ThreadPoolExecutor

import torch
import torchaudio
from flask import jsonify
from pydub import AudioSegment
from werkzeug.utils import secure_filename

from translator import get_asr_model, get_asr_processor, LANGUAGE_CODES

logger = logging.getLogger("speech_api")


def calculate_similarity(text1, text2):
    """Calculate text similarity as a percentage using difflib's SequenceMatcher."""
    def clean_text(text):
        return text.lower()

    clean1 = clean_text(text1)
    clean2 = clean_text(text2)

    matcher = SequenceMatcher(None, clean1, clean2)
    return matcher.ratio() * 100


def init_reference_audio(reference_dir, output_dir):
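    """Create the output and reference-audio directories and return the reference directory in use.

    Falls back to /tmp/reference_audios when the configured directory is missing
    or not writable, seeds it with any patterns bundled in ./reference_audios,
    and logs an inventory of the available reference recordings.
    """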
    try:
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Created output directory: {output_dir}")
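        # Fall back to a directory under /tmp when the configured reference
        # directory is missing or its parent is not writable.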
        if not os.path.exists(reference_dir) or not os.access(os.path.dirname(reference_dir), os.W_OK):
            reference_dir = os.path.join('/tmp', 'reference_audios')
            logger.warning(f"Using alternate reference directory: {reference_dir}")

        os.makedirs(reference_dir, exist_ok=True)
        logger.info(f"Created/verified reference audio directory: {reference_dir}")

        setup_reference_patterns(reference_dir)
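        # Seed the working directory with any reference patterns bundled
        # alongside the application.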
        original_ref_dir = "./reference_audios"
        if os.path.exists(original_ref_dir) and reference_dir != original_ref_dir:
            try:
                for item in os.listdir(original_ref_dir):
                    src_path = os.path.join(original_ref_dir, item)
                    dst_path = os.path.join(reference_dir, item)

                    if os.path.isdir(src_path) and not os.path.exists(dst_path):
                        shutil.copytree(src_path, dst_path)
                        logger.info(f"Copied reference pattern from {src_path} to {dst_path}")
            except Exception as e:
                logger.warning(f"Could not copy original reference files: {str(e)}")
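        # Log an inventory of the available reference patterns and their wav files.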
        if os.path.exists(reference_dir):
            pattern_dirs = [d for d in os.listdir(reference_dir)
                            if os.path.isdir(os.path.join(reference_dir, d))]
            logger.info(f"Found reference patterns: {pattern_dirs}")

            for pattern_dir_name in pattern_dirs:
                pattern_path = os.path.join(reference_dir, pattern_dir_name)
                wav_files = glob.glob(os.path.join(pattern_path, "*.wav"))
                logger.info(f"Found {len(wav_files)} wav files in {pattern_dir_name}")

        return reference_dir

    except Exception as e:
        logger.error(f"Failed to set up reference audio directory: {str(e)}")
        return reference_dir


def handle_upload_reference(request, reference_dir, sample_rate):
    """Handle the upload of a reference audio recording for a given reference word."""
    try:
        if "audio" not in request.files:
            logger.warning("Reference upload missing audio file")
            return jsonify({"error": "No audio file uploaded"}), 400

        reference_word = request.form.get("reference_word", "").strip()
        if not reference_word:
            logger.warning("Reference upload missing reference word")
            return jsonify({"error": "No reference word provided"}), 400
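        # Kapampangan phrases that may receive reference recordings; uploads for
        # anything else are rejected.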
        reference_patterns = [
            "mayap_a_abak", "mayap_a_ugtu", "mayap_a_gatpanapun", "mayap_a_bengi",
            "komusta_ka", "malaus_ko_pu", "malaus_kayu", "agaganaka_da_ka",
            "pagdulapan_da_ka", "kaluguran_da_ka", "dakal_a_salamat", "panapaya_mu_ku"
        ]

        if reference_word not in reference_patterns:
            logger.warning(f"Invalid reference word: {reference_word}")
            return jsonify({"error": f"Invalid reference word. Available: {reference_patterns}"}), 400
        pattern_dir = os.path.join(reference_dir, reference_word)
        os.makedirs(pattern_dir, exist_ok=True)

        audio_file = request.files["audio"]
        file_path = os.path.join(pattern_dir, secure_filename(audio_file.filename))
        audio_file.save(file_path)
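        # Convert non-WAV uploads to a mono WAV at the configured sample rate.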
        if not file_path.lower().endswith('.wav'):
            base_path = os.path.splitext(file_path)[0]
            wav_path = f"{base_path}.wav"
            try:
                audio = AudioSegment.from_file(file_path)
                audio = audio.set_frame_rate(sample_rate).set_channels(1)
                audio.export(wav_path, format="wav")

                os.unlink(file_path)
                file_path = wav_path
            except Exception as e:
                logger.error(f"Reference audio conversion failed: {str(e)}")
                return jsonify({"error": f"Audio conversion failed: {str(e)}"}), 500

        logger.info(f"Reference audio saved successfully for {reference_word}: {file_path}")
        references = glob.glob(os.path.join(pattern_dir, "*.wav"))
        return jsonify({
            "message": "Reference audio uploaded successfully",
            "reference_word": reference_word,
            "file": os.path.basename(file_path),
            "total_references": len(references)
        })

    except Exception as e:
        logger.error(f"Unhandled exception in reference upload: {str(e)}")
        logger.debug(f"Stack trace: {traceback.format_exc()}")
        return jsonify({"error": f"Internal server error: {str(e)}"}), 500


def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
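    """Evaluate a user's pronunciation against the stored reference recordings.

    The uploaded audio and up to five reference clips are transcribed with the
    shared ASR model, the transcriptions are compared with difflib-based
    similarity, and the best match is returned together with feedback and
    per-reference details.
    """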
request_id = f"req-{id(request)}" |
|
logger.info(f"[{request_id}] π Starting new pronunciation evaluation request") |
|
|
|
temp_dir = None |
|
|
|
|
|
asr_model = get_asr_model() |
|
asr_processor = get_asr_processor() |
|
|
|
if asr_model is None or asr_processor is None: |
|
logger.error(f"[{request_id}] β Evaluation endpoint called but ASR models aren't loaded") |
|
return jsonify({"error": "ASR model not available"}), 503 |
|
|
|
    try:
        if "audio" not in request.files:
            logger.warning(f"[{request_id}] Evaluation request missing audio file")
            return jsonify({"error": "No audio file uploaded"}), 400

        audio_file = request.files["audio"]
        reference_locator = request.form.get("reference_locator", "").strip()
        language = request.form.get("language", "kapampangan").lower()

        if not reference_locator:
            logger.warning(f"[{request_id}] No reference locator provided")
            return jsonify({"error": "Reference locator is required"}), 400
        reference_dir_path = os.path.join(reference_dir, reference_locator)
        logger.info(f"[{request_id}] Reference directory path: {reference_dir_path}")

        if not os.path.exists(reference_dir_path):
            logger.warning(f"[{request_id}] Reference directory not found: {reference_dir_path}")
            return jsonify({"error": f"Reference audio directory not found: {reference_locator}"}), 404

        reference_files = glob.glob(os.path.join(reference_dir_path, "*.wav"))
        logger.info(f"[{request_id}] Found {len(reference_files)} reference files")
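        # No reference recordings yet: create a silent placeholder clip so the
        # evaluation flow can still be exercised.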
        if not reference_files:
            logger.warning(f"[{request_id}] No reference audio files found in {reference_dir_path}")

            try:
                dummy_file_path = os.path.join(reference_dir_path, "dummy_reference.wav")
                logger.info(f"[{request_id}] Creating dummy reference file: {dummy_file_path}")

                silent_audio = AudioSegment.silent(duration=1000, frame_rate=sample_rate)
                silent_audio.export(dummy_file_path, format="wav")

                reference_files = [dummy_file_path]
                logger.info(f"[{request_id}] Created dummy reference file for testing")
            except Exception as e:
                logger.error(f"[{request_id}] Failed to create dummy reference: {str(e)}")
                return jsonify({"error": f"No reference audio found for {reference_locator}"}), 404
        lang_code = LANGUAGE_CODES.get(language, language)
        logger.info(
            f"[{request_id}] Evaluating pronunciation for reference: {reference_locator} with language code: {lang_code}")
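        # Save the uploaded audio into a per-request temporary directory.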
        temp_dir = os.path.join(output_dir, f"temp_{request_id}")
        os.makedirs(temp_dir, exist_ok=True)

        user_audio_path = os.path.join(temp_dir, "user_audio_input.wav")
        with open(user_audio_path, 'wb') as f:
            f.write(audio_file.read())
        try:
            logger.info(f"[{request_id}] Processing user audio file")
            audio = AudioSegment.from_file(user_audio_path)
            audio = audio.set_frame_rate(sample_rate).set_channels(1)

            processed_path = os.path.join(temp_dir, "processed_user_audio.wav")
            audio.export(processed_path, format="wav")

            user_waveform, sr = torchaudio.load(processed_path)
            user_waveform = user_waveform.squeeze().numpy()
            logger.info(f"[{request_id}] User audio processed: {sr}Hz, length: {len(user_waveform)} samples")

            user_audio_path = processed_path
        except Exception as e:
            logger.error(f"[{request_id}] Audio processing failed: {str(e)}")
            return jsonify({"error": f"Audio processing failed: {str(e)}"}), 500
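        # Transcribe the user's audio with the shared ASR model (greedy argmax decoding).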
        try:
            logger.info(f"[{request_id}] Transcribing user audio")
            inputs = asr_processor(
                user_waveform,
                sampling_rate=sample_rate,
                return_tensors="pt",
                language=lang_code
            )
            inputs = {k: v.to(asr_model.device) for k, v in inputs.items()}

            with torch.no_grad():
                logits = asr_model(**inputs).logits
                ids = torch.argmax(logits, dim=-1)[0]
                user_transcription = asr_processor.decode(ids)

            logger.info(f"[{request_id}] User transcription: '{user_transcription}'")
        except Exception as e:
            logger.error(f"[{request_id}] ASR inference failed: {str(e)}")
            return jsonify({"error": f"ASR inference failed: {str(e)}"}), 500
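        # Compare the user's transcription against a handful of reference clips,
        # transcribing them in parallel with a small thread pool.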
        batch_size = 2
        results = []
        best_score = 0
        best_reference = None
        best_transcription = None

        max_files_to_check = min(5, len(reference_files))
        reference_files = reference_files[:max_files_to_check]

        logger.info(f"[{request_id}] Processing {len(reference_files)} reference files in batches of {batch_size}")
        def process_reference_file(ref_file):
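            """Transcribe one reference clip and score its similarity to the user's transcription."""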
            ref_filename = os.path.basename(ref_file)
            try:
                ref_waveform, ref_sr = torchaudio.load(ref_file)
                if ref_sr != sample_rate:
                    ref_waveform = torchaudio.transforms.Resample(ref_sr, sample_rate)(ref_waveform)
                ref_waveform = ref_waveform.squeeze().numpy()

                inputs = asr_processor(
                    ref_waveform,
                    sampling_rate=sample_rate,
                    return_tensors="pt",
                    language=lang_code
                )
                inputs = {k: v.to(asr_model.device) for k, v in inputs.items()}

                with torch.no_grad():
                    logits = asr_model(**inputs).logits
                    ids = torch.argmax(logits, dim=-1)[0]
                    ref_transcription = asr_processor.decode(ids)

                similarity = calculate_similarity(user_transcription, ref_transcription)

                logger.info(
                    f"[{request_id}] Similarity with {ref_filename}: {similarity:.2f}%, transcription: '{ref_transcription}'")

                return {
                    "reference_file": ref_filename,
                    "reference_text": ref_transcription,
                    "similarity_score": similarity
                }
            except Exception as e:
                logger.error(f"[{request_id}] Error processing {ref_filename}: {str(e)}")
                return {
                    "reference_file": ref_filename,
                    "reference_text": "Error",
                    "similarity_score": 0,
                    "error": str(e)
                }
        with ThreadPoolExecutor(max_workers=batch_size) as executor:
            batch_results = list(executor.map(process_reference_file, reference_files))
            results.extend(batch_results)

            for result in batch_results:
                if result["similarity_score"] > best_score:
                    best_score = result["similarity_score"]
                    best_reference = result["reference_file"]
                    best_transcription = result["reference_text"]

                if best_score > 80.0:
                    logger.info(f"[{request_id}] Found excellent match: {best_score:.2f}%")
                    break
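        # Remove the per-request temporary files; failures here are non-fatal.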
        try:
            if temp_dir and os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)
                logger.debug(f"[{request_id}] Cleaned up temporary directory")
        except Exception as e:
            logger.warning(f"[{request_id}] Failed to clean up temp files: {str(e)}")
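        # Map the best similarity score to a pass/fail flag and feedback message.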
        is_correct = best_score >= 70.0

        if best_score >= 90.0:
            feedback = "Perfect pronunciation! Excellent job!"
        elif best_score >= 80.0:
            feedback = "Great pronunciation! Your accent is very good."
        elif best_score >= 70.0:
            feedback = "Good pronunciation. Keep practicing!"
        elif best_score >= 50.0:
            feedback = "Fair attempt. Try focusing on the syllables that differ from the sample."
        else:
            feedback = "Try again. Listen carefully to the sample pronunciation."

        logger.info(f"[{request_id}] Final evaluation results: score={best_score:.2f}%, is_correct={is_correct}")
        logger.info(f"[{request_id}] Feedback: '{feedback}'")
        logger.info(f"[{request_id}] Evaluation complete")

        results.sort(key=lambda x: x["similarity_score"], reverse=True)
        return jsonify({
            "is_correct": is_correct,
            "score": best_score,
            "feedback": feedback,
            "user_transcription": user_transcription,
            "best_reference_transcription": best_transcription,
            "reference_locator": reference_locator,
            "details": results
        })
    except Exception as e:
        logger.error(f"[{request_id}] Unhandled exception in evaluation endpoint: {str(e)}")
        logger.debug(f"[{request_id}] Stack trace: {traceback.format_exc()}")

        try:
            if temp_dir and os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)
        except Exception:
            pass

        return jsonify({"error": f"Internal server error: {str(e)}"}), 500


def setup_reference_patterns(reference_dir):
    """Create standard reference pattern directories if they don't exist."""
    reference_patterns = [
        "mayap_a_abak", "mayap_a_ugtu", "mayap_a_gatpanapun", "mayap_a_bengi",
        "komusta_ka", "malaus_ko_pu", "malaus_kayu", "agaganaka_da_ka",
        "pagdulapan_da_ka", "kaluguran_da_ka", "dakal_a_salamat", "panapaya_mu_ku"
    ]

    for pattern in reference_patterns:
        pattern_dir = os.path.join(reference_dir, pattern)
        if not os.path.exists(pattern_dir):
            try:
                os.makedirs(pattern_dir, exist_ok=True)
                logger.info(f"Created reference pattern directory: {pattern_dir}")
            except Exception as e:
                logger.error(f"Failed to create reference pattern directory {pattern_dir}: {str(e)}")