|
|
|
|
|
import os
|
|
import glob
|
|
import logging
|
|
import traceback
|
|
import tempfile
|
|
import shutil
|
|
from difflib import SequenceMatcher
|
|
import torch
|
|
import torchaudio
|
|
from pydub import AudioSegment
|
|
from flask import jsonify
|
|
from werkzeug.utils import secure_filename
|
|
from concurrent.futures import ThreadPoolExecutor
|
|
|
|
|
|
from translator import asr_model, asr_processor, LANGUAGE_CODES
|
|
|
|
|
|
logger = logging.getLogger("speech_api")
|
|
|
|
def calculate_similarity(text1, text2):
    """Return the similarity of two strings as a percentage (0-100).

    Comparison is case-insensitive: both inputs are lowercased, then
    scored with difflib.SequenceMatcher and scaled to a percentage.
    """
    normalized_a = text1.lower()
    normalized_b = text2.lower()
    return SequenceMatcher(None, normalized_a, normalized_b).ratio() * 100
|
|
|
|
def init_reference_audio(reference_dir, output_dir):
    """Ensure the reference-audio and output directories exist and log an
    inventory of available reference patterns.

    Args:
        reference_dir: Root directory expected to contain one sub-directory
            of .wav files per reference pattern; created if missing.
        output_dir: Working/output directory; created if missing.

    Any failure is logged rather than raised so startup can continue.
    """
    try:
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"π Created output directory: {output_dir}")

        if os.path.exists(reference_dir):
            # NOTE: the broken multi-line f-string has been rejoined onto
            # one line; the original emoji glyph was lost to mojibake.
            logger.info(f"β Found reference audio directory: {reference_dir}")

            # Each pattern is a sub-directory holding its reference .wav files.
            pattern_dirs = [d for d in os.listdir(reference_dir)
                            if os.path.isdir(os.path.join(reference_dir, d))]
            logger.info(f"π Found reference patterns: {pattern_dirs}")

            # Inventory pass only — counts are logged, nothing is returned.
            for pattern_dir_name in pattern_dirs:
                pattern_path = os.path.join(reference_dir, pattern_dir_name)
                wav_files = glob.glob(os.path.join(pattern_path, "*.wav"))
                logger.info(f"π Found {len(wav_files)} wav files in {pattern_dir_name}")
        else:
            logger.warning(f"β οΈ Reference audio directory not found: {reference_dir}")
            os.makedirs(reference_dir, exist_ok=True)
            logger.info(f"π Created reference audio directory: {reference_dir}")
    except Exception as e:
        logger.error(f"β Failed to set up reference audio directory: {str(e)}")
|
|
|
|
def handle_upload_reference(request, reference_dir, sample_rate):
    """Handle upload of reference audio files.

    Stores the uploaded file under ``reference_dir/<reference_word>/`` and,
    when the upload is not already a .wav, converts it to mono WAV at
    ``sample_rate`` (the original file is then deleted).

    Args:
        request: Flask request carrying an "audio" file and a
            "reference_word" form field.
        reference_dir: Root directory of reference patterns.
        sample_rate: Target frame rate for converted audio.

    Returns:
        A Flask JSON response; ``(response, status)`` tuples for errors.
    """
    try:
        if "audio" not in request.files:
            logger.warning("β οΈ Reference upload missing audio file")
            return jsonify({"error": "No audio file uploaded"}), 400

        reference_word = request.form.get("reference_word", "").strip()
        if not reference_word:
            logger.warning("β οΈ Reference upload missing reference word")
            return jsonify({"error": "No reference word provided"}), 400

        # Closed set of accepted Kapampangan phrases.
        reference_patterns = [
            "mayap_a_abak", "mayap_a_ugtu", "mayap_a_gatpanapun", "mayap_a_bengi",
            "komusta_ka", "malaus_ko_pu", "malaus_kayu", "agaganaka_da_ka",
            "pagdulapan_da_ka", "kaluguran_da_ka", "dakal_a_salamat", "panapaya_mu_ku"
        ]

        if reference_word not in reference_patterns:
            logger.warning(f"β οΈ Invalid reference word: {reference_word}")
            return jsonify({"error": f"Invalid reference word. Available: {reference_patterns}"}), 400

        pattern_dir = os.path.join(reference_dir, reference_word)
        os.makedirs(pattern_dir, exist_ok=True)

        audio_file = request.files["audio"]
        # FIX: secure_filename() may return "" (e.g. a filename made entirely
        # of unsafe characters), which would make file_path the directory
        # itself; fall back to a default name in that case.
        safe_name = secure_filename(audio_file.filename) or "reference.wav"
        file_path = os.path.join(pattern_dir, safe_name)
        audio_file.save(file_path)

        # Normalize non-WAV uploads to mono WAV at the target sample rate.
        if not file_path.lower().endswith('.wav'):
            base_path = os.path.splitext(file_path)[0]
            wav_path = f"{base_path}.wav"
            try:
                audio = AudioSegment.from_file(file_path)
                audio = audio.set_frame_rate(sample_rate).set_channels(1)
                audio.export(wav_path, format="wav")
                # Drop the original container once the WAV exists.
                os.unlink(file_path)
                file_path = wav_path
            except Exception as e:
                logger.error(f"β Reference audio conversion failed: {str(e)}")
                return jsonify({"error": f"Audio conversion failed: {str(e)}"}), 500

        # NOTE: broken multi-line f-string rejoined onto one line; the
        # original emoji glyph was lost to mojibake.
        logger.info(f"β Reference audio saved successfully for {reference_word}: {file_path}")

        # Report how many reference takes now exist for this word.
        references = glob.glob(os.path.join(pattern_dir, "*.wav"))
        return jsonify({
            "message": "Reference audio uploaded successfully",
            "reference_word": reference_word,
            "file": os.path.basename(file_path),
            "total_references": len(references)
        })

    except Exception as e:
        logger.error(f"β Unhandled exception in reference upload: {str(e)}")
        logger.debug(f"Stack trace: {traceback.format_exc()}")
        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
|
|
|
|
|
|
def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
    """Handle pronunciation evaluation requests.

    Transcribes the uploaded user audio with the ASR model, transcribes up
    to five reference recordings for the requested pattern, and scores the
    user against each reference by text similarity.

    Args:
        request: Flask request with an "audio" file, a "reference_locator"
            form field (pattern directory name) and optional "language".
        reference_dir: Root directory of reference patterns.
        output_dir: Directory used for per-request temp files.
        sample_rate: Target sampling rate for all audio.

    Returns:
        A Flask JSON response with score/feedback, or ``(response, status)``
        for error paths.
    """
    # NOTE(review): id(request) can be reused across requests after GC —
    # adequate for log correlation, not for uniqueness guarantees.
    request_id = f"req-{id(request)}"
    logger.info(f"[{request_id}] π Starting new pronunciation evaluation request")

    temp_dir = None

    if asr_model is None or asr_processor is None:
        logger.error(f"[{request_id}] β Evaluation endpoint called but ASR models aren't loaded")
        return jsonify({"error": "ASR model not available"}), 503

    try:
        if "audio" not in request.files:
            logger.warning(f"[{request_id}] β οΈ Evaluation request missing audio file")
            return jsonify({"error": "No audio file uploaded"}), 400

        audio_file = request.files["audio"]
        reference_locator = request.form.get("reference_locator", "").strip()
        language = request.form.get("language", "kapampangan").lower()

        if not reference_locator:
            logger.warning(f"[{request_id}] β οΈ No reference locator provided")
            return jsonify({"error": "Reference locator is required"}), 400

        reference_dir_path = os.path.join(reference_dir, reference_locator)
        logger.info(f"[{request_id}] π Reference directory path: {reference_dir_path}")

        if not os.path.exists(reference_dir_path):
            logger.warning(f"[{request_id}] β οΈ Reference directory not found: {reference_dir_path}")
            return jsonify({"error": f"Reference audio directory not found: {reference_locator}"}), 404

        reference_files = glob.glob(os.path.join(reference_dir_path, "*.wav"))
        logger.info(f"[{request_id}] π Found {len(reference_files)} reference files")

        if not reference_files:
            logger.warning(f"[{request_id}] β οΈ No reference audio files found in {reference_dir_path}")
            return jsonify({"error": f"No reference audio found for {reference_locator}"}), 404

        # Fall back to the raw language string when no code mapping exists.
        lang_code = LANGUAGE_CODES.get(language, language)
        logger.info(
            f"[{request_id}] π Evaluating pronunciation for reference: {reference_locator} with language code: {lang_code}")

        # Per-request scratch directory; removed in the finally block below.
        temp_dir = os.path.join(output_dir, f"temp_{request_id}")
        os.makedirs(temp_dir, exist_ok=True)

        user_audio_path = os.path.join(temp_dir, "user_audio_input.wav")
        with open(user_audio_path, 'wb') as f:
            f.write(audio_file.read())

        # --- Normalize the user's audio to mono WAV at sample_rate ---
        try:
            logger.info(f"[{request_id}] π Processing user audio file")
            audio = AudioSegment.from_file(user_audio_path)
            audio = audio.set_frame_rate(sample_rate).set_channels(1)

            processed_path = os.path.join(temp_dir, "processed_user_audio.wav")
            audio.export(processed_path, format="wav")

            user_waveform, sr = torchaudio.load(processed_path)
            user_waveform = user_waveform.squeeze().numpy()
            # NOTE: broken multi-line f-string rejoined onto one line.
            logger.info(f"[{request_id}] β User audio processed: {sr}Hz, length: {len(user_waveform)} samples")

            user_audio_path = processed_path
        except Exception as e:
            logger.error(f"[{request_id}] β Audio processing failed: {str(e)}")
            return jsonify({"error": f"Audio processing failed: {str(e)}"}), 500

        # --- Transcribe the user's audio ---
        try:
            logger.info(f"[{request_id}] π Transcribing user audio")
            inputs = asr_processor(
                user_waveform,
                sampling_rate=sample_rate,
                return_tensors="pt",
                language=lang_code
            )
            inputs = {k: v.to(asr_model.device) for k, v in inputs.items()}

            with torch.no_grad():
                logits = asr_model(**inputs).logits
            ids = torch.argmax(logits, dim=-1)[0]
            user_transcription = asr_processor.decode(ids)

            logger.info(f"[{request_id}] β User transcription: '{user_transcription}'")
        except Exception as e:
            logger.error(f"[{request_id}] β ASR inference failed: {str(e)}")
            return jsonify({"error": f"ASR inference failed: {str(e)}"}), 500

        # --- Score against reference recordings ---
        batch_size = 2
        results = []
        best_score = 0
        best_reference = None
        best_transcription = None

        # Cap work per request: at most five reference takes are checked.
        max_files_to_check = min(5, len(reference_files))
        reference_files = reference_files[:max_files_to_check]

        logger.info(f"[{request_id}] π Processing {len(reference_files)} reference files in batches of {batch_size}")

        def process_reference_file(ref_file):
            # Transcribe one reference file and score it against the user's
            # transcription; errors are reported as score-0 entries rather
            # than failing the whole request.
            ref_filename = os.path.basename(ref_file)
            try:
                ref_waveform, ref_sr = torchaudio.load(ref_file)
                if ref_sr != sample_rate:
                    ref_waveform = torchaudio.transforms.Resample(ref_sr, sample_rate)(ref_waveform)
                ref_waveform = ref_waveform.squeeze().numpy()

                inputs = asr_processor(
                    ref_waveform,
                    sampling_rate=sample_rate,
                    return_tensors="pt",
                    language=lang_code
                )
                inputs = {k: v.to(asr_model.device) for k, v in inputs.items()}

                with torch.no_grad():
                    logits = asr_model(**inputs).logits
                ids = torch.argmax(logits, dim=-1)[0]
                ref_transcription = asr_processor.decode(ids)

                similarity = calculate_similarity(user_transcription, ref_transcription)

                logger.info(
                    f"[{request_id}] π Similarity with {ref_filename}: {similarity:.2f}%, transcription: '{ref_transcription}'")

                return {
                    "reference_file": ref_filename,
                    "reference_text": ref_transcription,
                    "similarity_score": similarity
                }
            except Exception as e:
                logger.error(f"[{request_id}] β Error processing {ref_filename}: {str(e)}")
                return {
                    "reference_file": ref_filename,
                    "reference_text": "Error",
                    "similarity_score": 0,
                    "error": str(e)
                }

        with ThreadPoolExecutor(max_workers=batch_size) as executor:
            batch_results = list(executor.map(process_reference_file, reference_files))
        results.extend(batch_results)

        # Track the best match; stop scanning once a score clears 80%
        # (NOTE(review): this keeps the first >80% result even if a later
        # reference would score higher — preserved from the original).
        for result in batch_results:
            if result["similarity_score"] > best_score:
                best_score = result["similarity_score"]
                best_reference = result["reference_file"]
                best_transcription = result["reference_text"]

            if best_score > 80.0:
                logger.info(f"[{request_id}] π Found excellent match: {best_score:.2f}%")
                break

        # 70% similarity is the pass threshold.
        is_correct = best_score >= 70.0

        if best_score >= 90.0:
            feedback = "Perfect pronunciation! Excellent job!"
        elif best_score >= 80.0:
            feedback = "Great pronunciation! Your accent is very good."
        elif best_score >= 70.0:
            feedback = "Good pronunciation. Keep practicing!"
        elif best_score >= 50.0:
            feedback = "Fair attempt. Try focusing on the syllables that differ from the sample."
        else:
            feedback = "Try again. Listen carefully to the sample pronunciation."

        logger.info(f"[{request_id}] π Final evaluation results: score={best_score:.2f}%, is_correct={is_correct}")
        logger.info(f"[{request_id}] π Feedback: '{feedback}'")
        logger.info(f"[{request_id}] β Evaluation complete")

        # Best matches first in the detailed breakdown.
        results.sort(key=lambda x: x["similarity_score"], reverse=True)

        return jsonify({
            "is_correct": is_correct,
            "score": best_score,
            "feedback": feedback,
            "user_transcription": user_transcription,
            "best_reference_transcription": best_transcription,
            "reference_locator": reference_locator,
            "details": results
        })

    except Exception as e:
        logger.error(f"[{request_id}] β Unhandled exception in evaluation endpoint: {str(e)}")
        logger.debug(f"[{request_id}] Stack trace: {traceback.format_exc()}")
        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
    finally:
        # FIX: cleanup moved to finally so the temp directory is also removed
        # on the early-return error paths (audio processing / ASR failures),
        # which previously leaked it.
        try:
            if temp_dir and os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)
                logger.debug(f"[{request_id}] π§Ή Cleaned up temporary directory")
        except Exception as e:
            logger.warning(f"[{request_id}] β οΈ Failed to clean up temp files: {str(e)}")