# Set cache directories first, before other imports import os import sys import logging import traceback # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) logger = logging.getLogger("speech_api") # Set all cache directories to locations within /tmp cache_dirs = { "HF_HOME": "/tmp/hf_home", "TRANSFORMERS_CACHE": "/tmp/transformers_cache", "HUGGINGFACE_HUB_CACHE": "/tmp/huggingface_hub_cache", "TORCH_HOME": "/tmp/torch_home", "XDG_CACHE_HOME": "/tmp/xdg_cache" } # Set environment variables and create directories for env_var, path in cache_dirs.items(): os.environ[env_var] = path try: os.makedirs(path, exist_ok=True) logger.info(f"๐Ÿ“ Created cache directory: {path}") except Exception as e: logger.error(f"โŒ Failed to create directory {path}: {str(e)}") # Now import the rest of the libraries try: import librosa from difflib import SequenceMatcher import glob import numpy as np import torch from pydub import AudioSegment import tempfile import torchaudio import soundfile as sf from flask import Flask, request, jsonify, send_file, g from flask_cors import CORS from transformers import Wav2Vec2ForCTC, AutoProcessor, VitsModel, AutoTokenizer from transformers import MarianMTModel, MarianTokenizer from werkzeug.utils import secure_filename logger.info("โœ… All required libraries imported successfully") except ImportError as e: logger.critical(f"โŒ Failed to import necessary libraries: {str(e)}") sys.exit(1) # Check CUDA availability if torch.cuda.is_available(): logger.info(f"๐Ÿš€ CUDA available: {torch.cuda.get_device_name(0)}") device = "cuda" else: logger.info("โš ๏ธ CUDA not available, using CPU") device = "cpu" app = Flask(__name__) CORS(app) # ASR Model ASR_MODEL_ID = "Coco-18/mms-asr-tgl-en-safetensor" logger.info(f"๐Ÿ”„ Loading ASR model: {ASR_MODEL_ID}") asr_processor = None asr_model = None try: asr_processor = AutoProcessor.from_pretrained( ASR_MODEL_ID, cache_dir=cache_dirs["TRANSFORMERS_CACHE"] ) logger.info("โœ… ASR processor loaded successfully") asr_model = Wav2Vec2ForCTC.from_pretrained( ASR_MODEL_ID, cache_dir=cache_dirs["TRANSFORMERS_CACHE"] ) asr_model.to(device) logger.info(f"โœ… ASR model loaded successfully on {device}") except Exception as e: logger.error(f"โŒ Error loading ASR model: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") logger.debug(f"Python version: {sys.version}") logger.debug(f"Current working directory: {os.getcwd()}") logger.debug(f"Temp directory exists: {os.path.exists('/tmp')}") logger.debug(f"Temp directory writeable: {os.access('/tmp', os.W_OK)}") # Language-specific configurations LANGUAGE_CODES = { "kapampangan": "pam", "filipino": "fil", # Replaced tagalog with filipino "english": "eng", "tagalog": "tgl", } # TTS Models (Kapampangan, Tagalog, English) TTS_MODELS = { "kapampangan": "facebook/mms-tts-pam", "tagalog": "facebook/mms-tts-tgl", "english": "facebook/mms-tts-eng" } tts_models = {} tts_processors = {} for lang, model_id in TTS_MODELS.items(): logger.info(f"๐Ÿ”„ Loading TTS model for {lang}: {model_id}") try: tts_processors[lang] = AutoTokenizer.from_pretrained( model_id, cache_dir=cache_dirs["TRANSFORMERS_CACHE"] ) logger.info(f"โœ… {lang} TTS processor loaded") tts_models[lang] = VitsModel.from_pretrained( model_id, cache_dir=cache_dirs["TRANSFORMERS_CACHE"] ) tts_models[lang].to(device) logger.info(f"โœ… {lang} TTS model loaded on {device}") except Exception as e: logger.error(f"โŒ Failed to load {lang} TTS model: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") tts_models[lang] = None # Replace the single translation model with a dictionary of models TRANSLATION_MODELS = { "pam-eng": "Coco-18/opus-mt-pam-en", "eng-pam": "Coco-18/opus-mt-en-pam", "tgl-eng": "Helsinki-NLP/opus-mt-tl-en", "eng-tgl": "Helsinki-NLP/opus-mt-en-tl", "phi": "Coco-18/opus-mt-phi" } logger.info(f"๐Ÿ”„ Loading Translation model: {TRANSLATION_MODELS}") # Initialize translation models and tokenizers translation_models = {} translation_tokenizers = {} for model_key, model_id in TRANSLATION_MODELS.items(): logger.info(f"๐Ÿ”„ Loading Translation model: {model_id}") try: translation_tokenizers[model_key] = MarianTokenizer.from_pretrained( model_id, cache_dir=cache_dirs["TRANSFORMERS_CACHE"] ) logger.info(f"โœ… Translation tokenizer loaded successfully for {model_key}") translation_models[model_key] = MarianMTModel.from_pretrained( model_id, cache_dir=cache_dirs["TRANSFORMERS_CACHE"] ) translation_models[model_key].to(device) logger.info(f"โœ… Translation model loaded successfully on {device} for {model_key}") except Exception as e: logger.error(f"โŒ Error loading Translation model for {model_key}: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") translation_models[model_key] = None translation_tokenizers[model_key] = None # Constants SAMPLE_RATE = 16000 OUTPUT_DIR = "/tmp/audio_outputs" REFERENCE_AUDIO_DIR = "./reference_audios" try: os.makedirs(OUTPUT_DIR, exist_ok=True) logger.info(f"๐Ÿ“ Created output directory: {OUTPUT_DIR}") except Exception as e: logger.error(f"โŒ Failed to create output directory: {str(e)}") @app.route("/", methods=["GET"]) def home(): return jsonify({"message": "Speech API is running", "status": "active"}) @app.route("/health", methods=["GET"]) def health_check(): # Initialize direct language pair statuses based on loaded models translation_status = {} # Add status for direct model pairs for lang_pair in ["pam-eng", "eng-pam", "tgl-eng", "eng-tgl"]: translation_status[lang_pair] = "loaded" if lang_pair in translation_models and translation_models[ lang_pair] is not None else "failed" # Add special phi model status phi_status = "loaded" if "phi" in translation_models and translation_models["phi"] is not None else "failed" translation_status["pam-fil"] = phi_status translation_status["fil-pam"] = phi_status translation_status["pam-tgl"] = phi_status # Using phi model but replacing tgl with fil translation_status["tgl-pam"] = phi_status # Using phi model but replacing tgl with fil health_status = { "api_status": "online", "asr_model": "loaded" if asr_model is not None else "failed", "tts_models": {lang: "loaded" if model is not None else "failed" for lang, model in tts_models.items()}, "translation_models": translation_status, "device": device } return jsonify(health_status) @app.route("/check_references", methods=["GET"]) def check_references(): """Endpoint to check if reference files exist and are accessible""" ref_patterns = ["mayap_a_abak", "mayap_a_ugtu", "mayap_a_gatpanapun", "mayap_a_bengi", "komusta_ka"] results = {} for pattern in ref_patterns: pattern_dir = os.path.join(REFERENCE_AUDIO_DIR, pattern) if os.path.exists(pattern_dir): wav_files = glob.glob(os.path.join(pattern_dir, "*.wav")) results[pattern] = { "exists": True, "path": pattern_dir, "file_count": len(wav_files), "files": [os.path.basename(f) for f in wav_files] } else: results[pattern] = { "exists": False, "path": pattern_dir } return jsonify({ "reference_audio_dir": REFERENCE_AUDIO_DIR, "directory_exists": os.path.exists(REFERENCE_AUDIO_DIR), "patterns": results }) @app.route("/asr", methods=["POST"]) def transcribe_audio(): if asr_model is None or asr_processor is None: logger.error("โŒ ASR endpoint called but models aren't loaded") return jsonify({"error": "ASR model not available"}), 503 try: if "audio" not in request.files: logger.warning("โš ๏ธ ASR request missing audio file") return jsonify({"error": "No audio file uploaded"}), 400 audio_file = request.files["audio"] language = request.form.get("language", "english").lower() if language not in LANGUAGE_CODES: logger.warning(f"โš ๏ธ Unsupported language requested: {language}") return jsonify( {"error": f"Unsupported language: {language}. Available: {list(LANGUAGE_CODES.keys())}"}), 400 lang_code = LANGUAGE_CODES[language] logger.info(f"๐Ÿ”„ Processing {language} audio for ASR") # Save the uploaded file temporarily with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(audio_file.filename)[-1]) as temp_audio: temp_audio.write(audio_file.read()) temp_audio_path = temp_audio.name logger.debug(f"๐Ÿ“ Temporary audio saved to {temp_audio_path}") # Convert to WAV if necessary wav_path = temp_audio_path if not audio_file.filename.lower().endswith(".wav"): wav_path = os.path.join(OUTPUT_DIR, "converted_audio.wav") logger.info(f"๐Ÿ”„ Converting audio to WAV format: {wav_path}") try: audio = AudioSegment.from_file(temp_audio_path) audio = audio.set_frame_rate(SAMPLE_RATE).set_channels(1) audio.export(wav_path, format="wav") except Exception as e: logger.error(f"โŒ Audio conversion failed: {str(e)}") return jsonify({"error": f"Audio conversion failed: {str(e)}"}), 500 # Load and process the WAV file try: waveform, sr = torchaudio.load(wav_path) logger.debug(f"โœ… Audio loaded: {wav_path} (Sample rate: {sr}Hz)") # Resample if needed if sr != SAMPLE_RATE: logger.info(f"๐Ÿ”„ Resampling audio from {sr}Hz to {SAMPLE_RATE}Hz") waveform = torchaudio.transforms.Resample(sr, SAMPLE_RATE)(waveform) waveform = waveform / torch.max(torch.abs(waveform)) except Exception as e: logger.error(f"โŒ Failed to load or process audio: {str(e)}") return jsonify({"error": f"Audio processing failed: {str(e)}"}), 500 # Process audio for ASR try: inputs = asr_processor( waveform.squeeze().numpy(), sampling_rate=SAMPLE_RATE, return_tensors="pt", language=lang_code ) inputs = {k: v.to(device) for k, v in inputs.items()} except Exception as e: logger.error(f"โŒ ASR preprocessing failed: {str(e)}") return jsonify({"error": f"ASR preprocessing failed: {str(e)}"}), 500 # Perform ASR try: with torch.no_grad(): logits = asr_model(**inputs).logits ids = torch.argmax(logits, dim=-1)[0] transcription = asr_processor.decode(ids) logger.info(f"โœ… Transcription ({language}): {transcription}") # Clean up temp files try: os.unlink(temp_audio_path) if wav_path != temp_audio_path: os.unlink(wav_path) except Exception as e: logger.warning(f"โš ๏ธ Failed to clean up temp files: {str(e)}") return jsonify({ "transcription": transcription, "language": language, "language_code": lang_code }) except Exception as e: logger.error(f"โŒ ASR inference failed: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"ASR inference failed: {str(e)}"}), 500 except Exception as e: logger.error(f"โŒ Unhandled exception in ASR endpoint: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"Internal server error: {str(e)}"}), 500 @app.route("/tts", methods=["POST"]) def generate_tts(): try: data = request.get_json() if not data: logger.warning("โš ๏ธ TTS endpoint called with no JSON data") return jsonify({"error": "No JSON data provided"}), 400 text_input = data.get("text", "").strip() language = data.get("language", "kapampangan").lower() if not text_input: logger.warning("โš ๏ธ TTS request with empty text") return jsonify({"error": "No text provided"}), 400 if language not in TTS_MODELS: logger.warning(f"โš ๏ธ TTS requested for unsupported language: {language}") return jsonify({"error": f"Invalid language. Available options: {list(TTS_MODELS.keys())}"}), 400 if tts_models[language] is None: logger.error(f"โŒ TTS model for {language} not loaded") return jsonify({"error": f"TTS model for {language} not available"}), 503 logger.info(f"๐Ÿ”„ Generating TTS for language: {language}, text: '{text_input}'") try: processor = tts_processors[language] model = tts_models[language] inputs = processor(text_input, return_tensors="pt") inputs = {k: v.to(device) for k, v in inputs.items()} except Exception as e: logger.error(f"โŒ TTS preprocessing failed: {str(e)}") return jsonify({"error": f"TTS preprocessing failed: {str(e)}"}), 500 # Generate speech try: with torch.no_grad(): output = model(**inputs).waveform waveform = output.squeeze().cpu().numpy() except Exception as e: logger.error(f"โŒ TTS inference failed: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"TTS inference failed: {str(e)}"}), 500 # Save to file try: output_filename = os.path.join(OUTPUT_DIR, f"{language}_output.wav") sampling_rate = model.config.sampling_rate sf.write(output_filename, waveform, sampling_rate) logger.info(f"โœ… Speech generated! File saved: {output_filename}") except Exception as e: logger.error(f"โŒ Failed to save audio file: {str(e)}") return jsonify({"error": f"Failed to save audio file: {str(e)}"}), 500 return jsonify({ "message": "TTS audio generated", "file_url": f"/download/{os.path.basename(output_filename)}", "language": language, "text_length": len(text_input) }) except Exception as e: logger.error(f"โŒ Unhandled exception in TTS endpoint: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"Internal server error: {str(e)}"}), 500 @app.route("/download/", methods=["GET"]) def download_audio(filename): file_path = os.path.join(OUTPUT_DIR, filename) if os.path.exists(file_path): logger.info(f"๐Ÿ“ค Serving audio file: {file_path}") return send_file(file_path, mimetype="audio/wav", as_attachment=True) logger.warning(f"โš ๏ธ Requested file not found: {file_path}") return jsonify({"error": "File not found"}), 404 @app.route("/translate", methods=["POST"]) def translate_text(): try: data = request.get_json() if not data: logger.warning("โš ๏ธ Translation endpoint called with no JSON data") return jsonify({"error": "No JSON data provided"}), 400 source_text = data.get("text", "").strip() source_language = data.get("source_language", "").lower() target_language = data.get("target_language", "").lower() if not source_text: logger.warning("โš ๏ธ Translation request with empty text") return jsonify({"error": "No text provided"}), 400 # Map language names to codes source_code = LANGUAGE_CODES.get(source_language, source_language) target_code = LANGUAGE_CODES.get(target_language, target_language) logger.info(f"๐Ÿ”„ Translating from {source_language} to {target_language}: '{source_text}'") # Special handling for pam-fil, fil-pam, pam-tgl and tgl-pam using the phi model use_phi_model = False actual_source_code = source_code actual_target_code = target_code # Check if we need to use the phi model with fil replacement if (source_code == "pam" and target_code == "fil") or (source_code == "fil" and target_code == "pam"): use_phi_model = True elif (source_code == "pam" and target_code == "tgl"): use_phi_model = True actual_target_code = "fil" # Replace tgl with fil for the phi model elif (source_code == "tgl" and target_code == "pam"): use_phi_model = True actual_source_code = "fil" # Replace tgl with fil for the phi model if use_phi_model: model_key = "phi" # Check if we have the phi model if model_key not in translation_models or translation_models[model_key] is None: logger.error(f"โŒ Translation model for {model_key} not loaded") return jsonify({"error": f"Translation model not available"}), 503 try: # Get the phi model and tokenizer model = translation_models[model_key] tokenizer = translation_tokenizers[model_key] # Prepend target language token to input input_text = f">>{actual_target_code}<< {source_text}" logger.info(f"๐Ÿ”„ Using phi model with input: '{input_text}'") # Tokenize the text tokenized = tokenizer(input_text, return_tensors="pt", padding=True) tokenized = {k: v.to(device) for k, v in tokenized.items()} # Generate translation with torch.no_grad(): translated = model.generate(**tokenized) # Decode the translation result = tokenizer.decode(translated[0], skip_special_tokens=True) logger.info(f"โœ… Translation result: '{result}'") return jsonify({ "translated_text": result, "source_language": source_language, "target_language": target_language }) except Exception as e: logger.error(f"โŒ Translation processing failed: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"Translation processing failed: {str(e)}"}), 500 else: # Create the regular language pair key for other language pairs lang_pair = f"{source_code}-{target_code}" # Check if we have a model for this language pair if lang_pair not in translation_models: logger.warning(f"โš ๏ธ No translation model available for {lang_pair}") return jsonify( {"error": f"Translation from {source_language} to {target_language} is not supported yet"}), 400 if translation_models[lang_pair] is None or translation_tokenizers[lang_pair] is None: logger.error(f"โŒ Translation model for {lang_pair} not loaded") return jsonify({"error": f"Translation model not available"}), 503 try: # Regular translation process for other language pairs model = translation_models[lang_pair] tokenizer = translation_tokenizers[lang_pair] # Tokenize the text tokenized = tokenizer(source_text, return_tensors="pt", padding=True) tokenized = {k: v.to(device) for k, v in tokenized.items()} # Generate translation with torch.no_grad(): translated = model.generate(**tokenized) # Decode the translation result = tokenizer.decode(translated[0], skip_special_tokens=True) logger.info(f"โœ… Translation result: '{result}'") return jsonify({ "translated_text": result, "source_language": source_language, "target_language": target_language }) except Exception as e: logger.error(f"โŒ Translation processing failed: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"Translation processing failed: {str(e)}"}), 500 except Exception as e: logger.error(f"โŒ Unhandled exception in translation endpoint: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"Internal server error: {str(e)}"}), 500 # Add this function to your app.py def calculate_similarity(text1, text2): """Calculate text similarity percentage.""" def clean_text(text): return text.lower() clean1 = clean_text(text1) clean2 = clean_text(text2) matcher = SequenceMatcher(None, clean1, clean2) return matcher.ratio() * 100 @app.route("/evaluate", methods=["POST"]) def evaluate_pronunciation(): request_id = f"req-{id(request)}" # Create unique ID for this request logger.info(f"[{request_id}] ๐Ÿ†• Starting new pronunciation evaluation request") if asr_model is None or asr_processor is None: logger.error(f"[{request_id}] โŒ Evaluation endpoint called but ASR models aren't loaded") return jsonify({"error": "ASR model not available"}), 503 try: if "audio" not in request.files: logger.warning(f"[{request_id}] โš ๏ธ Evaluation request missing audio file") return jsonify({"error": "No audio file uploaded"}), 400 audio_file = request.files["audio"] reference_locator = request.form.get("reference_locator", "").strip() language = request.form.get("language", "kapampangan").lower() # Validate reference locator if not reference_locator: logger.warning(f"[{request_id}] โš ๏ธ No reference locator provided") return jsonify({"error": "Reference locator is required"}), 400 # Construct full reference directory path reference_dir = os.path.join(REFERENCE_AUDIO_DIR, reference_locator) logger.info(f"[{request_id}] ๐Ÿ“ Reference directory path: {reference_dir}") if not os.path.exists(reference_dir): logger.warning(f"[{request_id}] โš ๏ธ Reference directory not found: {reference_dir}") return jsonify({"error": f"Reference audio directory not found: {reference_locator}"}), 404 reference_files = glob.glob(os.path.join(reference_dir, "*.wav")) logger.info(f"[{request_id}] ๐Ÿ“ Found {len(reference_files)} reference files") if not reference_files: logger.warning(f"[{request_id}] โš ๏ธ No reference audio files found in {reference_dir}") return jsonify({"error": f"No reference audio found for {reference_locator}"}), 404 lang_code = LANGUAGE_CODES.get(language, language) logger.info(f"[{request_id}] ๐Ÿ”„ Evaluating pronunciation for reference: {reference_locator} with language code: {lang_code}") # Create a request-specific temp directory to avoid conflicts temp_dir = os.path.join(OUTPUT_DIR, f"temp_{request_id}") os.makedirs(temp_dir, exist_ok=True) # Process user audio user_audio_path = os.path.join(temp_dir, "user_audio_input.wav") with open(user_audio_path, 'wb') as f: f.write(audio_file.read()) try: logger.info(f"[{request_id}] ๐Ÿ”„ Processing user audio file") audio = AudioSegment.from_file(user_audio_path) audio = audio.set_frame_rate(SAMPLE_RATE).set_channels(1) processed_path = os.path.join(temp_dir, "processed_user_audio.wav") audio.export(processed_path, format="wav") user_waveform, sr = torchaudio.load(processed_path) user_waveform = user_waveform.squeeze().numpy() logger.info(f"[{request_id}] โœ… User audio processed: {sr}Hz, length: {len(user_waveform)} samples") user_audio_path = processed_path except Exception as e: logger.error(f"[{request_id}] โŒ Audio processing failed: {str(e)}") return jsonify({"error": f"Audio processing failed: {str(e)}"}), 500 # Transcribe user audio try: logger.info(f"[{request_id}] ๐Ÿ”„ Transcribing user audio") inputs = asr_processor( user_waveform, sampling_rate=SAMPLE_RATE, return_tensors="pt", language=lang_code ) inputs = {k: v.to(device) for k, v in inputs.items()} with torch.no_grad(): logits = asr_model(**inputs).logits ids = torch.argmax(logits, dim=-1)[0] user_transcription = asr_processor.decode(ids) logger.info(f"[{request_id}] โœ… User transcription: '{user_transcription}'") except Exception as e: logger.error(f"[{request_id}] โŒ ASR inference failed: {str(e)}") return jsonify({"error": f"ASR inference failed: {str(e)}"}), 500 # Process reference files in batches batch_size = 2 # Process 2 files at a time - adjust based on your hardware results = [] best_score = 0 best_reference = None best_transcription = None # Use this if you want to limit the number of files to process max_files_to_check = min(5, len(reference_files)) # Check at most 5 files reference_files = reference_files[:max_files_to_check] logger.info(f"[{request_id}] ๐Ÿ”„ Processing {len(reference_files)} reference files in batches of {batch_size}") # Function to process a single reference file def process_reference_file(ref_file): ref_filename = os.path.basename(ref_file) try: # Load and resample reference audio ref_waveform, ref_sr = torchaudio.load(ref_file) if ref_sr != SAMPLE_RATE: ref_waveform = torchaudio.transforms.Resample(ref_sr, SAMPLE_RATE)(ref_waveform) ref_waveform = ref_waveform.squeeze().numpy() # Transcribe reference audio inputs = asr_processor( ref_waveform, sampling_rate=SAMPLE_RATE, return_tensors="pt", language=lang_code ) inputs = {k: v.to(device) for k, v in inputs.items()} with torch.no_grad(): logits = asr_model(**inputs).logits ids = torch.argmax(logits, dim=-1)[0] ref_transcription = asr_processor.decode(ids) # Calculate similarity similarity = calculate_similarity(user_transcription, ref_transcription) logger.info(f"[{request_id}] ๐Ÿ“Š Similarity with {ref_filename}: {similarity:.2f}%, transcription: '{ref_transcription}'") return { "reference_file": ref_filename, "reference_text": ref_transcription, "similarity_score": similarity } except Exception as e: logger.error(f"[{request_id}] โŒ Error processing {ref_filename}: {str(e)}") return { "reference_file": ref_filename, "reference_text": "Error", "similarity_score": 0, "error": str(e) } # Process files in batches using ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(max_workers=batch_size) as executor: batch_results = list(executor.map(process_reference_file, reference_files)) results.extend(batch_results) # Find the best result for result in batch_results: if result["similarity_score"] > best_score: best_score = result["similarity_score"] best_reference = result["reference_file"] best_transcription = result["reference_text"] # Exit early if we found a very good match (optional) if best_score > 80.0: logger.info(f"[{request_id}] ๐Ÿ Found excellent match: {best_score:.2f}%") break # Clean up temp files try: import shutil shutil.rmtree(temp_dir) logger.debug(f"[{request_id}] ๐Ÿงน Cleaned up temporary directory") except Exception as e: logger.warning(f"[{request_id}] โš ๏ธ Failed to clean up temp files: {str(e)}") # Determine feedback based on score is_correct = best_score >= 70.0 if best_score >= 90.0: feedback = "Perfect pronunciation! Excellent job!" elif best_score >= 80.0: feedback = "Great pronunciation! Your accent is very good." elif best_score >= 70.0: feedback = "Good pronunciation. Keep practicing!" elif best_score >= 50.0: feedback = "Fair attempt. Try focusing on the syllables that differ from the sample." else: feedback = "Try again. Listen carefully to the sample pronunciation." logger.info(f"[{request_id}] ๐Ÿ“Š Final evaluation results: score={best_score:.2f}%, is_correct={is_correct}") logger.info(f"[{request_id}] ๐Ÿ“ Feedback: '{feedback}'") logger.info(f"[{request_id}] โœ… Evaluation complete") # Sort results by score descending results.sort(key=lambda x: x["similarity_score"], reverse=True) return jsonify({ "is_correct": is_correct, "score": best_score, "feedback": feedback, "user_transcription": user_transcription, "best_reference_transcription": best_transcription, "reference_locator": reference_locator, "details": results }) except Exception as e: logger.error(f"[{request_id}] โŒ Unhandled exception in evaluation endpoint: {str(e)}") logger.debug(f"[{request_id}] Stack trace: {traceback.format_exc()}") # Clean up on error try: import shutil shutil.rmtree(temp_dir) except: pass return jsonify({"error": f"Internal server error: {str(e)}"}), 500 @app.route("/upload_reference", methods=["POST"]) def upload_reference_audio(): try: if "audio" not in request.files: logger.warning("โš ๏ธ Reference upload missing audio file") return jsonify({"error": "No audio file uploaded"}), 400 reference_word = request.form.get("reference_word", "").strip() if not reference_word: logger.warning("โš ๏ธ Reference upload missing reference word") return jsonify({"error": "No reference word provided"}), 400 # Validate reference word reference_patterns = [ "mayap_a_abak", "mayap_a_ugtu", "mayap_a_gatpanapun", "mayap_a_bengi", "komusta_ka", "malaus_ko_pu","malaus_kayu","agaganaka_da_ka", "pagdulapan_da_ka","kaluguran_da_ka","dakal_a_salamat","panapaya_mu_ku" ] if reference_word not in reference_patterns: logger.warning(f"โš ๏ธ Invalid reference word: {reference_word}") return jsonify({"error": f"Invalid reference word. Available: {reference_patterns}"}), 400 # Create directory for reference pattern if it doesn't exist pattern_dir = os.path.join(REFERENCE_AUDIO_DIR, reference_word) os.makedirs(pattern_dir, exist_ok=True) # Save the reference audio file audio_file = request.files["audio"] file_path = os.path.join(pattern_dir, secure_filename(audio_file.filename)) audio_file.save(file_path) # Convert to WAV if not already in that format if not file_path.lower().endswith('.wav'): base_path = os.path.splitext(file_path)[0] wav_path = f"{base_path}.wav" try: audio = AudioSegment.from_file(file_path) audio = audio.set_frame_rate(SAMPLE_RATE).set_channels(1) audio.export(wav_path, format="wav") # Remove original file if conversion successful os.unlink(file_path) file_path = wav_path except Exception as e: logger.error(f"โŒ Reference audio conversion failed: {str(e)}") return jsonify({"error": f"Audio conversion failed: {str(e)}"}), 500 logger.info(f"โœ… Reference audio saved successfully for {reference_word}: {file_path}") # Count how many references we have now references = glob.glob(os.path.join(pattern_dir, "*.wav")) return jsonify({ "message": "Reference audio uploaded successfully", "reference_word": reference_word, "file": os.path.basename(file_path), "total_references": len(references) }) except Exception as e: logger.error(f"โŒ Unhandled exception in reference upload: {str(e)}") logger.debug(f"Stack trace: {traceback.format_exc()}") return jsonify({"error": f"Internal server error: {str(e)}"}), 500 def init_reference_audio(): try: # Create the output directory first os.makedirs(OUTPUT_DIR, exist_ok=True) logger.info(f"๐Ÿ“ Created output directory: {OUTPUT_DIR}") # Check if the reference audio directory exists in the repository if os.path.exists(REFERENCE_AUDIO_DIR): logger.info(f"โœ… Found reference audio directory: {REFERENCE_AUDIO_DIR}") # Log the contents to verify pattern_dirs = [d for d in os.listdir(REFERENCE_AUDIO_DIR) if os.path.isdir(os.path.join(REFERENCE_AUDIO_DIR, d))] logger.info(f"๐Ÿ“ Found reference patterns: {pattern_dirs}") # Check each pattern directory for wav files for pattern_dir_name in pattern_dirs: pattern_path = os.path.join(REFERENCE_AUDIO_DIR, pattern_dir_name) wav_files = glob.glob(os.path.join(pattern_path, "*.wav")) logger.info(f"๐Ÿ“ Found {len(wav_files)} wav files in {pattern_dir_name}") else: logger.warning(f"โš ๏ธ Reference audio directory not found: {REFERENCE_AUDIO_DIR}") except Exception as e: logger.error(f"โŒ Failed to set up reference audio directory: {str(e)}") # Add an initialization route that will be called before the first request @app.before_request def before_request(): if not hasattr(g, 'initialized'): init_reference_audio() g.initialized = True if __name__ == "__main__": init_reference_audio() logger.info("๐Ÿš€ Starting Speech API server") logger.info(f"๐Ÿ“Š System status: ASR model: {'โœ…' if asr_model else 'โŒ'}") for lang, model in tts_models.items(): logger.info(f"๐Ÿ“Š TTS model {lang}: {'โœ…' if model else 'โŒ'}") app.run(host="0.0.0.0", port=7860, debug=True)