Update evaluate.py
evaluate.py CHANGED (+255 −183)
@@ -6,6 +6,8 @@ import logging
 import traceback
 import tempfile
 import shutil
+import json
+import fcntl
 from difflib import SequenceMatcher
 import torch
 import torchaudio
@@ -30,10 +32,17 @@ REFERENCE_CACHE = {}
 # Traditional evaluation cache for quick responses to identical requests
 EVALUATION_CACHE = {}
 
-#
+# Flags to manage preprocessing state
 PREPROCESSING_COMPLETE = False
+PREPROCESSING_ACTIVE = False
 PREPROCESSING_LOCK = threading.Lock()
 PREPROCESSING_THREAD = None
+PREPROCESSING_PAUSE = threading.Event()  # Event for pausing/resuming preprocessing
+PREPROCESSING_PAUSE.set()  # Start in "resumed" state
+
+# Lock file for ensuring only one preprocessing thread runs system-wide
+LOCK_FILE = "/tmp/speech_api_preprocessing.lock"
+_lock_file_handle = None  # Global variable to hold the lock file handle
 
 def calculate_similarity(text1, text2):
     """Calculate text similarity percentage."""
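Note: the new PREPROCESSING_PAUSE event is a standard pause gate: wait() returns immediately while the event is set and blocks once it is cleared. A minimal sketch of the pattern, standard library only (names here are illustrative, not from evaluate.py):

    import threading, time

    pause_gate = threading.Event()
    pause_gate.set()  # start in the "resumed" state

    def worker():
        for step in range(3):
            pause_gate.wait()        # blocks only while the gate is cleared
            print(f"working on step {step}")
            time.sleep(0.1)

    t = threading.Thread(target=worker)
    t.start()
    pause_gate.clear()               # pause the worker before its next step
    time.sleep(0.2)
    pause_gate.set()                 # resume
    t.join()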
@@ -46,6 +55,92 @@ def calculate_similarity(text1, text2):
     matcher = SequenceMatcher(None, clean1, clean2)
     return matcher.ratio() * 100
 
+def acquire_preprocessing_lock():
+    """Attempt to acquire the system-wide preprocessing lock using a lock file.
+    Returns True if lock was acquired, False otherwise"""
+    try:
+        # Check if lock file exists, create it if not
+        if not os.path.exists(LOCK_FILE):
+            with open(LOCK_FILE, 'w') as f:
+                f.write(str(os.getpid()))
+
+        # Try to get an exclusive lock on the file
+        lock_file = open(LOCK_FILE, 'r+')
+        try:
+            fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
+            # If we get here, we have the lock
+            # Update with current PID
+            lock_file.seek(0)
+            lock_file.write(str(os.getpid()))
+            lock_file.truncate()
+            lock_file.flush()
+
+            # Store the file handle to maintain the lock
+            global _lock_file_handle
+            _lock_file_handle = lock_file
+
+            logger.info("🔒 Acquired preprocessing lock")
+            return True
+        except IOError:
+            # Another process has the lock
+            lock_file.close()
+            logger.info("⚠️ Another process is already running preprocessing")
+            return False
+    except Exception as e:
+        logger.error(f"❌ Error acquiring preprocessing lock: {str(e)}")
+        return False
+
+def release_preprocessing_lock():
+    """Release the preprocessing lock if we have it"""
+    global _lock_file_handle
+    if '_lock_file_handle' in globals() and _lock_file_handle:
+        try:
+            fcntl.flock(_lock_file_handle, fcntl.LOCK_UN)
+            _lock_file_handle.close()
+            logger.info("🔓 Released preprocessing lock")
+        except Exception as e:
+            logger.error(f"❌ Error releasing preprocessing lock: {str(e)}")
+
+def save_preprocessing_state(reference_dir, state=None):
+    """Save the current preprocessing state to a file"""
+    state_file = os.path.join(reference_dir, ".preprocessing_state.json")
+    if state is None:
+        # Generate current state
+        state = {
+            "complete": PREPROCESSING_COMPLETE,
+            "active": PREPROCESSING_ACTIVE,
+            "patterns_cached": list(REFERENCE_CACHE.keys()),
+            "timestamp": time.time(),
+            "pid": os.getpid()
+        }
+
+    try:
+        with open(state_file, 'w') as f:
+            json.dump(state, f)
+    except Exception as e:
+        logger.error(f"❌ Error saving preprocessing state: {str(e)}")
+
+def load_preprocessing_state(reference_dir):
+    """Load preprocessing state from a file"""
+    state_file = os.path.join(reference_dir, ".preprocessing_state.json")
+    if not os.path.exists(state_file):
+        return None
+
+    try:
+        with open(state_file, 'r') as f:
+            return json.load(f)
+    except Exception as e:
+        logger.error(f"❌ Error loading preprocessing state: {str(e)}")
+        return None
+
+def pause_preprocessing():
+    """Pause preprocessing temporarily"""
+    PREPROCESSING_PAUSE.clear()
+
+def resume_preprocessing():
+    """Resume preprocessing after pause"""
+    PREPROCESSING_PAUSE.set()
+
 def setup_reference_patterns(reference_dir, sample_rate=16000):
     """Create standard reference pattern directories without dummy files"""
     reference_patterns = [
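Note: acquire_preprocessing_lock uses the classic Unix advisory-lock idiom: flock with LOCK_NB either succeeds at once or raises, and the lock is held for as long as the handle stays open. A standalone sketch of the same idiom (Unix-only; the path and names are illustrative, not from evaluate.py):

    import fcntl, os

    def try_single_instance(path="/tmp/demo_single_instance.lock"):
        """Return an open handle if we are the only instance, else None."""
        handle = open(path, "a+")          # create if missing, never truncate
        try:
            fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:                    # lock held by another process
            handle.close()
            return None
        handle.truncate(0)
        handle.write(str(os.getpid()))     # record who holds the lock
        handle.flush()
        return handle                      # keep open for the process lifetime

    lock = try_single_instance()
    print("got lock" if lock else "another instance is running")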
@@ -159,76 +254,134 @@ def preprocess_reference_file(ref_file, sample_rate, asr_model, asr_processor):
 
 def preprocess_all_references(reference_dir, sample_rate=16000):
     """Preprocess all reference audio files at startup"""
-    global PREPROCESSING_COMPLETE, REFERENCE_CACHE
+    global PREPROCESSING_COMPLETE, REFERENCE_CACHE, PREPROCESSING_ACTIVE
 
-
-
-
-    asr_model = get_asr_model()
-    asr_processor = get_asr_processor()
-
-    if asr_model is None or asr_processor is None:
-        logger.error("❌ Cannot preprocess reference audio - ASR models not loaded")
+    # Check if another process already has the lock
+    if not acquire_preprocessing_lock():
+        logger.info("⏩ Skipping preprocessing as another process is already handling it")
         return False
 
     try:
-
-
+        logger.info("🚀 Starting preprocessing of all reference audio files...")
+        with PREPROCESSING_LOCK:
+            PREPROCESSING_ACTIVE = True
 
-
-
+        # Save initial state
+        save_preprocessing_state(reference_dir)
 
-        #
-
-
-
-
+        # Get ASR model and processor
+        asr_model = get_asr_model()
+        asr_processor = get_asr_processor()
+
+        if asr_model is None or asr_processor is None:
+            logger.error("❌ Cannot preprocess reference audio - ASR models not loaded")
+            with PREPROCESSING_LOCK:
+                PREPROCESSING_ACTIVE = False
+            save_preprocessing_state(reference_dir)
+            release_preprocessing_lock()
+            return False
+
+        try:
+            pattern_dirs = [d for d in os.listdir(reference_dir)
+                            if os.path.isdir(os.path.join(reference_dir, d))]
 
-
-
+            total_processed = 0
+            start_time = time.time()
+
+            # Process each reference pattern directory
+            for pattern in pattern_dirs:
+                # Wait if processing is paused
+                PREPROCESSING_PAUSE.wait()
+
+                pattern_path = os.path.join(reference_dir, pattern)
+                reference_files = glob.glob(os.path.join(pattern_path, "*.wav"))
+                reference_files = [f for f in reference_files if "dummy_reference" not in f]
+
+                if not reference_files:
+                    continue
+
+                # Initialize cache for this pattern if needed
+                if pattern not in REFERENCE_CACHE:
+                    REFERENCE_CACHE[pattern] = {}
+
+                logger.info(f"🔄 Preprocessing {len(reference_files)} references for pattern: {pattern}")
+                pattern_start_time = time.time()
+
+                # Determine optimal number of workers
+                max_workers = min(os.cpu_count() or 4, len(reference_files), 5)
+
+                processed_in_pattern = 0
+                # Process files in parallel
+                with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                    tasks = {
+                        executor.submit(preprocess_reference_file, ref_file, sample_rate, asr_model, asr_processor):
+                            ref_file for ref_file in reference_files
+                    }
+
+                    for future in tasks:
+                        ref_file = tasks[future]
+                        try:
+                            result = future.result()
+                            if result:
+                                REFERENCE_CACHE[pattern][os.path.basename(ref_file)] = result
+                                total_processed += 1
+                                processed_in_pattern += 1
+                        except Exception as e:
+                            logger.error(f"❌ Failed to process {ref_file}: {str(e)}")
+
+                # Log completion of pattern processing
+                pattern_time = time.time() - pattern_start_time
+                logger.info(f"✅ Completed preprocessing pattern '{pattern}' - {processed_in_pattern}/{len(reference_files)} files in {pattern_time:.2f}s")
 
-
-
-                REFERENCE_CACHE[pattern] = {}
+                # Update state after each pattern
+                save_preprocessing_state(reference_dir)
 
-
+            elapsed_time = time.time() - start_time
+            logger.info(f"✅ Preprocessing complete! Processed {total_processed} reference files in {elapsed_time:.2f} seconds")
 
-
-
+            with PREPROCESSING_LOCK:
+                PREPROCESSING_COMPLETE = True
+                PREPROCESSING_ACTIVE = False
 
-        #
-
-
-
-                            ref_file for ref_file in reference_files
-                    }
-
-                for future in tasks:
-                    ref_file = tasks[future]
-                    try:
-                        result = future.result()
-                        if result:
-                            REFERENCE_CACHE[pattern][os.path.basename(ref_file)] = result
-                            total_processed += 1
-                    except Exception as e:
-                        logger.error(f"❌ Failed to process {ref_file}: {str(e)}")
-
-        elapsed_time = time.time() - start_time
-        logger.info(f"✅ Preprocessing complete! Processed {total_processed} reference files in {elapsed_time:.2f} seconds")
-
-        with PREPROCESSING_LOCK:
-            PREPROCESSING_COMPLETE = True
+            # Save final state
+            save_preprocessing_state(reference_dir)
+            release_preprocessing_lock()
+            return True
 
-
-
+        except Exception as e:
+            logger.error(f"❌ Error during reference preprocessing: {str(e)}")
+            logger.debug(f"Stack trace: {traceback.format_exc()}")
+            with PREPROCESSING_LOCK:
+                PREPROCESSING_ACTIVE = False
+            save_preprocessing_state(reference_dir)
+            release_preprocessing_lock()
+            return False
+
     except Exception as e:
-        logger.error(f"❌
-
+        logger.error(f"❌ Unhandled exception in preprocessing: {str(e)}")
+        with PREPROCESSING_LOCK:
+            PREPROCESSING_ACTIVE = False
+        save_preprocessing_state(reference_dir)
+        release_preprocessing_lock()
        return False
 
 def start_preprocessing_thread(reference_dir, sample_rate=16000):
     """Start preprocessing in a background thread"""
-    global PREPROCESSING_THREAD
+    global PREPROCESSING_THREAD, PREPROCESSING_ACTIVE
+
+    # Check if we're already preprocessing
+    with PREPROCESSING_LOCK:
+        if PREPROCESSING_ACTIVE:
+            logger.info("⏩ Skipping preprocessing start as it's already active")
+            return False
+
+    # Load previous state if available
+    state = load_preprocessing_state(reference_dir)
+    if state and state.get("complete", False):
+        logger.info("⏩ Skipping preprocessing as previous run was completed")
+        with PREPROCESSING_LOCK:
+            PREPROCESSING_COMPLETE = True
+        return False
 
     def preprocessing_worker():
         preprocess_all_references(reference_dir, sample_rate)
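Note: the hunk above maps each submitted future back to its input file and then iterates the dict in submission order. concurrent.futures.as_completed is the common variant that yields results as they finish instead; a small self-contained sketch with stand-in names:

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def process(path):
        """Stand-in for preprocess_reference_file."""
        return path.upper()

    files = ["a.wav", "b.wav", "c.wav"]
    results = {}
    with ThreadPoolExecutor(max_workers=2) as executor:
        tasks = {executor.submit(process, f): f for f in files}
        for future in as_completed(tasks):   # yields futures as they finish
            src = tasks[future]
            try:
                results[src] = future.result()
            except Exception as exc:
                print(f"failed on {src}: {exc}")
    print(results)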
@@ -238,6 +391,7 @@ def start_preprocessing_thread(reference_dir, sample_rate=16000):
     PREPROCESSING_THREAD.start()
 
     logger.info("🧵 Started reference audio preprocessing in background thread")
+    return True
 
 def init_reference_audio(reference_dir, output_dir):
     """Initialize reference audio directories and start preprocessing"""
@@ -359,114 +513,6 @@ def init_reference_audio(reference_dir, output_dir):
         logger.critical("💥 CRITICAL: Failed to create even a fallback directory")
         return reference_dir
 
-def handle_upload_reference(request, reference_dir, sample_rate):
-    """Handle upload of reference audio files and preprocess immediately"""
-    global REFERENCE_CACHE
-
-    try:
-        if "audio" not in request.files:
-            logger.warning("⚠️ Reference upload missing audio file")
-            return jsonify({"error": "No audio file uploaded"}), 400
-
-        reference_word = request.form.get("reference_word", "").strip()
-        if not reference_word:
-            logger.warning("⚠️ Reference upload missing reference word")
-            return jsonify({"error": "No reference word provided"}), 400
-
-        # Validate reference word
-        reference_patterns = [
-            "mayap_a_abak", "mayap_a_ugtu", "mayap_a_gatpanapun", "mayap_a_bengi",
-            "komusta_ka", "malaus_ko_pu", "malaus_kayu", "agaganaka_da_ka",
-            "pagdulapan_da_ka", "kaluguran_da_ka", "dakal_a_salamat", "panapaya_mu_ku",
-            "wa", "ali", "tuknang", "lagwa", "galo", "buri_ke_ini", "tara_na",
-            "nokarin_ka_ibat", "nokarin_ka_munta", "atiu_na_ku", "nanung_panayan_mu",
-            "mako_na_ka", "muli_ta_na", "nanu_ing_pengan_mu", "mekeni", "mengan_na_ka",
-            "munta_ka_karin", "magkanu_ini", "mimingat_ka", "mangan_ta_na", "lakwan_da_ka",
-            "nanu_maliari_kung_daptan_keka", "pilan_na_ka_banwa", "saliwan_ke_ini",
-            "makananu_munta_king", "adwa", "anam", "apat", "apulu", "atlu", "dinalan", "libu", "lima",
-            "metung", "pitu", "siyam", "walu", "masala", "madalumdum", "maragul", "marimla", "malagu", "marok", "mababa", "malapit", "matuling", "maputi",
-            "arung", "asbuk", "balugbug", "bitis", "buntuk", "butit", "gamat", "kuku", "salu", "tud",
-            "pisan", "dara", "achi", "apu", "ima", "tatang", "pengari", "koya", "kapatad", "wali",
-            "pasbul", "awang", "dagis", "bale", "ulas", "sambra", "sulu", "pitudturan", "luklukan", "ulnan"
-        ]
-
-        if reference_word not in reference_patterns:
-            logger.warning(f"⚠️ Invalid reference word: {reference_word}")
-            return jsonify({"error": f"Invalid reference word. Available: {reference_patterns}"}), 400
-
-        # Make sure we have a writable reference directory
-        if not os.path.exists(reference_dir):
-            reference_dir = os.path.join('/tmp', 'reference_audios')
-            os.makedirs(reference_dir, exist_ok=True)
-            logger.warning(f"⚠️ Using alternate reference directory for upload: {reference_dir}")
-
-        # Create directory for reference pattern if it doesn't exist
-        pattern_dir = os.path.join(reference_dir, reference_word)
-        os.makedirs(pattern_dir, exist_ok=True)
-
-        # Save the reference audio file
-        audio_file = request.files["audio"]
-        filename = secure_filename(audio_file.filename)
-
-        # Ensure filename has .wav extension
-        if not filename.lower().endswith('.wav'):
-            base_name = os.path.splitext(filename)[0]
-            filename = f"{base_name}.wav"
-
-        file_path = os.path.join(pattern_dir, filename)
-
-        # Create a temporary file first, then convert to WAV
-        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
-            audio_file.save(temp_file.name)
-            temp_path = temp_file.name
-
-        try:
-            # Process the audio file
-            audio = AudioSegment.from_file(temp_path)
-            audio = audio.set_frame_rate(sample_rate).set_channels(1)
-            audio.export(file_path, format="wav")
-            logger.info(f"✅ Reference audio saved successfully for {reference_word}: {file_path}")
-
-            # Clean up temp file
-            try:
-                os.unlink(temp_path)
-            except:
-                pass
-
-            # Immediately preprocess this new reference file and add to cache
-            asr_model = get_asr_model()
-            asr_processor = get_asr_processor()
-
-            if asr_model and asr_processor:
-                # Initialize cache for this pattern if needed
-                if reference_word not in REFERENCE_CACHE:
-                    REFERENCE_CACHE[reference_word] = {}
-
-                # Preprocess and add to cache
-                result = preprocess_reference_file(file_path, sample_rate, asr_model, asr_processor)
-                if result:
-                    REFERENCE_CACHE[reference_word][filename] = result
-                    logger.info(f"✅ New reference audio preprocessed and added to cache: {filename}")
-
-        except Exception as e:
-            logger.error(f"❌ Reference audio processing failed: {str(e)}")
-            return jsonify({"error": f"Audio processing failed: {str(e)}"}), 500
-
-        # Count how many references we have now
-        references = glob.glob(os.path.join(pattern_dir, "*.wav"))
-        return jsonify({
-            "message": "Reference audio uploaded successfully",
-            "reference_word": reference_word,
-            "file": filename,
-            "total_references": len(references),
-            "preprocessed": True
-        })
-
-    except Exception as e:
-        logger.error(f"❌ Unhandled exception in reference upload: {str(e)}")
-        logger.debug(f"Stack trace: {traceback.format_exc()}")
-        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
-
 def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
     """Handle pronunciation evaluation requests with preprocessing optimization"""
     global REFERENCE_CACHE, PREPROCESSING_COMPLETE
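Note: the removed upload handler normalized uploads with pydub (AudioSegment.from_file → set_frame_rate/set_channels → export). For reference, that conversion step in isolation (requires pydub and ffmpeg; the function name is illustrative):

    from pydub import AudioSegment  # pydub, as used by the removed handler

    def to_mono_wav(src_path, dst_path, sample_rate=16000):
        """Convert any ffmpeg-readable audio file to mono WAV at sample_rate."""
        audio = AudioSegment.from_file(src_path)
        audio = audio.set_frame_rate(sample_rate).set_channels(1)
        audio.export(dst_path, format="wav")

    # to_mono_wav("upload.m4a", "reference.wav")  # needs ffmpeg on PATH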
@@ -474,6 +520,9 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
     request_id = f"req-{id(request)}"
     logger.info(f"[{request_id}] 🚀 Starting pronunciation evaluation request")
 
+    # Pause preprocessing while handling user request
+    pause_preprocessing()
+
     temp_dir = None
 
     # Get the ASR model and processor using the getter functions
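Note: from here on, the handler calls resume_preprocessing() manually before every early return, which is easy to miss on a new exit path. A hedged alternative sketch, not part of this commit: a context manager that guarantees the resume in finally:

    from contextlib import contextmanager

    @contextmanager
    def preprocessing_paused():
        """Pause background preprocessing for the duration of a block."""
        pause_preprocessing()
        try:
            yield
        finally:
            resume_preprocessing()   # runs on every exit path, including errors

    # Hypothetical use inside the handler:
    # with preprocessing_paused():
    #     ...build and return the evaluation response...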
@@ -482,12 +531,16 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
 
     if asr_model is None or asr_processor is None:
         logger.error(f"[{request_id}] ❌ Evaluation endpoint called but ASR models aren't loaded")
+        # Resume preprocessing before returning
+        resume_preprocessing()
         return jsonify({"error": "ASR model not available"}), 503
 
     try:
         # Check for basic request requirements
         if "audio" not in request.files:
             logger.warning(f"[{request_id}] ⚠️ Evaluation request missing audio file")
+            # Resume preprocessing before returning
+            resume_preprocessing()
             return jsonify({"error": "No audio file uploaded"}), 400
 
         audio_file = request.files["audio"]
@@ -497,6 +550,8 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
         # Validate reference locator
         if not reference_locator:
             logger.warning(f"[{request_id}] ⚠️ No reference locator provided")
+            # Resume preprocessing before returning
+            resume_preprocessing()
             return jsonify({"error": "Reference locator is required"}), 400
 
         # OPTIMIZATION: Simple caching based on audio content hash + reference_locator
@@ -509,6 +564,8 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
         # Check in-memory cache using the module-level cache
         if cache_key in EVALUATION_CACHE:
             logger.info(f"[{request_id}] ✅ Using cached evaluation result")
+            # Resume preprocessing before returning
+            resume_preprocessing()
             return EVALUATION_CACHE[cache_key]
 
         # Construct full reference directory path
@@ -522,6 +579,8 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
                 logger.warning(f"[{request_id}] ⚠️ Created missing reference directory: {reference_dir_path}")
             except Exception as e:
                 logger.error(f"[{request_id}] ❌ Failed to create reference directory: {str(e)}")
+                # Resume preprocessing before returning
+                resume_preprocessing()
                 return jsonify({"error": f"Reference audio directory not found: {reference_locator}"}), 404
 
         # Check for reference files
@@ -533,6 +592,8 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
         # If no reference files exist, return a more detailed error message
         if not reference_files:
             logger.warning(f"[{request_id}] ⚠️ No valid reference audio files found in {reference_dir_path}")
+            # Resume preprocessing before returning
+            resume_preprocessing()
             return jsonify({
                 "error": f"No reference audio found for {reference_locator}",
                 "message": "Please upload a reference audio file before evaluation.",
@@ -566,6 +627,8 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
             user_audio_path = processed_path
         except Exception as e:
             logger.error(f"[{request_id}] ❌ Audio processing failed: {str(e)}")
+            # Resume preprocessing before returning
+            resume_preprocessing()
             return jsonify({"error": f"Audio processing failed: {str(e)}"}), 500
 
         # Transcribe user audio
@@ -575,6 +638,8 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
             logger.info(f"[{request_id}] ✅ User transcription: '{user_transcription}'")
         except Exception as e:
             logger.error(f"[{request_id}] ❌ ASR inference failed: {str(e)}")
+            # Resume preprocessing before returning
+            resume_preprocessing()
             return jsonify({"error": f"ASR inference failed: {str(e)}"}), 500
 
         # Check if we have preprocessed data for this reference locator
@@ -683,9 +748,6 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
                 additional_files = remaining_files[:5]  # Process max 5 more
 
                 # Process remaining files
-                additional_results = list(executor.map(process_reference_file, additional_files))
-                all_results.extend(additional_results)
-
         # Clean up temp files
         try:
             if temp_dir and os.path.exists(temp_dir):
@@ -738,7 +800,7 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
             "total_references_compared": len(all_results),
             "total_available_references": len(reference_files),
             "used_preprocessed_data": using_preprocessed,
-            "
+            "preprocessing_status": get_preprocessing_status()
         })
 
         # Cache the result for future identical requests
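Note: the next hunk evicts with EVALUATION_CACHE.pop(next(iter(EVALUATION_CACHE))), which removes the oldest *inserted* key thanks to dict insertion order, i.e. FIFO rather than LRU. The same idea with an explicit bound, standard library only (illustrative, not from evaluate.py):

    from collections import OrderedDict

    class BoundedCache(OrderedDict):
        """FIFO-evicting dict; calling move_to_end() on reads would make it LRU."""
        def __init__(self, maxsize=50):
            super().__init__()
            self.maxsize = maxsize

        def put(self, key, value):
            self[key] = value
            if len(self) > self.maxsize:
                self.popitem(last=False)   # evict the oldest inserted entry

    cache = BoundedCache(maxsize=2)
    cache.put("a", 1); cache.put("b", 2); cache.put("c", 3)
    print(list(cache))   # ['b', 'c']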
@@ -748,40 +810,50 @@ def handle_evaluation_request(request, reference_dir, output_dir, sample_rate):
             # Remove oldest entry (simplified approach)
             EVALUATION_CACHE.pop(next(iter(EVALUATION_CACHE)))
 
+        # Resume preprocessing before returning
+        resume_preprocessing()
         return response
 
-
-        logger.error(f"[{request_id}] ❌ Unhandled exception in evaluation endpoint: {str(e)}")
-        logger.debug(f"[{request_id}] Stack trace: {traceback.format_exc()}")
-
-        # Clean up on error
-        try:
-            if temp_dir and os.path.exists(temp_dir):
-                shutil.rmtree(temp_dir)
-        except:
-            pass
-
-        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
-
-# Add a new function to get preprocessing status
+# Add a new function to get preprocessing status
 def get_preprocessing_status():
     """Get the current status of reference audio preprocessing"""
-    global PREPROCESSING_COMPLETE, REFERENCE_CACHE
+    global PREPROCESSING_COMPLETE, REFERENCE_CACHE, PREPROCESSING_ACTIVE, PREPROCESSING_PAUSE
 
     with PREPROCESSING_LOCK:
         is_complete = PREPROCESSING_COMPLETE
+        is_active = PREPROCESSING_ACTIVE
 
     # Count total preprocessed references
     preprocessed_count = 0
+    reference_patterns_count = 0
+
     for pattern, files in REFERENCE_CACHE.items():
         preprocessed_count += len(files)
+        if len(files) > 0:
+            reference_patterns_count += 1
 
     # Check if preprocessing thread is alive
     thread_running = PREPROCESSING_THREAD is not None and PREPROCESSING_THREAD.is_alive()
 
+    # Check if preprocessing is currently paused
+    is_paused = not PREPROCESSING_PAUSE.is_set()
+
     return {
         "complete": is_complete,
+        "active": is_active,
+        "paused": is_paused,
         "preprocessed_files": preprocessed_count,
         "patterns_cached": len(REFERENCE_CACHE),
-        "
-
+        "completed_patterns": reference_patterns_count,
+        "thread_running": thread_running,
+        "pid": os.getpid()
+    }
+
+# Clean up resources when the module is unloaded
+def cleanup_resources():
+    """Clean up any resources when the module is unloaded/restarted"""
+    release_preprocessing_lock()
+
+# Register cleanup handler
+import atexit
+atexit.register(cleanup_resources)
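Note: get_preprocessing_status() returns a plain dict, so it can be exposed directly through Flask's jsonify, which this module already uses for responses. A hypothetical sketch (the route name and app object are assumptions, not part of this commit):

    from flask import Flask, jsonify
    from evaluate import get_preprocessing_status  # assumes evaluate.py is importable

    app = Flask(__name__)

    @app.route("/preprocessing_status")  # hypothetical route, not defined in this commit
    def preprocessing_status():
        return jsonify(get_preprocessing_status())

    # app.run(port=5000)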