from fastapi import FastAPI, File, UploadFile, HTTPException, Depends, Header from pydantic import BaseModel import os from pymongo import MongoClient from langchain_community.embeddings import SentenceTransformerEmbeddings from langchain_community.vectorstores import MongoDBAtlasVectorSearch import uvicorn from dotenv import load_dotenv from fastapi.middleware.cors import CORSMiddleware from uuid import uuid4 # import httpx import joblib import librosa import numpy as np import pandas as pd import numpy as np import librosa.display import soundfile as sf import opensmile import ffmpeg import noisereduce as nr from tensorflow.keras.models import load_model from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import Sequential default_sample_rate=22050 def load(file_name, skip_seconds=0): return librosa.load(file_name, sr=None, res_type='kaiser_fast') # def preprocess_audio(audio_data, rate): # # Apply preprocessing steps # audio_data = nr.reduce_noise(y=audio_data, sr=rate) # audio_data = librosa.util.normalize(audio_data) # audio_data, _ = librosa.effects.trim(audio_data) # audio_data = librosa.resample(audio_data, orig_sr=rate, target_sr=default_sample_rate) # # audio_data = fix_length(audio_data) # rate = default_sample_rate # return audio_data, rate def extract_features(X, sample_rate): # Generate Mel-frequency cepstral coefficients (MFCCs) from a time series mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0) # Generates a Short-time Fourier transform (STFT) to use in the chroma_stft stft = np.abs(librosa.stft(X)) # Computes a chromagram from a waveform or power spectrogram. chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0) # Computes a mel-scaled spectrogram. mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T,axis=0) # Computes spectral contrast contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0) # Computes the tonal centroid features (tonnetz) tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X),sr=sample_rate).T,axis=0) # Concatenate all feature arrays into a single 1D array combined_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz]) return combined_features load_dotenv() # MongoDB connection MONGODB_ATLAS_CLUSTER_URI = os.getenv("MONGODB_ATLAS_CLUSTER_URI", None) client = MongoClient(MONGODB_ATLAS_CLUSTER_URI) DB_NAME = "quran_db" COLLECTION_NAME = "tafsir" ATLAS_VECTOR_SEARCH_INDEX_NAME = "langchain_index" MONGODB_COLLECTION = client[DB_NAME][COLLECTION_NAME] embeddings = SentenceTransformerEmbeddings(model_name="BAAI/bge-m3") vector_search = MongoDBAtlasVectorSearch.from_connection_string( MONGODB_ATLAS_CLUSTER_URI, DB_NAME + "." + COLLECTION_NAME, embeddings, index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, ) df = pd.read_csv('app/quran.csv') # FastAPI application setup app = FastAPI() app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) def index_file(filepath): """ Index each block in a file separated by double newlines for quick search. Returns a dictionary with key as content and value as block number. """ index = {} with open(filepath, 'r', encoding='utf-8') as file: content = file.read() # Read the whole file at once blocks = content.split("\n\n") # Split the content by double newlines for block_number, block in enumerate(blocks, 1): # Starting block numbers at 1 for human readability # Replace single newlines within blocks with space and strip leading/trailing whitespace formatted_block = ' '.join(block.split('\n')).strip() index[formatted_block] = block_number # if(block_number == 100): # print(formatted_block) # Print the 5th block return index def get_text_by_block_number(filepath, block_numbers): """ Retrieve specific blocks from a file based on block numbers, where each block is separated by '\n\n'. """ blocks_text = [] with open(filepath, 'r', encoding='utf-8') as file: content = file.read() # Read the whole file at once blocks = content.split("\n\n\n") # Split the content by double newlines for block_number, block in enumerate(blocks, 1): # Starting block numbers at 1 for human readability if block_number in block_numbers: splitted = block.split('\n') ayah = splitted[0] tafsir = splitted[1] print(block_number-1) print(df.iloc[block_number - 1]) # Replace single newlines within blocks with space and strip leading/trailing whitespace # ayah_info = await get_ayah_info(ayah) # This makes the API call row_data = df.iloc[block_number - 1].to_dict() blocks_text.append({ "tafsir": tafsir, "surah_no": row_data['surah_no'], "surah_name_en": row_data['surah_name_en'], "surah_name_ar": row_data['surah_name_ar'], "surah_name_roman": row_data['surah_name_roman'], "ayah_no_surah": row_data['ayah_no_surah'], "ayah_no_quran": row_data['ayah_no_quran'], "ayah_ar": row_data['ayah_ar'], "ayah_en": row_data['ayah_en'] }) if len(blocks_text) == len(block_numbers): # Stop reading once all required blocks are retrieved break return blocks_text # Existing API endpoints @app.get("/") async def read_root(): return {"message": "Welcome to our app"} # New Query model for the POST request body class Item(BaseModel): question: str EXPECTED_TOKEN = os.getenv("API_TOKEN") def verify_token(authorization: str = Header(None)): """ Dependency to verify the Authorization header contains the correct Bearer token. """ # Prefix for bearer token in the Authorization header prefix = "Bearer " # Check if the Authorization header is present and correctly formatted if not authorization or not authorization.startswith(prefix): raise HTTPException(status_code=401, detail="Unauthorized: Missing or invalid token") # Extract the token from the Authorization header token = authorization[len(prefix):] # Compare the extracted token to the expected token value if token != EXPECTED_TOKEN: raise HTTPException(status_code=401, detail="Unauthorized: Incorrect token") # New API endpoint to get an answer using the chain @app.post("/get_answer") async def get_answer(item: Item, token: str = Depends(verify_token)): try: # Perform the similarity search with the provided question matching_docs = vector_search.similarity_search(item.question, k=3) clean_answers = [doc.page_content.replace("\n", " ").strip() for doc in matching_docs] # Assuming 'search_file.txt' is where we want to search answers answers_index = index_file('app/quran_tafseer_formatted.txt') # Collect line numbers based on answers found line_numbers = [answers_index[answer] for answer in clean_answers if answer in answers_index] # Assuming 'retrieve_file.txt' is where we retrieve lines based on line numbers result_text = get_text_by_block_number('app/quran_tafseer.txt', line_numbers) print(result_text) return {"result_text": result_text} except Exception as e: # If there's an error, return a 500 error with the error's details raise HTTPException(status_code=500, detail=str(e)) # ------- CNN # Constants TARGET_DURATION = 3 # seconds for each audio clip SAMPLE_RATE = 44100 # sample rate to use N_MELS = 128 # number of Mel bands to generate HOP_LENGTH = 512 # number of samples between successive frames def preprocess_audio(file_path): try: # Load the audio file audio, sr = librosa.load(file_path, sr=SAMPLE_RATE) audio_length = len(audio)/SAMPLE_RATE except FileNotFoundError: print(f"Error: File '{file_path}' not found.") return None except Exception as e: print(f"Error loading audio file: {e}") return None # Check if audio signal is None if audio is None: print(f"Error: Audio signal is None for file '{file_path}'.") return None audio, _ = librosa.effects.trim(audio, top_db = 25) audio = nr.reduce_noise(y = audio, sr=SAMPLE_RATE, thresh_n_mult_nonstationary=1,stationary=False) # Determine how many 20-second clips can be made from the audio if audio_length < TARGET_DURATION: # If audio is shorter than 20 seconds, pad it pad_length = int((TARGET_DURATION - audio_length) * sr) padded_audio = np.pad(audio, (0, pad_length), mode='constant') return [padded_audio] # Return as a list for consistent output format else: # If audio is longer than or equal to 20 seconds, split it into 20-second clips clip_length = TARGET_DURATION * sr clips = [] for start in range(0, len(audio), clip_length): end = start + clip_length # Ensure the last clip has enough samples if end > len(audio): # Here you can choose to pad the last clip or simply not use it if it's too short last_clip = np.pad(audio[start:], (0, end - len(audio)), mode='constant') clips.append(last_clip) else: clips.append(audio[start:end]) return clips def generate_spectrogram(audio): # Generate a Mel-scaled spectrogram S = librosa.feature.melspectrogram(y=audio, sr=SAMPLE_RATE, n_mels=N_MELS, hop_length=HOP_LENGTH) S_dB = librosa.power_to_db(S, ref=np.max) # Normalize the spectrogram to be between 0 and 1 S_dB_norm = librosa.util.normalize(S_dB) return S_dB_norm cnn_model = load_model('app/cnn.h5') cnn_label_encoder = joblib.load('app/cnn_label_encoder.pkl') @app.post("/cnn") async def handle_cnn(file: UploadFile = File(...)): try: # Ensure that we are handling an MP3 file if file.content_type == "audio/mpeg" or file.content_type == "audio/mp3": file_extension = ".mp3" elif file.content_type == "audio/wav": file_extension = ".wav" else: raise HTTPException(status_code=400, detail="Invalid file type. Supported types: MP3, WAV.") # Read the file's content contents = await file.read() temp_filename = f"app/{uuid4().hex}{file_extension}" # Save file to a temporary file if needed or process directly from memory with open(temp_filename, "wb") as f: f.write(contents) spectrograms = [] clips = preprocess_audio(temp_filename) for clip in clips: spectrogram = generate_spectrogram(clip) if np.isnan(spectrogram).any() or np.isinf(spectrogram).any(): print("Invalid spectrogram detected") continue spectrograms.append(spectrogram) X = np.array(spectrograms) X = X[..., np.newaxis] # Make predictions predictions = cnn_model.predict(X) # Convert predictions to label indexes predicted_label_indexes = np.argmax(predictions, axis=1) # Convert label indexes to actual label names predicted_labels = cnn_label_encoder.inverse_transform(predicted_label_indexes) print('decoded', predicted_labels) # .tolist() # Clean up the temporary file os.remove(temp_filename) # Return a successful response with decoded predictions return {"message": "File processed successfully", "sheikh": predicted_labels} except Exception as e: print(e) # Handle possible exceptions raise HTTPException(status_code=500, detail=str(e)) # random forest model = joblib.load('app/1713661391.0946255_trained_model.joblib') pca = joblib.load('app/pca.pkl') scaler = joblib.load('app/1713661464.8205004_scaler.joblib') label_encoder = joblib.load('app/1713661470.6730225_label_encoder.joblib') def preprocess_audio(audio_data, rate): audio_data = nr.reduce_noise(y=audio_data, sr=rate) # remove silence # intervals = librosa.effects.split(audio_data, top_db=20) # # Concatenate non-silent intervals # audio_data = np.concatenate([audio_data[start:end] for start, end in intervals]) audio_data = librosa.util.normalize(audio_data) audio_data, _ = librosa.effects.trim(audio_data) audio_data = librosa.resample(audio_data, orig_sr=rate, target_sr=default_sample_rate) rate = default_sample_rate # y_trimmed, _ = librosa.effects.trim(y_no_gaps, top_db = 20) # D = librosa.stft(y) # S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max) # S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128*2,) # S_db_mel = librosa.amplitude_to_db(np.abs(S), ref=np.max) # Apply noise reduction (example using spectral subtraction) # y_denoised = librosa.effects.preemphasis(y_trimmed) # # Apply dynamic range compression # y_compressed = librosa.effects.preemphasis(y_denoised) # # Augmentation (example of time stretching) # # y_stretched = librosa.effects.time_stretch(y_compressed, rate=1.2) # # Silence Removal # y_silence_removed, _ = librosa.effects.trim(y_compressed) # # Equalization (example: apply high-pass filter) # y_equalized = librosa.effects.preemphasis(y_silence_removed) # # Define target sample rate # target_sr = sr # # Data Augmentation (example: pitch shifting) # y_pitch_shifted = librosa.effects.pitch_shift(y_normalized, sr=target_sr, n_steps=2) # Split audio into non-silent intervals # Normalize the audio signal # y_normalized = librosa.util.normalize(y_equalized) # Feature Extraction (example: MFCCs) # mfccs = librosa.feature.mfcc(y=y_normalized, sr=target_sr, n_mfcc=20) # output_file_path = os.path.join(save_dir, f"{file_name_without_extension}.{extension}") # Write the audio data to the output file in .wav format # sf.write(path, y_normalized, target_sr) return audio_data, rate # smile = opensmile.Smile( # feature_set=opensmile.FeatureSet.ComParE_2016, # feature_level=opensmile.FeatureLevel.Functionals, # ) # def extract_features(file_path): # # # Load the audio file # # y, sr = librosa.load(file_path, sr=None, dtype=np.float32) # # # Extract MFCCs # # mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=20) # # mfccs_mean = pd.Series(mfccs.mean(axis=1), index=[f'mfcc_{i}' for i in range(mfccs.shape[0])]) # # # Extract Spectral Features # # spectral_centroids = pd.Series(np.mean(librosa.feature.spectral_centroid(y=y, sr=sr)), index=['spectral_centroid']) # # spectral_rolloff = pd.Series(np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr)), index=['spectral_rolloff']) # # spectral_flux = pd.Series(np.mean(librosa.onset.onset_strength(y=y, sr=sr)), index=['spectral_flux']) # # spectral_contrast = pd.Series(np.mean(librosa.feature.spectral_contrast(S=np.abs(librosa.stft(y)), sr=sr), axis=1), index=[f'spectral_contrast_{i}' for i in range(librosa.feature.spectral_contrast(S=np.abs(librosa.stft(y)), sr=sr).shape[0])]) # # # Extract Pitch # # pitches, magnitudes = librosa.piptrack(y=y, sr=sr) # # pitch_mean = pd.Series(np.mean(pitches[pitches != 0]), index=['pitch_mean']) # Average only non-zero values # # # Extract Zero Crossings # # zero_crossings = pd.Series(np.mean(librosa.feature.zero_crossing_rate(y)), index=['zero_crossings']) # # # Combine all features into a single Series # # features = pd.concat([mfccs_mean, spectral_centroids, spectral_rolloff, spectral_flux, spectral_contrast, pitch_mean, zero_crossings]) # features = smile.process_file(file_path) # features_reshaped = features.squeeze() # # Ensure it's now a 2D structure suitable for DataFrame # print("New shape of features:", features_reshaped.shape) # all_data = pd.DataFrame([features_reshaped]) # return all_data def repair_mp3_with_ffmpeg_python(input_path, output_path): """Attempt to repair an MP3 file using FFmpeg.""" try: # Define the audio stream with the necessary conversion parameters audio = ( ffmpeg .input(input_path, nostdin=None, y=None) .output(output_path, vn=None, acodec='libmp3lame', ar='44100', ac='1', b='192k', af='aresample=44100') .global_args('-nostdin', '-y') # Applying global arguments .overwrite_output() ) # Execute the FFmpeg command ffmpeg.run(audio) print(f"File repaired and saved as {output_path}") except ffmpeg.Error as e: print(f"Failed to repair file {input_path}: {str(e.stderr)}") @app.post("/rf") async def handle_audio(file: UploadFile = File(...)): try: # Ensure that we are handling an MP3 file if file.content_type == "audio/mpeg" or file.content_type == "audio/mp3": file_extension = ".mp3" elif file.content_type == "audio/wav": file_extension = ".wav" else: raise HTTPException(status_code=400, detail="Invalid file type. Supported types: MP3, WAV.") # Read the file's content contents = await file.read() temp_filename = f"app/{uuid4().hex}{file_extension}" # Save file to a temporary file if needed or process directly from memory with open(temp_filename, "wb") as f: f.write(contents) audio_data, sr = load(temp_filename, skip_seconds=5) print("finished loading ", temp_filename) # Preprocess data audio_data, sr = preprocess_audio(audio_data, sr) print("finished processing ", temp_filename) # Extract features features = extract_features(audio_data, sr) # preprocess_audio(temp_filename, 'app') # repair_mp3_with_ffmpeg_python(temp_filename, temp_filename) # # Here you would add the feature extraction logic # features = extract_features(temp_filename) # print("Extracted Features:", features) # features = pca.transform(features) # features = np.array(features).reshape(1, -1) features = features.reshape(1, -1) features = scaler.transform(features) # proceed with an inference results = model.predict(features) # decoded_predictions = [label_encoder.classes_[i] for i in results] # # Decode the predictions using the label encoder decoded_predictions = label_encoder.inverse_transform(results) print('decoded', decoded_predictions[0]) # .tolist() # Clean up the temporary file os.remove(temp_filename) print({"message": "File processed successfully", "sheikh": decoded_predictions[0]}) # Return a successful response with decoded predictions return {"message": "File processed successfully", "sheikh": decoded_predictions[0]} except Exception as e: print(e) # Handle possible exceptions raise HTTPException(status_code=500, detail=str(e)) # if __name__ == "__main__": # uvicorn.run("main:app", host="0.0.0.0", port=8080, reload=False)