RathodHarish's picture
Update app.py
ec8bfb9 verified
raw
history blame
5.42 kB
import hashlib
import os
from datetime import datetime, timezone

import gradio as gr
import librosa
import numpy as np
import torch
from simple_salesforce import Salesforce
from transformers import Wav2Vec2Processor, Wav2Vec2Model
# Salesforce credentials (store securely in environment variables)
# NOTE(review): the fallback defaults are placeholders and will never
# authenticate — in deployment the env vars must be set.
SF_USERNAME = os.getenv("SF_USERNAME", "your_salesforce_username")
SF_PASSWORD = os.getenv("SF_PASSWORD", "your_salesforce_password")
SF_SECURITY_TOKEN = os.getenv("SF_SECURITY_TOKEN", "your_salesforce_security_token")
SF_INSTANCE_URL = os.getenv("SF_INSTANCE_URL", "https://your-salesforce-instance.salesforce.com")
# Initialize Salesforce connection at import time; on failure the app keeps
# running with `sf = None` and analysis results are simply not persisted.
try:
    sf = Salesforce(
        username=SF_USERNAME,
        password=SF_PASSWORD,
        security_token=SF_SECURITY_TOKEN,
        instance_url=SF_INSTANCE_URL
    )
except Exception as e:
    print(f"Failed to connect to Salesforce: {str(e)}")
    sf = None
# Load Wav2Vec2 model for speech feature extraction (downloads weights on
# first run; loaded once at module level and reused for every request).
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
def compute_file_hash(file_path):
    """Return the hex MD5 digest of the file at ``file_path``.

    Used only as a uniqueness fingerprint for logging duplicate uploads,
    not for any security purpose.
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as fh:
        # Stream in 4 KiB chunks so large recordings never load whole.
        while chunk := fh.read(4096):
            digest.update(chunk)
    return digest.hexdigest()
def analyze_voice(audio_file):
    """Analyze a voice recording for rough, heuristic health indicators.

    Parameters
    ----------
    audio_file : str
        Path to an audio file in any format librosa can decode.

    Returns
    -------
    str
        Human-readable feedback text, or an error message if processing
        fails (exceptions are caught and reported rather than raised).

    Side effects: prints diagnostics to stdout, stores results in
    Salesforce when a connection is available, and deletes ``audio_file``
    after a successful analysis.
    """
    try:
        # Fingerprint the upload so duplicates can be spotted in the logs.
        file_hash = compute_file_hash(audio_file)
        print(f"Processing audio file: {audio_file}, Hash: {file_hash}")
        # Load at 16 kHz (what Wav2Vec2 was trained on) and peak-normalize;
        # the epsilon avoids division by zero on an all-silent clip.
        audio, sr = librosa.load(audio_file, sr=16000)
        audio = audio / (np.max(np.abs(audio)) + 1e-10)
        print(f"Audio shape: {audio.shape}, Sampling rate: {sr}, Duration: {len(audio)/sr:.2f}s, Mean: {np.mean(audio):.4f}, Std: {np.std(audio):.4f}")
        # Tokenize for Wav2Vec2 and run a forward pass without gradients.
        inputs = processor(audio, sampling_rate=16000, return_tensors="pt", padding=True)
        print(f"Input tensor shape: {inputs['input_values'].shape}, Sample values: {inputs['input_values'][0][:5]}")
        with torch.no_grad():
            outputs = model(**inputs)
        # Full hidden-state tensor; first axis is the batch (size 1 here).
        features = outputs.last_hidden_state.numpy()
        print(f"Features shape: {features.shape}, Sample values: {features[0, 0, :5]}")
        # BUG FIX: np.mean/np.std with axis=(1, 2) returned shape-(1,)
        # arrays, and formatting an ndarray with ':.4f' raises TypeError on
        # NumPy >= 1.25, so every request fell into the error path. Reduce
        # over the whole tensor and convert to plain Python floats.
        respiratory_score = float(np.mean(features))
        mental_health_score = float(np.std(features))
        print(f"Respiratory Score: {respiratory_score:.4f}, Mental Health Score: {mental_health_score:.4f}")
        # Heuristic thresholds; NOTE(review): not clinically validated.
        feedback = ""
        if respiratory_score > 0.1:
            feedback += f"Possible respiratory issue detected (score: {respiratory_score:.4f}); consult a doctor. "
        if mental_health_score > 0.2:  # Raised from 0.1 to reduce false positives
            feedback += f"Possible stress indicators detected (score: {mental_health_score:.4f}); consider professional advice. "
        if not feedback:
            feedback = "No significant health indicators detected."
        feedback += f"\n\n**Debug Info**: Respiratory Score = {respiratory_score:.4f}, Mental Health Score = {mental_health_score:.4f}, File Hash = {file_hash}"
        feedback += "\n**Disclaimer**: This is not a diagnostic tool. Consult a healthcare provider for medical advice."
        # Persist the result only when the Salesforce connection came up.
        if sf:
            store_in_salesforce(audio_file, feedback, respiratory_score, mental_health_score)
        # Best-effort cleanup of the uploaded temporary file.
        try:
            os.remove(audio_file)
            print(f"Deleted temporary audio file: {audio_file}")
        except Exception as e:
            print(f"Failed to delete audio file: {str(e)}")
        return feedback
    except Exception as e:
        return f"Error processing audio: {str(e)}"
def store_in_salesforce(audio_file, feedback, respiratory_score, mental_health_score):
    """Persist one analysis result as a HealthAssessment__c record.

    Parameters mirror the values produced by ``analyze_voice``. Failures
    are logged and swallowed so a Salesforce outage never breaks the
    user-facing analysis flow.
    """
    try:
        sf.HealthAssessment__c.create({
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
            # since Python 3.12 and returned a naive datetime.
            "AssessmentDate__c": datetime.now(timezone.utc).isoformat(),
            "Feedback__c": feedback,
            # float() guards against numpy scalar types leaking into the API payload.
            "RespiratoryScore__c": float(respiratory_score),
            "MentalHealthScore__c": float(mental_health_score),
            "AudioFileName__c": os.path.basename(audio_file)
        })
    except Exception as e:
        print(f"Failed to store in Salesforce: {str(e)}")
def test_with_sample_audio():
    """Run the analyzer against the bundled sample recording, if present."""
    sample_audio_path = "audio_samples/sample.wav"
    # Guard clause: without the sample file there is nothing to analyze.
    if not os.path.exists(sample_audio_path):
        return "Sample audio file not found."
    return analyze_voice(sample_audio_path)
# Gradio interface: a single audio input (mic recording or file upload)
# mapped straight onto analyze_voice, with the feedback text as output.
iface = gr.Interface(
    fn=analyze_voice,
    inputs=gr.Audio(type="filepath", label="Record or Upload Voice"),
    outputs=gr.Textbox(label="Health Assessment Feedback"),
    title="Health Voice Analyzer",
    description="Record or upload a voice sample for preliminary health assessment. Supports English, Spanish, Hindi, Mandarin."
)
if __name__ == "__main__":
    # Smoke-test with the bundled sample (prints feedback or "not found"),
    # then serve on all interfaces at the standard HF Spaces port.
    print(test_with_sample_audio())
    iface.launch(server_name="0.0.0.0", server_port=7860)