Spaces:
Sleeping
Sleeping
Commit
·
1e1ecd3
1
Parent(s):
f4d629b
- Dockerfile +0 -0
- README.md +7 -7
- app.py +104 -0
- requirements.txt +7 -0
Dockerfile
ADDED
File without changes
|
README.md
CHANGED
@@ -1,10 +1,10 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
-
sdk: docker
|
|
|
|
|
7 |
pinned: false
|
8 |
---
|
9 |
-
|
10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: 'Stress Analysis API'
|
3 |
+
emoji: 'π'
|
4 |
+
colorFrom: 'blue'
|
5 |
+
colorTo: 'green'
|
6 |
+
sdk: 'docker'
|
7 |
+
sdk_version: '1.0.0'
|
8 |
+
app_file: 'app.py'
|
9 |
pinned: false
|
10 |
---
|
|
|
|
app.py
ADDED
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, File, UploadFile, HTTPException, Form
|
2 |
+
from fastapi.responses import JSONResponse
|
3 |
+
from pydantic import BaseModel
|
4 |
+
import librosa
|
5 |
+
import numpy as np
|
6 |
+
import tempfile
|
7 |
+
import os
|
8 |
+
import warnings
|
9 |
+
|
10 |
+
# Suppress librosa's UserWarnings (e.g. audio-backend fallback notices) so they
# don't flood the server logs on every analysis request.
warnings.filterwarnings("ignore", category=UserWarning, module='librosa')

app = FastAPI()
|
13 |
+
|
14 |
+
def extract_audio_features(audio_file_path):
    """Load an audio file and compute the low-level features used for stress scoring.

    Args:
        audio_file_path: Path to an audio file readable by ``librosa.load``.

    Returns:
        Tuple ``(f0, energy, speech_rate, mfccs, y, sr)`` where ``f0`` is the
        fundamental-frequency track with unvoiced (NaN) frames removed,
        ``energy`` the per-frame RMS, ``speech_rate`` the beat tempo converted
        from BPM to beats/second, ``mfccs`` a (13, frames) MFCC matrix, and
        ``y``/``sr`` the raw signal and its native sample rate.
    """
    signal, sample_rate = librosa.load(audio_file_path, sr=None)

    # Pitch via probabilistic YIN; 75-600 Hz spans typical speech F0.
    pitch_track, _, _ = librosa.pyin(signal, fmin=75, fmax=600)
    voiced_pitch = pitch_track[~np.isnan(pitch_track)]  # keep voiced frames only

    frame_energy = librosa.feature.rms(y=signal)[0]
    mfcc_matrix = librosa.feature.mfcc(y=signal, sr=sample_rate, n_mfcc=13)

    # Speech-rate proxy: beat tempo (BPM) scaled to events per second.
    onset_strength = librosa.onset.onset_strength(y=signal, sr=sample_rate)
    bpm, _ = librosa.beat.beat_track(onset_envelope=onset_strength, sr=sample_rate)
    rate = bpm / 60

    return voiced_pitch, frame_energy, rate, mfcc_matrix, signal, sample_rate
|
25 |
+
|
26 |
+
def analyze_voice_stress(audio_file_path):
    """Estimate a 0-100 stress level from a speech recording.

    Z-scores mean pitch, mean RMS energy and speech rate against fixed
    population norms, combines them with hand-tuned weights, and squashes the
    result through a sigmoid onto a 0-100 scale.

    Args:
        audio_file_path: Path to the audio file to analyze.

    Returns:
        Dict with JSON-serializable keys ``stress_level`` (float, 0-100),
        ``category`` (str) and ``gender`` (str, heuristic 'male'/'female').

    Raises:
        ValueError: If no voiced speech is detected in the recording.
    """
    f0, energy, speech_rate, mfccs, y, sr = extract_audio_features(audio_file_path)

    # Guard: with zero voiced frames np.mean(f0) is NaN, which would both
    # misclassify gender and yield a NaN stress level that cannot be
    # serialized to JSON. Fail loudly instead.
    if f0.size == 0:
        raise ValueError("No voiced speech detected in the audio file.")

    # Cast to plain floats so downstream JSON serialization never sees numpy scalars.
    mean_f0 = float(np.mean(f0))
    mean_energy = float(np.mean(energy))

    # Crude gender heuristic: 165 Hz roughly separates typical male/female F0.
    gender = 'male' if mean_f0 < 165 else 'female'

    # Reference norms used for z-scoring (assumed typical-speech values — TODO
    # confirm against a cited source).
    norm_mean_f0 = 110 if gender == 'male' else 220
    norm_std_f0 = 20
    norm_mean_energy = 0.02
    norm_std_energy = 0.005
    norm_speech_rate = 4.4
    norm_std_speech_rate = 0.5

    z_f0 = (mean_f0 - norm_mean_f0) / norm_std_f0
    z_energy = (mean_energy - norm_mean_energy) / norm_std_energy
    z_speech_rate = (float(speech_rate) - norm_speech_rate) / norm_std_speech_rate

    # Weighted combination, then sigmoid onto a 0-100 scale.
    stress_score = (0.4 * z_f0) + (0.4 * z_speech_rate) + (0.2 * z_energy)
    stress_level = float(1 / (1 + np.exp(-stress_score)) * 100)

    categories = ["Very Low Stress", "Low Stress", "Moderate Stress", "High Stress", "Very High Stress"]
    category_idx = min(int(stress_level / 20), 4)
    return {"stress_level": stress_level, "category": categories[category_idx], "gender": gender}
|
48 |
+
|
49 |
+
def analyze_text_stress(text: str):
    """Score stress from free text by matching known stress-related keywords.

    Matching is a case-insensitive substring test; each keyword counts at most
    once and contributes 20 points, capped at 100.

    Args:
        text: The text to analyze.

    Returns:
        Dict with ``stress_level`` (0-100) and ``category`` (str).
    """
    stress_keywords = ["anxious", "nervous", "stress", "panic", "tense"]
    lowered = text.lower()

    matched = sum(1 for keyword in stress_keywords if keyword in lowered)
    stress_level = min(matched * 20, 100)

    categories = ["Very Low Stress", "Low Stress", "Moderate Stress", "High Stress", "Very High Stress"]
    bucket = min(int(stress_level / 20), 4)
    return {"stress_level": stress_level, "category": categories[bucket]}
|
57 |
+
|
58 |
+
class StressResponse(BaseModel):
    """Response schema for the /analyze-stress/ endpoint.

    ``gender`` is only populated by audio analysis; text analysis omits it.
    """
    stress_level: float
    category: str
    # Annotated as `str | None` so the None default is type-correct — the
    # original `gender: str = None` declares a non-optional str with a None
    # default, which static checkers and strict pydantic validation reject.
    gender: str | None = None
|
62 |
+
|
63 |
+
@app.post("/analyze-stress/", response_model=StressResponse)
async def analyze_stress(
    file: UploadFile = File(None),
    file_path: str = Form(None),
    text: str = Form(None)
):
    """Analyze stress from an uploaded .opus file, a server-side path, or raw text.

    Exactly one input is used, with precedence ``file`` > ``file_path`` > ``text``.

    Raises:
        HTTPException 400: No usable input, non-.opus file, or missing path.
        HTTPException 500: Audio analysis failed.
    """
    # BUG FIX: use truthiness rather than `is None` so an empty string (e.g.
    # text="") cannot pass validation and fall through to an implicit
    # `return None` (HTTP 200 with a null body).
    if not file and not file_path and not text:
        raise HTTPException(status_code=400, detail="Either a file, file path, or text input is required.")

    # Audio analysis branch.
    if file or file_path:
        if file:
            if not file.filename.endswith(".opus"):
                raise HTTPException(status_code=400, detail="Only .opus files are supported.")
            # Spool the upload to disk: the analysis pipeline needs a real path.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".opus") as temp_file:
                temp_file.write(await file.read())
                temp_file_path = temp_file.name
        else:
            # NOTE(review): accepting an arbitrary server-side path lets callers
            # probe the filesystem — consider removing or sandboxing `file_path`
            # if this API is publicly exposed.
            if not file_path.endswith(".opus"):
                raise HTTPException(status_code=400, detail="Only .opus files are supported.")
            if not os.path.exists(file_path):
                raise HTTPException(status_code=400, detail="File path does not exist.")
            temp_file_path = file_path

        try:
            result = analyze_voice_stress(temp_file_path)
            return JSONResponse(content=result)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))
        finally:
            # Only delete the temp file we created; never remove a caller-supplied path.
            if file:
                os.remove(temp_file_path)

    # Text analysis branch (text is guaranteed non-empty by the guard above).
    result = analyze_text_stress(text)
    return JSONResponse(content=result)
|
100 |
+
|
101 |
+
if __name__ == "__main__":
    import uvicorn

    # Use the PORT environment variable for Render/Spaces compatibility.
    port = int(os.getenv("PORT", 7860))
    # BUG FIX: this file is app.py, so the uvicorn import string must be
    # "app:app" — "main:app" would fail at startup with
    # "Could not import module 'main'". (An import string, not the app object,
    # is required because reload=True needs to re-import the module.)
    uvicorn.run("app:app", host="0.0.0.0", port=port, reload=True)
|
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
uvicorn
|
3 |
+
pydantic
|
4 |
+
librosa
|
5 |
+
numpy
|
6 |
+
matplotlib
|
7 |
+
python-multipart
|