seba3y committed on commit d251719 · 1 Parent(s): 4609f8e

Upload 3 files

Files changed (3):
  1. app.py +42 -0
  2. audio.py +105 -0
  3. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,42 @@
+ from flask import Flask, request, jsonify, render_template
+ from flask_cors import CORS
+ from werkzeug.utils import secure_filename
+ import os
+
+ from audio import predict_all
+
+ app = Flask(__name__)
+ CORS(app)
+
+ @app.route('/')
+ def index():
+     return render_template('index.html')
+
+ @app.route('/upload-audio', methods=['POST'])
+ def upload_audio():
+     if 'audio' not in request.files:
+         return "No audio part", 400
+
+     file = request.files['audio']
+     if file.filename == '':
+         return "No selected file", 400
+
+     if file:
+         # Sanitize the client-supplied name before writing it to disk
+         filename = os.path.join('uploads', secure_filename(file.filename))
+         file.save(filename)
+
+         # predict_all returns a (label, confidence) pair for each score
+         accuracy, fluency = predict_all(filename)
+         response = {
+             "Accuracy": [accuracy[0], accuracy[1]],
+             "Fluency": [fluency[0], fluency[1]],
+         }
+         return jsonify(response), 200
+
+     return "Error processing request", 400
+
+ if __name__ == '__main__':
+     os.makedirs('uploads', exist_ok=True)  # create the upload directory
+     app.run(debug=False)
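
For reference, a minimal client call against the /upload-audio endpoint above could look like the following sketch. The requests package, the default Flask port 5000, and the sample.wav file are all assumptions, not part of this commit:

# Sketch: exercise /upload-audio on a locally running app.py.
# `requests`, the port, and sample.wav are assumptions, not part of the commit.
import requests

with open('sample.wav', 'rb') as f:
    resp = requests.post('http://127.0.0.1:5000/upload-audio',
                         files={'audio': ('sample.wav', f, 'audio/wav')})
print(resp.status_code)
print(resp.json())  # e.g. {"Accuracy": [label, score], "Fluency": [label, score]}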
audio.py ADDED
@@ -0,0 +1,105 @@
+ from transformers import AutoFeatureExtractor, WhisperForAudioClassification
+ import torch
+ import librosa
+
+ device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
+ print('Run on:', device)
+
+ SAMPLING_RATE = 16000
+ MAX_LENGTH = SAMPLING_RATE * 10  # 10 seconds per segment
+
+ fluency_model_name = "seba3y/whisper-tiny-fluency"
+ acc_model_name = 'seba3y/whisper-tiny-accuracy'
+
+ fluency_feature = AutoFeatureExtractor.from_pretrained(fluency_model_name)
+ fluency_model = WhisperForAudioClassification.from_pretrained(fluency_model_name).to(device)
+ acc_feature = AutoFeatureExtractor.from_pretrained(acc_model_name)
+ acc_model = WhisperForAudioClassification.from_pretrained(acc_model_name).to(device)
+
+
+ def load_audio_from_path(file_path, feature_extractor, max_length=MAX_LENGTH):
+     audio, _ = librosa.load(file_path, sr=SAMPLING_RATE)
+     audio_length = len(audio)
+     # Split audio longer than max_length into chunks, extract features per chunk
+     segments = []
+     for start in range(0, audio_length, max_length):
+         end = min(start + max_length, audio_length)
+         segment = audio[start:end]
+         inputs = feature_extractor(segment, sampling_rate=SAMPLING_RATE, return_tensors="pt",
+                                    max_length=max_length, padding="max_length").input_features
+         segments.append(inputs)
+     return segments
+
+
+ @torch.no_grad()
+ def model_generate(inputs, model):
+     logits = model(inputs.to(device))[0]
+     return logits
+
+
+ def postprocess(logits, model, noise=1):
+     # Rescale the logits with a hand-tuned per-class weighting before the softmax
+     logits = noise * (logits.cpu() + 0.9)
+     scores = logits.softmax(-1)[0]
+     ids = torch.argmax(scores, dim=-1).item()
+     scores = scores.tolist()
+     label = model.config.id2label[ids]
+     return label, round(scores[ids], 2)
+
+
+ def predict(segments, model, noise):
+     all_logits = []
+     for segment in segments:
+         all_logits.append(model_generate(segment, model))
+     # Aggregate the per-segment results (simple average)
+     avg_logits = torch.mean(torch.stack(all_logits), dim=0)
+     return postprocess(avg_logits, model, noise)
+
+
+ def predict_accuracy(file_path):
+     Anoise = torch.tensor([100.618, .0118, 10.945, 30.419])
+     segments = load_audio_from_path(file_path, acc_feature)
+     return predict(segments, acc_model, Anoise)
+
+
+ def predict_fluency(file_path):
+     Fnoise = torch.tensor([5.618, 4.518, 2.145, 0.219])
+     segments = load_audio_from_path(file_path, fluency_feature)
+     return predict(segments, fluency_model, Fnoise)
+
+
+ def predict_all(file_path):
+     Anoise = torch.tensor([5.618, 1.518, 10.945, 100.419])
+     Fnoise = torch.tensor([3.618, 5.518, 3.045, 0.49])
+     segments = load_audio_from_path(file_path, acc_feature)
+     acc = predict(segments, acc_model, Anoise)
+     fle = predict(segments, fluency_model, Fnoise)
+     return acc, fle
+
+
+ if __name__ == '__main__':
+     file_path = r'uploads\audio.wav'
+     print('Fluency of the speech:')
+     print('=' * 25)
+     print(predict_fluency(file_path))
+     print()
+     print('Pronunciation Accuracy of the speech:')
+     print('=' * 25)
+     print(predict_accuracy(file_path))
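
As a quick smoke test of the pipeline above without a real recording, one could synthesize a short tone and run it through predict_all. This is a sketch, assuming numpy and soundfile are installed; neither appears in requirements.txt:

# Sketch: smoke-test predict_all() on a synthetic one-second tone.
# numpy and soundfile are assumed extras; they are not in requirements.txt.
import numpy as np
import soundfile as sf
from audio import predict_all, SAMPLING_RATE

t = np.arange(SAMPLING_RATE) / SAMPLING_RATE
sf.write('tone.wav', (0.1 * np.sin(2 * np.pi * 220 * t)).astype(np.float32), SAMPLING_RATE)

(acc_label, acc_score), (flu_label, flu_score) = predict_all('tone.wav')
print('Accuracy:', acc_label, acc_score)
print('Fluency:', flu_label, flu_score)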
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ transformers
+
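
Note that app.py also imports flask and flask_cors, and audio.py reads audio from disk with librosa, none of which are listed above. Assuming those imports are meant to be satisfied by this file, a complete requirements.txt would presumably look closer to:

torch
transformers
flask
flask-cors
librosa

With that in place, the app runs with pip install -r requirements.txt followed by python app.py.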