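# Speech-grading inference script: scores the fluency and pronunciation
# accuracy of a spoken recording with two fine-tuned Whisper-tiny audio
# classifiers (seba3y/whisper-tiny-fluency, seba3y/whisper-tiny-accuracy).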
from transformers import AutoFeatureExtractor, WhisperForAudioClassification
import torch

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('Run on:', device)

SAMPLING_RATE = 16000
MAX_LENGTH = SAMPLING_RATE * 10  # classify the audio in 10-second windows

fluency_model_name = 'seba3y/whisper-tiny-fluency'
acc_model_name = 'seba3y/whisper-tiny-accuracy'

fluency_feature = AutoFeatureExtractor.from_pretrained(fluency_model_name)
fluency_model = WhisperForAudioClassification.from_pretrained(fluency_model_name).to(device)
acc_feature = AutoFeatureExtractor.from_pretrained(acc_model_name)
acc_model = WhisperForAudioClassification.from_pretrained(acc_model_name).to(device)
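# Both checkpoints are Whisper-tiny fine-tunes, so their feature extractors
# should produce identical log-mel features; predict_all below relies on this
# and reuses one set of input features for both classification heads.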
def load_audio_from_path(audio, feature_extractor, max_length=MAX_LENGTH):
    # `audio` is a Gradio-style (sample_rate, samples) tuple; the sample rate
    # is assumed to already be 16 kHz, so only the waveform is kept.
    _, audio = audio
    audio_length = len(audio)
    # Split the waveform into max_length windows and featurize each one.
    segments = []
    for start in range(0, audio_length, max_length):
        end = min(start + max_length, audio_length)
        segment = audio[start:end]
        inputs = feature_extractor(segment, sampling_rate=SAMPLING_RATE, return_tensors='pt',
                                   max_length=max_length, padding='max_length').input_features
        segments.append(inputs)
    return segments
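# Example (hypothetical input): a Gradio Audio component with type='numpy'
# yields exactly this (sample_rate, np.ndarray) tuple:
#   segments = load_audio_from_path((SAMPLING_RATE, samples), acc_feature)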
@torch.no_grad()
def model_generate(inputs, model):
    # Forward pass only; the first output of the classifier is the logits.
    logits = model(inputs.to(device))[0]
    return logits
def postprocess(logits, model, noise=1):
    # `noise` is a hand-tuned per-class weight vector: each shifted logit is
    # rescaled before the softmax, biasing the distribution toward the more
    # heavily weighted classes (an ad-hoc calibration of the raw model).
    logits = noise * (logits.cpu() + 0.9)
    scores = logits.softmax(-1)[0]
    print(scores)  # debug: calibrated per-class probabilities
    ids = torch.argmax(scores, dim=-1).item()
    scores = scores.tolist()
    label = model.config.id2label[ids]
    return label, round(scores[ids], 2)
def predict(segments, model, noise):
    all_logits = []
    for segment in segments:
        logits = model_generate(segment, model)
        all_logits.append(logits)
    # Aggregate the per-segment results with a simple average of the logits.
    avg_logits = torch.mean(torch.stack(all_logits), dim=0)
    return postprocess(avg_logits, model, noise)
def predict_accuracy(audio):
    Anoise = torch.tensor([100.618, .0118, 10.945, 30.419])
    # Featurize first: predict() expects the output of load_audio_from_path.
    segments = load_audio_from_path(audio, acc_feature)
    return predict(segments, acc_model, Anoise)

def predict_fluency(audio):
    Fnoise = torch.tensor([5.618, 4.518, 2.145, 0.219])
    segments = load_audio_from_path(audio, fluency_feature)
    return predict(segments, fluency_model, Fnoise)
def predict_all(audio):
    # Hand-tuned class weights for each head (note that they differ from the
    # values used in the standalone functions above).
    Anoise = torch.tensor([5.618, 1.518, 10.945, 100.419])
    Fnoise = torch.tensor([3.618, 5.518, 3.045, 0.49])
    # The accuracy extractor's features are reused for both models.
    segments = load_audio_from_path(audio, acc_feature)
    acc = predict(segments, acc_model, Anoise)
    fle = predict(segments, fluency_model, Fnoise)
    return acc, fle
if __name__ == '__main__':
    import librosa  # only needed for this file-based demo

    file_path = r'uploads\audio.wav'
    # The predict_* functions expect a (sample_rate, samples) tuple, so load
    # the file and wrap it.
    samples, _ = librosa.load(file_path, sr=SAMPLING_RATE)
    audio = (SAMPLING_RATE, samples)

    print('Fluency of the speech:')
    print('=' * 25)
    label, confidence = predict_fluency(audio)
    print('Prediction:', label, '\nConfidence:', round(confidence * 100, 2), '%')
    print()
    print('Pronunciation Accuracy of the speech:')
    print('=' * 25)
    label, confidence = predict_accuracy(audio)
    print('Prediction:', label, '\nConfidence:', round(confidence * 100, 2), '%')
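# Since the predictors consume Gradio-style (sample_rate, samples) tuples, a
# minimal front-end sketch might look like the following. This is an
# illustration only: the real Space UI is not part of this file, and `grade`
# and the component choices are hypothetical.
#
# import gradio as gr
#
# def grade(audio):
#     # `audio` arrives as (sample_rate, np.ndarray) with type='numpy'.
#     (acc_label, acc_score), (flu_label, flu_score) = predict_all(audio)
#     return (f'Accuracy: {acc_label} ({acc_score})',
#             f'Fluency: {flu_label} ({flu_score})')
#
# demo = gr.Interface(fn=grade, inputs=gr.Audio(type='numpy'),
#                     outputs=['text', 'text'])
# demo.launch()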