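"""Gradio demo for Punjabi automatic speech recognition.

Incoming audio is denoised (high-pass filter, Wiener filter, wavelet
thresholding) and transcribed with the cdactvm/w2v-bert-punjabi model;
the transcript is then post-processed to convert spoken number words
into digits.
"""
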
import warnings
warnings.filterwarnings("ignore")

import pywt       # pywavelet for wavelet transforms, used in denoising.
import librosa    # used for audio processing.
import numpy as np      # for mathematical calculations
import gradio as gr     # for inferencing.
from transformers import pipeline     # used for inferencing.
from scipy.signal import butter, lfilter, wiener
from text2int import text_to_int
from isNumber import is_number
from Text2List import text_to_list
from convert2list import convert_to_list
from processDoubles import process_doubles
from replaceWords import replace_words

# Initialize ASR model pipeline
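# (Downloading and loading the model weights can take a while on first startup.)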
asr_model = pipeline("automatic-speech-recognition", model="cdactvm/w2v-bert-punjabi")


# Function to apply a high-pass filter
def high_pass_filter(audio, sr, cutoff=300):
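    # butter() takes the cutoff as a fraction of the Nyquist frequency (sr / 2).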
    nyquist = 0.5 * sr
    normal_cutoff = cutoff / nyquist
    b, a = butter(1, normal_cutoff, btype='high', analog=False)
    filtered_audio = lfilter(b, a, audio)
    return filtered_audio

# Function to apply wavelet denoising
def wavelet_denoise(audio, wavelet='db1', level=1):
    coeffs = pywt.wavedec(audio, wavelet, mode='per')
    # Estimate the noise level from the finest detail coefficients.
    # (The classical MAD-based estimate divides by 0.6745; dividing by 0.5
    # gives a larger threshold, i.e. more aggressive denoising.)
    sigma = np.median(np.abs(coeffs[-level])) / 0.5
    # Universal threshold, soft-applied to all detail coefficients.
    uthresh = sigma * np.sqrt(2 * np.log(len(audio)))
    coeffs[1:] = [pywt.threshold(i, value=uthresh, mode='soft') for i in coeffs[1:]]
    return pywt.waverec(coeffs, wavelet, mode='per')

# Function to apply a Wiener filter for noise reduction
def apply_wiener_filter(audio):
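    # scipy.signal.wiener smooths the signal using local mean/variance estimates.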
    return wiener(audio)


# Function to handle speech recognition
def recognize_speech(audio_file):
    audio, sr = librosa.load(audio_file, sr=16000)
    audio = high_pass_filter(audio, sr)
    audio = apply_wiener_filter(audio)
    denoised_audio = wavelet_denoise(audio)
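    # The ASR pipeline accepts a raw numpy array; the audio was loaded at
    # 16 kHz above to match the sampling rate the model expects.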
    result = asr_model(denoised_audio)
    text_value = result['text']
    cleaned_text = text_value.replace("[PAD]", "")
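    # Post-process the transcript with the helper modules to map spoken
    # number words to digits.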
    converted_to_list = convert_to_list(cleaned_text, text_to_list())
    processed_doubles = process_doubles(converted_to_list)
    replaced_words = replace_words(processed_doubles)
    converted_text = text_to_int(replaced_words)
    return converted_text
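
# Quick local check (hypothetical file name; assumes such a recording exists on disk):
#   print(recognize_speech("sample_punjabi.wav"))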


def sel_lng(lng, mic=None, file=None):
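    # Prefer the microphone recording when both a mic recording and a file are given.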
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        return "You must either provide a mic recording or a file"
    
    if lng == "model_1":
        return recognize_speech(audio)
        

# Create a Gradio interface
demo = gr.Interface(
    fn=sel_lng, 
    inputs=[
        gr.Dropdown(["model_1"], label="Select Model"),
        gr.Audio(sources=["microphone", "upload"], type="filepath"),
    ],
    outputs=["textbox"],
    title="Automatic Speech Recognition",
    description="Demo for Automatic Speech Recognition. Use the microphone to record speech and press the Record button. The model may take some time to load initially. The recognized text will appear in the output textbox."
)

demo.launch()