#!/usr/bin/env python3
#
# Copyright      2022  Xiaomi Corp.        (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
from datetime import datetime

import gradio as gr
import torchaudio

from model import get_gigaspeech_pre_trained_model, sample_rate

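# Load the pre-trained English GigaSpeech model once at module import time
# so that every request reuses the same instance instead of reloading it.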
models = {"english": get_gigaspeech_pre_trained_model()}


def convert_to_wav(in_filename: str) -> str:
    """Convert the input audio file to a wave file"""
    out_filename = in_filename + ".wav"
    print(f"Converting '{in_filename}' to '{out_filename}'")
    _ = os.system(f"ffmpeg -hide_banner -i '{in_filename}' '{out_filename}'")
    return out_filename


demo = gr.Blocks()


def process(in_filename: str) -> str:
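    """Run speech recognition on an uploaded or recorded audio file.

    The file is converted to wav with ffmpeg, resampled to the model's
    expected sample rate if needed, and decoded with the pre-trained
    GigaSpeech model. Returns the recognized text.
    """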
    print("in_filename", in_filename)
    filename = convert_to_wav(in_filename)

    now = datetime.now()
    date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
    print(f"Started at {date_time}")

    start = time.time()
    wave, wave_sample_rate = torchaudio.load(filename)

    if wave_sample_rate != sample_rate:
        print(
            f"Expected sample rate: {sample_rate}. Given: {wave_sample_rate}. "
            f"Resampling to {sample_rate}."
        )

        wave = torchaudio.functional.resample(
            wave,
            orig_freq=wave_sample_rate,
            new_freq=sample_rate,
        )
    wave = wave[0]  # use only the first channel.

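    # decode_waves() takes a list of waveforms; we pass a single one
    # and keep the first (and only) hypothesis.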
    hyp = models["english"].decode_waves([wave])[0]

    date_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    end = time.time()

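    # Real-time factor (RTF): processing time divided by audio duration;
    # values below 1 mean faster than real time.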
    duration = wave.shape[0] / sample_rate
    rtf = (end - start) / duration

    print(f"Finished at {date_time} s. Elapsed: {end - start: .3f} s")
    print(f"Duration {duration: .3f} s")
    print(f"RTF {rtf: .3f}")
    print("hyp")
    print(hyp)

    return hyp


with demo:
    gr.Markdown("Upload audio from disk or record from microphone for recognition")
    with gr.Tabs():
        with gr.TabItem("Upload from disk"):
            uploaded_file = gr.inputs.Audio(
                source="upload",  # Choose between "microphone", "upload"
                type="filepath",
                optional=False,
                label="Upload from disk",
            )
            upload_button = gr.Button("Submit for recognition")
            uploaded_output = gr.outputs.Textbox(
                label="Recognized speech from uploaded file"
            )

        with gr.TabItem("Record from microphone"):
            microphone = gr.inputs.Audio(
                source="microphone",  # Choose between "microphone", "upload"
                type="filepath",
                optional=False,
                label="Record from microphone",
            )
            recorded_output = gr.outputs.Textbox(
                label="Recognized speech from recordings"
            )

            record_button = gr.Button("Submit for recognition")

        upload_button.click(
            process,
            inputs=uploaded_file,
            outputs=uploaded_output,
        )
        record_button.click(
            process,
            inputs=microphone,
            outputs=recorded_output,
        )

if __name__ == "__main__":
    demo.launch()