import torch
import gradio as gr
import pytube as pt
from transformers import pipeline

MODEL_NAME = "openai/whisper-large-v2"
BATCH_SIZE = 8

device = 0 if torch.cuda.is_available() else "cpu"

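# Chunked ASR pipeline: 30-second windows let Whisper transcribe audio longer
# than its native 30-second context.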
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)


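# Ids of the <|transcribe|> and <|translate|> task tokens, looked up by their
# position in the tokenizer's special-token list; they are used below to force
# the task through forced_decoder_ids.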
all_special_ids = pipe.tokenizer.all_special_ids
transcribe_token_id = all_special_ids[-5]
translate_token_id = all_special_ids[-6]


def transcribe(microphone, file_upload, task):
    warn_output = ""
    if (microphone is not None) and (file_upload is not None):
        warn_output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )

    elif (microphone is None) and (file_upload is None):
        # Both outputs must be returned: the error text and no download file.
        return ["ERROR: You have to either use the microphone or upload an audio file", None]

    file = microphone if microphone is not None else file_upload

    # Force the decoder to the selected task (transcription vs. translation).
    pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]

    text = pipe(file, batch_size=BATCH_SIZE)["text"]

    # Write the transcript to disk (overwriting any previous one) so it can be
    # offered as a download.
    with open("outt.txt", "w") as f:
        f.write(text)

    return [warn_output + text, "outt.txt"]


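# Build a centered iframe embed for the video; the id extraction is naive and
# assumes a standard "watch?v=" URL.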
def _return_yt_html_embed(yt_url):
    video_id = yt_url.split("?v=")[-1]
    html_str = (
        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
        " </center>"
    )
    return html_str


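# Download the first audio-only stream with pytube and run it through the same
# Whisper pipeline. The pipeline decodes the file with ffmpeg, so the actual
# container need not be MP3 despite the filename.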
def yt_transcribe(yt_url, task):
    yt = pt.YouTube(yt_url)
    html_embed_str = _return_yt_html_embed(yt_url)
    stream = yt.streams.filter(only_audio=True)[0]
    stream.download(filename="audio.mp3")

    pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]

    text = pipe("audio.mp3", batch_size=BATCH_SIZE)["text"]

    return html_embed_str, text


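# Assemble the Gradio UI: two tabbed interfaces, one for microphone/uploaded
# audio and one for YouTube URLs.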
demo = gr.Blocks()
transcript_file = gr.File(label="Download")
description = """This application displays the transcribed text for a given audio input <img src="https://i.ibb.co/J5DscKw/GVP-Womens.jpg" width=200px>"""
mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
        gr.inputs.Audio(source="upload", type="filepath", optional=True),
        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
    ],
    outputs=["text", transcript_file],
    layout="horizontal",
    theme="huggingface",
    title="Speech to Text",
    description=description,
    allow_flagging="never",
)

yt_transcribe_interface = gr.Interface(
    fn=yt_transcribe,
    inputs=[
        gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
    ],
    outputs=["html", "text"],
    layout="horizontal",
    theme="huggingface",
    title="Speech to Text",
    description="Transcribe YouTube videos to text.",
    allow_flagging="never",
)

with demo:
    gr.TabbedInterface([mf_transcribe, yt_transcribe_interface], ["Transcribe Audio", "Transcribe YouTube"])

demo.launch(enable_queue=True)