File size: 3,466 Bytes
6c226f9
 
8e787d3
6c226f9
d790c0b
 
88183ad
f08ecef
6c226f9
f08ecef
a5bfe25
9d6fa91
f08ecef
6c226f9
 
 
 
 
 
 
 
 
f08ecef
3c0cd8e
 
f08ecef
3c0cd8e
f08ecef
6c226f9
f08ecef
6c226f9
 
f08ecef
6c226f9
d790c0b
f08ecef
6c226f9
f08ecef
6c226f9
d790c0b
 
 
 
 
b97a3c2
 
3c0cd8e
6c226f9
 
f08ecef
6c226f9
 
 
 
3c0cd8e
 
 
 
 
1fbf59c
f08ecef
3c0cd8e
 
 
 
 
 
609dcbe
6c226f9
 
 
81e27c5
a5bfe25
f08ecef
6c226f9
 
 
 
7097513
 
609dcbe
7097513
6c226f9
 
81e27c5
a5bfe25
f08ecef
6c226f9
 
f08ecef
 
 
3c0cd8e
6c226f9
7097513
 
f08ecef
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import torch
import gradio as gr
import yt_dlp as youtube_dl
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
import tempfile
import os
import time

# Model and setup
# MODEL_NAME selects the checkpoint loaded by the ASR pipeline below.
MODEL_NAME = "openai/whisper-large-v3"
# Number of audio chunks transcribed per forward pass (see transcribe/yt_transcribe).
BATCH_SIZE = 8
YT_LENGTH_LIMIT_S = 3600  # 1-hour limit for YouTube files
# transformers pipelines take a CUDA device index (int) or the string "cpu".
device = 0 if torch.cuda.is_available() else "cpu"

# Module-level side effect: downloads/loads the Whisper model once at startup.
# chunk_length_s=30 enables chunked long-form transcription.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

# Function to transcribe audio
def transcribe(inputs, task):
    """Run the ASR pipeline on an uploaded or recorded audio file.

    Args:
        inputs: Path to the audio file (Gradio ``type="filepath"``), or None
            when nothing was submitted.
        task: Either "transcribe" or "translate", forwarded to Whisper.

    Returns:
        The transcribed (or translated) text.

    Raises:
        gr.Error: If no audio file was provided.
    """
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file.")

    result = pipe(
        inputs,
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": task},
        return_timestamps=True,
    )
    return result["text"]

# YouTube video processing functions
def _return_yt_html_embed(yt_url):
    video_id = yt_url.split("?v=")[-1]
    return f'<center><iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"></iframe></center>'

def download_yt_audio(yt_url, filename):
    """Download the audio/video of a YouTube URL to ``filename``.

    The original body was an elided placeholder comment, which left the
    function without a body (a syntax error).  This implementation inspects
    the video first, enforces ``YT_LENGTH_LIMIT_S``, then downloads.

    Args:
        yt_url: URL of the YouTube video.
        filename: Destination path for the downloaded file.

    Raises:
        gr.Error: If the video cannot be inspected, has no known duration
            (e.g. a live stream), exceeds the length limit, or fails to
            download.
    """
    info_loader = youtube_dl.YoutubeDL()
    try:
        info = info_loader.extract_info(yt_url, download=False)
    except youtube_dl.utils.DownloadError as err:
        raise gr.Error(str(err))

    # Refuse over-long (or length-less, e.g. live) videos before downloading.
    duration_s = info.get("duration")
    if duration_s is None:
        raise gr.Error("Could not determine the video length. Live streams are not supported.")
    if duration_s > YT_LENGTH_LIMIT_S:
        yt_length_limit_hms = time.strftime("%H:%M:%S", time.gmtime(YT_LENGTH_LIMIT_S))
        file_length_hms = time.strftime("%H:%M:%S", time.gmtime(duration_s))
        raise gr.Error(
            f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video."
        )

    # Prefer small video + best m4a audio; fall back to best available mp4.
    ydl_opts = {
        "outtmpl": filename,
        "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best",
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.download([yt_url])
        except youtube_dl.utils.DownloadError as err:
            raise gr.Error(str(err))

def yt_transcribe(yt_url, task):
    """Download a YouTube video's audio and transcribe (or translate) it.

    Args:
        yt_url: URL of the YouTube video.
        task: Either "transcribe" or "translate", forwarded to Whisper.

    Returns:
        A (html_embed, text) pair: an embedded-player snippet and the
        transcription.
    """
    html_embed_str = _return_yt_html_embed(yt_url)

    # Download into a temp dir and slurp the raw bytes before it is cleaned up.
    with tempfile.TemporaryDirectory() as tmpdirname:
        filepath = os.path.join(tmpdirname, "video.mp4")
        download_yt_audio(yt_url, filepath)
        with open(filepath, "rb") as f:
            raw_bytes = f.read()

    # Decode to a float array at the model's expected sampling rate.
    sampling_rate = pipe.feature_extractor.sampling_rate
    audio_array = ffmpeg_read(raw_bytes, sampling_rate)
    model_inputs = {"array": audio_array, "sampling_rate": sampling_rate}

    output = pipe(
        model_inputs,
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": task},
        return_timestamps=True,
    )
    return html_embed_str, output["text"]

# Gradio interfaces
# NOTE(review): replaced the removed `gr.inputs.*` namespace and the
# `optional`/`default`/`layout` arguments with the current component API,
# and unified the theme (this interface alone used "huggingface").
mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath", label="Audio"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    outputs="text",
    theme="NoCrypt/[email protected]",
    title="Whisper Large V3: Transcribe Audio",
    description="Transcribe long-form microphone or audio inputs with the click of a button!"
)

# NOTE(review): replaced the removed `gr.inputs.*` namespace and the
# `optional`/`default`/`layout` arguments with the current component API.
file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="upload", type="filepath", label="Audio file"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    outputs="text",
    theme="NoCrypt/[email protected]",
    title="Whisper Large V3: Transcribe Audio",
    description="Transcribe long-form microphone or audio inputs with the click of a button!"
)

# NOTE(review): this assignment shadows the `yt_transcribe` function above;
# it only works because `fn=yt_transcribe` is evaluated before the rebinding.
# Deprecated `gr.inputs.*` components and `default`/`layout` args replaced
# with the current component API.
yt_transcribe = gr.Interface(
    fn=yt_transcribe,
    inputs=[
        gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    outputs=["html", "text"],
    theme="NoCrypt/[email protected]",
    title="Whisper Large V3: Transcribe YouTube",
    description="Transcribe long-form YouTube videos with the click of a button!"
)

# Main Gradio application
with gr.Blocks(theme="NoCrypt/[email protected]") as demo:
    # Fixed malformed HTML: the original "closed" the tags with <h1><center>.
    gr.HTML("<h1><center>AI Assistant</center></h1>")
    gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])

# `enable_queue=` was deprecated/removed from launch(); enable queuing explicitly.
demo.queue()
demo.launch()