from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, TextIteratorStreamer
from transformers.utils import is_flash_attn_2_available
from transformers.pipelines.audio_utils import ffmpeg_read
from threading import Thread
import torch
import gradio as gr
import time

BATCH_SIZE = 16  # number of audio chunks transcribed in parallel by the chunked pipelines
MAX_AUDIO_MINS = 30  # maximum audio input in minutes

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
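# enable Flash Attention 2 if the flash-attn package is available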
use_flash_attention_2 = is_flash_attn_2_available()

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    "openai/whisper-large-v2",
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    use_flash_attention_2=use_flash_attention_2,  # newer transformers versions use attn_implementation="flash_attention_2" instead
)
distilled_model = AutoModelForSpeechSeq2Seq.from_pretrained(
    "distil-whisper/distil-large-v2",
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    use_flash_attention_2=use_flash_attention_2,
)

if not use_flash_attention_2:
    # Flash Attention 2 is unavailable: fall back to PyTorch's scaled dot-product
    # attention (SDPA) kernels via BetterTransformer
    model = model.to_bettertransformer()
    distilled_model = distilled_model.to_bettertransformer()

processor = AutoProcessor.from_pretrained("openai/whisper-large-v2")
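# streamer for the short-audio path, where the transcription is yielded token-by-token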
streamer = TextIteratorStreamer(processor.tokenizer, skip_special_tokens=True)

model.to(device)
distilled_model.to(device)

pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    chunk_length_s=30,
    torch_dtype=torch_dtype,
    device=device,
    generate_kwargs={"language": "en", "task": "transcribe"},
    return_timestamps=True
)
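# keep a reference to the original forward pass so that transcribe() can wrap it with a timer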
pipe_forward = pipe._forward

distil_pipe = pipeline(
    "automatic-speech-recognition",
    model=distilled_model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    chunk_length_s=15,  # Distil-Whisper is designed for 15-second chunks in chunked long-form mode
    torch_dtype=torch_dtype,
    device=device,
    generate_kwargs={"language": "en", "task": "transcribe"},
)
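# likewise keep a reference to Distil-Whisper's original forward pass for timing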
distil_pipe_forward = distil_pipe._forward

def transcribe(inputs):
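    """Transcribe an audio file with Distil-Whisper and then Whisper, yielding
    (distil_text, distil_runtime, whisper_text, whisper_runtime) as results become available."""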
    if inputs is None:
        raise gr.Error("No audio file submitted! Please record or upload an audio file before submitting your request.")

    with open(inputs, "rb") as f:
        inputs = f.read()

    inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
    audio_length_mins = len(inputs) / pipe.feature_extractor.sampling_rate / 60

    if audio_length_mins > MAX_AUDIO_MINS:
        raise gr.Error(
            f"To ensure fair usage of the Space, the maximum audio length permitted is {MAX_AUDIO_MINS} minutes."
            f"Got an audio of length {round(audio_length_mins, 3)} minutes."
        )

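    # audio of 30 seconds or longer: benchmark the two chunked long-form pipelines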
    if audio_length_mins >= 0.5:
        inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}

        # wrap the original forward pass so that only the model's inference time is measured
        def _forward_distil_time(*args, **kwargs):
            global distil_runtime
            start_time = time.time()
            result = distil_pipe_forward(*args, **kwargs)
            distil_runtime = time.time() - start_time
            distil_runtime = round(distil_runtime, 2)
            return result

        distil_pipe._forward = _forward_distil_time
        distil_text = distil_pipe(inputs.copy(), batch_size=BATCH_SIZE)["text"]
        yield distil_text, distil_runtime, None, None

        # same timing wrapper, this time around the full Whisper pipeline's forward pass
        def _forward_time(*args, **kwargs):
            global runtime
            start_time = time.time()
            result = pipe_forward(*args, **kwargs)
            runtime = time.time() - start_time
            runtime = round(runtime, 2)
            return result

        pipe._forward = _forward_time
        text = pipe(inputs, batch_size=BATCH_SIZE)["text"]

        yield distil_text, distil_runtime, text, runtime

    else:
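        # audio shorter than 30 seconds: generate directly and stream the tokens as they are produced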
        input_features = processor(inputs, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt").input_features
        # move the features to the model's device and dtype before generating
        input_features = input_features.to(device, dtype=torch_dtype)

        # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
        generation_kwargs = dict(input_features=input_features, streamer=streamer, max_new_tokens=128, language="en", task="transcribe")
        thread = Thread(target=distilled_model.generate, kwargs=generation_kwargs)

        thread.start()
        start_time = time.time()
        distil_text = ""
        for generated_text in streamer:
            distil_text += generated_text
            yield distil_text, None, None, None

        distil_runtime = time.time() - start_time
        distil_runtime = round(distil_runtime, 2)
        yield distil_text, distil_runtime, None, None

        # reuse the same generation kwargs (and streamer) for the full Whisper model
        thread = Thread(target=model.generate, kwargs=generation_kwargs)

        thread.start()
        start_time = time.time()
        text = ""
        for generated_text in streamer:
            text += generated_text
            yield distil_text, distil_runtime, text, None

        runtime = time.time() - start_time
        runtime = round(runtime, 2)
        yield distil_text, distil_runtime, text, runtime


if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.HTML(
            """
                <div style="text-align: center; max-width: 700px; margin: 0 auto;">
                  <div
                    style="
                      display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
                    "
                  >
                    <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                      Whisper vs Distil-Whisper: Speed Comparison
                    </h1>
                  </div>
                </div>
            """
        )
        gr.HTML(
            f"""
            <p><a href="https://huggingface.co/distil-whisper/distil-large-v2"> Distil-Whisper</a> is a distilled variant 
            of the <a href="https://huggingface.co/openai/whisper-large-v2"> Whisper</a> model by OpenAI. Compared to Whisper, 
            Distil-Whisper runs 6x faster with 50% fewer parameters, while performing to within 1% word error rate (WER) on
            out-of-distribution evaluation data.</p>
            
            <p>In this demo, we perform a speed comparison between Whisper and Distil-Whisper in order to test this claim.
            Both models use the <a href="https://huggingface.co/distil-whisper/distil-large-v2#long-form-transcription"> chunked long-form transcription algorithm</a> 
            in 🤗 Transformers, as well as Flash Attention. To use Distil-Whisper yourself, check the code examples on the
            <a href="https://github.com/huggingface/distil-whisper#1-usage"> Distil-Whisper repository</a>. To ensure fair 
            usage of the Space, we ask that audio file inputs are kept to < 30 mins.</p>
            """
        )
        audio = gr.components.Audio(type="filepath", label="Audio input")
        button = gr.Button("Transcribe")
        with gr.Row():
            distil_runtime = gr.components.Textbox(label="Distil-Whisper Transcription Time (s)")
            runtime = gr.components.Textbox(label="Whisper Transcription Time (s)")
        with gr.Row():
            distil_transcription = gr.components.Textbox(label="Distil-Whisper Transcription", show_copy_button=True)
            transcription = gr.components.Textbox(label="Whisper Transcription", show_copy_button=True)
        button.click(
            fn=transcribe,
            inputs=audio,
            outputs=[distil_transcription, distil_runtime, transcription, runtime],
        )
        gr.Markdown("## Examples")
        gr.Examples(
            [["./assets/example_1.wav"], ["./assets/example_2.wav"]],
            audio,
            outputs=[distil_transcription, distil_runtime, transcription, runtime],
            fn=transcribe,
            cache_examples=False,
        )
    demo.queue(max_size=10).launch()