import os
import pickle
from typing import Union
from fastapi import Request
import torch
from transformers import pipeline

from fastapi import FastAPI
from contextlib import asynccontextmanager

DEVICE = os.getenv('DEVICE', 'mps')
ATTN_IMPLEMENTATION = os.getenv('ATTN_IMPLEMENTATION', "sdpa")


@asynccontextmanager
async def lifespan(app: FastAPI):
    app.state.transcribe_pipeline = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v3",
        torch_dtype=torch.float16 if ATTN_IMPLEMENTATION == "sdpa" else torch.bfloat16,
        device=DEVICE,
        model_kwargs={"attn_implementation": ATTN_IMPLEMENTATION},
    )
    app.state.transcribe_pipeline.model.to('cuda')
    yield

app = FastAPI(lifespan=lifespan)



@app.get("/")
def read_root():
    return {"status": "ok"}



@app.post("/transcribe")
async def transcribe(request: Request):
    body = await request.body()
    audio_chunk = pickle.loads(body)
    outputs = app.state.transcribe_pipeline(
        audio_chunk,
        chunk_length_s=30,
        batch_size=24,
        generate_kwargs={
            'task': 'transcribe',
            'language': 'english'
        },
        return_timestamps='word'
    )
    text = outputs["text"].strip()
    return {"transcribe": text, "outputs": outputs}