from fastapi import FastAPI, UploadFile, File, Response
from transformers import pipeline
import librosa
import soundfile as sf
from deep_translator import GoogleTranslator
import io
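
# Assumed runtime dependencies beyond the imports above (not pinned in the
# original code): uvicorn (or another ASGI server) to serve the app, a torch
# backend for the transformers pipelines, and soundfile for WAV encoding.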

app = FastAPI()


# Load all models once at startup so each request does not pay the load cost.
print("Loading speech recognition")
asr = pipeline("automatic-speech-recognition", model="Akashpb13/xlsr_kurmanji_kurdish")
print("Speech recognition loaded")

# GoogleTranslator calls the public Google Translate endpoint, so the service
# needs network access at request time.
print("Loading translator")
translator = GoogleTranslator(source='ku', target='fr')
print("Translator loaded")

print("Loading TTS")
tts = pipeline("text-to-audio", model="roshna-omer/speecht5_tts_krd-kmr_CV17.0")
print("TTS loaded")

def speech2text(audio_data: bytes) -> str:
    # Decode the uploaded bytes and resample to the 16 kHz mono input the ASR model expects.
    audio_array, _ = librosa.load(io.BytesIO(audio_data), sr=16000)
    output = asr(audio_array)
    return output["text"]

def text2speech(text: str) -> bytes:
    # The text-to-audio pipeline returns {"audio": ndarray, "sampling_rate": int}.
    # Encode the raw samples as a WAV file (via soundfile, added here) so the
    # HTTP response is directly playable rather than a raw array.
    output = tts(text)
    buffer = io.BytesIO()
    sf.write(buffer, output["audio"].squeeze(), output["sampling_rate"], format="WAV")
    return buffer.getvalue()

@app.post("/transcribe")
async def transcribe(file: UploadFile = File(...)):
    # Accept an audio upload, transcribe it to Kurmanji text, and return the
    # text together with its French translation.
    audio_data = await file.read()
    text_output = speech2text(audio_data)
    translated = translator.translate(text_output)
    return {"text": text_output, "translation": translated}

@app.post("/transcribe_audio")
async def transcribe_and_return_audio(file: UploadFile = File(...)):
    # Transcribe the upload, re-synthesize the recognized text as speech,
    # and return the result as a WAV file.
    audio_data = await file.read()
    text_output = speech2text(audio_data)
    audio_output = text2speech(text_output)
    return Response(content=audio_output, media_type="audio/wav")
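

# A minimal sketch of how to run and exercise the service, assuming this file
# is saved as main.py and a local test clip sample.wav exists (both names are
# illustrative, not part of the original code):
#
#   uvicorn main:app --reload
#   curl -F "file=@sample.wav" http://127.0.0.1:8000/transcribe
#   curl -F "file=@sample.wav" -o out.wav http://127.0.0.1:8000/transcribe_audio

if __name__ == "__main__":
    # Optional entry point so the service can also be started with `python main.py`.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)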