Update app.py
app.py
CHANGED
@@ -1,21 +1,45 @@
 import gradio as gr
-from transformers import pipeline
 import numpy as np
+from google.cloud import speech_v1
 
-transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
 
 def transcribe(audio):
-    sr, y = audio
-    y = y.astype(np.float32)
-    y /= np.max(np.abs(y))
+    """Transcribe microphone audio to text using Google Cloud Speech-to-Text."""
+    # Gradio delivers microphone audio as a (sample_rate, samples) tuple
+    sample_rate, y = audio
 
-    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+    # Convert the samples to 16-bit PCM (LINEAR16) bytes
+    if y.dtype != np.int16:
+        y = (y / np.max(np.abs(y)) * 32767).astype(np.int16)
+    audio_bytes = y.tobytes()
+
+    # Create a Speech-to-Text client
+    client = speech_v1.SpeechClient()
+
+    # Configure the recognition request
+    config = speech_v1.RecognitionConfig()
+    config.language_code = "es-ES"
+    config.encoding = speech_v1.RecognitionConfig.AudioEncoding.LINEAR16
+    config.sample_rate_hertz = sample_rate
+
+    # Build the recognition request from the recorded audio
+    recognition_audio = speech_v1.RecognitionAudio(content=audio_bytes)
+    request = speech_v1.RecognizeRequest(config=config, audio=recognition_audio)
+
+    # Run the transcription
+    response = client.recognize(request=request)
+
+    # Extract the transcribed text
+    transcript = response.results[0].alternatives[0].transcript
+
+    return transcript
 
 
 demo = gr.Interface(
     transcribe,
-    gr.Audio(sources=["microphone"]),
+    gr.Audio(sources=["microphone"], streaming=True),
     "text",
+    live=True,
 )
 
 demo.launch()
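A quick way to sanity-check the new Google Cloud path independently of the Gradio UI is to run the same recognize call against a short local recording. The snippet below is a minimal sketch, not part of this commit: it assumes GOOGLE_APPLICATION_CREDENTIALS points at a valid service-account key with Speech-to-Text access, and that sample_es.wav is a hypothetical 16 kHz mono LINEAR16 recording in Spanish.

# Standalone check of the Speech-to-Text call used in the updated app.py
# (assumed test file name and credential setup; not part of this commit).
from google.cloud import speech_v1

client = speech_v1.SpeechClient()

# Same recognition settings as the Space
config = speech_v1.RecognitionConfig()
config.language_code = "es-ES"
config.encoding = speech_v1.RecognitionConfig.AudioEncoding.LINEAR16
config.sample_rate_hertz = 16000

# Read the test recording and send it for transcription
with open("sample_es.wav", "rb") as f:
    audio = speech_v1.RecognitionAudio(content=f.read())

request = speech_v1.RecognizeRequest(config=config, audio=audio)
response = client.recognize(request=request)

for result in response.results:
    print(result.alternatives[0].transcript)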