freddyaboulton (HF staff) committed
Commit e6da608 · verified · 1 Parent(s): ca38502

Upload folder using huggingface_hub
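The commit message refers to huggingface_hub's folder-upload flow. As a hedged sketch only (the repo id and local path below are placeholders, not values taken from this commit), such an upload is typically scripted like this:

from huggingface_hub import HfApi

api = HfApi()  # authenticates via HF_TOKEN or the cached login token
api.upload_folder(
    folder_path=".",                # placeholder: local folder containing app.py
    repo_id="<user>/<space-name>",  # placeholder Space id, not from this commit
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)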

Files changed (1):
app.py: +11 -16
app.py CHANGED
@@ -1,22 +1,22 @@
+import os
+import time
+
+import gradio as gr
+import numpy as np
+from dotenv import load_dotenv
+from elevenlabs import ElevenLabs
+from fastapi import FastAPI
 from fastrtc import (
-    ReplyOnPause,
     AdditionalOutputs,
+    ReplyOnPause,
     Stream,
+    WebRTCError,
     aggregate_bytes_to_16bit,
     get_twilio_turn_credentials,
-    WebRTCError,
     stt,
-    audio_to_bytes,
 )
-import numpy as np
-import gradio as gr
 from gradio.utils import get_space
 from groq import Groq
-from elevenlabs import ElevenLabs
-from dotenv import load_dotenv
-import time
-import os
-from fastapi import FastAPI
 
 load_dotenv()
 groq_client = Groq()
@@ -34,11 +34,6 @@ def response(
     messages = [{"role": d["role"], "content": d["content"]} for d in chatbot]
     start = time.time()
     text = stt(audio)
-    # text = groq_client.audio.transcriptions.create(
-    #     file=("audio-file.mp3", audio_to_bytes(audio)),
-    #     model="whisper-large-v3-turbo",
-    #     response_format="verbose_json",
-    # ).text
     print("transcription", time.time() - start)
     print("prompt", text)
     chatbot.append({"role": "user", "content": text})
@@ -66,7 +61,7 @@ def response(
             audio_array = np.frombuffer(chunk, dtype=np.int16).reshape(1, -1)
             yield (24000, audio_array)
         yield AdditionalOutputs(chatbot)
-    except Exception as e:
+    except Exception:
         import traceback
 
         traceback.print_exc()
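The final hunk only drops the unused exception binding; the surrounding unchanged lines reinterpret each streamed TTS byte chunk as 16-bit PCM and reshape it to a (channels, samples) array before yielding it with its sample rate. A minimal sketch of that conversion, using a synthetic chunk rather than real ElevenLabs output:

import numpy as np

# Synthetic stand-in for one streamed TTS chunk: 480 samples of 16-bit mono PCM.
chunk = (np.sin(np.linspace(0, np.pi, 480)) * 32767).astype(np.int16).tobytes()

# Reinterpret the raw bytes as int16 samples and reshape to (channels, samples);
# the handler yields this array together with its 24 kHz sample rate.
audio_array = np.frombuffer(chunk, dtype=np.int16).reshape(1, -1)
frame = (24000, audio_array)
print(audio_array.shape)  # (1, 480)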