RamAnanth1 committed
Commit f96d7d0 · 1 Parent(s): 057830c

Refactor code

Files changed (1): app.py (+22 -20)
app.py CHANGED
@@ -13,25 +13,26 @@ whisper_model = whisper.load_model("base")
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-def get_response_from_chatbot(audio):
-    try:
-        api = ChatGPT(session_token)
-        audio = whisper.load_audio(audio)
-        audio = whisper.pad_or_trim(audio)
-
-        mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
-
-        _, probs = whisper_model.detect_language(mel)
-        translate_options = whisper.DecodingOptions(task="translate", fp16 = False)
-
-        translation = whisper.decode(whisper_model, mel, translate_options)
-
-        print("Language Spoken: " + transcription.language)
-
-        print("Translated: " + translation.text)
+def translate(audio):
+    audio = whisper.load_audio(audio)
+    audio = whisper.pad_or_trim(audio)
+
+    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
+
+    _, probs = whisper_model.detect_language(mel)
+    translate_options = whisper.DecodingOptions(task="translate", fp16 = False)
+
+    translation = whisper.decode(whisper_model, mel, translate_options)
+
+    print("Language Spoken: " + transcription.language)
+
+    print("Translated: " + translation.text)
+    return translation.text
 
-
-        resp = api.send_message(translation.text)
+def get_response_from_chatbot(text):
+    try:
+        api = ChatGPT(session_token)
+        resp = api.send_message(text)
         api.refresh_auth()
         api.reset_conversation()
         response = resp['message']
@@ -40,10 +41,11 @@ def get_response_from_chatbot(audio):
         response = "Sorry, I'm tired. Please try again in some time"
     return response
 
-def chat(message, chat_history):
+def chat(audio, chat_history):
     out_chat = []
     if chat_history != '':
         out_chat = json.loads(chat_history)
+    message = translate(audio)
     response = get_response_from_chatbot(message)
     out_chat.append((message, response))
     chat_history = json.dumps(out_chat)
@@ -151,7 +153,7 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
     chatbot = gr.Chatbot(elem_id="chat_bot", visible=False).style(color_map=("green", "blue"))
     chatbot1 = gr.Chatbot(elem_id="chat_bot1").style(color_map=("green", "blue"))
     with gr.Row(elem_id="prompt_row"):
-        prompt_input_audio = audio_input_r = gr.Audio(label = 'Record Audio Input',source="microphone",type="filepath")
+        prompt_input_audio = gr.Audio(label = 'Record Audio Input',source="microphone",type="filepath")
         chat_history = gr.Textbox(lines=4, label="prompt", visible=False)
         submit_btn = gr.Button(value = "submit",elem_id="submit-btn").style(
             margin=True,
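
For readers skimming the diff, here is a minimal sketch of how the three refactored functions fit together after this commit. It is not the full app.py: the pyChatGPT import, the SESSION_TOKEN lookup, and chat()'s return value are assumptions labeled in the comments, and the undefined `transcription.language` print that the committed code carries over is replaced with the language field of the Whisper decoding result.

import json
import os

import torch
import whisper
from pyChatGPT import ChatGPT  # assumption: the ChatGPT wrapper whose send_message/refresh_auth/reset_conversation calls match this diff

whisper_model = whisper.load_model("base")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
session_token = os.getenv("SESSION_TOKEN")  # assumption: how app.py obtains the token is not shown in this diff

def translate(audio_path):
    # Load and prepare the recorded clip for Whisper.
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    # Detect the spoken language, then decode with the translate-to-English task.
    _, probs = whisper_model.detect_language(mel)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    translation = whisper.decode(whisper_model, mel, translate_options)

    # The committed code prints `transcription.language`, a name that is never defined;
    # the decoding result (or max(probs, key=probs.get)) already carries the language.
    print("Language Spoken: " + translation.language)
    print("Translated: " + translation.text)
    return translation.text

def get_response_from_chatbot(text):
    # Send the translated text to ChatGPT via the session-token wrapper.
    try:
        api = ChatGPT(session_token)
        resp = api.send_message(text)
        api.refresh_auth()
        api.reset_conversation()
        response = resp['message']
    except Exception:
        response = "Sorry, I'm tired. Please try again in some time"
    return response

def chat(audio, chat_history):
    # Gradio callback: microphone filepath in, updated (message, response) history out.
    out_chat = []
    if chat_history != '':
        out_chat = json.loads(chat_history)
    message = translate(audio)
    response = get_response_from_chatbot(message)
    out_chat.append((message, response))
    chat_history = json.dumps(out_chat)
    return out_chat, chat_history  # assumption: the return statement is outside the hunks shown above

Splitting translation out of the chatbot call keeps Whisper's failure modes outside the try/except that guards the ChatGPT session, and lets chat() reuse the translated text as the user-side entry in the conversation history.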