mskov committed on
Commit
8ab0364
·
1 Parent(s): 5e49329

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -14
app.py CHANGED
@@ -5,6 +5,11 @@ This script calls the model from openai api to predict the next few words in a c
5
  import os
6
  from pprint import pprint
7
  import sys
 
 
 
 
 
8
  import openai
9
  import gradio as gr
10
  os.system("pip install git+https://github.com/openai/whisper.git")
@@ -44,7 +49,7 @@ def transcribe(audio_file):
44
  transcription = asr_model.transcribe(audio_file)["text"]
45
  return transcription
46
 
47
- def debug_inference(audio, prompt, model, temperature, state=""):
48
  # Transcribe with Whisper
49
  print("The audio is:", audio)
50
  transcript = transcribe(audio)
@@ -60,7 +65,7 @@ def debug_inference(audio, prompt, model, temperature, state=""):
60
 
61
  infers = []
62
  temp = []
63
- infered=[]
64
  for i in range(5):
65
  print("print1 ", response['choices'][i]['text'])
66
  temp.append(response['choices'][i]['text'])
@@ -72,17 +77,24 @@ def debug_inference(audio, prompt, model, temperature, state=""):
72
  infers = list(map(lambda x: x.replace("\n", ""), temp))
73
  #infered = list(map(lambda x: x.split(','), infers))
74
 
75
- return transcript, state, infers, text
 
76
 
77
  # get audio from microphone
78
- gr.Interface(
79
- fn=debug_inference,
80
- inputs=[gr.inputs.Audio(source="microphone", type="filepath"),
81
- gr.inputs.Textbox(lines=15, placeholder="Enter a prompt here"),
82
- gr.inputs.Dropdown(["text-ada-001", "text-davinci-002", "text-davinci-003", "gpt-3.5-turbo"], label="Model"),
83
- gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.8, step=0.1, label="Temperature"),
84
- "state"
85
- ],
86
- outputs=["textbox","state","textbox", "textbox"],
87
- # examples=[["example_in-the-mood-to-eat.m4a", EXAMPLE_PROMPT, "text-ada-001", 0.8, ""],["","","",0.9,""]],
88
- live=False).launch()
 
 
 
 
 
 
 
5
  import os
6
  from pprint import pprint
7
  import sys
8
+ '''
9
+ This script calls the model from openai api to predict the next few words in a conversation.
10
+ '''
11
+ import os
12
+ import sys
13
  import openai
14
  import gradio as gr
15
  os.system("pip install git+https://github.com/openai/whisper.git")
 
49
  transcription = asr_model.transcribe(audio_file)["text"]
50
  return transcription
51
 
52
+ def inference(audio, prompt, model, temperature):
53
  # Transcribe with Whisper
54
  print("The audio is:", audio)
55
  transcript = transcribe(audio)
 
65
 
66
  infers = []
67
  temp = []
68
+ #infered=[]
69
  for i in range(5):
70
  print("print1 ", response['choices'][i]['text'])
71
  temp.append(response['choices'][i]['text'])
 
77
  infers = list(map(lambda x: x.replace("\n", ""), temp))
78
  #infered = list(map(lambda x: x.split(','), infers))
79
 
80
+ return transcript, infers
81
+
82
 
83
  # get audio from microphone
84
# Build the Gradio UI: microphone + prompt/model/temperature controls in the
# left column, transcript and prediction outputs in the right column.
with gr.Blocks() as face:
    with gr.Row():
        with gr.Column():
            audio = gr.Audio(source="microphone", type="filepath")
            # FIX: the original lines ended with stray trailing commas, which
            # made promptText and dropChoice one-element tuples instead of
            # components and would break the .click() input wiring below.
            promptText = gr.inputs.Textbox(lines=15, placeholder="Enter a prompt here")
            dropChoice = gr.inputs.Dropdown(
                ["text-ada-001", "text-davinci-002", "text-davinci-003", "gpt-3.5-turbo"],
                label="Model",
            )
            sliderChoice = gr.inputs.Slider(
                minimum=0.0, maximum=1.0, default=0.8, step=0.1, label="Temperature"
            )
            transcribe_btn = gr.Button(value="Transcribe")
        with gr.Column():
            script = gr.Textbox(label="text...")
            options = gr.Textbox(label="predictions...")

    # FIX: the original was missing the comma before `outputs=`, a SyntaxError.
    transcribe_btn.click(
        inference,
        inputs=[audio, promptText, dropChoice, sliderChoice],
        outputs=[script, options],
    )
    # FIX: "Dalmaion" was a typo for "Dalmatian" in the example text.
    # NOTE(review): these examples target `options`, which is used as an
    # output above — confirm that is intended.
    examples = gr.Examples(
        examples=["Sedan, Truck, SUV", "Dalmatian, Shepherd, Lab, Mutt"],
        inputs=[options],
    )

face.launch()