Update app.py
app.py CHANGED
@@ -2,7 +2,7 @@
 import gradio as gr
 import openai
 import os
-import
+import RiverValleyData # Importing the RiverValleyData module
 import base64
 
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@@ -12,7 +12,7 @@ def image_to_base64(img_path):
     with open(img_path, "rb") as img_file:
         return base64.b64encode(img_file.read()).decode('utf-8')
 
-img_base64 = image_to_base64("
+img_base64 = image_to_base64("RiverValleySBC.JPG")
 img_html = f'<img src="data:image/jpg;base64,{img_base64}" alt="SBC6" width="300" style="display: block; margin: auto;"/>'
 
 def predict(question_choice, audio):
@@ -22,16 +22,16 @@ def predict(question_choice, audio):
     message = transcript["text"] # This is the transcribed message from the audio input
 
     # Generate the system message based on the chosen question
-    strategy, explanation =
+    strategy, explanation = RiverValleyData.strategy_text["SEP"]
 
-    # Reference to the picture description from
-    picture_description =
+    # Reference to the picture description from RiverValleyData.py
+    picture_description = RiverValleyData.description
 
     # Determine whether to include the picture description based on the question choice
     picture_description_inclusion = f"""
     For the first question, ensure your feedback refers to the picture description provided:
     {picture_description}
-    """ if question_choice ==
+    """ if question_choice == RiverValleyData.questions[0] else ""
 
     # Construct the conversation with the system and user's message
     conversation = [
@@ -51,12 +51,11 @@ def predict(question_choice, audio):
         {"role": "user", "content": message}
     ]
 
-
     response = openai.ChatCompletion.create(
         model='gpt-3.5-turbo',
         messages=conversation,
         temperature=0.6,
-        max_tokens=
+        max_tokens=500, # Limiting the response to 500 tokens
         stream=True
     )
@@ -70,7 +69,7 @@ def predict(question_choice, audio):
 iface = gr.Interface(
     fn=predict,
     inputs=[
-        gr.Radio(
+        gr.Radio(RiverValleyData.questions, label="Choose a question", default=RiverValleyData.questions[0]), # Dropdown for question choice
         gr.inputs.Audio(source="microphone", type="filepath") # Audio input
     ],
     outputs=gr.inputs.Textbox(), # Using inputs.Textbox as an output to make it editable
@@ -81,4 +80,3 @@ iface = gr.Interface(
 iface.queue(max_size=99, concurrency_count=40).launch(debug=True)
 
 
-
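For context, the new import pulls in a RiverValleyData module whose contents are not part of this commit; the hunks reference three of its attributes (questions, strategy_text, description). A minimal sketch of the interface the module would need to expose for app.py to run, with every value below a hypothetical placeholder rather than the Space's actual data:

# RiverValleyData.py (hypothetical sketch; the real module is not shown in this commit)

# Question prompts offered in the gr.Radio input; questions[0] gates the
# picture-description block inside predict().
questions = [
    "Describe the picture in as much detail as you can.",    # placeholder
    "What could the people in the picture do differently?",  # placeholder
]

# Maps a strategy key to a (strategy, explanation) pair, matching the
# unpacking `strategy, explanation = RiverValleyData.strategy_text["SEP"]`.
strategy_text = {
    "SEP": (
        "State, Elaborate, Personalise",  # placeholder strategy name
        "State your answer, elaborate with details, then personalise it.",  # placeholder
    ),
}

# Free-text description of RiverValleySBC.JPG, interpolated into the system prompt.
description = "A riverside scene used for the picture-description task."  # placeholder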
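Because the request is created with stream=True, the pre-1.0 openai-python API returns an iterator of delta chunks rather than a single completion; the code that accumulates them falls outside the hunks shown here. A sketch of the usual pattern, assuming predict() is a generator that yields partial text to Gradio:

# Sketch only: consuming the streamed response under the legacy (pre-1.0)
# openai-python API; the Space's actual loop is not visible in this diff.
chat_history = ""
for chunk in response:
    delta = chunk["choices"][0]["delta"]  # incremental payload for this chunk
    if "content" in delta:
        chat_history += delta["content"]
        yield chat_history                # Gradio re-renders each partial string

Yielding the growing string works here because the interface runs with queuing enabled (iface.queue(...)), which is what lets Gradio stream generator output to the textbox.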