simonraj committed
Commit 9a05e2a · Parent: 1fe0f9d

Update app.py

Files changed (1)
  1. app.py +8 -9
app.py CHANGED
@@ -2,7 +2,7 @@
 import gradio as gr
 import openai
 import os
-import HongWenData # Importing the HongWenData module
+import StTeresaData # Importing the StTeresaData module
 import base64
 
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@@ -12,7 +12,7 @@ def image_to_base64(img_path):
     with open(img_path, "rb") as img_file:
         return base64.b64encode(img_file.read()).decode('utf-8')
 
-img_base64 = image_to_base64("HongWenSBC.JPG")
+img_base64 = image_to_base64("StTeresaSBC.JPG")
 img_html = f'<img src="data:image/jpg;base64,{img_base64}" alt="SBC6" width="300" style="display: block; margin: auto;"/>'
 
 def predict(question_choice, audio):
@@ -22,16 +22,16 @@ def predict(question_choice, audio):
     message = transcript["text"] # This is the transcribed message from the audio input
 
     # Generate the system message based on the chosen question
-    strategy, explanation = HongWenData.strategy_text["TREES"]
+    strategy, explanation = StTeresaData.strategy_text["TREES"]
 
-    # Reference to the picture description from HongWenData.py
-    picture_description = HongWenData.description
+    # Reference to the picture description from StTeresaData.py
+    picture_description = StTeresaData.description
 
     # Determine whether to include the picture description based on the question choice
     picture_description_inclusion = f"""
     For the first question, ensure your feedback refers to the picture description provided:
     {picture_description}
-    """ if question_choice == HongWenData.questions[0] else ""
+    """ if question_choice == StTeresaData.questions[0] else ""
 
     # Construct the conversation with the system and user's message
     conversation = [
@@ -70,7 +70,7 @@ def predict(question_choice, audio):
 iface = gr.Interface(
     fn=predict,
     inputs=[
-        gr.Radio(HongWenData.questions, label="Choose a question", default=HongWenData.questions[0]), # Dropdown for question choice
+        gr.Radio(StTeresaData.questions, label="Choose a question", default=StTeresaData.questions[0]), # Dropdown for question choice
         gr.inputs.Audio(source="microphone", type="filepath") # Audio input
     ],
     outputs=gr.inputs.Textbox(), # Using inputs.Textbox as an output to make it editable
@@ -84,5 +84,4 @@ iface = gr.Interface(
     css="custom.css" # Link to the custom CSS file
 )
 
-iface.queue(max_size=99, concurrency_count=40).launch(debug=True)
-
+iface.queue(max_size=99, concurrency_count=40).launch(debug=True)
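
The diff only shows how the swapped-in data module is consumed, not its contents. Based on the attributes app.py references (questions, description, and strategy_text["TREES"] unpacked into a strategy/explanation pair), a minimal StTeresaData.py would look roughly like the sketch below; every string value is a placeholder, not the actual school-specific content.

# StTeresaData.py -- minimal sketch inferred from how app.py uses the module.
# All values below are placeholders standing in for the real content.

# Oral-exam questions shown in the gr.Radio component; the first is the picture-based one.
questions = [
    "Question 1: Describe the picture shown. (placeholder)",
    "Question 2: Personal experience follow-up. (placeholder)",
    "Question 3: Opinion follow-up. (placeholder)",
]

# Description of the stimulus picture, injected into the system prompt for question 1.
description = "Placeholder description of the StTeresaSBC picture."

# Answering strategies keyed by acronym; app.py unpacks a (strategy, explanation) tuple.
strategy_text = {
    "TREES": (
        "TREES",
        "Placeholder explanation of the TREES answering strategy.",
    ),
}

With a module shaped like this, the predict function can resolve strategy_text["TREES"], description, and questions[0] exactly as the updated app.py expects.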