simonraj committed on
Commit
da0780f
·
1 Parent(s): 1358da4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -16
app.py CHANGED
@@ -1,8 +1,8 @@
1
- #app.py
2
  import gradio as gr
3
  import openai
4
  import os
5
- import HongWenData # Importing the HongWenData module
6
  import base64
7
 
8
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@@ -12,9 +12,9 @@ def image_to_base64(img_path):
12
  with open(img_path, "rb") as img_file:
13
  return base64.b64encode(img_file.read()).decode('utf-8')
14
 
15
- img_base64 = image_to_base64("HongWenSBC.JPG")
16
  img_html = f'<img src="data:image/jpg;base64,{img_base64}" alt="SBC6" width="300" style="display: block; margin: auto;"/>'
17
-
18
  def predict(question_choice, audio):
19
  # Transcribe the audio using Whisper
20
  with open(audio, "rb") as audio_file:
@@ -22,16 +22,16 @@ def predict(question_choice, audio):
22
  message = transcript["text"] # This is the transcribed message from the audio input
23
 
24
  # Generate the system message based on the chosen question
25
- strategy, explanation = HongWenData.strategy_text["TREES"]
26
 
27
- # Reference to the picture description from HongWenData.py
28
- picture_description = HongWenData.description
29
 
30
  # Determine whether to include the picture description based on the question choice
31
  picture_description_inclusion = f"""
32
  For the first question, ensure your feedback refers to the picture description provided:
33
  {picture_description}
34
- """ if question_choice == HongWenData.questions[0] else ""
35
 
36
  # Construct the conversation with the system and user's message
37
  conversation = [
@@ -41,22 +41,17 @@ def predict(question_choice, audio):
41
  You are an expert English Language Teacher in a Singapore Primary school, directly guiding a Primary 6 student in Singapore.
42
  The student is answering the question: '{question_choice}'.
43
  {picture_description_inclusion}
44
- Point out areas they did well and where they can improve, following the {strategy}.
45
- Encourage the use of sophisticated vocabulary and expressions.
46
- For the second and third questions, the picture is not relevant, so the student should not refer to it in their response.
47
- {explanation}
48
- The feedback should be in second person, addressing the student directly.
49
  """
50
  },
51
  {"role": "user", "content": message}
52
  ]
53
 
54
-
55
  response = openai.ChatCompletion.create(
56
  model='gpt-3.5-turbo',
57
  messages=conversation,
58
  temperature=0.6,
59
- max_tokens=1000, # Limiting the response to 1000 tokens
60
  stream=True
61
  )
62
 
@@ -70,7 +65,7 @@ def predict(question_choice, audio):
70
  iface = gr.Interface(
71
  fn=predict,
72
  inputs=[
73
- gr.Radio(HongWenData.questions, label="Choose a question", default=HongWenData.questions[0]), # Dropdown for question choice
74
  gr.inputs.Audio(source="microphone", type="filepath") # Audio input
75
  ],
76
  outputs=gr.inputs.Textbox(), # Using inputs.Textbox as an output to make it editable
 
1
+ # app.py
2
  import gradio as gr
3
  import openai
4
  import os
5
+ import RadinMas # Importing the RadinMas module
6
  import base64
7
 
8
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 
12
  with open(img_path, "rb") as img_file:
13
  return base64.b64encode(img_file.read()).decode('utf-8')
14
 
15
+ img_base64 = image_to_base64("RadinMasSBC.JPG")
16
  img_html = f'<img src="data:image/jpg;base64,{img_base64}" alt="SBC6" width="300" style="display: block; margin: auto;"/>'
17
+
18
  def predict(question_choice, audio):
19
  # Transcribe the audio using Whisper
20
  with open(audio, "rb") as audio_file:
 
22
  message = transcript["text"] # This is the transcribed message from the audio input
23
 
24
  # Generate the system message based on the chosen question
25
+ system_message = RadinMas.generate_system_message(question_choice)
26
 
27
+ # Reference to the picture description from RadinMas.py
28
+ picture_description = RadinMas.description
29
 
30
  # Determine whether to include the picture description based on the question choice
31
  picture_description_inclusion = f"""
32
  For the first question, ensure your feedback refers to the picture description provided:
33
  {picture_description}
34
+ """ if question_choice == RadinMas.questions[0] else ""
35
 
36
  # Construct the conversation with the system and user's message
37
  conversation = [
 
41
  You are an expert English Language Teacher in a Singapore Primary school, directly guiding a Primary 6 student in Singapore.
42
  The student is answering the question: '{question_choice}'.
43
  {picture_description_inclusion}
44
+ {system_message}
 
 
 
 
45
  """
46
  },
47
  {"role": "user", "content": message}
48
  ]
49
 
 
50
  response = openai.ChatCompletion.create(
51
  model='gpt-3.5-turbo',
52
  messages=conversation,
53
  temperature=0.6,
54
+ max_tokens=500, # Limiting the response to 500 tokens
55
  stream=True
56
  )
57
 
 
65
  iface = gr.Interface(
66
  fn=predict,
67
  inputs=[
68
+ gr.Radio(RadinMas.questions, label="Choose a question", default=RadinMas.questions[0]), # Dropdown for question choice
69
  gr.inputs.Audio(source="microphone", type="filepath") # Audio input
70
  ],
71
  outputs=gr.inputs.Textbox(), # Using inputs.Textbox as an output to make it editable