# app.py
import gradio as gr
import openai
import os
import RadinMas  # Local module providing questions, description, and generate_system_message
import base64
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
def image_to_base64(img_path):
    with open(img_path, "rb") as img_file:
        return base64.b64encode(img_file.read()).decode('utf-8')
img_base64 = image_to_base64("RadinMasSBC.JPG")
img_html = f'<img src="data:image/jpeg;base64,{img_base64}" alt="SBC6" width="300" style="display: block; margin: auto;"/>'
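# The image is inlined as a base64 data URI so the HTML in the interface
# description renders without the Space having to serve the file separately.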
def predict(question_choice, audio):
    # Transcribe the audio using Whisper
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    message = transcript["text"]  # The transcribed message from the audio input
    # Determine the question number based on question_choice
    question_number = RadinMas.questions.index(question_choice) + 1
    # Generate the system message based on the question number
    system_message = RadinMas.generate_system_message(question_number)
    # Picture description from RadinMas.py
    picture_description = RadinMas.description
    # Include the picture description only when the first question is selected
    picture_description_inclusion = f"""
    For the first question, ensure your feedback refers to the picture description provided:
    {picture_description}
    """ if question_choice == RadinMas.questions[0] else ""
    # Construct the conversation with the system and user's messages
    conversation = [
        {
            "role": "system",
            "content": f"""
            You are an expert English Language Teacher in a Singapore Primary school, directly guiding a Primary 6 student in Singapore.
            The student is answering the question: '{question_choice}'.
            {picture_description_inclusion}
            {system_message}
            """
        },
        {"role": "user", "content": message}
    ]
    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=conversation,
        temperature=0.6,
        max_tokens=500,  # Limiting the response to 500 tokens
        stream=True
    )
    partial_message = ""
    for chunk in response:
        # The first streamed delta may carry only the role and the final one may
        # be empty, so read 'content' defensively instead of indexing it directly
        content = chunk['choices'][0]['delta'].get('content', '')
        if content:
            partial_message += content
            yield partial_message
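# Because predict is a generator, Gradio streams each yielded partial_message
# into the output component (the queue enabled below is required for this),
# so the feedback appears incrementally rather than all at once.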
# Gradio Interface
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Radio(RadinMas.questions, label="Choose a question", value=RadinMas.questions[0]),  # Radio buttons for question choice
        gr.inputs.Audio(source="microphone", type="filepath")  # Audio input, passed to predict as a file path
    ],
    outputs=gr.inputs.Textbox(),  # Using inputs.Textbox as an output to make it editable
    description=img_html + '''
    <div style="text-align: center; font-size: medium;">
        <a href="https://forms.moe.edu.sg/forms/J0lmkJ" target="_blank">
            📝 Click here to provide feedback on the initial prototype of the Oral Coach 📝
        </a>
    </div>
    ''',
    css="custom.css"  # Link to the custom CSS file
)
iface.queue(max_size=99, concurrency_count=40).launch(debug=True)
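# Note: this script targets the pre-1.0 openai SDK (openai.Audio / openai.ChatCompletion)
# and Gradio 3.x (gr.inputs.*, queue(concurrency_count=...)); both APIs were removed in
# later major versions. A requirements.txt pinning those majors, e.g. openai==0.28.*
# and gradio==3.*, is assumed here.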