# app.py
import gradio as gr
import openai
import os
import data6  # Local module providing the question list and the strategy/explanation pairs
import base64

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 
openai.api_key = OPENAI_API_KEY

def image_to_base64(img_path):
    with open(img_path, "rb") as img_file:
        return base64.b64encode(img_file.read()).decode('utf-8')

img_base64 = image_to_base64("SBC6.jpg")
img_html = f'<img src="data:image/jpeg;base64,{img_base64}" alt="SBC6" width="300" style="display: block; margin: auto;"/>'

def predict(question_choice, audio):
    # Transcribe the audio using Whisper
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    message = transcript["text"]  # This is the transcribed message from the audio input
    
    # Look up the strategy and explanation for the chosen question
    current_question_index = data6.questions.index(question_choice)
    strategy, explanation = data6.strategy_text[current_question_index]  # (strategy name, explanation) pair

    # Construct the conversation: system prompt (feedback instructions) + the student's transcribed answer
    conversation = [
        {
            "role": "system",
            "content": f"You are an expert English Language Teacher in a Singapore Primary school, directly guiding a Primary 6 student in Singapore. The student is answering the question: '{question_choice}'. Point out areas they did well and where they can improve. Then, provide a suggested response using the {strategy} strategy. Encourage the use of sophisticated vocabulary and expressions. For the second and third questions, the picture is not relevant, so the student should not refer to it in their response. {explanation} The feedback should be in second person, addressing the student directly."
        },
        {"role": "user", "content": message}
    ]

    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=conversation,
        temperature=0.7,
        max_tokens=500,  # Limiting the response to 500 tokens
        stream=True
    )

    # Stream the model's reply back to the UI as it is generated, skipping empty delta chunks
    partial_message = ""
    for chunk in response:
        if len(chunk['choices'][0]['delta']) != 0:
            partial_message += chunk['choices'][0]['delta']['content']
            yield partial_message


def get_image_html():
    return "![](SBC6.jpg)"  # Markdown syntax to embed the image (not currently used; the image is shown via description=img_html instead)


# Gradio Interface
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Radio(data6.questions, label="Choose a question", value=data6.questions[0]),  # Radio buttons for question choice
        gr.Audio(source="microphone", type="filepath")  # Audio input from the microphone
    ],
    outputs=gr.Textbox(interactive=True),  # Editable textbox so the streamed feedback can be adjusted
    description=img_html,
    css="custom.css"  # Link to the custom CSS file
)
iface.queue().launch()
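
For reference, app.py only assumes that data6 provides two parallel lists, questions and strategy_text, where each strategy_text entry is a (strategy name, explanation) pair unpacked in predict(). The real contents of data6.py are not shown here, so the sketch below uses placeholder questions and strategy names purely to illustrate the expected shape:

# data6.py (hypothetical sketch; placeholder content, not the actual module)
questions = [
    "Question 1: Look at the picture (SBC6.jpg) and describe what you see.",
    "Question 2: Placeholder follow-up question (picture not relevant).",
    "Question 3: Placeholder follow-up question (picture not relevant).",
]

# One (strategy name, explanation) pair per question
strategy_text = [
    ("Describe-Explain", "Placeholder: guide the student to describe the scene and explain what might be happening."),
    ("Point-Reason-Example", "Placeholder: guide the student to make a point, give a reason, and support it with an example."),
    ("Past-Present-Future", "Placeholder: guide the student to talk about past experience, the present, and future plans."),
]

assert len(questions) == len(strategy_text)  # predict() indexes both lists with the same index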