# NOTE: "Spaces: Sleeping" — Hugging Face Space status header captured during
# extraction; not part of the program.
# Standard library
import base64
import os

# Third-party
import gradio as gr
import openai

# Local application data (question bank, answering strategies, description text)
from data4 import strategy_text, description, questions

# Read the API key from the environment so it is never hard-coded in source.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
def transcribe_audio(audio_file_path):
    """Transcribe a recorded audio file to text with OpenAI Whisper.

    Parameters
    ----------
    audio_file_path : str
        Path to the audio file produced by the Gradio microphone widget.

    Returns
    -------
    str
        The transcribed text returned by the "whisper-1" model.
    """
    # Context manager guarantees the file handle is closed even if the
    # API call raises (the original leaked the open handle).
    with open(audio_file_path, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript["text"]
def get_base64_image():
    """Read SBC4.jpg from the working directory and return it base64-encoded."""
    with open("SBC4.jpg", "rb") as image:
        encoded = base64.b64encode(image.read())
    return encoded.decode("utf-8")
def get_image_html():
    """Build an <img> tag embedding SBC4.jpg as an inline base64 data URI."""
    styling = (
        "display: block; margin-left: auto; margin-right: auto; "
        "padding-bottom: 15px; width: 300px;"
    )
    return f"<img src='data:image/jpeg;base64,{get_base64_image()}' style='{styling}'>"
# --- Module-level session state (mutated by intelligent_tutor) ---
# Index of the question currently being asked.
current_question_index = 0
# Number of answers received; every second answer advances the question.
user_input_counter = 0
# Transcribed answers so far, joined for the end-of-session summary prompt.
conversation_history = []
def intelligent_tutor(audio_file, provide_hints=False):
    """Transcribe a spoken answer and return tutor feedback from GPT-3.5.

    Parameters
    ----------
    audio_file : str
        Path to the recorded audio (Gradio microphone, type="filepath").
    provide_hints : bool
        When True, skip grading and return a strategy hint for the
        current question instead of model feedback.

    Returns
    -------
    tuple[str, str]
        (label showing the current question, HTML-wrapped feedback text).
    """
    global current_question_index
    global questions
    global user_input_counter
    global conversation_history

    input_text = transcribe_audio(audio_file)

    # Hint mode: return early with the answering strategy for this question.
    if provide_hints:
        hint_message = f"考虑使用 {strategy_text[current_question_index]} 策略来回答这个问题:'{questions[current_question_index]}'。"
        return f"请回答这个问题:{questions[current_question_index]}", hint_message

    conversation = [
        {
            "role": "system",
            "content": f"你是一名专家级的中文老师,正在指导一名学生。学生正在回答这个问题:'{questions[current_question_index]}'。根据他们的回答,为他们提供直接的反馈,以帮助他们提高口语技能。强调他们的优点,建议改进的地方,并指导他们如何使用 {strategy_text[current_question_index]} 策略更好地回答。反馈应该用第二人称,直接向学生发言 请用简单的话给学生建议,帮助他们说得更好。"
        },
        {"role": "user", "content": input_text}
    ]

    # Keep every transcribed answer for the end-of-session summary.
    conversation_history.append(input_text)

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=conversation,
        max_tokens=400
    )
    if not response.choices:
        return "No response from the model.", ""

    text_response = response.choices[0]['message']['content'].strip()
    text_response = text_response.replace('\n', '<br>')

    # Each question gets two attempts; advance after every second answer.
    user_input_counter += 1
    if user_input_counter % 2 == 0:
        if current_question_index + 1 < len(questions):
            current_question_index += 1
            next_question = questions[current_question_index]
            text_response += f"\n\nNext question ({current_question_index + 1}): {next_question}"
        else:
            # All questions answered: replace the per-answer feedback with a
            # summary over the whole conversation history.
            summary_prompt = {
                "role": "system",
                "content": f"你是一名中文老师,正在帮助新加坡小六的学生。学生正在回答这个问题:'{questions[current_question_index]}'。请用简单的词汇和句子给出反馈,帮助学生改进。"
            }
            summary_conversation = [summary_prompt, {"role": "user", "content": " ".join(conversation_history)}]
            summary_response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=summary_conversation,
                max_tokens=600  # larger limit for the detailed summary
            )
            if not summary_response.choices:
                return "No response from the model.", ""
            text_response = summary_response.choices[0]['message']['content'].strip()
            text_response = text_response.replace('\n', '<br>')

    # Scrollable container so long feedback does not stretch the page.
    wrapped_output_text = f'<div style="height: 300px; overflow-y: scroll;">{text_response}</div>'
    return f"Current Question: {questions[current_question_index]}", wrapped_output_text
# Gradio UI: microphone + checkbox in, two HTML panes (question, feedback) out.
iface = gr.Interface(
    fn=intelligent_tutor,
    inputs=[
        gr.Audio(source="microphone", type="filepath", label="录音", sampling_rate=16000),
        # NOTE(review): label reads "提供对话总结" (provide a summary) but the
        # wired parameter `provide_hints` returns a strategy hint — confirm
        # which behavior is intended.
        gr.inputs.Checkbox(label="提供对话总结"),
    ],
    outputs=[
        gr.outputs.HTML(label="问题"),
        gr.outputs.HTML(label="输出文本"),
    ],
    title="口语教练",
    description=(get_image_html() +
                 "<br> " + questions[0] +
                 "<br>每个问题有两次尝试机会。<br>" +
                 "<b>请在第一个问题后的输出屏幕上回答显示的问题。</b>"),
)
iface.launch(share=False)