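# Pronunciation practice app: generate a short reading passage, transcribe the
# learner's recording, and return model-written pronunciation feedback.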
import os
import gradio as gr
import openai
import speech_recognition as sr
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")

def generate_text():
    # Ask the model for a short passage the learner can read aloud.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "Generate a short paragraph (2-3 sentences) for an English learner to read aloud."},
            {"role": "user", "content": "Create a practice text."}
        ]
    )
    return response.choices[0].message['content']

def get_pronunciation_feedback(original_text, transcription):
    # Compare the reference text with what the recognizer heard and ask the model for feedback.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a helpful pronunciation assistant. Compare the generated text with the user's transcription and provide feedback on how the user can improve their pronunciation. Single out specific words they pronounced incorrectly and give tips on how to improve; for example, 'schedule' can be pronounced as 'sked-jool'."},
            {"role": "user", "content": f"Original text: '{original_text}'\nTranscription: '{transcription}'\nProvide pronunciation feedback."}
        ]
    )
    return response.choices[0].message['content']

def transcribe_audio_realtime(audio):
    # Transcribe the recorded audio file using the Google Web Speech API via SpeechRecognition.
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio) as source:
        audio_data = recognizer.record(source)
    try:
        return recognizer.recognize_google(audio_data)
    except sr.UnknownValueError:
        return "Could not understand audio"
    except sr.RequestError:
        return "Could not request results from the speech recognition service"

def practice_pronunciation(audio, text_to_read):
    if not text_to_read:
        text_to_read = generate_text()
    transcription = transcribe_audio_realtime(audio)
    feedback = get_pronunciation_feedback(text_to_read, transcription)
    return text_to_read, transcription, feedback

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Pronunciation Practice Tool")
    gr.Markdown("Generate a text to read, then record yourself reading it. The system will provide pronunciation feedback.")

    with gr.Row():
        text_to_read = gr.Textbox(label="Text to Read")
        generate_button = gr.Button("Generate New Text")

    audio_input = gr.Audio(type="filepath", label="Record your voice")

    with gr.Row():
        transcription_output = gr.Textbox(label="Your Transcription")
        feedback_output = gr.Textbox(label="Pronunciation Feedback")

    submit_button = gr.Button("Submit")

    generate_button.click(generate_text, outputs=text_to_read)
    submit_button.click(
        practice_pronunciation,
        inputs=[audio_input, text_to_read],
        outputs=[text_to_read, transcription_output, feedback_output]
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()