import difflib

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face Inference client used to generate the reference sentence
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Load the speech-to-text model from Hugging Face
s2t = gr.Interface.load('huggingface/facebook/s2t-medium-librispeech-asr')


def generate_text_with_huggingface(system_message, max_tokens, temperature, top_p):
    """
    Generate text using the Hugging Face Inference API based on the system
    message, max tokens, temperature, and top-p.
    """
    messages = [{"role": "system", "content": system_message}]
    response = ""

    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # skip empty/None deltas in the stream
            response += token

    return response.strip()  # Return the generated text


def pronunciation_feedback(transcription, reference_text):
    """
    Provide feedback on pronunciation based on differences between the
    transcription and the reference (expected) text.
    """
    diff = difflib.ndiff(reference_text.split(), transcription.split())

    # Identify words that are incorrect or missing in the transcription
    errors = [word for word in diff if word.startswith('- ')]

    if errors:
        feedback = "Mispronounced words: " + ', '.join(error[2:] for error in errors)
    else:
        feedback = "Great job! Your pronunciation is spot on."

    return feedback


def transcribe_and_feedback(audio, system_message, max_tokens, temperature, top_p):
    """
    Transcribe the audio and provide pronunciation feedback using the generated text.
    """
    # Generate the reference text using the Hugging Face Inference API
    reference_text = generate_text_with_huggingface(system_message, max_tokens, temperature, top_p)

    # Transcribe the audio using the speech-to-text model
    transcription = s2t(audio)

    # Provide pronunciation feedback based on the transcription and the generated text
    feedback = pronunciation_feedback(transcription, reference_text)

    return transcription, feedback, reference_text


# Gradio interface
demo = gr.Interface(
    fn=transcribe_and_feedback,  # The function that transcribes audio and provides feedback
    inputs=[
        gr.Audio(type="filepath", label="Record Audio"),  # Microphone input for recording
        gr.Textbox(value="Please read a simple sentence.", label="System message"),  # Prompt used to generate the reference text
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),  # Max token length for the generated text
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),  # Sampling temperature for text generation
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),  # Nucleus sampling cutoff
    ],
    outputs=[
        gr.Textbox(label="Transcription"),  # Transcription of the audio
        gr.Textbox(label="Pronunciation Feedback"),  # Feedback on pronunciation
        gr.Textbox(label="Generated Text (What You Were Supposed to Read)"),  # Reference text generated by the API
    ],
    title="Speech-to-Text with Pronunciation Feedback",
    description="Record an audio sample and the system will transcribe it, "
                "compare your transcription to the generated text, and give pronunciation feedback.",
    live=True,  # Real-time interaction
)

# Enable queuing and launch the app
demo.queue().launch(show_error=True)