AI-Edify committed on
Commit c562fea · verified · 1 Parent(s): 16fbeae

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -1,13 +1,13 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 import difflib
+from transformers import pipeline # Import transformers to load the speech-to-text model
 
 # Load Hugging Face Inference client
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-# Load the speech-to-text model from Hugging Face
-s2t = gr.Interface.load('huggingface/facebook/s2t-medium-librispeech-asr')
-
+# Load the speech-to-text model using transformers pipeline
+s2t_model = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-960h-lv60-self")
 
 def generate_text_with_huggingface(system_message, max_tokens, temperature, top_p):
     """
@@ -57,7 +57,7 @@ def transcribe_and_feedback(audio, system_message, max_tokens, temperature, top_
     reference_text = generate_text_with_huggingface(system_message, max_tokens, temperature, top_p)
 
     # Transcribe the audio using the speech-to-text model
-    transcription = s2t(audio)
+    transcription = s2t_model(audio)["text"]
 
     # Provide pronunciation feedback based on the transcription and the generated text
     feedback = pronunciation_feedback(transcription, reference_text)
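
A minimal sketch (not part of the commit) of how the updated pieces fit together: the ASR pipeline introduced above returns a dict with a "text" key, which is why the new line indexes ["text"]. The pronunciation_feedback helper and the sample.wav path below are hypothetical stand-ins, inferred only from the difflib import and the call in app.py; the real implementation may differ.

import difflib
from transformers import pipeline

# Same ASR pipeline the commit introduces
s2t_model = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-960h-lv60-self")

def pronunciation_feedback(transcription, reference_text):
    # Hypothetical feedback: report how closely the transcription matches the reference text
    ratio = difflib.SequenceMatcher(None, transcription.lower(), reference_text.lower()).ratio()
    return f"Similarity to reference text: {ratio:.0%}"

# The pipeline accepts a path to an audio file (or a raw waveform) and
# returns {"text": "..."} - hence the ["text"] added in this commit.
transcription = s2t_model("sample.wav")["text"]
print(pronunciation_feedback(transcription, "The quick brown fox jumps over the lazy dog"))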