gngpostalsrvc committed
Commit bccf3ee · 1 Parent(s): b51aacd

updated application file

Files changed (1)
  1. app.py +2 -13
app.py CHANGED
@@ -1,21 +1,15 @@
 # -*- coding: utf-8 -*-
 
-import sys
 import crepe
-import spacy
 import librosa
-import subprocess
 import gradio as gr
 import pandas as pd
 from transformers import pipeline, RobertaTokenizerFast, TFRobertaForSequenceClassification
 
-subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://huggingface.co/spacy/en_core_web_sm/resolve/main/en_core_web_sm-any-py3-none-any.whl'])
-
 asr = pipeline('automatic-speech-recognition', model='facebook/wav2vec2-large-960h-lv60-self')
 tokenizer = RobertaTokenizerFast.from_pretrained("arpanghoshal/EmoRoBERTa")
 model = TFRobertaForSequenceClassification.from_pretrained("arpanghoshal/EmoRoBERTa")
 emo = pipeline('sentiment-analysis', model='arpanghoshal/EmoRoBERTa')
-lang_model = spacy.load("spacy/en_core_web_sm")
 
 def transcribe_and_describe(audio):
 
@@ -23,10 +17,6 @@ def transcribe_and_describe(audio):
 
     text = asr(audio)['text']
 
-    doc = lang_model(text)
-    filler_words = [token.text for token in doc if token.pos_ == 'INTJ']
-    filler_word_pr = len(filler_words) / len(doc)
-
     flatness = pd.DataFrame(librosa.feature.spectral_flatness(y=audio).T).describe().T
     loudness = pd.DataFrame(librosa.feature.rms(audio).T).describe().T
     time, frequency, confidence, activation = crepe.predict(audio, sr)
@@ -43,15 +33,14 @@ def transcribe_and_describe(audio):
 
     emotion = emo(text)[0]['label']
 
-    return (text, filler_word_pr, words_per_minute, mean_pitch, pitch_std, mean_volume, volume_std, mean_spectral_flatness, spectral_flatness_std, emotion)
+    return (text, words_per_minute, mean_pitch, pitch_std, mean_volume, volume_std, mean_spectral_flatness, spectral_flatness_std, emotion)
 
 gr.Interface(
     fn=transcribe_and_describe,
     inputs=gr.Audio(source="microphone", type="filepath"),
     outputs=[
-        gr.Text(label="Transcription"),
+        gr.Text(label="Transcription"),
         gr.Text(label="Rate of Speech (WPM)"),
-        gr.Text(label="Filler Word Percent"),
         gr.Text(label="Mean Pitch (Hz)"),
         gr.Text(label="Pitch Variation (Hz)"),
         gr.Text(label="Mean Volume (W)"),