mskov committed
Commit d80aaed · 1 Parent(s): f1dffe0

Update app.py

Files changed (1)
  1. app.py +6 -9
app.py CHANGED
@@ -50,7 +50,7 @@ def slider_logic(slider):
     return threshold
 
 # Create a Gradio interface with audio file and text inputs
-def classify_toxicity(audio_file, selected_sounds, viz_state, slider):
+def classify_toxicity(audio_file, selected_sounds, slider):
     # Transcribe the audio file using Whisper ASR
     # transcribed_text = pipe(audio_file)["text"]
 
@@ -93,11 +93,9 @@ def classify_toxicity(audio_file, selected_sounds, viz_state, slider):
         score = class_score_dict[selected_class_name]
         if score > threshold:
             print(f"Threshold exceeded for class '{selected_class_name}': Score = {score:.4f}")
-            viz_state = True
-            calm_image : gr.update(visible=True)
+            affirm = positive_affirmations()
         else:
-            viz_state = False
-            calm_image : gr.update(visible=False)
+            affirm = ""
 
 
 
@@ -110,7 +108,7 @@ def classify_toxicity(audio_file, selected_sounds, viz_state, slider):
     # miso_label_dict = {label: score for label, score in classify_anxiety[0].items()}
     holder2 = ""
     holder3= " "
-    return {class_name: score for class_name, score in zip(class_names, scores)}, viz_state
+    return {class_name: score for class_name, score in zip(class_names, scores)}, affirm
 
 def positive_affirmations():
     affirmations = [
@@ -123,7 +121,6 @@ def positive_affirmations():
     return selected_affirm
 
 with gr.Blocks() as iface:
-    show_state = gr.State(False)
     with gr.Column():
         miso_sounds = gr.CheckboxGroup(["chewing", "breathing", "mouthsounds", "popping", "sneezing", "yawning", "smacking", "sniffling", "panting"])
         sense_slider = gr.Slider(minimum=1, maximum=5, step=1.0, label="How readily do you want the tool to intervene? 1 = in extreme cases and 5 = at every opportunity")
@@ -133,8 +130,8 @@ with gr.Blocks() as iface:
     with gr.Column():
         # out_val = gr.Textbox()
         out_class = gr.Label()
-        calm_image = gr.Image(value="./hrv-breathing.gif", visible=False)
-        submit_btn.click(fn=classify_toxicity, inputs=[aud_input, miso_sounds, show_state, sense_slider], outputs=[out_class, calm_image])
+        out_text = gr.Textbox()
+        submit_btn.click(fn=classify_toxicity, inputs=[aud_input, miso_sounds, sense_slider], outputs=[out_class, out_text])
 
 
 iface.launch()
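
For context, a minimal sketch of the interface shape this commit moves to: classify_toxicity now returns a (label dict, affirmation string) pair, and the affirmation is rendered in a plain gr.Textbox instead of toggling an image via gr.State. The classifier is stubbed out here, the sound list is shortened, and the slider-to-threshold mapping and affirmation texts are placeholders not shown in this diff; only the Gradio plumbing mirrors the change.

import random
import gradio as gr

def positive_affirmations():
    # Placeholder affirmations; the real list lives elsewhere in app.py
    affirmations = ["You are safe.", "This moment will pass."]
    return random.choice(affirmations)

def classify_toxicity(audio_file, selected_sounds, slider):
    # Stub scores standing in for the real audio classifier output
    class_names = ["chewing", "breathing"]
    scores = [0.9, 0.1]
    threshold = 1.0 - slider / 5.0  # assumed slider-to-threshold mapping
    affirm = positive_affirmations() if max(scores) > threshold else ""
    return {name: score for name, score in zip(class_names, scores)}, affirm

with gr.Blocks() as iface:
    with gr.Column():
        miso_sounds = gr.CheckboxGroup(["chewing", "breathing"])
        sense_slider = gr.Slider(minimum=1, maximum=5, step=1.0, label="Sensitivity")
        aud_input = gr.Audio(type="filepath")
        submit_btn = gr.Button("Submit")
    with gr.Column():
        out_class = gr.Label()
        out_text = gr.Textbox()
    # Two outputs match the two return values of classify_toxicity
    submit_btn.click(fn=classify_toxicity,
                     inputs=[aud_input, miso_sounds, sense_slider],
                     outputs=[out_class, out_text])

iface.launch()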