mskov committed
Commit 71fe961 · 1 Parent(s): 577ccea

Update app.py

Files changed (1): app.py (+7, -8)
app.py CHANGED
@@ -116,11 +116,10 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         # plot.update(x=classification_df["labels"], y=classification_df["scores"])
         if toxicity_score > threshold:
             print("threshold exceeded!! Launch intervention")
-            output_col = intervention_output(intervention)
-        else:
-            intervene = " "
-
-        return toxicity_score, classification_output, transcribed_text, output_col
+            holder = intervention_output(intervention)
+
+        print("output column: ", holder)
+        return toxicity_score, classification_output, transcribed_text, holder
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
         threshold = slider_logic(slider)
@@ -197,9 +196,9 @@ with gr.Blocks() as iface:
     out_val = gr.Textbox()
     out_class = gr.Textbox()
     with gr.Column(visible=False) as output_col:
-        out_text = gr.Textbox(visible=False)
-        out_img = gr.Image(value="hrv-breathing.gif", visible=False)
-        out_aud = gr.Audio(value="calm.wav", visible=False)
+        out_text = gr.Textbox()
+        out_img = gr.Image(value="hrv-breathing.gif")
+        out_aud = gr.Audio(value="calm.wav")
     submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text, output_col])
 
 iface.launch()
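
The substance of the change: when the toxicity threshold is exceeded, classify_toxicity now stores the result of intervention_output(intervention) in holder and returns it as its fourth output, which submit_btn.click routes to the hidden output_col column; the components nested inside that column no longer carry their own visible=False. Below is a minimal, hypothetical sketch of that pattern, assuming intervention_output yields a Gradio visibility update for the column; the names fake_classify, THRESHOLD, and the demo widgets are illustrative and not taken from app.py:

import gradio as gr

# Everything below is a self-contained illustration; fake_classify, THRESHOLD,
# and the demo widgets are stand-ins, not code from app.py.
THRESHOLD = 0.5  # stand-in for the slider-derived threshold

def fake_classify(text):
    # Toy scoring so the demo is runnable: longer messages count as more "toxic".
    score = min(len(text) / 100, 1.0)
    # Returning gr.update(visible=...) for a Column shows or hides every
    # component nested inside it in one step.
    return score, gr.update(visible=score > THRESHOLD)

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Message")
    out_score = gr.Number(label="Toxicity score")
    with gr.Column(visible=False) as output_col:
        gr.Markdown("Threshold exceeded: take a breath before sending this.")
    gr.Button("Check").click(fn=fake_classify, inputs=inp, outputs=[out_score, output_col])

demo.launch()

Toggling visibility on the Column alone means one returned update reveals the text, image, and audio together, which appears to be why the per-component visible=False flags were dropped in this commit.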