Update app.py
app.py CHANGED
@@ -116,9 +116,11 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         # plot.update(x=classification_df["labels"], y=classification_df["scores"])
         if toxicity_score > threshold:
             print("threshold exceeded!! Launch intervention")
-            intervention_output(intervention)
+            intervene = intervention_output(intervention)
+        else:
+            intervene = " "

-        return toxicity_score, classification_output, transcribed_text
+        return toxicity_score, classification_output, transcribed_text, intervene
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
         threshold = slider_logic(slider)
@@ -156,16 +158,17 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         print("threshold exceeded!! Launch intervention")
     return classify_anxiety
 def intervention_output(intervene):
-    if intervene== "Audio File":
-        out_aud : gr.update(value="calm.wav", visible=True, autoplay=True)
+    if intervene == "Audio File":
         print("audio updated")
+        return { out_aud : gr.update(value="calm.wav", visible=True, autoplay=True) }
     elif intervene == "Therapy App":
-
+        print("therapy app updated")
+        return { out_img : gr.update(value="hrv-breathing.gif", visible=True)}
     elif intervene == "Text Message":
         phrase = positive_affirmations()
-        out_text : gr.update(visible=True, value=phrase)
+        return { out_text : gr.update(visible=True, value=phrase) }
     else:
-
+        return " "

 def positive_affirmations():
     affirmations = [
@@ -192,9 +195,10 @@ with gr.Blocks() as iface:
         with gr.Column():
             out_val = gr.Textbox()
             out_class = gr.Textbox()
+            # with gr.Column(visible=False) as output_col:
             out_text = gr.Textbox(visible=False)
-            out_img = gr.
+            out_img = gr.Image(value="hrv-breathing.gif", visible=False)
             out_aud = gr.Audio(value="calm.wav", visible=False)
-    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text])
+    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text, out_text])

 iface.launch()
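
The rewritten intervention_output builds { component: gr.update(...) } dictionaries. In Gradio, an event handler may return such a dictionary keyed by output components, and only the components named in the dict are changed, which is how a hidden widget can be revealed on demand. Below is a minimal, self-contained sketch of that pattern; the names (demo, choice, btn, intervene_demo) are illustrative and not taken from app.py, and it assumes a Gradio version that exposes gr.update.

import gradio as gr

with gr.Blocks() as demo:
    choice = gr.Radio(["Audio File", "Text Message"], label="Intervention")
    btn = gr.Button("Run")
    # outputs start hidden, mirroring out_text / out_aud in app.py
    out_text = gr.Textbox(visible=False)
    out_aud = gr.Audio(visible=False)

    def intervene_demo(kind):
        # return a dict keyed by output components; only the listed components are updated
        if kind == "Audio File":
            return {out_aud: gr.update(visible=True),
                    out_text: gr.update(visible=False)}
        return {out_text: gr.update(visible=True, value="You've got this."),
                out_aud: gr.update(visible=False)}

    # any component that can appear as a dict key must be listed in outputs
    btn.click(fn=intervene_demo, inputs=choice, outputs=[out_text, out_aud])

demo.launch()

Returning updates keyed by component avoids having to return a value for every output in a fixed order, which is convenient when only one of several hidden widgets (out_aud, out_img, or out_text in the Space) should appear for a given intervention.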