David Driscoll
committed on
Commit
·
e421b40
1
Parent(s):
565e309
Update app
Browse files
app.py
CHANGED
@@ -131,7 +131,7 @@ custom_css = """
|
|
131 |
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
|
132 |
body {
|
133 |
background-color: #0e0e0e;
|
134 |
-
color: #
|
135 |
font-family: 'Orbitron', sans-serif;
|
136 |
margin: 0;
|
137 |
padding: 0;
|
@@ -145,7 +145,7 @@ body {
|
|
145 |
}
|
146 |
.gradio-title {
|
147 |
font-size: 2.5em;
|
148 |
-
color: #
|
149 |
text-align: center;
|
150 |
margin-bottom: 0.2em;
|
151 |
}
|
@@ -153,6 +153,7 @@ body {
|
|
153 |
font-size: 1.2em;
|
154 |
text-align: center;
|
155 |
margin-bottom: 1em;
|
|
|
156 |
}
|
157 |
"""
|
158 |
|
@@ -164,7 +165,8 @@ posture_interface = gr.Interface(
|
|
164 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Posture"),
|
165 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Posture Analysis")],
|
166 |
title="Posture Analysis",
|
167 |
-
description="Detects your posture using MediaPipe."
|
|
|
168 |
)
|
169 |
|
170 |
emotion_interface = gr.Interface(
|
@@ -172,7 +174,8 @@ emotion_interface = gr.Interface(
|
|
172 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
|
173 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Emotion Analysis")],
|
174 |
title="Emotion Analysis",
|
175 |
-
description="Detects facial emotions using FER."
|
|
|
176 |
)
|
177 |
|
178 |
objects_interface = gr.Interface(
|
@@ -180,7 +183,8 @@ objects_interface = gr.Interface(
|
|
180 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture the Scene"),
|
181 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Object Detection")],
|
182 |
title="Object Detection",
|
183 |
-
description="Detects objects using a pretrained Faster R-CNN."
|
|
|
184 |
)
|
185 |
|
186 |
faces_interface = gr.Interface(
|
@@ -188,7 +192,8 @@ faces_interface = gr.Interface(
|
|
188 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
|
189 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Face Detection")],
|
190 |
title="Face Detection",
|
191 |
-
description="Detects faces using MediaPipe."
|
|
|
192 |
)
|
193 |
|
194 |
# -----------------------------
|
|
|
131 |
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
|
132 |
body {
|
133 |
background-color: #0e0e0e;
|
134 |
+
color: #ffffff;
|
135 |
font-family: 'Orbitron', sans-serif;
|
136 |
margin: 0;
|
137 |
padding: 0;
|
|
|
145 |
}
|
146 |
.gradio-title {
|
147 |
font-size: 2.5em;
|
148 |
+
color: #ffffff;
|
149 |
text-align: center;
|
150 |
margin-bottom: 0.2em;
|
151 |
}
|
|
|
153 |
font-size: 1.2em;
|
154 |
text-align: center;
|
155 |
margin-bottom: 1em;
|
156 |
+
color: #ffffff;
|
157 |
}
|
158 |
"""
|
159 |
|
|
|
165 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Posture"),
|
166 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Posture Analysis")],
|
167 |
title="Posture Analysis",
|
168 |
+
description="Detects your posture using MediaPipe.",
|
169 |
+
live=True
|
170 |
)
|
171 |
|
172 |
emotion_interface = gr.Interface(
|
|
|
174 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
|
175 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Emotion Analysis")],
|
176 |
title="Emotion Analysis",
|
177 |
+
description="Detects facial emotions using FER.",
|
178 |
+
live=True
|
179 |
)
|
180 |
|
181 |
objects_interface = gr.Interface(
|
|
|
183 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture the Scene"),
|
184 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Object Detection")],
|
185 |
title="Object Detection",
|
186 |
+
description="Detects objects using a pretrained Faster R-CNN.",
|
187 |
+
live=True
|
188 |
)
|
189 |
|
190 |
faces_interface = gr.Interface(
|
|
|
192 |
inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
|
193 |
outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Face Detection")],
|
194 |
title="Face Detection",
|
195 |
+
description="Detects faces using MediaPipe.",
|
196 |
+
live=True
|
197 |
)
|
198 |
|
199 |
# -----------------------------
|