design of dashboard
- app.py +10 -14
- data_mnist +1 -1
- utils.py +1 -0
app.py CHANGED

@@ -423,18 +423,17 @@ def main():
 
 
 image_input =gr.inputs.Image(source="canvas",shape=(28,28),invert_colors=True,image_mode="L",type="pil")
-gr.Markdown(MODEL_IS_WRONG)
-
-with gr.Row():
 label_output = gr.outputs.Label(num_top_classes=2)
 
+
+submit = gr.Button("Submit")
 
-
+gr.Markdown(MODEL_IS_WRONG)
+
+number_dropdown = gr.Dropdown(choices=[i for i in range(10)],type='value',default=None,label="What was the correct prediction?")
 
-with gr.Row():
-submit = gr.Button("Submit")
 
-
+flag_btn = gr.Button("Flag")
 
 output_result = gr.outputs.HTML()
 adversarial_number = gr.Variable(value=0)

@@ -449,13 +448,10 @@ def main():
 </div>
 """)
 
-
-
-
-
-stat_adv_image =gr.Plot(type="matplotlib")
-
-test_results=gr.Plot(type="matplotlib")
+stats = gr.Markdown()
+stat_adv_image =gr.Plot(type="matplotlib")
+gr.Markdown(DASHBOARD_EXPLANATION)
+test_results=gr.Plot(type="matplotlib")
 
 dashboard.select(get_statistics,inputs=[],outputs=[stat_adv_image,test_results,notification,stats])
 
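Taken together, the two hunks rearrange the drawing tab (canvas input, predicted label, Submit button, then the flagging hint, the corrected-digit dropdown, and the Flag button) and build out the dashboard tab (a stats Markdown block, the adversarial-examples plot, an explanation, and the test-results plot) that dashboard.select refreshes through get_statistics. Below is a minimal sketch of how these pieces could hang together. The component calls and the dashboard.select line are copied from the hunks; the tab names, the predict/flag_image handlers, the notification component, and the placeholder string constants are assumptions made only for this illustration, using the same legacy gr.inputs/gr.outputs API the file relies on.

```python
import gradio as gr
import matplotlib.pyplot as plt

# Placeholder UI strings: the real MODEL_IS_WRONG lives in utils.py, and
# DASHBOARD_EXPLANATION is assumed to be defined alongside the app's other strings.
MODEL_IS_WRONG = "### Did the model get it wrong or has a low confidence? ..."
DASHBOARD_EXPLANATION = "The plots below summarise results on flagged samples and on the test set."

# Hypothetical stand-ins, named only for this sketch; only get_statistics is
# referenced in the hunks above, the rest of the handlers live elsewhere in app.py.
def predict(image):
    return {"7": 0.9, "1": 0.1}, "<p>prediction stored</p>"

def flag_image(image, correct_label, adversarial_number):
    return f"<p>flagged as {correct_label}</p>", adversarial_number + 1

def get_statistics():
    adv_fig, test_fig = plt.figure(), plt.figure()
    return adv_fig, test_fig, "", "**adversarial samples collected: 0**"

with gr.Blocks() as demo:
    with gr.Tab("Draw"):
        # Component calls copied from the first hunk (legacy gr.inputs/gr.outputs API).
        image_input = gr.inputs.Image(source="canvas", shape=(28, 28),
                                      invert_colors=True, image_mode="L", type="pil")
        label_output = gr.outputs.Label(num_top_classes=2)
        submit = gr.Button("Submit")
        gr.Markdown(MODEL_IS_WRONG)
        number_dropdown = gr.Dropdown(choices=[i for i in range(10)], type="value",
                                      default=None,
                                      label="What was the correct prediction?")
        flag_btn = gr.Button("Flag")
        output_result = gr.outputs.HTML()
        adversarial_number = gr.Variable(value=0)
    with gr.Tab("Dashboard") as dashboard:  # tab names are assumptions
        notification = gr.Markdown()
        # Component calls copied from the second hunk.
        stats = gr.Markdown()
        stat_adv_image = gr.Plot(type="matplotlib")
        gr.Markdown(DASHBOARD_EXPLANATION)
        test_results = gr.Plot(type="matplotlib")

    # Wiring: only the dashboard.select line appears in the diff; the click handlers
    # are sketched to show where the new dropdown and Flag button would feed in.
    submit.click(predict, inputs=[image_input],
                 outputs=[label_output, output_result])
    flag_btn.click(flag_image,
                   inputs=[image_input, number_dropdown, adversarial_number],
                   outputs=[output_result, adversarial_number])
    dashboard.select(get_statistics, inputs=[],
                     outputs=[stat_adv_image, test_results, notification, stats])

demo.launch()
```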
data_mnist CHANGED

@@ -1 +1 @@
-Subproject commit
+Subproject commit fa05b54608bc4ddb261094a84ca9a38d215897d6
utils.py CHANGED

@@ -20,6 +20,7 @@ WHAT_TO_DO="""
 """
 
 MODEL_IS_WRONG = """
+---
 ### Did the model get it wrong or has a low confidence? Choose the correct prediction below and flag it. When you flag it, the instance is saved to our dataset and the model is trained on it.
 """
 DEFAULT_TEST_METRIC = "<html> Current test metric - Avg. loss: 1000, Accuracy: 30/1000 (30%) </html>"
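After this change the MODEL_IS_WRONG constant opens with a Markdown horizontal rule, so the flagging instructions are visually separated from the prediction output when app.py passes the string to gr.Markdown(MODEL_IS_WRONG). A minimal, self-contained sketch of that rendering; the Blocks wrapper here is only for illustration:

```python
import gradio as gr

# MODEL_IS_WRONG as it reads after this hunk (utils.py);
# the leading "---" renders as a horizontal rule above the hint.
MODEL_IS_WRONG = """
---
### Did the model get it wrong or has a low confidence? Choose the correct prediction below and flag it. When you flag it, the instance is saved to our dataset and the model is trained on it.
"""

with gr.Blocks() as demo:
    gr.Markdown(MODEL_IS_WRONG)  # same call app.py makes in the drawing tab

demo.launch()
```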