added background
- app.css +4 -0
- app.py +3 -2
- data_mnist +1 -1
- mnist-dall.jpg +0 -0
- utils.py +2 -2
app.css
ADDED

@@ -0,0 +1,4 @@
+body {
+  background-image: url("mnist-dall.jpg");
+  background-color: #cccccc;
+}
app.py
CHANGED

@@ -13,7 +13,8 @@ import numpy as np
 from collections import Counter


-
+with open('app.css','r') as f:
+    BLOCK_CSS = f.read()

 n_epochs = 10
 batch_size_train = 128

@@ -409,7 +410,7 @@ def get_statistics():

 def main():
     #block = gr.Blocks(css=BLOCK_CSS)
-    block = gr.Blocks()
+    block = gr.Blocks(css=BLOCK_CSS)

     with block:
         gr.Markdown(TITLE)
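For orientation, a minimal standalone sketch of what this change amounts to (the placeholder title and the trailing launch() call are illustrative assumptions, not taken from the repo): the stylesheet is read from disk at import time and handed to gr.Blocks through its css argument, so the body background defined in app.css applies to the whole page.

# Minimal sketch under the assumptions above; not the full app.py.
import gradio as gr

# Read the new stylesheet so its rules (here, the page background) reach the UI.
with open('app.css', 'r') as f:
    BLOCK_CSS = f.read()

def main():
    # gr.Blocks accepts a `css` string that is injected into the rendered page.
    block = gr.Blocks(css=BLOCK_CSS)
    with block:
        gr.Markdown("# MNIST Adversarial")  # placeholder; the repo uses its own TITLE constant
    block.launch()

if __name__ == "__main__":
    main()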
data_mnist
CHANGED

@@ -1 +1 @@
-Subproject commit
+Subproject commit 650e2ac4a86b5e109a12b5adc7bf6436bbe578de
mnist-dall.jpg
ADDED
utils.py
CHANGED

@@ -12,7 +12,7 @@ This kind of data is presumably the most valuable for a model, so this can be he
 """
 WHAT_TO_DO="""
 ### What to do:
-1. Draw
+1. Draw any number from 0-9.
 2. Click `Submit` and see the model's prediciton.
 3. If the model misclassifies it, Flag that example.
 4. This will add your (adversarial) example to a dataset on which the model will be trained later.

@@ -25,7 +25,7 @@ MODEL_IS_WRONG = """
 """
 DEFAULT_TEST_METRIC = "<html> Current test metric - Avg. loss: 1000, Accuracy: 30/1000 (30%) </html>"

-DASHBOARD_EXPLANATION="To
+DASHBOARD_EXPLANATION="To test the effect of adversarial training on out-of-distribution data, we track the performance progress of the model on the [MNIST Corrupted test dataset](https://zenodo.org/record/3239543)."

 STATS_EXPLANATION = "Here is the distribution of the __{num_adv_samples}__ adversarial samples we've got. The dataset can be found [here](https://huggingface.co/datasets/chrisjay/mnist-adversarial-dataset)."

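As a side note, STATS_EXPLANATION is a template with a {num_adv_samples} placeholder; a small usage sketch (the sample count of 42 is made up for illustration) would be:

# Hypothetical fill-in of the template above; the count is illustrative only.
STATS_EXPLANATION = "Here is the distribution of the __{num_adv_samples}__ adversarial samples we've got. The dataset can be found [here](https://huggingface.co/datasets/chrisjay/mnist-adversarial-dataset)."
num_adv_samples = 42
stats_markdown = STATS_EXPLANATION.format(num_adv_samples=num_adv_samples)
print(stats_markdown)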