Update constants.py
constants.py (+37 -5)
```diff
@@ -1,10 +1,37 @@
-
-
+LEADERBOARD_HEADER = """
+<style>
+.header-gradient {
+    top: 40%;
+    bottom: 40%;
+    padding: 10px 0px;
+    font-weight: bold;
+    font-size: 40px;
+    font-family: Inter, Arial, Helvetica, sans-serif;
+    background: linear-gradient(to right, #5EA2EF, #0072F5);
+    -webkit-text-fill-color: transparent;
+    -webkit-background-clip: text;
+}
+
+.header-normal {
+    top: 40%;
+    bottom: 40%;
+    padding: 10px 0px;
+    font-weight: bold;
+    font-size: 40px;
+    font-family: Inter, Arial, Helvetica, sans-serif;
+}
+</style>
 
-
+<div align="center">
+<span class="header-gradient"> DD-Ranking </span>
+<span class="header-normal"> Leaderboard </span>
+</div>
 <p align="center">
 | <a href="https://nus-hpc-ai-lab.github.io/DD-Ranking/"><b>Documentation</b></a> | <a href="https://github.com/NUS-HPC-AI-Lab/DD-Ranking"><b>Github</b></a> | <a href=""><b>Paper </b> (Coming Soon)</a> | <a href=""><b>Twitter/X</b> (Coming Soon)</a> | <a href=""><b>Developer Slack</b> (Coming Soon)</a> |
-</p>
+</p>"""
+
+LEADERBOARD_INTRODUCTION = """
+# DD-Ranking Leaderboard
 
 🏆 Welcome to the leaderboard of the **DD-Ranking**!
 
@@ -32,12 +59,17 @@ Hard labels are categorical, having the same format of the real dataset. Soft la
 """
 
 WEIGHT_ADJUSTMENT_INTRODUCTION = """
-The score for ranking in the following table is computed by $score =
+The score for ranking in the following table is computed by $score = w score_{HLR} - (1 - w) score_{IOR}$, where $w$ is the weight for the HLR metric.
 **You can specify the weights for each metric below.**
 """
 
 DATASET_LIST = ["CIFAR-10", "CIFAR-100", "Tiny-ImageNet"]
 IPC_LIST = ["IPC-1", "IPC-10", "IPC-50"]
+DATASET_IPC_LIST = {
+    "CIFAR-10": ["IPC-1", "IPC-10", "IPC-50"],
+    "CIFAR-100": ["IPC-1", "IPC-10", "IPC-50"],
+    "Tiny-ImageNet": ["IPC-10", "IPC-50"],
+}
 LABEL_TYPE_LIST = ["Hard Label", "Soft Label"]
 
 METRICS = ["HLR", "IOR"]
```
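The updated `WEIGHT_ADJUSTMENT_INTRODUCTION` spells out the ranking formula: entries are ordered by $score = w \, score_{HLR} - (1 - w) \, score_{IOR}$, where $w$ is the user-chosen weight for the HLR metric. A minimal sketch of that computation, using a hypothetical `ranking_score` helper (the Space's actual scoring code is not part of this diff):

```python
# Hypothetical helper, not from the Space's code: applies the formula
# stated in WEIGHT_ADJUSTMENT_INTRODUCTION,
#     score = w * score_HLR - (1 - w) * score_IOR,
# where w is the user-chosen weight for the HLR metric.
def ranking_score(score_hlr: float, score_ior: float, w: float = 0.5) -> float:
    """Weighted combination of the HLR and IOR metric scores."""
    if not 0.0 <= w <= 1.0:
        raise ValueError("weight w must be in [0, 1]")
    return w * score_hlr - (1 - w) * score_ior

# With w = 0.5 both metrics contribute equally:
print(ranking_score(score_hlr=0.30, score_ior=0.20, w=0.5))  # 0.05
```

Setting $w$ near 1 ranks the table almost entirely by HLR; near 0, almost entirely by IOR.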
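The added `DATASET_IPC_LIST` mapping records which IPC settings exist for each dataset (Tiny-ImageNet has no IPC-1 entry). A plausible use is filtering an IPC selector by the chosen dataset; the sketch below assumes a hypothetical `ipc_choices` helper, since the Space's UI code is not shown in this diff:

```python
# Hypothetical usage sketch, not from the Space's code: look up the
# valid IPC options for a dataset, e.g. to repopulate a dropdown when
# the user switches datasets.
DATASET_IPC_LIST = {
    "CIFAR-10": ["IPC-1", "IPC-10", "IPC-50"],
    "CIFAR-100": ["IPC-1", "IPC-10", "IPC-50"],
    "Tiny-ImageNet": ["IPC-10", "IPC-50"],
}

def ipc_choices(dataset: str) -> list[str]:
    """Return the IPC settings valid for the chosen dataset."""
    try:
        return DATASET_IPC_LIST[dataset]
    except KeyError:
        raise ValueError(f"unknown dataset: {dataset}") from None

print(ipc_choices("Tiny-ImageNet"))  # ['IPC-10', 'IPC-50']
```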