# DD-Ranking / constants.py
LEADERBOARD_HEADER = """
<style>
.header-gradient {
top: 40%;
bottom: 40%;
padding: 10px 0px;
font-weight: bold;
font-size: 40px;
font-family: Inter, Arial, Helvetica, sans-serif;
background: linear-gradient(to right, #5EA2EF, #0072F5);
-webkit-text-fill-color: transparent;
-webkit-background-clip: text;
}
.header-normal {
top: 40%;
bottom: 40%;
padding: 10px 0px;
font-weight: bold;
font-size: 40px;
font-family: Inter, Arial, Helvetica, sans-serif;
}
</style>
<div align="center">
<span class="header-gradient"> DD-Ranking </span>
<span class="header-normal"> Leaderboard </span>
</div>
<p align="center">
| <a href="https://nus-hpc-ai-lab.github.io/DD-Ranking/"><b>Documentation</b></a> | <a href="https://github.com/NUS-HPC-AI-Lab/DD-Ranking"><b>Github</b></a> | <a href=""><b>Paper</b> (Coming Soon)</a> | <a href=""><b>Twitter/X</b> (Coming Soon)</a> | <a href=""><b>Developer Slack</b> (Coming Soon)</a> |
</p>"""
LEADERBOARD_INTRODUCTION = """
# DD-Ranking Leaderboard
🏆 Welcome to the leaderboard of the **DD-Ranking**!
> DD-Ranking (DD, i.e., Dataset Distillation) is an integrated and easy-to-use benchmark for dataset distillation. It provides a fair evaluation scheme for DD methods, decoupling the effects of knowledge distillation and data augmentation to reflect the real informativeness of the distilled data.
- **Fair Evaluation**: the evaluation scheme isolates the impact of knowledge distillation and data augmentation, so scores reflect the informativeness of the distilled data itself.
- **Easy-to-use**: DD-Ranking provides a unified interface for dataset distillation evaluation.
- **Extensible**: DD-Ranking supports various datasets and models.
- **Customizable**: DD-Ranking supports various data augmentations and soft label strategies.
**Join the Leaderboard**: please see the [instructions](https://github.com/NUS-HPC-AI-Lab/DD-Ranking/blob/main/CONTRIBUTING.md) to participate.
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
COMING SOON
"""
IPC_INFO = """
Images Per Class
"""
LABEL_TYPE_INFO = """
Hard labels are categorical, in the same format as the labels of the real dataset. Soft labels are generated by a teacher model pretrained on the target dataset.
"""
WEIGHT_ADJUSTMENT_INTRODUCTION = """
The score used for ranking in the table below is computed as $score = w \cdot score_{HLR} - (1 - w) \cdot score_{IOR}$, where $w$ is the weight for the HLR metric.
**You can specify the weights for each metric below.**
"""
DATASET_LIST = ["CIFAR-10", "CIFAR-100", "Tiny-ImageNet"]
IPC_LIST = ["IPC-1", "IPC-10", "IPC-50"]
DATASET_IPC_LIST = {
"CIFAR-10": ["IPC-1", "IPC-10", "IPC-50"],
"CIFAR-100": ["IPC-1", "IPC-10", "IPC-50"],
"Tiny-ImageNet": ["IPC-10", "IPC-50"],
}
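# Hypothetical helper (a sketch, not part of the app): DATASET_IPC_LIST records
# which IPC settings each dataset supports (Tiny-ImageNet has no IPC-1 track),
# so the IPC choices shown in the UI can be restricted by the selected dataset.
def _valid_ipc_choices(dataset: str) -> list:
    return DATASET_IPC_LIST.get(dataset, IPC_LIST)
# e.g. _valid_ipc_choices("Tiny-ImageNet") == ["IPC-10", "IPC-50"]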
LABEL_TYPE_LIST = ["Hard Label", "Soft Label"]
METRICS = ["HLR", "IOR"]
METRICS_SIGN = [1.0, -1.0]
COLUMN_NAMES = ["Ranking", "Method", "Verified", "Date", "Label Type", "HLR", "IOR", "Score"]
DATA_TITLE_TYPE = ['number', 'markdown', 'markdown', 'markdown', 'markdown', 'number', 'number', 'number']
DATASET_MAPPING = {
"CIFAR-10": 0,
"CIFAR-100": 1,
"Tiny-ImageNet": 2,
}
IPC_MAPPING = {
"IPC-1": 0,
"IPC-10": 1,
"IPC-50": 2,
}
LABEL_MAPPING = {
"Hard Label": 0,
"Soft Label": 1,
}
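# Illustrative sketch (assumed usage, not the app's actual lookup code): the
# *_MAPPING dicts turn UI selections into integer indices, e.g. for addressing
# results stored in a nested structure ordered as [dataset][ipc][label_type].
# The `results` argument here is a hypothetical placeholder.
def _example_lookup(results, dataset: str, ipc: str, label_type: str):
    return results[DATASET_MAPPING[dataset]][IPC_MAPPING[ipc]][LABEL_MAPPING[label_type]]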