LEADERBOARD_INTRODUCTION = """
# DD-Ranking Leaderboard

<p align="center">
| <a href="https://nus-hpc-ai-lab.github.io/DD-Ranking/"><b>Documentation</b></a> | <a href="https://github.com/NUS-HPC-AI-Lab/DD-Ranking"><b>Github</b></a> | <a href=""><b>Paper </b> (Coming Soon)</a> | <a href=""><b>Twitter/X</b> (Coming Soon)</a> | <a href=""><b>Developer Slack</b> (Coming Soon)</a> |
</p>

πŸ† Welcome to the leaderboard of the **DD-Ranking**! 

> DD-Ranking (DD, i.e., Dataset Distillation) is an integrated and easy-to-use benchmark for dataset distillation. It provides a fair evaluation scheme for DD methods that decouples the effects of knowledge distillation and data augmentation, so scores reflect the real informativeness of the distilled data.

- **Fair Evaluation**: the evaluation scheme decouples the effects of knowledge distillation and data augmentation to reflect the real informativeness of the distilled data.
- **Easy-to-use**: DD-Ranking provides a unified interface for dataset distillation evaluation.
- **Extensible**: DD-Ranking supports various datasets and models.
- **Customizable**: DD-Ranking supports various data augmentations and soft label strategies.

**Join Leaderboard**: Please see the [instructions](https://github.com/NUS-HPC-AI-Lab/DD-Ranking/blob/main/CONTRIBUTING.md) to participate.
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
COMING SOON
"""

IPC_INFO = """
Images Per Class
"""

LABEL_TYPE_INFO = """
Hard labels are categorical, in the same format as the labels of the real dataset. Soft labels are generated by a teacher model pretrained on the target dataset.
"""

WEIGHT_ADJUSTMENT_INTRODUCTION = """
The score for ranking in the following table is computed by $score = \sum w_i score_i$, where $w_i$ is the weight for the $i$-th metric.
**You can specify the weights for each metric below.**
"""

DATASET_LIST = ["CIFAR-10", "CIFAR-100", "Tiny-ImageNet"]
IPC_LIST = ["IPC-1", "IPC-10", "IPC-50"]
LABEL_TYPE_LIST = ["Hard Label", "Soft Label"]

# HLR (Hard Label Recovery) and IOR (Improvement Over Random) are the two metrics shown on the leaderboard.
METRICS = ["HLR", "IOR"]
COLUMN_NAMES = ["Ranking", "Method", "Verified", "Date", "Label Type", "HLR", "IOR", "Score"]
DATA_TITLE_TYPE = ['number', 'markdown', 'markdown', 'markdown', 'markdown', 'number', 'number', 'number']
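
# Hedged sketch (not from the repo): one plausible way COLUMN_NAMES and
# DATA_TITLE_TYPE could feed a Gradio table. `gr.Dataframe` accepts `headers`
# and a per-column `datatype` list, so the two constants must stay aligned
# index-by-index.
def build_leaderboard_table():
    import gradio as gr  # imported lazily so this constants module stays dependency-free

    return gr.Dataframe(
        headers=COLUMN_NAMES,      # column titles shown in the UI
        datatype=DATA_TITLE_TYPE,  # "number" sorts numerically, "markdown" renders links
        interactive=False,         # leaderboard rows are read-only
    )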

DATASET_MAPPING = {
    "CIFAR-10": 0,
    "CIFAR-100": 1,
    "Tiny-ImageNet": 2,
}

IPC_MAPPING = {
    "IPC-1": 0,
    "IPC-10": 1,
    "IPC-50": 2,
}

LABEL_MAPPING = {
    "Hard Label": 0,
    "Soft Label": 1,
}
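
# Hedged sketch (not from the repo): the mapping dicts translate a UI
# selection into integer indices. The nested-list layout assumed here
# (results[dataset][ipc][label]) is hypothetical; the real storage may differ.
def lookup(results, dataset: str, ipc: str, label_type: str):
    """Resolve a (dataset, IPC, label-type) selection to its result entry."""
    return results[DATASET_MAPPING[dataset]][IPC_MAPPING[ipc]][LABEL_MAPPING[label_type]]

# e.g. lookup(results, "CIFAR-100", "IPC-10", "Soft Label")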