Soptq committed on
Commit
1661c7f
·
verified ·
1 Parent(s): b2d1070

Update constants.py

Browse files
Files changed (1) hide show
  1. constants.py +19 -3
constants.py CHANGED
@@ -59,10 +59,26 @@ Hard labels are categorical, having the same format of the real dataset. Soft la
59
  """
60
 
61
  WEIGHT_ADJUSTMENT_INTRODUCTION = """
62
- The score for ranking in the following table is computed by $score = w HLR - (1 - w) IOR$, where $w$ is the weight for the HLR metric.
63
  **You can specify the weight $w$ below.**
64
  """
65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  DATASET_LIST = ["CIFAR-10", "CIFAR-100", "Tiny-ImageNet"]
67
  IPC_LIST = ["IPC-1", "IPC-10", "IPC-50"]
68
  DATASET_IPC_LIST = {
@@ -74,7 +90,7 @@ LABEL_TYPE_LIST = ["Hard Label", "Soft Label"]
74
 
75
  METRICS = ["HLR", "IOR"]
76
  METRICS_SIGN = [1.0, -1.0]
77
- COLUMN_NAMES = ["Ranking", "Method", "Verified", "Date", "Label Type", "HLR", "IOR", "Score"]
78
  DATA_TITLE_TYPE = ['number', 'markdown', 'markdown', 'markdown', 'markdown', 'number', 'number', 'number']
79
 
80
  DATASET_MAPPING = {
@@ -92,4 +108,4 @@ IPC_MAPPING = {
92
  LABEL_MAPPING = {
93
  "Hard Label": 0,
94
  "Soft Label": 1,
95
- }
 
59
  """
60
 
61
  WEIGHT_ADJUSTMENT_INTRODUCTION = """
62
+ The score for ranking (DD-Ranking Score, DDRS) in the following table is computed by $DDRS = \\frac{e^{w \\cdot IOR - (1 - w) \\cdot HLR} - e^{-1}}{e - e^{-1}}$, where $w$ is the weight for the IOR metric.
63
  **You can specify the weight $w$ below.**
64
  """
65
 
66
+ METRIC_DEFINITION_INTRODUCTION = """
67
+ $\\text{Acc.}$: The accuracy of models trained on different samples.
68
+
69
+ $\\text{full-hard}$: Full dataset with hard labels.
70
+
71
+ $\\text{syn-hard}$: Synthetic dataset with hard labels.
72
+
73
+ $\\text{syn-any}$: Synthetic dataset with personalized evaluation methods (hard or soft labels).
74
+
75
+ $\\text{rdm-any}$: Randomly selected dataset (under the same compression ratio) with the same personalized evaluation methods.
76
+
77
+ $\\text{HLR} = \\text{Acc.}_{\\text{full-hard}} - \\text{Acc.}_{\\text{syn-hard}}$: The degree to which the original dataset is recovered under hard labels (hard label recovery).
78
+
79
+ $\\text{IOR} = \\text{Acc.}_{\\text{syn-any}} - \\text{Acc.}_{\\text{rdm-any}}$: The improvement over random selection when using personalized evaluation methods (improvement over random).
80
+ """
81
+
82
  DATASET_LIST = ["CIFAR-10", "CIFAR-100", "Tiny-ImageNet"]
83
  IPC_LIST = ["IPC-1", "IPC-10", "IPC-50"]
84
  DATASET_IPC_LIST = {
 
90
 
91
  METRICS = ["HLR", "IOR"]
92
  METRICS_SIGN = [1.0, -1.0]
93
+ COLUMN_NAMES = ["Ranking", "Method", "Verified", "Date", "Label Type", "HLR%", "IOR%", "DDRS"]
94
  DATA_TITLE_TYPE = ['number', 'markdown', 'markdown', 'markdown', 'markdown', 'number', 'number', 'number']
95
 
96
  DATASET_MAPPING = {
 
108
  LABEL_MAPPING = {
109
  "Hard Label": 0,
110
  "Soft Label": 1,
111
+ }