Martin Jurkovic committed on
Commit
0a65444
·
1 Parent(s): 9446fe5

Update method names

Browse files
Files changed (1) hide show
  1. src/populate.py +14 -38
src/populate.py CHANGED
@@ -9,6 +9,17 @@ from src.display.utils import EvalQueueColumn
9
  from src.about import Tasks, SingleTableTasks, SingleColumnTasks
10
 
11
 
 
 
 
 
 
 
 
 
 
 
 
12
  # def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
13
  # """Creates a dataframe from all the individual experiment results"""
14
  # raw_data = get_raw_eval_results(results_path, requests_path)
@@ -62,6 +73,9 @@ def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> p
62
  # iterate through all json files and add the data to the dataframe
63
  for data in all_data_json:
64
  model = data["method_name"]
 
 
 
65
  dataset = data["dataset_name"]
66
  row = {"Dataset": dataset, "Model": model}
67
  for metric in multi_table_metrics:
@@ -139,41 +153,3 @@ def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> p
139
 
140
 
141
  return singlecolumn_df, singletable_df, multitable_df
142
-
143
-
144
def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Create the finished / running / pending dataframes for the evaluation queue requests.

    Scans ``save_path`` for eval-request JSON files, both at the top level and
    inside one level of subfolders, normalizes each entry's model link and
    revision, then buckets the entries by their ``status`` field.

    Args:
        save_path: Directory containing the eval request JSON files/folders.
        cols: Column names the returned dataframes are restricted to.

    Returns:
        ``(df_finished, df_running, df_pending)``, each limited to ``cols``.
    """
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder
            folder = os.path.join(save_path, entry)
            # BUG FIX: the original used os.path.isfile(e) on the bare
            # filename, which resolves relative to the CWD and therefore
            # filtered out every sub-entry; the full path must be tested.
            sub_entries = [
                e
                for e in os.listdir(folder)
                if os.path.isfile(os.path.join(folder, e)) and not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(folder, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
 
9
  from src.about import Tasks, SingleTableTasks, SingleColumnTasks
10
 
11
 
12
# Lookup table translating the upper-cased ``method_name`` value found in the
# result JSON files to the display name shown on the leaderboard.
model_names = dict(
    CLAVADDPM="ClavaDDPM",
    RGCLD="RGCLD",
    MOSTLYAI="TabularARGN",
    RCTGAN="RCTGAN",
    REALTABFORMER="REaLTabFormer",
    SDV="SDV",
)
21
+
22
+
23
  # def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
24
  # """Creates a dataframe from all the individual experiment results"""
25
  # raw_data = get_raw_eval_results(results_path, requests_path)
 
73
  # iterate through all json files and add the data to the dataframe
74
  for data in all_data_json:
75
  model = data["method_name"]
76
+ # Rename model if it exists in the mapping dictionary
77
+ if model.upper() in model_names:
78
+ model = model_names[model.upper()]
79
  dataset = data["dataset_name"]
80
  row = {"Dataset": dataset, "Model": model}
81
  for metric in multi_table_metrics:
 
153
 
154
 
155
  return singlecolumn_df, singletable_df, multitable_df