abhinav-joshi committed
Commit bd89e20 · 1 Parent(s): bcd68d3

add team name

Files changed (3):
  1. app.py +2 -2
  2. src/display/utils.py +3 -0
  3. src/leaderboard/read_evals.py +20 -3
app.py CHANGED
@@ -81,7 +81,7 @@ def update_table(
     show_deleted: bool,
     query: str,
 ):
-    # filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
+    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
     # filtered_df = filter_queries(query, filtered_df)
     df = select_columns(hidden_df, columns)
     return df
@@ -93,7 +93,7 @@ def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
 
 def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
     always_here_cols = [
-        AutoEvalColumn.model_type_symbol.name,
+        # AutoEvalColumn.model_type_symbol.name,
         AutoEvalColumn.model.name,
     ]
     # We use COLS to maintain sorting
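In update_table the filter_models call is re-enabled, and select_columns now keeps only the model column in its always-visible set. As a rough illustration of what select_columns does with that set, here is a minimal, self-contained Python sketch; the literal "Model" header and the list-comprehension filter are assumptions standing in for AutoEvalColumn.model.name and the COLS-based filtering in the real app.py.

import pandas as pd

def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    # After this commit only the model column is always shown; the
    # model-type symbol column is no longer forced into view.
    always_here_cols = [
        "Model",  # stands in for AutoEvalColumn.model.name
    ]
    # Keep the always-visible columns plus the user-selected ones that
    # actually exist in the dataframe.
    keep = always_here_cols + [c for c in columns if c in df.columns and c not in always_here_cols]
    return df[keep]

demo = pd.DataFrame({"Model": ["model-a"], "Team": ["team-1"], "Average ⬆️": [61.2]})
print(select_columns(demo, ["Team"]))  # keeps only the Model and Team columns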
src/display/utils.py CHANGED
@@ -27,6 +27,9 @@ auto_eval_column_dict = []
 # Init
 # auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 # auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+auto_eval_column_dict += [
+    ["team_name", ColumnContent, ColumnContent("Team", "str", True)],
+]
 # Scores
 auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
 for task in Tasks:
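The new entry follows the same [attribute_name, type, ColumnContent(...)] triple format as the existing columns, adding a "Team" string column that is displayed by default. The sketch below shows how such triples typically become attributes of an AutoEvalColumn dataclass; the ColumnContent field names and the make_dataclass call are assumptions modelled on the common leaderboard template rather than code shown in this commit.

from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str                   # header shown in the table, e.g. "Team"
    type: str                   # display dtype: "str", "number", "markdown", ...
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

auto_eval_column_dict = []
auto_eval_column_dict += [
    ["team_name", ColumnContent, ColumnContent("Team", "str", True)],
]
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])

# Each triple becomes a field of a frozen dataclass, so other modules can
# refer to the new column header as AutoEvalColumn.team_name.name.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.team_name.name)  # -> "Team"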
src/leaderboard/read_evals.py CHANGED
@@ -111,17 +111,18 @@ class EvalResult:
 
     def to_dict(self):
         """Converts the Eval Result to a dict compatible with our dataframe display"""
+
         average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
+
         data_dict = {
             "eval_name": self.eval_name,  # not a column, just a save name,
             # AutoEvalColumn.precision.name: self.precision.value.name,
-            AutoEvalColumn.model_type.name: self.model_type.value.name,
-            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+            AutoEvalColumn.team_name.name: self.team_name.value.name,
             # AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             # AutoEvalColumn.architecture.name: self.architecture,
             # AutoEvalColumn.model.name: make_clickable_model(self.full_model),
             # AutoEvalColumn.revision.name: self.revision,
-            AutoEvalColumn.average.name: average,
+            # AutoEvalColumn.average.name: average,
             # AutoEvalColumn.license.name: self.license,
             # AutoEvalColumn.likes.name: self.likes,
             # AutoEvalColumn.params.name: self.num_params,
@@ -131,6 +132,22 @@ class EvalResult:
         for task in Tasks:
             data_dict[task.value.col_name] = self.results[task.value.benchmark]
 
+        # data_dict = {
+        #     "eval_name": self.eval_name,  # not a column, just a save name,
+        #     # AutoEvalColumn.precision.name: self.precision.value.name,
+        #     AutoEvalColumn.model_type.name: self.model_type.value.name,
+        #     AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+        #     # AutoEvalColumn.weight_type.name: self.weight_type.value.name,
+        #     # AutoEvalColumn.architecture.name: self.architecture,
+        #     # AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+        #     # AutoEvalColumn.revision.name: self.revision,
+        #     AutoEvalColumn.average.name: average,
+        #     # AutoEvalColumn.license.name: self.license,
+        #     # AutoEvalColumn.likes.name: self.likes,
+        #     # AutoEvalColumn.params.name: self.num_params,
+        #     # AutoEvalColumn.still_on_hub.name: self.still_on_hub,
+        # }
+
         return data_dict
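With this change, each EvalResult row exposes a team column instead of the model-type columns, and the computed average is no longer written into the row. The sketch below approximates what to_dict() returns afterwards; the plain-string team_name, the two-entry Tasks enum, and the literal "Team" key are simplifying assumptions (the real code reads the header via AutoEvalColumn.team_name.name and defines Tasks elsewhere in the repo).

from dataclasses import dataclass
from enum import Enum

class Tasks(Enum):
    # (benchmark key, display column name), a stand-in for the real Tasks enum
    task0 = ("benchmark_a", "Task A")
    task1 = ("benchmark_b", "Task B")

@dataclass
class EvalResult:
    eval_name: str  # save name, not a displayed column
    team_name: str  # simplified to a plain string for this sketch
    results: dict   # benchmark key -> score (or None when missing)

    def to_dict(self):
        """Converts the eval result to a dict compatible with the dataframe display."""
        # None scores are skipped in the sum but len(Tasks) stays the divisor,
        # so a missing benchmark effectively counts as zero. In this commit the
        # "Average ⬆️" entry is commented out, so the value is computed but not stored.
        average = sum(v for v in self.results.values() if v is not None) / len(Tasks)
        data_dict = {
            "eval_name": self.eval_name,
            "Team": self.team_name,  # AutoEvalColumn.team_name.name in the real code
        }
        for task in Tasks:
            benchmark, col_name = task.value
            data_dict[col_name] = self.results.get(benchmark)
        return data_dict

row = EvalResult("team1_model1", "team1", {"benchmark_a": 0.9, "benchmark_b": None})
print(row.to_dict())
# {'eval_name': 'team1_model1', 'Team': 'team1', 'Task A': 0.9, 'Task B': None}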