Commit 07d99fc · verified · Parent: 9ae12f5

Update src/populate.py

Files changed (1):
  src/populate.py (+17, -5)
src/populate.py CHANGED
@@ -4,7 +4,8 @@ import os
 import pandas as pd
 import json
 
-from src.display.utils import COLUMNS, EVAL_COLS
+from src.display.utils import COLUMNS, EVAL_COLS, Tasks
+from src.envs import EVAL_RESULTS_PATH
 
 def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
     # Initialize an empty DataFrame
@@ -12,12 +13,15 @@ def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
 
     # Load evaluation results from JSON files
     if os.path.exists(eval_results_path):
-        result_files = [os.path.join(eval_results_path, f) for f in os.listdir(eval_results_path) if f.endswith('.json')]
+        result_files = [
+            os.path.join(eval_results_path, f)
+            for f in os.listdir(eval_results_path)
+            if f.endswith('.json')
+        ]
         data_list = []
         for file in result_files:
             with open(file, 'r') as f:
                 data = json.load(f)
-                # Flatten the JSON structure if needed
                 flattened_data = {}
                 flattened_data.update(data.get('config', {}))
                 flattened_data.update(data.get('results', {}))
@@ -30,6 +34,10 @@ def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
         if col not in df.columns:
             df[col] = None
 
+    # Convert 'average' column to float and handle errors
+    if 'average' in df.columns:
+        df['average'] = pd.to_numeric(df['average'], errors='coerce')
+
     # Sort by 'average' column if it exists
     if 'average' in df.columns:
         df = df.sort_values(by=['average'], ascending=False)
@@ -44,7 +52,11 @@ def get_evaluation_queue_df(eval_requests_path, eval_cols):
 
     # Load evaluation requests from JSON files
    if os.path.exists(eval_requests_path):
-        request_files = [os.path.join(eval_requests_path, f) for f in os.listdir(eval_requests_path) if f.endswith('.json')]
+        request_files = [
+            os.path.join(eval_requests_path, f)
+            for f in os.listdir(eval_requests_path)
+            if f.endswith('.json')
+        ]
         data_list = []
         for file in request_files:
             with open(file, 'r') as f:
@@ -57,4 +69,4 @@ def get_evaluation_queue_df(eval_requests_path, eval_cols):
     running_df = df[df['status'] == 'running']
     pending_df = df[df['status'] == 'pending']
 
-    return finished_df, running_df, pending_df
+    return finished_df, running_df, pending_df
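
The main behavioral change in this commit is coercing the 'average' column to numeric before sorting. A minimal standalone sketch of why that matters, using made-up values not taken from the Space's actual result files:

import pandas as pd

# An 'average' column read straight out of heterogeneous JSON files can
# arrive as strings, including unparseable entries (values are illustrative).
df = pd.DataFrame({
    'model': ['a', 'b', 'c'],
    'average': ['71.3', 'N/A', '68.9'],
})

# Sorting string values orders them lexicographically (and mixed str/float
# columns raise a TypeError); coercing first turns unparseable entries into
# NaN so the numeric sort is well defined.
df['average'] = pd.to_numeric(df['average'], errors='coerce')
df = df.sort_values(by=['average'], ascending=False)
print(df)

With errors='coerce', entries that don't parse as numbers become NaN and, under sort_values' default na_position='last', fall to the bottom of the leaderboard instead of breaking the sort.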