rwightman (HF staff) committed
Commit 2669301 · verified · 1 Parent(s): bd26425

Update app.py

Files changed (1): app.py (+16 -24)
app.py CHANGED
@@ -5,8 +5,11 @@ import plotly.express as px
 from rapidfuzz import fuzz
 import re
 
+
 def load_leaderboard():
     # Load validation / test CSV files
+    pd.set_option('display.float_format', '{:.2f}'.format)
+
     results_csv_files = {
         'imagenet': 'https://raw.githubusercontent.com/huggingface/pytorch-image-models/main/results/results-imagenet.csv',
         'real': 'https://raw.githubusercontent.com/huggingface/pytorch-image-models/main/results/results-imagenet-real.csv',
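Note: the added pd.set_option line changes only how pandas renders floats, not the stored values. A minimal sketch of the effect:

    import pandas as pd

    pd.set_option('display.float_format', '{:.2f}'.format)
    df = pd.DataFrame({'imagenet_top1': [81.2345]})
    print(df)  # displays 81.23; df['imagenet_top1'][0] is still 81.2345
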
@@ -26,51 +29,41 @@ def load_leaderboard():
         'fp32-nchw-pt240-cpu-i9_10940x-dynamo': 'https://raw.githubusercontent.com/huggingface/pytorch-image-models/main/results/benchmark-infer-fp32-nchw-pt240-cpu-i9_10940x-dynamo.csv',
         'fp32-nchw-pt240-cpu-i7_12700h-dynamo': 'https://raw.githubusercontent.com/huggingface/pytorch-image-models/main/results/benchmark-infer-fp32-nchw-pt240-cpu-i7_12700h-dynamo.csv',
     }
-
+
     dataframes = {name: pd.read_csv(url) for name, url in results_csv_files.items()}
     bench_dataframes = {name: pd.read_csv(url) for name, url in benchmark_csv_files.items()}
     bench_dataframes = {name: df for name, df in bench_dataframes.items() if 'infer_gmacs' in df.columns}
-    main_bench_dataframe = bench_dataframes[main_bench]
-
+    print(bench_dataframes.keys())
+
     # Clean up dataframes
     remove_column_names = ["top1_err", "top5_err", "top1_diff", "top5_diff", "rank_diff", "param_count"]
     for df in dataframes.values():
         for col in remove_column_names:
             if col in df.columns:
-                df.drop(columns=[col], inplace=True)
+                df.drop(columns=[col], inplace=True)
 
     # Rename / process results columns
     for name, df in dataframes.items():
         df.rename(columns={"top1": f"{name}_top1", "top5": f"{name}_top5"}, inplace=True)
-        df['arch_name'] = df['model'].apply(lambda x: x.split('.')[0])
-
-    # Process benchmark dataframe
-    main_bench_dataframe['arch_name'] = main_bench_dataframe['model']
-    main_bench_dataframe.rename(columns={'infer_img_size': 'img_size'}, inplace=True)
-
+        df['arch_name'] = df['model'].apply(lambda x: x.split('.')[0])
+
+    # Process benchmark dataframes
+    for name, df in bench_dataframes.items():
+        df['arch_name'] = df['model']
+        df.rename(columns={'infer_img_size': 'img_size'}, inplace=True)
+
     # Merge all result dataframes
     result = dataframes['imagenet']
     for name, df in dataframes.items():
         if name != 'imagenet':
             result = pd.merge(result, df, on=['arch_name', 'model', 'img_size', 'crop_pct', 'interpolation'], how='outer')
-
-    # Merge with benchmark data
-    result = pd.merge(result, main_bench_dataframe, on=['arch_name', 'img_size'], how='left', suffixes=('', '_benchmark'))
 
-    # Calculate TFLOP/s
-    result['infer_tflop_s'] = result['infer_samples_per_sec'] * result['infer_gmacs'] * 2 / 1000
-
     # Calculate average scores
     top1_columns = [col for col in result.columns if col.endswith('_top1') and not col == 'a_top1']
     top5_columns = [col for col in result.columns if col.endswith('_top5') and not col == 'a_top5']
     result['avg_top1'] = result[top1_columns].mean(axis=1)
     result['avg_top5'] = result[top5_columns].mean(axis=1)
-
-    # Reorder columns
-    first_columns = ['model', 'img_size', 'avg_top1', 'avg_top5']
-    other_columns = [col for col in result.columns if col not in first_columns and col != 'model_benchmark']
-    result = result[first_columns + other_columns]
-
+
     # Create fully merged dataframes for each benchmark set
     merged_dataframes = {}
    for bench_name, bench_df in bench_dataframes.items():
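This hunk generalizes the old single-benchmark path: every entry in bench_dataframes now gets the same arch_name / img_size normalization, and the merge with the accuracy results moves into the per-benchmark loop below. A sketch of what that loop presumably does, reusing the merge keys and suffixes from the removed single-benchmark code (the loop body itself lies outside this hunk):

    # Assumption: each benchmark set is merged the way main_bench_dataframe was.
    merged_dataframes = {}
    for bench_name, bench_df in bench_dataframes.items():
        merged_df = pd.merge(
            result, bench_df,
            on=['arch_name', 'img_size'], how='left', suffixes=('', '_benchmark'))
        merged_dataframes[bench_name] = merged_df

The removed infer_tflop_s column was derived as samples/sec × GMACs/sample × 2 / 1000: each MAC counts as two FLOPs, and dividing by 1000 converts GFLOP/s to TFLOP/s (e.g. 500 samples/sec at 4 GMACs gives 500 × 4 × 2 / 1000 = 4 TFLOP/s).
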
@@ -82,7 +75,7 @@ def load_leaderboard():
         # Reorder columns
         first_columns = ['model', 'img_size', 'avg_top1', 'avg_top5']
         other_columns = [col for col in merged_df.columns if col not in first_columns]
-        merged_df = merged_df[first_columns + other_columns].copy()
+        merged_df = merged_df[first_columns + other_columns].copy(deep=True)
 
         # Drop columns that are no longer needed / add too much noise
         merged_df.drop('arch_name', axis=1, inplace=True)
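For reviewers: DataFrame.copy() already defaults to deep=True, so the explicit argument documents intent rather than changing behavior. A quick check:

    import pandas as pd

    a = pd.DataFrame({'x': [1]})
    b = a.copy()           # deep copy is the default
    b.loc[0, 'x'] = 2
    print(a.loc[0, 'x'])   # still 1; mutating the copy leaves the original alone
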
@@ -97,7 +90,6 @@ def load_leaderboard():
 
     return merged_dataframes
 
-
 REGEX_PREFIX = "re:"
 
 def auto_match(pattern, text):