ajude committed on
Commit
db1ab48
·
1 Parent(s): da6c970

feat(leaderboard): Added two features

Browse files

1. Slider for filtering models by number of parameters.
2. Model names have embedded links to their respective HF model pages.

Files changed (3) hide show
  1. app.py +70 -43
  2. core.py +28 -8
  3. utils.py +193 -0
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
 
3
  import core as core
4
  from style import CSS, LANG_SYMBOLS, MT_BENCH_LANG_SYMBOLS, T_SYMBOLS, TITLE
 
5
 
6
  demo = gr.Blocks(css=CSS)
7
  with demo:
@@ -28,17 +29,23 @@ with demo:
28
  show_label=True,
29
  elem_id="search-bar",
30
  )
31
- model_types = gr.CheckboxGroup(
32
- label="Select model type",
33
- choices=[
34
- (
35
- f"Pretrained {T_SYMBOLS['pretrained']}",
36
- T_SYMBOLS["pretrained"],
37
- ),
38
- (f"Chat {T_SYMBOLS['chat']}", T_SYMBOLS["chat"]),
39
- ],
40
- value=list(T_SYMBOLS.values()),
41
- )
 
 
 
 
 
 
42
 
43
  with gr.Row():
44
  langs_bar = gr.CheckboxGroup(
@@ -92,7 +99,9 @@ with demo:
92
  inputs=[],
93
  outputs=shown_tasks,
94
  )
95
- leaderboard_table = gr.Dataframe()
 
 
96
 
97
  with gr.TabItem(
98
  "🏅 LLM accuracy benchmark (Zero-Shot)",
@@ -107,17 +116,24 @@ with demo:
107
  show_label=True,
108
  elem_id="search-bar",
109
  )
110
- model_types_zero_shot = gr.CheckboxGroup(
111
- label="Select model type",
112
- choices=[
113
- (
114
- f"Pretrained {T_SYMBOLS['pretrained']}",
115
- T_SYMBOLS["pretrained"],
116
- ),
117
- (f"Chat {T_SYMBOLS['chat']}", T_SYMBOLS["chat"]),
118
- ],
119
- value=list(T_SYMBOLS.values()),
120
- )
 
 
 
 
 
 
 
121
 
122
  with gr.Row():
123
  langs_bar_zero_shot = gr.CheckboxGroup(
@@ -171,7 +187,7 @@ with demo:
171
  inputs=[],
172
  outputs=shown_tasks_zero_shot,
173
  )
174
- leaderboard_table_zero_shot = gr.Dataframe()
175
 
176
  with gr.TabItem(
177
  "🌐 LLM translation benchmark",
@@ -187,17 +203,23 @@ with demo:
187
  elem_id="search-bar",
188
  )
189
 
190
- model_types_misc = gr.CheckboxGroup(
191
- label="Select model type",
192
- choices=[
193
- (
194
- f"Pretrained {T_SYMBOLS['pretrained']}",
195
- T_SYMBOLS["pretrained"],
196
- ),
197
- (f"Chat {T_SYMBOLS['chat']}", T_SYMBOLS["chat"]),
198
- ],
199
- value=list(T_SYMBOLS.values()),
200
- )
 
 
 
 
 
 
201
 
202
  with gr.Row():
203
  langs_bar_misc = gr.CheckboxGroup(
@@ -252,7 +274,7 @@ with demo:
252
  outputs=shown_tasks_misc,
253
  )
254
 
255
- leaderboard_table_misc = gr.Dataframe()
256
 
257
  with gr.TabItem(
258
  "🌐 LLM MT-Bench benchmark",
@@ -295,17 +317,19 @@ with demo:
295
  outputs=langs_bar_mtbench,
296
  )
297
 
298
- leaderboard_table_mtbench = gr.Dataframe(scale=5)
299
 
300
  for comp, fn in [
301
  (search_bar, "submit"),
302
  (langs_bar, "change"),
303
  (shown_tasks, "change"),
304
  (model_types, "change"),
 
305
  ]:
306
  getattr(comp, fn)(
307
  core.update_df,
308
- [shown_tasks, search_bar, langs_bar, model_types, gr.State(value=True)],
 
309
  leaderboard_table,
310
  )
311
 
@@ -314,10 +338,11 @@ with demo:
314
  (model_types_zero_shot, "change"),
315
  (langs_bar_zero_shot, "change"),
316
  (shown_tasks_zero_shot, "change"),
 
317
  ]:
318
  getattr(comp, fn)(
319
  core.update_df,
320
- [shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot, gr.State(value=False)],
321
  leaderboard_table_zero_shot,
322
  )
323
 
@@ -326,10 +351,11 @@ with demo:
326
  (langs_bar_misc, "change"),
327
  (shown_tasks_misc, "change"),
328
  (model_types_misc, "change"),
 
329
  ]:
330
  getattr(comp, fn)(
331
  core.update_df,
332
- [shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, gr.State(value=False)],
333
  leaderboard_table_misc,
334
  )
335
 
@@ -346,21 +372,22 @@ with demo:
346
  gr.Blocks.load(
347
  block=demo,
348
  fn=core.update_df,
349
- inputs=[shown_tasks, search_bar, langs_bar, model_types, gr.State(value=True)],
 
350
  outputs=leaderboard_table,
351
  )
352
 
353
  gr.Blocks.load(
354
  block=demo,
355
  fn=core.update_df,
356
- inputs=[shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot, gr.State(value=False)],
357
  outputs=leaderboard_table_zero_shot,
358
  )
359
 
360
  gr.Blocks.load(
361
  block=demo,
362
  fn=core.update_df,
363
- inputs=[shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, gr.State(value=False)],
364
  outputs=leaderboard_table_misc,
365
  )
366
 
 
2
 
3
  import core as core
4
  from style import CSS, LANG_SYMBOLS, MT_BENCH_LANG_SYMBOLS, T_SYMBOLS, TITLE
5
+ from gradio_rangeslider import RangeSlider
6
 
7
  demo = gr.Blocks(css=CSS)
8
  with demo:
 
29
  show_label=True,
30
  elem_id="search-bar",
31
  )
32
+
33
+ with gr.Row():
34
+ with gr.Column():
35
+ model_types = gr.CheckboxGroup(
36
+ label="Select model type",
37
+ choices=[
38
+ (
39
+ f"Pretrained {T_SYMBOLS['pretrained']}",
40
+ T_SYMBOLS["pretrained"],
41
+ ),
42
+ (f"Chat {T_SYMBOLS['chat']}", T_SYMBOLS["chat"]),
43
+ ],
44
+ value=list(T_SYMBOLS.values()),
45
+ )
46
+ with gr.Column():
47
+ model_sizes = RangeSlider(minimum=0,maximum=150,value=(7, 10),label="Select the number of parameters (B)")
48
+
49
 
50
  with gr.Row():
51
  langs_bar = gr.CheckboxGroup(
 
99
  inputs=[],
100
  outputs=shown_tasks,
101
  )
102
+ # TODO When adding markdown as the data type of the model_name column, the text is getting overflown into the next column.
103
+ # leaderboard_table = gr.Dataframe(datatype=['str', 'markdown'])
104
+ leaderboard_table = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "30%"], wrap=False)
105
 
106
  with gr.TabItem(
107
  "🏅 LLM accuracy benchmark (Zero-Shot)",
 
116
  show_label=True,
117
  elem_id="search-bar",
118
  )
119
+
120
+
121
+ with gr.Row():
122
+ with gr.Column():
123
+ model_types_zero_shot = gr.CheckboxGroup(
124
+ label="Select model type",
125
+ choices=[
126
+ (
127
+ f"Pretrained {T_SYMBOLS['pretrained']}",
128
+ T_SYMBOLS["pretrained"],
129
+ ),
130
+ (f"Chat {T_SYMBOLS['chat']}", T_SYMBOLS["chat"]),
131
+ ],
132
+ value=list(T_SYMBOLS.values()),
133
+ )
134
+ with gr.Column():
135
+ model_sizes_zero_shot = RangeSlider(minimum=0, maximum=150, value=(7, 10),
136
+ label="Select the number of parameters (B)")
137
 
138
  with gr.Row():
139
  langs_bar_zero_shot = gr.CheckboxGroup(
 
187
  inputs=[],
188
  outputs=shown_tasks_zero_shot,
189
  )
190
+ leaderboard_table_zero_shot = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "30%"], wrap=False)
191
 
192
  with gr.TabItem(
193
  "🌐 LLM translation benchmark",
 
203
  elem_id="search-bar",
204
  )
205
 
206
+ with gr.Row():
207
+ with gr.Column():
208
+ model_types_misc = gr.CheckboxGroup(
209
+ label="Select model type",
210
+ choices=[
211
+ (
212
+ f"Pretrained {T_SYMBOLS['pretrained']}",
213
+ T_SYMBOLS["pretrained"],
214
+ ),
215
+ (f"Chat {T_SYMBOLS['chat']}", T_SYMBOLS["chat"]),
216
+ ],
217
+ value=list(T_SYMBOLS.values()),
218
+ )
219
+ with gr.Column():
220
+ model_sizes_misc = RangeSlider(minimum=0, maximum=150, value=(7, 10),
221
+ label="Select the number of parameters (B)")
222
+
223
 
224
  with gr.Row():
225
  langs_bar_misc = gr.CheckboxGroup(
 
274
  outputs=shown_tasks_misc,
275
  )
276
 
277
+ leaderboard_table_misc = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "30%"], wrap=False)
278
 
279
  with gr.TabItem(
280
  "🌐 LLM MT-Bench benchmark",
 
317
  outputs=langs_bar_mtbench,
318
  )
319
 
320
+ leaderboard_table_mtbench = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "60%"], wrap=False)
321
 
322
  for comp, fn in [
323
  (search_bar, "submit"),
324
  (langs_bar, "change"),
325
  (shown_tasks, "change"),
326
  (model_types, "change"),
327
+ (model_sizes, "change"),
328
  ]:
329
  getattr(comp, fn)(
330
  core.update_df,
331
+ [shown_tasks, search_bar, langs_bar, model_types, model_sizes, gr.State(value=True)],
332
+ # [shown_tasks, search_bar, langs_bar, model_types, gr.State(value=True)],
333
  leaderboard_table,
334
  )
335
 
 
338
  (model_types_zero_shot, "change"),
339
  (langs_bar_zero_shot, "change"),
340
  (shown_tasks_zero_shot, "change"),
341
+ (model_sizes_zero_shot, "change")
342
  ]:
343
  getattr(comp, fn)(
344
  core.update_df,
345
+ [shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot, model_sizes_zero_shot, gr.State(value=False)],
346
  leaderboard_table_zero_shot,
347
  )
348
 
 
351
  (langs_bar_misc, "change"),
352
  (shown_tasks_misc, "change"),
353
  (model_types_misc, "change"),
354
+ (model_sizes_misc, "change"),
355
  ]:
356
  getattr(comp, fn)(
357
  core.update_df,
358
+ [shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, model_sizes_misc, gr.State(value=False)],
359
  leaderboard_table_misc,
360
  )
361
 
 
372
  gr.Blocks.load(
373
  block=demo,
374
  fn=core.update_df,
375
+ inputs=[shown_tasks, search_bar, langs_bar, model_types, model_sizes, gr.State(value=True)],
376
+ # inputs=[shown_tasks, search_bar, langs_bar, model_types, gr.State(value=True)],
377
  outputs=leaderboard_table,
378
  )
379
 
380
  gr.Blocks.load(
381
  block=demo,
382
  fn=core.update_df,
383
+ inputs=[shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot, model_sizes_zero_shot, gr.State(value=False)],
384
  outputs=leaderboard_table_zero_shot,
385
  )
386
 
387
  gr.Blocks.load(
388
  block=demo,
389
  fn=core.update_df,
390
+ inputs=[shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, model_sizes_misc, gr.State(value=False)],
391
  outputs=leaderboard_table_misc,
392
  )
393
 
core.py CHANGED
@@ -4,6 +4,7 @@ import os
4
  import numpy as np
5
  import pandas as pd
6
  from datasets import load_dataset
 
7
 
8
  import style
9
 
@@ -27,7 +28,8 @@ def init():
27
  task_groups_shots_df = hidden_df[hidden_df["Few_Shot"] == True][["Task_Group", "Number_Shots"]].drop_duplicates()
28
  task_groups_shots_dict = task_groups_shots_df.set_index("Task_Group")["Number_Shots"].to_dict()
29
  languages_list = hidden_df["Language"].drop_duplicates().str.upper().tolist()
30
- mt_bench_language_list = hidden_df[hidden_df["Task_Group"] == "MTBENCH"]["Language"].drop_duplicates().str.upper().tolist()
 
31
  model_type_df = hidden_df[["Model_Name", "Model_Type"]].drop_duplicates()
32
  model_type_dict = model_type_df.set_index("Model_Name")["Model_Type"].to_dict()
33
 
@@ -41,8 +43,19 @@ def init():
41
  hidden_df["Type"] = hidden_df["Model_Name"].apply(lambda x: style.T_SYMBOLS[model_type_dict[x]])
42
 
43
 
 
 
 
 
 
 
 
 
 
44
  def sort_cols(df: pd.DataFrame, fewshot: bool = False) -> pd.DataFrame:
45
  task_cols = get_task_columns(df)
 
 
46
  return df.reindex(["Type", "Model_Name", "Average"] + sorted(task_cols), axis=1)
47
 
48
 
@@ -97,12 +110,13 @@ def select_shots(df: pd.DataFrame, fewshot: bool = False):
97
 
98
 
99
  def update_df(
100
- tasks: list[str],
101
- model_query: str,
102
- langs: list[str],
103
- model_types: list[str],
104
- fewshot: bool = False,
105
- format: bool = True,
 
106
  ) -> pd.DataFrame:
107
  """Return a filtered dataframe according to selected models, tasks and
108
  languages. The format flag controls whether the output dataframe should
@@ -119,6 +133,11 @@ def update_df(
119
  df = search_model(df, model_query)
120
  df = filter_type(df, model_types)
121
 
 
 
 
 
 
122
  if format:
123
  return sort_cols(df, fewshot).style.format(precision=2, decimal=".", na_rep="N/A")
124
  else:
@@ -132,7 +151,8 @@ def get_selected_task_type(task_type_id):
132
 
133
 
134
  def get_available_task_groups(selected_task_type, fewshot):
135
- task_groups = [task_group_name for task_group_name, task_type in task_group_type_dict.items() if task_type == selected_task_type]
 
136
 
137
  if fewshot:
138
  available_tasks = [c for c in task_groups if c not in ZERO_SHOT_ONLY]
 
4
  import numpy as np
5
  import pandas as pd
6
  from datasets import load_dataset
7
+ from utils import model_hf_look_up_table_filter
8
 
9
  import style
10
 
 
28
  task_groups_shots_df = hidden_df[hidden_df["Few_Shot"] == True][["Task_Group", "Number_Shots"]].drop_duplicates()
29
  task_groups_shots_dict = task_groups_shots_df.set_index("Task_Group")["Number_Shots"].to_dict()
30
  languages_list = hidden_df["Language"].drop_duplicates().str.upper().tolist()
31
+ mt_bench_language_list = hidden_df[hidden_df["Task_Group"] == "MTBENCH"][
32
+ "Language"].drop_duplicates().str.upper().tolist()
33
  model_type_df = hidden_df[["Model_Name", "Model_Type"]].drop_duplicates()
34
  model_type_dict = model_type_df.set_index("Model_Name")["Model_Type"].to_dict()
35
 
 
43
  hidden_df["Type"] = hidden_df["Model_Name"].apply(lambda x: style.T_SYMBOLS[model_type_dict[x]])
44
 
45
 
46
+ def model_hyperlink(link, model_name):
47
+ return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"> {model_name} </a>'
48
+
49
+
50
+ def make_clickable_model(model_name):
51
+ link = f"https://huggingface.co/" + model_hf_look_up_table_filter[model_name]['link']
52
+ return model_hyperlink(link, model_name)
53
+
54
+
55
  def sort_cols(df: pd.DataFrame, fewshot: bool = False) -> pd.DataFrame:
56
  task_cols = get_task_columns(df)
57
+ df['Model_Name'] = df['Model_Name'].apply(
58
+ lambda x: make_clickable_model(x) if x in model_hf_look_up_table_filter else x)
59
  return df.reindex(["Type", "Model_Name", "Average"] + sorted(task_cols), axis=1)
60
 
61
 
 
110
 
111
 
112
  def update_df(
113
+ tasks: list[str],
114
+ model_query: str,
115
+ langs: list[str],
116
+ model_types: list[str],
117
+ model_sizes: list[str],
118
+ fewshot: bool = False,
119
+ format: bool = True,
120
  ) -> pd.DataFrame:
121
  """Return a filtered dataframe according to selected models, tasks and
122
  languages. The format flag controls whether the output dataframe should
 
133
  df = search_model(df, model_query)
134
  df = filter_type(df, model_types)
135
 
136
+ if model_sizes:
137
+ result = [key for key, value in model_hf_look_up_table_filter.items() if
138
+ (value.get("model_size") >= model_sizes[0] and value.get("model_size") <= model_sizes[1])]
139
+ df = df[df['Model_Name'].isin(result)]
140
+
141
  if format:
142
  return sort_cols(df, fewshot).style.format(precision=2, decimal=".", na_rep="N/A")
143
  else:
 
151
 
152
 
153
  def get_available_task_groups(selected_task_type, fewshot):
154
+ task_groups = [task_group_name for task_group_name, task_type in task_group_type_dict.items() if
155
+ task_type == selected_task_type]
156
 
157
  if fewshot:
158
  available_tasks = [c for c in task_groups if c not in ZERO_SHOT_ONLY]
utils.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_hf_look_up_table_filter = {
2
+ "Aya-23-8B": {
3
+ "link": "CohereForAI/aya-23-8B",
4
+ "model_size": 8
5
+ },
6
+ "Bloom-7b1": {
7
+ "link": "bigscience/bloom-7b1",
8
+ "model_size": 7,
9
+ },
10
+ "Bloomz-7b1": {
11
+ "link": "bigscience/bloomz-7b1",
12
+ "model_size": 7,
13
+ },
14
+ "Meta-Llama-2-7B": {
15
+ "link": "meta-llama/Llama-2-7b",
16
+ "model_size": 7,
17
+ },
18
+ "Gemma-7b": {
19
+ "link": "google/gemma-7b",
20
+ "model_size": 7,
21
+ },
22
+ "Gemma-1.1-7b-Instruct": {
23
+ "link": "google/gemma-1.1-7b-it",
24
+ "model_size": 7,
25
+ },
26
+ "Meta-Llama-3-8B": {
27
+ "link": "meta-llama/Meta-Llama-3-8B",
28
+ "model_size": 8
29
+ },
30
+ "Meta-Llama-3-8B-Instruct": {
31
+ "link": "meta-llama/Meta-Llama-3-8B-Instruct",
32
+ "model_size": 8
33
+ },
34
+ "Mistral-7B-Instruct-v0.3": {
35
+ "link": "mistralai/Mistral-7B-Instruct-v0.3",
36
+ "model_size": 7
37
+ },
38
+ "Mistral-7B-Instruct-v0.1": {
39
+ "link": "mistralai/Mistral-7B-Instruct-v0.1",
40
+ "model_size": 7
41
+ },
42
+ "Mistral-7B-Instruct-v0.2": {
43
+ "link": "mistralai/Mistral-7B-Instruct-v0.2",
44
+ "model_size": 7
45
+ },
46
+ "Mistral-7B-v0.1": {
47
+ "link": "mistralai/Mistral-7B-v0.1",
48
+ "model_size": 7
49
+ },
50
+ "Mistral-7B-v0.3": {
51
+ "link": "mistralai/Mistral-7B-v0.3",
52
+ "model_size": 7
53
+ },
54
+ "Occiglot-7b-eu5": {
55
+ "link": "occiglot/occiglot-7b-eu5",
56
+ "model_size": 7
57
+ },
58
+ "Occiglot-7b-eu5-Instruct": {
59
+ "link": "occiglot/occiglot-7b-eu5-instruct",
60
+ "model_size": 7
61
+ },
62
+ "Phi-3-mini-4k-Instruct": {
63
+ "link": "microsoft/Phi-3-mini-4k-instruct",
64
+ "model_size": 3.8
65
+ },
66
+ "Qwen2-7B": {
67
+ "link": "Qwen/Qwen2-7B-Instruct",
68
+ "model_size": 7
69
+ },
70
+ "Qwen2-7B-Instruct": {
71
+ "link": "Qwen/Qwen2-7B-Instruct",
72
+ "model_size": 7
73
+ },
74
+ "7B_24EU_2.5T_bactrianx17_bb_ckp1": {
75
+ "link": "",
76
+ "model_size": 7
77
+ },
78
+ "7B_24EU_2.5T_bactrianx5_bb_ckp1": {
79
+ "link": "",
80
+ "model_size": 7
81
+ },
82
+ "7B_24EU_2.5T_honey_ckp2701": {
83
+ "link": "",
84
+ "model_size": 7
85
+ },
86
+ "7B_24EU_2T_bactrianx17_bb_ckp2": {
87
+ "link": "",
88
+ "model_size": 7
89
+ },
90
+ "7B_24EU_2T_bactrianx5_bb_ckp2": {
91
+ "link": "",
92
+ "model_size": 7
93
+ },
94
+ "7B_24EU_2.86T_EP5_iter_0681300": {
95
+ "link": "",
96
+ "model_size": 7
97
+ },
98
+ "7B_24EU_2.86T_iter_0602100": {
99
+ "link": "",
100
+ "model_size": 7
101
+ },
102
+ "7B_24EU_1.45T_bactrianx17_ckp1": {
103
+ "link": "",
104
+ "model_size": 7
105
+ },
106
+ "7B_24EU_1.45T_bactrianx17_bb_ckp2": {
107
+ "link": "",
108
+ "model_size": 7
109
+ },
110
+ "7B_24EU_1.45T_bactrianx5_ckp1": {
111
+ "link": "",
112
+ "model_size": 7
113
+ },
114
+ "7B_24EU_1.65T_bactrianx17_ckp1": {
115
+ "link": "",
116
+ "model_size": 7
117
+ },
118
+ "7B_24EU_1.65T_bactrianx17_bb_ckp1": {
119
+ "link": "",
120
+ "model_size": 7
121
+ },
122
+ "7B_24EU_1.65T_bactrianx5_ckp1": {
123
+ "link": "",
124
+ "model_size": 7
125
+ },
126
+ "7B_EN_200B_iter_0047683": {
127
+ "link": "",
128
+ "model_size": 7
129
+ },
130
+ "7B_EQUAL_200B_iter_0046950": {
131
+ "link": "",
132
+ "model_size": 7
133
+ },
134
+ "7B_EU24_1.1T_iter_0236250": {
135
+ "link": "",
136
+ "model_size": 7
137
+ },
138
+ "7B_EU24_1.45T_iter_0346050": {
139
+ "link": "",
140
+ "model_size": 7
141
+ },
142
+ "7B_EU24_1.65T_iter_0393075": {
143
+ "link": "",
144
+ "model_size": 7
145
+ },
146
+ "7B_EU24_2.5T_DE_213B": {
147
+ "link": "",
148
+ "model_size": 7
149
+ },
150
+ "7B_EU24_2.5T_DE_262B": {
151
+ "link": "",
152
+ "model_size": 7
153
+ },
154
+ "7B_EU24_2.5T_iter_0602100": {
155
+ "link": "",
156
+ "model_size": 7
157
+ },
158
+ "7B_EU24_2T_iter_0477675": {
159
+ "link": "",
160
+ "model_size": 7
161
+ },
162
+ "7B_EU24_2T_iter_0477900": {
163
+ "link": "",
164
+ "model_size": 7
165
+ },
166
+ "7B_EU24_2T_iter_0478125": {
167
+ "link": "",
168
+ "model_size": 7
169
+ },
170
+ "7B_EU24_3T_oscar_iter_0715255": {
171
+ "link": "",
172
+ "model_size": 7
173
+ },
174
+ "7B_EU24_3T_fw_iter_0715255": {
175
+ "link": "",
176
+ "model_size": 7
177
+ },
178
+ "7B_EU24_fw_3T_honey_ckp1350": {
179
+ "link": "",
180
+ "model_size": 7
181
+ },
182
+ "7B_EU24_fw_3.1T_iter_0025875": {
183
+ "link": "",
184
+ "model_size": 7
185
+ },
186
+ "7B_EU24_1.1T_bactrianx_ckp2": {
187
+ "link": "",
188
+ "model_size": 7
189
+ },
190
+
191
+
192
+
193
+ }