Sakalti committed on
Commit
72ae346
·
verified ·
1 Parent(s): 41c03bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -78,7 +78,7 @@ def init_leaderboard(dataframe):
78
  type="slider",
79
  min=0.01,
80
  max=150,
81
- label="Select the number of parameters (B)",
82
  ),
83
  ColumnFilter(
84
  AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
@@ -95,20 +95,20 @@ with demo:
95
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
96
 
97
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
98
- with gr.TabItem("๐Ÿ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
99
  leaderboard = init_leaderboard(LEADERBOARD_DF)
100
 
101
  with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
102
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
103
 
104
- with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
105
  with gr.Column():
106
  with gr.Row():
107
  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
108
 
109
  with gr.Column():
110
  with gr.Accordion(
111
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
112
  open=False,
113
  ):
114
  with gr.Row():
@@ -119,7 +119,7 @@ with demo:
119
  row_count=5,
120
  )
121
  with gr.Accordion(
122
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
123
  open=False,
124
  ):
125
  with gr.Row():
@@ -142,7 +142,7 @@ with demo:
142
  row_count=5,
143
  )
144
  with gr.Row():
145
- gr.Markdown("# โœ‰๏ธโœจ Submit your model here!", elem_classes="markdown-text")
146
 
147
  with gr.Row():
148
  with gr.Column():
@@ -150,7 +150,7 @@ with demo:
150
  revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
151
  model_type = gr.Dropdown(
152
  choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
153
- label="Model type",
154
  multiselect=False,
155
  value=None,
156
  interactive=True,
@@ -166,14 +166,14 @@ with demo:
166
  )
167
  weight_type = gr.Dropdown(
168
  choices=[i.value.name for i in WeightType],
169
- label="Weights type",
170
  multiselect=False,
171
  value="Original",
172
  interactive=True,
173
  )
174
  base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
175
 
176
- submit_button = gr.Button("Submit Eval")
177
  submission_result = gr.Markdown()
178
  submit_button.click(
179
  add_new_eval,
 
78
  type="slider",
79
  min=0.01,
80
  max=150,
81
+ label="パラメーター",
82
  ),
83
  ColumnFilter(
84
  AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
 
95
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
96
 
97
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
98
+ with gr.TabItem("๐Ÿ… LLM ใƒ™ใƒณใƒใƒžใƒผใ‚ฏ", elem_id="llm-benchmark-tab-table", id=0):
99
  leaderboard = init_leaderboard(LEADERBOARD_DF)
100
 
101
  with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
102
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
103
 
104
+ with gr.TabItem("🚀 追加! ", elem_id="llm-benchmark-tab-table", id=3):
105
  with gr.Column():
106
  with gr.Row():
107
  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
108
 
109
  with gr.Column():
110
  with gr.Accordion(
111
+ f"✅ 完了した評価 ({len(finished_eval_queue_df)})",
112
  open=False,
113
  ):
114
  with gr.Row():
 
119
  row_count=5,
120
  )
121
  with gr.Accordion(
122
+ f"🔄 実行中の評価 ({len(running_eval_queue_df)})",
123
  open=False,
124
  ):
125
  with gr.Row():
 
142
  row_count=5,
143
  )
144
  with gr.Row():
145
+ gr.Markdown("# โœ‰๏ธโœจ ใœใฒ่ฟฝๅŠ ใ—ใฆใใ ใ•ใ„๏ผ", elem_classes="markdown-text")
146
 
147
  with gr.Row():
148
  with gr.Column():
 
150
  revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
151
  model_type = gr.Dropdown(
152
  choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
153
+ label="モデルタイプ",
154
  multiselect=False,
155
  value=None,
156
  interactive=True,
 
166
  )
167
  weight_type = gr.Dropdown(
168
  choices=[i.value.name for i in WeightType],
169
+ label="ウェイトタイプ",
170
  multiselect=False,
171
  value="Original",
172
  interactive=True,
173
  )
174
  base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
175
 
176
+ submit_button = gr.Button("登録")
177
  submission_result = gr.Markdown()
178
  submit_button.click(
179
  add_new_eval,