openfree committed
Commit 68464a8 · verified · 1 Parent(s): bca421b

Update src/about.py

Files changed (1)
  1. src/about.py +9 -15
src/about.py CHANGED
@@ -7,23 +7,15 @@ class Task:
     metric: str
     col_name: str
 
-# ---------------------------------------------------
-# Select your tasks here
 class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
     task0 = Task("anli_r1", "acc", "Korean Bar Exam (Lawyer)")
     task1 = Task("logiqa", "acc_norm", "Senior Civil Service Examination(국가직 5급)")
 
-NUM_FEWSHOT = 0 # Change with your few shot
-# ---------------------------------------------------
+NUM_FEWSHOT = 0
 
-# Your leaderboard name
 TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
-
-# What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """Intro text"""
 
-# Which evaluations are you running? How can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = """\
 ## How it works
 ## Reproducibility
@@ -32,9 +24,11 @@ To reproduce our results, here is the commands you can run:
 
 EVALUATION_QUEUE_TEXT = """
 ## Some good practices before submitting a model
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+(중략)
+여기에 긴 텍스트를 붙여넣되,
+삼중 따옴표가 등장하지 않도록 주의하세요.
+"""
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = """
+"""
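The second hunk drops the AutoClasses sanity check that EVALUATION_QUEUE_TEXT used to quote. For anyone who still wants to run that check before submitting a model, a self-contained version of the removed snippet might look like this (the model name and revision below are placeholders, not values taken from the diff):

```python
# Pre-submission sanity check formerly quoted in EVALUATION_QUEUE_TEXT:
# confirm the config, model, and tokenizer all load via the transformers Auto classes.
from transformers import AutoConfig, AutoModel, AutoTokenizer

MODEL_NAME = "your-org/your-model"  # placeholder: the Hub repo id you plan to submit
REVISION = "main"                   # placeholder: the exact branch or commit to evaluate

config = AutoConfig.from_pretrained(MODEL_NAME, revision=REVISION)
model = AutoModel.from_pretrained(MODEL_NAME, revision=REVISION)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, revision=REVISION)
```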
 
 
 