Delete src
- src/about.py +0 -205
- src/display/css_html_js.py +0 -105
- src/display/formatting.py +0 -27
- src/display/utils.py +0 -139
- src/envs.py +0 -25
- src/leaderboard/read_evals.py +0 -199
- src/populate.py +0 -70
- src/submission/check_validity.py +0 -99
- src/submission/submit.py +0 -119
src/about.py
DELETED
@@ -1,205 +0,0 @@
````python
from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    # task0 = Task("anli_r1", "acc", "ANLI")
    # task1 = Task("logiqa", "acc_norm", "LogiQA")
    acva = Task("community|acva:_average|0", "acc_norm", "ACVA")
    alghafa = Task("community|alghafa:_average|0", "acc_norm", "AlGhafa")
    arabic_mmlu = Task("community|arabic_mmlu:_average|0", "acc_norm", "MMLU")
    arabic_exams = Task("community|arabic_exams|0", "acc_norm", "EXAMS")
    arc_challenge_okapi_ar = Task("community|arc_challenge_okapi_ar|0", "acc_norm", "ARC Challenge")
    arc_easy_ar = Task("community|arc_easy_ar|0", "acc_norm", "ARC Easy")
    boolq_ar = Task("community|boolq_ar|0", "acc_norm", "BOOLQ")
    copa_ext_ar = Task("community|copa_ext_ar|0", "acc_norm", "COPA")
    hellaswag_okapi_ar = Task("community|hellaswag_okapi_ar|0", "acc_norm", "HELLASWAG")
    openbook_qa_ext_ar = Task("community|openbook_qa_ext_ar|0", "acc_norm", "OPENBOOK QA")
    piqa_ar = Task("community|piqa_ar|0", "acc_norm", "PIQA")
    race_ar = Task("community|race_ar|0", "acc_norm", "RACE")
    sciq_ar = Task("community|sciq_ar|0", "acc_norm", "SCIQ")
    toxigen_ar = Task("community|toxigen_ar|0", "acc_norm", "TOXIGEN")

NUM_FEWSHOT = 0  # Change with your few shot
# ---------------------------------------------------


# Your leaderboard name
# TITLE = """<h1 align="center" id="space-title">Open Arabic LLM Leaderboard</h1>"""
TITLE = """<img src="https://raw.githubusercontent.com/alielfilali01/OALL-assets/main/TITLE.png" style="width:30%;display:block;margin-left:auto;margin-right:auto;border-radius:15px;">"""

BOTTOM_LOGO = """<img src="https://raw.githubusercontent.com/alielfilali01/OALL-assets/main/BOTTOM.png" style="width:50%;display:block;margin-left:auto;margin-right:auto;border-radius:15px;">"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
🌴 The Open Arabic LLM Leaderboard: Evaluate and compare the performance of Arabic Large Language Models (LLMs).

When you submit a model on the "Submit here!" page, it is automatically evaluated on a set of benchmarks.

The GPU used for evaluation is operated with the support of __[Technology Innovation Institute (TII)](https://www.tii.ae/)__.

The evaluation suite includes Arabic-native datasets, such as the `AlGhafa` benchmark from [TII](https://www.tii.ae/) and the `ACVA` benchmark from [FreedomIntelligence](https://huggingface.co/FreedomIntelligence), to assess reasoning, language understanding, commonsense, and more.

More details about the benchmarks and the evaluation process are provided on the "About" page.
"""

# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
# Context
While outstanding LLMs are being released competitively, most of them are centered on English and the English-speaking cultural sphere. We operate the Open Arabic LLM Leaderboard (OALL) to evaluate models on the characteristics of the Arabic language, culture, and heritage. Through this, we hope that users can conveniently use the leaderboard, participate, and contribute to the advancement of research in the Arab region 🔥.

## Icons & Model types

🟢 : `pretrained` or `continuously pretrained`

🔶 : `fine-tuned on domain-specific datasets`

💬 : `chat models (RLHF, DPO, ORPO, ...)`

🤝 : `base merges and moerges`

If the icon is "?", it indicates that there is insufficient information about the model.
Please provide information about the model through an issue! 🤩

Note 1: We reserve the right to correct any incorrect tags/icons after manual verification to ensure the accuracy and reliability of the leaderboard.

Note 2 ⚠️: Some models might be widely discussed as subjects of caution by the community, implying that users should exercise restraint when using them. Models that have used the evaluation set for training to achieve a high leaderboard ranking, among others, may be flagged as subjects of caution and may be deleted from the leaderboard.

## How it works
📈 We evaluate models using [LightEval](https://github.com/huggingface/lighteval), a unified and straightforward framework from the Hugging Face Eval Team for testing and assessing causal language models on a large number of different evaluation tasks.
We have set up a benchmark using datasets, most of them translated to Arabic and validated by native Arabic speakers. We also added `AlGhafa`, a new benchmark prepared from scratch natively for Arabic, alongside the `ACVA` benchmark introduced in the [AceGPT](https://arxiv.org/abs/2309.12053) paper by [FreedomIntelligence](https://huggingface.co/FreedomIntelligence).

Find below the native benchmarks:

- AlGhafa: find more details [here](https://aclanthology.org/2023.arabicnlp-1.21.pdf) (provided by [TII](https://www.tii.ae/))
- Arabic-Culture-Value-Alignment (ACVA): find more details [here](https://arxiv.org/pdf/2309.12053) (provided by [FreedomIntelligence](https://huggingface.co/FreedomIntelligence))

And here are all the translated benchmarks provided by the language evaluation team at [Technology Innovation Institute](https://www.tii.ae/):

- `Arabic-MMLU`, `Arabic-EXAMS`, `Arabic-ARC-Challenge`, `Arabic-ARC-Easy`, `Arabic-BOOLQ`, `Arabic-COPA`, `Arabic-HELLASWAG`, `Arabic-OPENBOOK-QA`, `Arabic-PIQA`, `Arabic-RACE`, `Arabic-SCIQ`, `Arabic-TOXIGEN`. All are part of the extended version of the AlGhafa benchmark (the AlGhafa-T version).

To ensure a fair and unbiased assessment of the models' true capabilities, all evaluations are conducted in zero-shot settings (`0-shot`). This approach eliminates any potential advantage from task-specific fine-tuning, providing a clear indication of how well the models can generalize to new tasks.

Also, given the nature of the tasks, which include multiple-choice and yes/no questions, the leaderboard primarily uses normalized log-likelihood accuracy (`loglikelihood_acc_norm`) for all tasks. This metric was chosen for its ability to provide a clear and fair measurement of model performance across different types of questions.

Please consider reaching out to us through the discussions tab if you are working on benchmarks for Arabic LLMs and would like to see them on this leaderboard as well. Your benchmark might change the whole game for Arabic models!

GPUs are provided by __[Technology Innovation Institute (TII)](https://www.tii.ae/)__ for the evaluations.

## Details and Logs
- Detailed numerical results in the `results` OALL dataset: https://huggingface.co/datasets/OALL/results
- Community queries and running status in the `requests` OALL dataset: https://huggingface.co/datasets/OALL/requests

## More resources
If you still have questions, you can check our FAQ [here](https://huggingface.co/spaces/OALL/Open-Arabic-LLM-Leaderboard/discussions/1)!
"""

EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:

```python
from transformers import AutoConfig, AutoModel, AutoTokenizer
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.

Note: make sure your model is public!
Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it; stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill up your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can launch the LightEval script on your model locally, using [this script](https://gist.github.com/alielfilali01/d486cfc962dca3ed4091b7c562a4377f).
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@misc{OALL,
  author = {Elfilali, Ali and Alobeidli, Hamza and Fourrier, Clémentine and Boussaha, Basma El Amel and Cojocaru, Ruxandra and Habib, Nathan and Hacid, Hakim},
  title = {Open Arabic LLM Leaderboard},
  year = {2024},
  publisher = {OALL},
  howpublished = "\url{https://huggingface.co/spaces/OALL/Open-Arabic-LLM-Leaderboard}"
}

@inproceedings{almazrouei-etal-2023-alghafa,
  title = "{A}l{G}hafa Evaluation Benchmark for {A}rabic Language Models",
  author = "Almazrouei, Ebtesam and Cojocaru, Ruxandra and Baldo, Michele and Malartic, Quentin and Alobeidli, Hamza and Mazzotta, Daniele and Penedo, Guilherme and Campesan, Giulia and Farooq, Mugariya and Alhammadi, Maitha and Launay, Julien and Noune, Badreddine",
  editor = "Sawaf, Hassan and El-Beltagy, Samhaa and Zaghouani, Wajdi and Magdy, Walid and Abdelali, Ahmed and Tomeh, Nadi and Abu Farha, Ibrahim and Habash, Nizar and Khalifa, Salam and Keleg, Amr and Haddad, Hatem and Zitouni, Imed and Mrini, Khalil and Almatham, Rawan",
  booktitle = "Proceedings of ArabicNLP 2023",
  month = dec,
  year = "2023",
  address = "Singapore (Hybrid)",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/2023.arabicnlp-1.21",
  doi = "10.18653/v1/2023.arabicnlp-1.21",
  pages = "244--275",
}

@misc{huang2023acegpt,
  title = {AceGPT, Localizing Large Language Models in Arabic},
  author = {Huang Huang and Fei Yu and Jianqing Zhu and Xuening Sun and Hao Cheng and Dingjie Song and Zhihong Chen and Abdulmohsen Alharthi and Bang An and Ziche Liu and Zhiyi Zhang and Junying Chen and Jianquan Li and Benyou Wang and Lian Zhang and Ruoyu Sun and Xiang Wan and Haizhou Li and Jinchao Xu},
  year = {2023},
  eprint = {2309.12053},
  archivePrefix = {arXiv},
  primaryClass = {cs.CL}
}

@misc{lighteval,
  author = {Fourrier, Clémentine and Habib, Nathan and Wolf, Thomas and Tunstall, Lewis},
  title = {LightEval: A lightweight framework for LLM evaluation},
  year = {2023},
  version = {0.3.0},
  url = {https://github.com/huggingface/lighteval}
}"""
````
src/display/css_html_js.py
DELETED
@@ -1,105 +0,0 @@
```python
custom_css = """

.markdown-text {
    font-size: 16px !important;
}

#models-to-add-text {
    font-size: 18px !important;
}

#citation-button span {
    font-size: 16px !important;
}

#citation-button textarea {
    font-size: 16px !important;
}

#citation-button > label > button {
    margin: 6px;
    transform: scale(1.3);
}

#leaderboard-table {
    margin-top: 15px
}

#leaderboard-table-lite {
    margin-top: 15px
}

#search-bar-table-box > div:first-child {
    background: none;
    border: none;
}

#search-bar {
    padding: 0px;
}

/* Limit the width of the first AutoEvalColumn so that names don't expand too much */
table td:first-child,
table th:first-child {
    max-width: 400px;
    overflow: auto;
    white-space: nowrap;
}

.tab-buttons button {
    font-size: 20px;
}

#scale-logo {
    border-style: none !important;
    box-shadow: none;
    display: block;
    margin-left: auto;
    margin-right: auto;
    max-width: 600px;
}

#scale-logo .download {
    display: none;
}
#filter_type {
    border: 0;
    padding-left: 0;
    padding-top: 0;
}
#filter_type label {
    display: flex;
}
#filter_type label > span {
    margin-top: var(--spacing-lg);
    margin-right: 0.5em;
}
#filter_type label > .wrap {
    width: 103px;
}
#filter_type label > .wrap .wrap-inner {
    padding: 2px;
}
#filter_type label > .wrap .wrap-inner input {
    width: 1px
}
#filter-columns-type {
    border: 0;
    padding: 0.5;
}
#filter-columns-size {
    border: 0;
    padding: 0.5;
}
#box-filter > .form {
    border: 0
}
"""

get_window_url_params = """
function(url_params) {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    return url_params;
}
"""
```
src/display/formatting.py
DELETED
@@ -1,27 +0,0 @@
```python
def model_hyperlink(link, model_name):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'


def make_clickable_model(model_name):
    link = f"https://huggingface.co/{model_name}"
    return model_hyperlink(link, model_name)


def styled_error(error):
    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"


def styled_warning(warn):
    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"


def styled_message(message):
    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"


def has_no_nan_values(df, columns):
    return df[columns].notna().all(axis=1)


def has_nan_values(df, columns):
    return df[columns].isna().any(axis=1)
```
src/display/utils.py
DELETED
@@ -1,139 +0,0 @@
```python
from dataclasses import dataclass, make_dataclass
from enum import Enum

import pandas as pd

from src.about import Tasks

def fields(raw_class):
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


# These classes are for user-facing column names,
# to avoid having to change them all around the code
# when a modification is needed
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

## Leaderboard columns
auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Scores
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
for task in Tasks:
    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
# Model information
auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])

# We use make_dataclass to dynamically fill the scores from Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    private = ColumnContent("private", "bool", True)
    precision = ColumnContent("precision", "str", True)
    weight_type = ColumnContent("weight_type", "str", "Original")
    status = ColumnContent("status", "str", True)

## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji


class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    CPT = ModelDetails(name="continuously pretrained", symbol="🟩")
    FT = ModelDetails(name="fine-tuned on domain-specific datasets", symbol="🔶")
    chat = ModelDetails(name="chat models (RLHF, DPO, IFT, ...)", symbol="💬")
    merges = ModelDetails(name="base merges and moerges", symbol="🤝")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(type):
        if "pretrained" in type or "🟢" in type:
            return ModelType.PT
        if "continuously pretrained" in type or "🟩" in type:
            return ModelType.CPT
        if "fine-tuned" in type or "🔶" in type:
            return ModelType.FT
        if any([k in type for k in ["instruction-tuned", "RL-tuned", "chat", "🟦", "⭕", "💬"]]):
            return ModelType.chat
        if "merges" in type or "🤝" in type:
            return ModelType.merges
        return ModelType.Unknown


class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")

class Precision(Enum):
    # float32 = ModelDetails("float32")
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    qt_8bit = ModelDetails("8bit")
    qt_4bit = ModelDetails("4bit")
    qt_GPTQ = ModelDetails("GPTQ")
    Unknown = ModelDetails("?")

    @staticmethod
    def from_str(precision):
        # if precision in ["torch.float32", "float32"]:
        #     return Precision.float32
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        if precision in ["8bit"]:
            return Precision.qt_8bit
        if precision in ["4bit"]:
            return Precision.qt_4bit
        if precision in ["GPTQ", "None"]:
            return Precision.qt_GPTQ
        return Precision.Unknown

# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]

EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]

BENCHMARK_COLS = [t.value.col_name for t in Tasks]

NUMERIC_INTERVALS = {
    "?": pd.Interval(-1, 0, closed="right"),
    "~1.5": pd.Interval(0, 2, closed="right"),
    "~3": pd.Interval(2, 4, closed="right"),
    "~7": pd.Interval(4, 9, closed="right"),
    "~13": pd.Interval(9, 20, closed="right"),
    "~35": pd.Interval(20, 45, closed="right"),
    "~60": pd.Interval(45, 70, closed="right"),
    "70+": pd.Interval(70, 10000, closed="right"),
}
```
src/envs.py
DELETED
@@ -1,25 +0,0 @@
```python
import os

from huggingface_hub import HfApi

# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("TOKEN")  # A read/write token for your org

OWNER = "OALL"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
# ----------------------------------

REPO_ID = f"{OWNER}/Open-Arabic-LLM-Leaderboard"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

API = HfApi(token=TOKEN)
```
src/leaderboard/read_evals.py
DELETED
@@ -1,199 +0,0 @@
```python
import glob
import json
import math
import os
from dataclasses import dataclass

import dateutil
import numpy as np

from src.display.formatting import make_clickable_model
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
from src.submission.check_validity import is_model_on_hub


@dataclass
class EvalResult:
    """Represents one full evaluation. Built from a combination of the result and request file for a given run."""
    eval_name: str  # org_model_precision (uid)
    full_model: str  # org/model (path on hub)
    org: str
    model: str
    revision: str  # commit hash, "" if main
    results: dict
    precision: Precision = Precision.Unknown
    model_type: ModelType = ModelType.Unknown  # Pretrained, fine-tuned, ...
    weight_type: WeightType = WeightType.Original  # Original or Adapter
    architecture: str = "Unknown"
    license: str = "?"
    likes: int = 0
    num_params: int = 0
    date: str = ""  # submission date of request file
    still_on_hub: bool = False

    @classmethod
    def init_from_json_file(cls, json_filepath):
        """Inits the result from the specific model result file"""
        with open(json_filepath) as fp:
            data = json.load(fp)

        config = data.get("config_general")

        # Precision
        precision = Precision.from_str(config.get("model_dtype"))

        # Get model and org
        org_and_model = config.get("model_name", config.get("model_args", None))
        org_and_model = org_and_model.split("/", 1)

        if len(org_and_model) == 1:
            org = None
            model = org_and_model[0]
            result_key = f"{model}_{precision.value.name}"
        else:
            org = org_and_model[0]
            model = org_and_model[1]
            result_key = f"{org}_{model}_{precision.value.name}"
        full_model = "/".join(org_and_model)

        still_on_hub, _, model_config = is_model_on_hub(
            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
        )
        architecture = "?"
        if model_config is not None:
            architectures = getattr(model_config, "architectures", None)
            if architectures:
                architecture = ";".join(architectures)

        # Extract results available in this file (some results are split in several files)
        results = {}
        for task in Tasks:
            task = task.value

            # We average all scores of a given metric (not all metrics are present in all files)
            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
            if accs.size == 0 or any([acc is None for acc in accs]):
                continue

            mean_acc = np.mean(accs) * 100.0
            results[task.benchmark] = mean_acc

        return cls(
            eval_name=result_key,
            full_model=full_model,
            org=org,
            model=model,
            results=results,
            precision=precision,
            revision=config.get("model_sha", ""),
            still_on_hub=still_on_hub,
            architecture=architecture,
        )

    def update_with_request_file(self, requests_path):
        """Finds the relevant request file for the current model and updates info with it"""
        request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)

        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            self.model_type = ModelType.from_str(request.get("model_type", ""))
            self.weight_type = WeightType[request.get("weight_type", "Original")]
            self.license = request.get("license", "?")
            self.likes = request.get("likes", 0)
            self.num_params = request.get("params", 0)
            self.date = request.get("submitted_time", "")
            self.architecture = request.get("architectures", "Unknown")  # delete later
            self.status = request.get("status", "FAILED")
        except Exception:
            self.status = "FAILED"
            print(f'Could not find request file for {self.org}/{self.model} with "precision:{self.precision.value.name},model_type:{self.model_type}",license:{self.license},status:{self.status}')

    def to_dict(self):
        """Converts the EvalResult to a dict compatible with our dataframe display"""
        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
        data_dict = {
            "eval_name": self.eval_name,  # not a column, just a save name
            AutoEvalColumn.precision.name: self.precision.value.name,
            AutoEvalColumn.model_type.name: self.model_type.value.name,
            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
            AutoEvalColumn.architecture.name: self.architecture,
            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
            AutoEvalColumn.revision.name: self.revision,
            AutoEvalColumn.average.name: average,
            AutoEvalColumn.license.name: self.license,
            AutoEvalColumn.likes.name: self.likes,
            AutoEvalColumn.params.name: self.num_params,
            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
        }

        for task in Tasks:
            data_dict[task.value.col_name] = self.results[task.value.benchmark]

        return data_dict


def get_request_file_for_model(requests_path, model_name, precision):
    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
    request_files = os.path.join(
        requests_path,
        f"{model_name}_eval_request_*.json",
    )
    request_files = glob.glob(request_files)

    # Select correct request file (precision)
    request_file = ""
    request_files = sorted(request_files, reverse=True)
    for tmp_request_file in request_files:
        with open(tmp_request_file, "r") as f:
            req_content = json.load(f)
            if (
                req_content["status"] in ["FINISHED"]
                and req_content["precision"] == precision.split(".")[-1]
            ):
                request_file = tmp_request_file
    return request_file


def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []

    for root, _, files in os.walk(results_path):
        # We should only have json files in model results
        if len(files) == 0 or any([not f.endswith(".json") for f in files]):
            continue

        # Sort the files by date
        try:
            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
        except dateutil.parser._parser.ParserError:
            files = [files[-1]]

        for file in files:
            model_result_filepaths.append(os.path.join(root, file))

    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        # Creation of result
        eval_result = EvalResult.init_from_json_file(model_result_filepath)
        eval_result.update_with_request_file(requests_path)

        # Store results of the same eval together
        eval_name = eval_result.eval_name
        if eval_name in eval_results.keys():
            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
        else:
            eval_results[eval_name] = eval_result

    results = []
    for v in eval_results.values():
        try:
            v.to_dict()  # we test if the dict version is complete
            results.append(v)
        except KeyError:  # not all eval values present
            continue

    return results
```
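End to end, this module reads result JSONs and enriches them from the matching request files. A sketch of calling it with the default paths from `src/envs.py`, assuming both dataset snapshots exist locally:

```python
# Sketch: load all evaluations and print a few leaderboard-ready rows.
from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
from src.leaderboard.read_evals import get_raw_eval_results

eval_results = get_raw_eval_results(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH)
for res in eval_results[:3]:
    row = res.to_dict()  # raises KeyError if any benchmark score is missing
    print(res.full_model, res.precision.value.name, round(row["Average ⬆️"], 2))
```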
src/populate.py
DELETED
@@ -1,70 +0,0 @@
```python
import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> tuple[list, pd.DataFrame]:
    """Creates a dataframe from all the individual experiment results"""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[cols].round(decimals=2)

    # filter out if any of the benchmarks have not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    print("\nsrc/populate.py/get_leaderboard_df(): number of elements in the leaderboard:", len(df))

    return raw_data, df


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            try:
                with open(file_path, encoding="utf-8") as fp:
                    data = json.load(fp)
            except UnicodeDecodeError as e:
                print(f"Unicode decoding error in {file_path}: {e}")
                continue

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder
            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                try:
                    with open(file_path, encoding="utf-8") as fp:
                        data = json.load(fp)
                except json.JSONDecodeError:
                    print(f"Error reading {file_path}")
                    continue
                except UnicodeDecodeError as e:
                    print(f"Unicode decoding error in {file_path}: {e}")
                    continue

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
```
src/submission/check_validity.py
DELETED
@@ -1,99 +0,0 @@
```python
import json
import os
import re
from collections import defaultdict
from datetime import datetime, timedelta, timezone

import huggingface_hub
from huggingface_hub import ModelCard
from huggingface_hub.hf_api import ModelInfo
from transformers import AutoConfig
from transformers.models.auto.tokenization_auto import AutoTokenizer

def check_model_card(repo_id: str) -> tuple[bool, str]:
    """Checks if the model card and license exist and have been filled"""
    try:
        card = ModelCard.load(repo_id)
    except huggingface_hub.utils.EntryNotFoundError:
        return False, "Please add a model card to your model to explain how you trained/fine-tuned it."

    # Enforce license metadata
    if card.data.license is None:
        if not ("license_name" in card.data and "license_link" in card.data):
            return False, (
                "License not found. Please add a license to your model card using the `license` metadata or a"
                " `license_name`/`license_link` pair."
            )

    # Enforce card content
    if len(card.text) < 200:
        return False, "Please add a description to your model card, it is too short."

    return True, ""

def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str, AutoConfig]:
    """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
    try:
        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
        if test_tokenizer:
            try:
                tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
            except ValueError as e:
                return (
                    False,
                    f"uses a tokenizer which is not in a transformers release: {e}",
                    None
                )
            except Exception:
                return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
        return True, None, config

    except ValueError:
        return (
            False,
            "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
            None
        )

    except Exception:
        return False, "was not found on hub!", None


def get_model_size(model_info: ModelInfo, precision: str):
    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
    try:
        model_size = round(model_info.safetensors["total"] / 1e9, 3)
    except (AttributeError, TypeError):
        return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py

    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
    model_size = size_factor * model_size
    return model_size

def get_model_arch(model_info: ModelInfo):
    """Gets the model architecture from the configuration"""
    return model_info.config.get("architectures", "Unknown")

def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
    """Gathers the set of already submitted models to avoid duplicates, plus submission dates per organisation"""
    depth = 1
    file_names = []
    users_to_submission_dates = defaultdict(list)

    for root, _, files in os.walk(requested_models_dir):
        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
        if current_depth == depth:
            for file in files:
                if not file.endswith(".json"):
                    continue
                with open(os.path.join(root, file), "r") as f:
                    info = json.load(f)
                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")

                    # Select organisation
                    if info["model"].count("/") == 0 or "submitted_time" not in info:
                        continue
                    organisation, _ = info["model"].split("/")
                    users_to_submission_dates[organisation].append(info["submitted_time"])

    return set(file_names), users_to_submission_dates
```
src/submission/submit.py
DELETED
@@ -1,119 +0,0 @@
```python
import json
import os
from datetime import datetime, timezone

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    is_model_on_hub,
)

REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None

def add_new_eval(
    model: str,
    base_model: str,
    revision: str,
    precision: str,
    weight_type: str,
    model_type: str,
):
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    user_name = ""
    model_path = model
    if "/" in model:
        user_name = model.split("/")[0]
        model_path = model.split("/")[1]

    precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if model_type is None or model_type == "":
        return styled_error("Please select a model type.")

    # Does the model actually exist?
    if revision == "":
        revision = "main"

    # Is the model on the hub?
    if weight_type in ["Delta", "Adapter"]:
        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not base_model_on_hub:
            return styled_error(f'Base model "{base_model}" {error}')

    if not weight_type == "Adapter":
        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not model_on_hub:
            return styled_error(f'Model "{model}" {error}')

    # Is the model info correctly filled?
    try:
        model_info = API.model_info(repo_id=model, revision=revision)
    except Exception:
        return styled_error("Could not get your model information. Please fill it up properly.")

    model_size = get_model_size(model_info=model_info, precision=precision)

    # Were the model card and license filled?
    try:
        license = model_info.cardData["license"]
    except Exception:
        return styled_error("Please select a license for your model.")

    modelcard_OK, error_msg = check_model_card(model)
    if not modelcard_OK:
        return styled_error(error_msg)

    # Seems good, creating the eval
    print("Adding new eval")

    eval_entry = {
        "model": model,
        "base_model": base_model,
        "revision": revision,
        "precision": precision,
        "weight_type": weight_type,
        "status": "PENDING",
        "submitted_time": current_time,
        "model_type": model_type,
        "likes": model_info.likes,
        "params": model_size,
        "license": license,
        "private": False,
    }

    # Check for duplicate submission
    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted.")

    print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"

    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))

    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )

    # Remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )
```
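For reference, the Gradio "Submit here!" form effectively calls `add_new_eval` with the raw widget values. A sketch with placeholder arguments (note that `precision.split(" ")[0]` strips any UI suffix from the precision choice):

```python
# Sketch (placeholder arguments): what the "Submit here!" form triggers.
from src.submission.submit import add_new_eval

html = add_new_eval(
    model="org/my-arabic-model",  # placeholder hub id
    base_model="",
    revision="main",
    precision="float16",
    weight_type="Original",
    model_type="🟢 : pretrained",
)
print(html)  # styled_message / styled_error HTML shown in the UI
```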