import json

import gradio as gr
from huggingface_hub import HfApi, Repository

def change_tab(query_param):
    # The query parameter arrives as a single-quoted dict string, e.g. "{'tab': 'plot'}",
    # so it is normalized to valid JSON before parsing.
    query_param = query_param.replace("'", '"')
    query_param = json.loads(query_param)

    if (
        isinstance(query_param, dict)
        and "tab" in query_param
        and query_param["tab"] == "plot"
    ):
        return gr.Tabs.update(selected=1)
    else:
        return gr.Tabs.update(selected=0)
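
# Hedged usage sketch (the query strings below are illustrative, not taken from this Space):
#   change_tab("{'tab': 'plot'}")   # -> gr.Tabs.update(selected=1), switches to the plot tab
#   change_tab("{'tab': 'table'}")  # -> gr.Tabs.update(selected=0), falls back to the first tab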

def restart_space(LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN):
    HfApi().restart_space(repo_id=LLM_PERF_LEADERBOARD_REPO, token=OPTIMUM_TOKEN)
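
# Hedged usage sketch; the repo id below is a placeholder, not this Space's actual id:
#   restart_space("org/llm-perf-leaderboard", OPTIMUM_TOKEN)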

def load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN):
    # Clone (or update) the dataset repo locally; returns None if no token is available.
    llm_perf_repo = None
    if OPTIMUM_TOKEN:
        print("Loading LLM-Perf-Dataset from Hub...")
        llm_perf_repo = Repository(
            local_dir="./llm-perf-dataset",
            clone_from=LLM_PERF_DATASET_REPO,
            token=OPTIMUM_TOKEN,
            repo_type="dataset",
        )
        llm_perf_repo.git_pull()

    return llm_perf_repo
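
# Hedged usage sketch; the dataset repo id is a placeholder:
#   repo = load_dataset_repo("org/llm-perf-dataset", OPTIMUM_TOKEN)
#   # With a valid token: clones into ./llm-perf-dataset and pulls the latest commit.
#   # Without a token: returns None.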

LLAMAS = [
    "huggingface/llama-7b",
    "huggingface/llama-13b",
    "huggingface/llama-30b",
    "huggingface/llama-65b",
]

KOALA_LINK = "https://huggingface.co/TheBloke/koala-13B-HF"
VICUNA_LINK = "https://huggingface.co/lmsys/vicuna-13b-delta-v1.1"
OASST_LINK = "https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
DOLLY_LINK = "https://huggingface.co/databricks/dolly-v2-12b"
MODEL_PAGE = "https://huggingface.co/models"
LLAMA_LINK = "https://ai.facebook.com/blog/large-language-model-llama-meta-ai/"
# Note: this assignment overrides the lmsys VICUNA_LINK defined above.
VICUNA_LINK = "https://huggingface.co/CarperAI/stable-vicuna-13b-delta"
ALPACA_LINK = "https://crfm.stanford.edu/2023/03/13/alpaca.html"

def model_hyperlink(link, model_name):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'

def make_clickable_model(model_name):
    # Default to the model's Hub page, then special-case known models.
    link = f"https://huggingface.co/{model_name}"
    if model_name in LLAMAS:
        link = LLAMA_LINK
        model_name = model_name.split("/")[1]
    elif model_name == "HuggingFaceH4/stable-vicuna-13b-2904":
        link = VICUNA_LINK
        model_name = "stable-vicuna-13b"
    elif model_name == "HuggingFaceH4/llama-7b-ift-alpaca":
        link = ALPACA_LINK
        model_name = "alpaca-13b"

    if model_name == "dolly-12b":
        link = DOLLY_LINK
    elif model_name == "vicuna-13b":
        link = VICUNA_LINK
    elif model_name == "koala-13b":
        link = KOALA_LINK
    elif model_name == "oasst-12b":
        link = OASST_LINK

    return model_hyperlink(link, model_name)
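
# Hedged examples of the special-casing above (model ids are illustrative):
#   make_clickable_model("huggingface/llama-7b")     # links to LLAMA_LINK, displayed as "llama-7b"
#   make_clickable_model("databricks/dolly-v2-12b")  # no special case: links to its Hub page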

def make_clickable_score(score):
    link = "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard"
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{score}</a>'

def num_to_str(num):
    # Format a number as a short human-readable string (e.g. parameter counts).
    if num < 1000:
        return str(int(num))
    elif num < 1000000:
        return str(int(num / 1000)) + "K"
    elif num < 1000000000:
        return str(int(num / 1000000)) + "M"
    elif num < 1000000000000:
        return str(int(num / 1000000000)) + "B"
    # Values of one trillion or more are not handled and return None.
    return None
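
# Examples (values are illustrative; integer division drops any fractional part):
#   num_to_str(350)            -> "350"
#   num_to_str(13_500)         -> "13K"
#   num_to_str(7_000_000_000)  -> "7B"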