Weyaxi committed (verified)
Commit c1d63cd · Parent: d91bfdf

go back to old temp

Files changed (1): app.py (+21 -6)
app.py CHANGED
@@ -1,4 +1,6 @@
 import os
+os.system("wget https://raw.githubusercontent.com/Weyaxi/scrape-open-llm-leaderboard/main/openllm.py")
+from openllm import *
 import requests
 import pandas as pd
 from bs4 import BeautifulSoup
@@ -15,6 +17,7 @@ HF_TOKEN = os.getenv('HF_TOKEN')
 
 
 headers_models = ["🔢 Serial Number", "👤 Author Name", "📥 Total Downloads", "👍 Total Likes", "🤖 Number of Models",
+                  "🏆 Best Model On Open LLM Leaderboard", "🥇 Best Rank On Open LLM Leaderboard",
                   "📊 Average Downloads per Model", "📈 Average Likes per Model", "🚀 Most Downloaded Model",
                   "📈 Most Download Count", "❤️ Most Liked Model", "👍 Most Like Count", "🔥 Trending Model",
                   "👑 Best Rank at Trending Models", "🏷️ Type"]
@@ -70,7 +73,15 @@ def get_sum(df_for_sum_function):
     return {"Downloads": sum_downloads, "Likes": sum_likes}
 
 
-
+def get_openllm_leaderboard():
+    try:
+        data = get_json_format_data()
+        finished_models = get_datas(data)
+        df = pd.DataFrame(finished_models)
+        return df['Model'].tolist()
+    except Exception as e:  # something is wrong about the leaderboard so return empty list
+        print(e)
+        return []
 
 
 def get_ranking(model_list, target_org):
@@ -121,6 +132,7 @@ def group_models_by_author(all_things):
 
 def make_leaderboard(orgs, users, which_one, data):
     data_rows = []
+    open_llm_leaderboard = get_openllm_leaderboard() if which_one == "models" else None
 
     trend = get_trending_list(1, which_one)
     hepsi = [orgs, users]
@@ -139,12 +151,15 @@ def make_leaderboard(orgs, users, which_one, data):
             most_info = get_most(df)
 
             if which_one == "models":
-
+                open_llm_leaderboard_get_org = get_ranking(open_llm_leaderboard, org)
+
                 data_rows.append({
                     "Author Name": org,
                     "Total Downloads": sum_info["Downloads"],
                     "Total Likes": sum_info["Likes"],
                     "Number of Models": num_things,
+                    "Best Model On Open LLM Leaderboard": open_llm_leaderboard_get_org[1] if open_llm_leaderboard_get_org not in ["Not Found", "Error on Leaderboard"] else open_llm_leaderboard_get_org,
+                    "Best Rank On Open LLM Leaderboard": open_llm_leaderboard_get_org[1] if open_llm_leaderboard_get_org not in ["Not Found", "Error on Leaderboard"] else open_llm_leaderboard_get_org,
                     "Average Downloads per Model": int(sum_info["Downloads"] / num_things) if num_things != 0 else 0,
                     "Average Likes per Model": int(sum_info["Likes"] / num_things) if num_things != 0 else 0,
                     "Most Downloaded Model": most_info["Most Download"]["id"],
@@ -188,7 +203,6 @@ def make_leaderboard(orgs, users, which_one, data):
                 })
 
     leaderboard = pd.DataFrame(data_rows)
-    print(leaderboard.head())
     temp = ["Total Downloads"] if which_one != "spaces" else ["Total Likes"]
 
     leaderboard = leaderboard.sort_values(by=temp, ascending=False)
@@ -278,6 +292,8 @@ INTRODUCTION_TEXT = f"""
 
 🛠️ The leaderboard's backend mainly runs on the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api).
 
+📒 **Note:** In the model's dataframe, there are some columns related to the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). This data is also retrieved through web scraping.
+
 📒 **Note:** In trending models/datasets/spaces, first 300 models/datasets/spaces is being retrieved from huggingface.
 
 ## 🔍 Searching Organizations and Users
@@ -523,13 +539,13 @@ with gr.Blocks() as demo:
         search_bar_in_df = gr.Textbox(placeholder="🔍 Search for a author", show_label=False)
 
         with gr.TabItem("🏛️ Models", id=1):
-            columns_to_convert = ["Author Name", "Most Downloaded Model",
+            columns_to_convert = ["Author Name", "Best Model On Open LLM Leaderboard", "Most Downloaded Model",
                                   "Most Liked Model", "Trending Model"]
            models_df = make_leaderboard(org_names_in_list, user_names_in_list, "models", group_models_by_author(all_models))
            models_df = models_df_to_clickable(models_df, columns_to_convert, "models")

            gr_models = gr.Dataframe(apply_headers(models_df, headers_models).head(400), headers=headers_models, interactive=True,
-                                    datatype=["str", "markdown", "str", "str", "str", "str", "str",
+                                    datatype=["str", "markdown", "str", "str", "str", "markdown", "str", "str", "str",
                                               "markdown", "str", "markdown", "str", "markdown", "str", "str"])

        with gr.TabItem("📊 Datasets", id=2):
@@ -591,4 +607,3 @@ filtered_spaces_users = update_table_spaces(orgs=False, users=True, return_all=T
 filtered_spaces_orgs = update_table_spaces(orgs=True, users=False, return_all=True)['👤 Author Name'].tolist()
 
 demo.launch(debug=True)
-
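The commit wires the Open LLM Leaderboard in by shelling out to wget at import time and then star-importing the downloaded openllm.py. Below is a minimal sketch of the same idea without the shell call, using the requests library that app.py already imports; fetch_openllm_module and the importlib loading are illustrative assumptions, not code from this commit.

```python
# Sketch only: an alternative to os.system("wget ...") that fetches openllm.py with
# requests and loads it via importlib instead of a star import.
import importlib.util
import requests

OPENLLM_URL = "https://raw.githubusercontent.com/Weyaxi/scrape-open-llm-leaderboard/main/openllm.py"

def fetch_openllm_module(url=OPENLLM_URL, path="openllm.py"):
    """Download the scraper module once and load it under the name `openllm`."""
    with open(path, "w", encoding="utf-8") as f:
        f.write(requests.get(url, timeout=30).text)
    spec = importlib.util.spec_from_file_location("openllm", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module  # exposes get_json_format_data() and get_datas(), as used by get_openllm_leaderboard()
```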
 
 
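Adding the new column means touching three lists that must stay aligned: headers_models, columns_to_convert (so models_df_to_clickable renders the cell as a link), and the datatype list passed to gr.Dataframe, where clickable columns need the "markdown" type. The following small consistency check is hypothetical; check_dataframe_config and the endswith matching against the emoji-prefixed headers are assumptions for illustration, not part of app.py.

```python
# Hypothetical helper, not in app.py: verify the header, datatype, and clickable-column
# lists stay in sync after adding "🏆 Best Model On Open LLM Leaderboard".
headers_models = ["🔢 Serial Number", "👤 Author Name", "📥 Total Downloads", "👍 Total Likes",
                  "🤖 Number of Models", "🏆 Best Model On Open LLM Leaderboard",
                  "🥇 Best Rank On Open LLM Leaderboard", "📊 Average Downloads per Model",
                  "📈 Average Likes per Model", "🚀 Most Downloaded Model", "📈 Most Download Count",
                  "❤️ Most Liked Model", "👍 Most Like Count", "🔥 Trending Model",
                  "👑 Best Rank at Trending Models", "🏷️ Type"]

datatypes = ["str", "markdown", "str", "str", "str", "markdown", "str", "str", "str",
             "markdown", "str", "markdown", "str", "markdown", "str", "str"]

clickable = ["Author Name", "Best Model On Open LLM Leaderboard", "Most Downloaded Model",
             "Most Liked Model", "Trending Model"]

def check_dataframe_config(headers, dtypes, clickable_columns):
    assert len(headers) == len(dtypes), "headers_models and datatype are out of sync"
    for col in clickable_columns:
        # headers carry emoji prefixes ("👤 Author Name"); the clickable names do not
        idx = next(i for i, h in enumerate(headers) if h.endswith(col))
        assert dtypes[idx] == "markdown", f"{col} must use the 'markdown' datatype to render links"

check_dataframe_config(headers_models, datatypes, clickable)
```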