fdaudens HF staff committed on
Commit
309d02d
·
verified ·
1 Parent(s): 3b09729

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -21
app.py CHANGED
@@ -15,19 +15,41 @@ def format_number(num):
15
  return str(num)
16
 
17
  def fetch_stats():
18
- """Fetch all DeepSeek model statistics"""
19
  api = HfApi()
20
 
21
  # Fetch original models
22
  original_models = [
23
- "deepseek-ai/deepseek-r1",
24
- "deepseek-ai/deepseek-r1-zero",
25
- "deepseek-ai/deepseek-r1-distill-llama-70b",
26
- "deepseek-ai/deepseek-r1-distill-qwen-32b",
27
- "deepseek-ai/deepseek-r1-distill-qwen-14b",
28
- "deepseek-ai/deepseek-r1-distill-llama-8b",
29
- "deepseek-ai/deepseek-r1-distill-qwen-7b",
30
- "deepseek-ai/deepseek-r1-distill-qwen-1.5b"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  ]
32
 
33
  original_stats = []
@@ -44,14 +66,36 @@ def fetch_stats():
44
 
45
  model_types = ["adapter", "finetune", "merge", "quantized"]
46
  base_models = [
47
- "DeepSeek-R1",
48
- "DeepSeek-R1-Zero",
49
- "DeepSeek-R1-Distill-Llama-70B",
50
- "DeepSeek-R1-Distill-Qwen-32B",
51
- "DeepSeek-R1-Distill-Qwen-14B",
52
- "DeepSeek-R1-Distill-Llama-8B",
53
- "DeepSeek-R1-Distill-Qwen-7B",
54
- "DeepSeek-R1-Distill-Qwen-1.5B"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  ]
56
 
57
  derivative_stats = []
@@ -60,14 +104,14 @@ def fetch_stats():
60
  for model_type in model_types:
61
  try:
62
  models = list(api.list_models(
63
- filter=f"base_model:{model_type}:deepseek-ai/{base_model}",
64
  full=True
65
  ))
66
 
67
  # Add each model to our stats
68
  for model in models:
69
  derivative_stats.append({
70
- 'base_model': f"deepseek-ai/{base_model}",
71
  'model_type': model_type,
72
  'model_id': model.id,
73
  'downloads_30d': model.downloads if hasattr(model, 'downloads') else 0,
@@ -165,8 +209,8 @@ def create_stats_html():
165
  def create_interface():
166
  """Create Gradio interface"""
167
  with gr.Blocks(theme=gr.themes.Soft()) as interface:
168
- gr.HTML("<h1 style='text-align: center;'>DeepSeek-R1 Models Stats</h1>")
169
-
170
  with gr.Row():
171
  with gr.Column():
172
  summary_html = gr.HTML()
 
15
  return str(num)
16
 
17
  def fetch_stats():
18
+ """Fetch all Meta Llama 3 model statistics"""
19
  api = HfApi()
20
 
21
  # Fetch original models
22
  original_models = [
23
+ "meta-llama/Llama-3.3-70B-Instruct",
24
+ "meta-llama/Meta-Llama-3-70B-Instruct",
25
+ "meta-llama/Llama-3.1-70B-Instruct",
26
+ "meta-llama/Llama-3.1-405B-FP8",
27
+ "meta-llama/Llama-3.2-90B-Vision-Instruct",
28
+ "meta-llama/Llama-3.2-11B-Vision-Instruct",
29
+ "meta-llama/Llama-3.2-3B-Instruct-QLORA_INT4_EO8",
30
+ "meta-llama/Llama-3.2-3B-Instruct-SpinQuant_INT4_EO8",
31
+ "meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8",
32
+ "meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8",
33
+ "meta-llama/Llama-Guard-3-11B-Vision",
34
+ "meta-llama/Llama-3.2-1B",
35
+ "meta-llama/Llama-3.2-1B-Instruct",
36
+ "meta-llama/Llama-3.2-3B",
37
+ "meta-llama/Llama-3.2-3B-Instruct",
38
+ "meta-llama/Llama-3.1-8B",
39
+ "meta-llama/Llama-Guard-3-8B",
40
+ "meta-llama/Meta-Llama-3-70B",
41
+ "meta-llama/Meta-Llama-3-8B-Instruct",
42
+ "meta-llama/Meta-Llama-3-8B",
43
+ "meta-llama/Llama-3.2-90B-Vision",
44
+ "meta-llama/Llama-3.2-11B-Vision",
45
+ "meta-llama/Llama-Guard-3-1B",
46
+ "meta-llama/Llama-Guard-3-1B-INT4",
47
+ "meta-llama/Llama-3.1-405B-Instruct-FP8",
48
+ "meta-llama/Llama-3.1-405B-Instruct",
49
+ "meta-llama/Llama-3.1-405B",
50
+ "meta-llama/Llama-3.1-70B",
51
+ "meta-llama/Llama-3.1-8B-Instruct",
52
+ "meta-llama/Llama-Guard-3-8B-INT8"
53
  ]
54
 
55
  original_stats = []
 
66
 
67
  model_types = ["adapter", "finetune", "merge", "quantized"]
68
  base_models = [
69
+ "meta-llama/Llama-3.3-70B-Instruct",
70
+ "meta-llama/Meta-Llama-3-70B-Instruct",
71
+ "meta-llama/Llama-3.1-70B-Instruct",
72
+ "meta-llama/Llama-3.1-405B-FP8",
73
+ "meta-llama/Llama-3.2-90B-Vision-Instruct",
74
+ "meta-llama/Llama-3.2-11B-Vision-Instruct",
75
+ "meta-llama/Llama-3.2-3B-Instruct-QLORA_INT4_EO8",
76
+ "meta-llama/Llama-3.2-3B-Instruct-SpinQuant_INT4_EO8",
77
+ "meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8",
78
+ "meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8",
79
+ "meta-llama/Llama-Guard-3-11B-Vision",
80
+ "meta-llama/Llama-3.2-1B",
81
+ "meta-llama/Llama-3.2-1B-Instruct",
82
+ "meta-llama/Llama-3.2-3B",
83
+ "meta-llama/Llama-3.2-3B-Instruct",
84
+ "meta-llama/Llama-3.1-8B",
85
+ "meta-llama/Llama-Guard-3-8B",
86
+ "meta-llama/Meta-Llama-3-70B",
87
+ "meta-llama/Meta-Llama-3-8B-Instruct",
88
+ "meta-llama/Meta-Llama-3-8B",
89
+ "meta-llama/Llama-3.2-90B-Vision",
90
+ "meta-llama/Llama-3.2-11B-Vision",
91
+ "meta-llama/Llama-Guard-3-1B",
92
+ "meta-llama/Llama-Guard-3-1B-INT4",
93
+ "meta-llama/Llama-3.1-405B-Instruct-FP8",
94
+ "meta-llama/Llama-3.1-405B-Instruct",
95
+ "meta-llama/Llama-3.1-405B",
96
+ "meta-llama/Llama-3.1-70B",
97
+ "meta-llama/Llama-3.1-8B-Instruct",
98
+ "meta-llama/Llama-Guard-3-8B-INT8"
99
  ]
100
 
101
  derivative_stats = []
 
104
  for model_type in model_types:
105
  try:
106
  models = list(api.list_models(
107
+ filter=f"base_model:{model_type}:meta-llama/{base_model}",
108
  full=True
109
  ))
110
 
111
  # Add each model to our stats
112
  for model in models:
113
  derivative_stats.append({
114
+ 'base_model': f"meta-llama/{base_model}",
115
  'model_type': model_type,
116
  'model_id': model.id,
117
  'downloads_30d': model.downloads if hasattr(model, 'downloads') else 0,
 
209
  def create_interface():
210
  """Create Gradio interface"""
211
  with gr.Blocks(theme=gr.themes.Soft()) as interface:
212
+ gr.HTML("<h1 style='text-align: center;'>Meta Llama3 Models Stats</h1>")
213
+ gr.HTML("<h3 style='text-align: center;'>(Includes models from the 3, 3.1, 3.2, and 3.3 versions)</h3>")
214
  with gr.Row():
215
  with gr.Column():
216
  summary_html = gr.HTML()