evijit (HF Staff) committed on
Commit afd7356 · verified · 1 Parent(s): a809fe2

Update app.py

Files changed (1):
  1. app.py  +91 −326
app.py CHANGED
@@ -1,23 +1,19 @@
  # --- START OF FILE app.py ---
 
- import json
  import gradio as gr
  import pandas as pd
  import plotly.express as px
- import os
- import numpy as np
- import duckdb
- from tqdm.auto import tqdm # Standard tqdm for console, gr.Progress will track it
  import time
- import ast # For safely evaluating string representations of lists/dicts
 
  # --- Constants ---
  MODEL_SIZE_RANGES = {
      "Small (<1GB)": (0, 1), "Medium (1-5GB)": (1, 5), "Large (5-20GB)": (5, 20),
      "X-Large (20-50GB)": (20, 50), "XX-Large (>50GB)": (50, float('inf'))
  }
- PROCESSED_PARQUET_FILE_PATH = "models_processed.parquet"
- HF_PARQUET_URL = 'https://huggingface.co/datasets/cfahlgren1/hub-stats/resolve/main/models.parquet' # Added for completeness within app.py context
 
  TAG_FILTER_CHOICES = [
      "Audio & Speech", "Time series", "Robotics", "Music", "Video", "Images",
@@ -38,139 +34,16 @@ PIPELINE_TAGS = [
      'table-question-answering',
  ]
 
- def extract_model_size(safetensors_data):
-     try:
-         if pd.isna(safetensors_data): return 0.0
-         data_to_parse = safetensors_data
-         if isinstance(safetensors_data, str):
-             try:
-                 if (safetensors_data.startswith('{') and safetensors_data.endswith('}')) or \
-                    (safetensors_data.startswith('[') and safetensors_data.endswith(']')):
-                     data_to_parse = ast.literal_eval(safetensors_data)
-                 else: data_to_parse = json.loads(safetensors_data)
-             except: return 0.0
-         if isinstance(data_to_parse, dict) and 'total' in data_to_parse:
-             try:
-                 total_bytes_val = data_to_parse['total']
-                 size_bytes = float(total_bytes_val)
-                 return size_bytes / (1024 * 1024 * 1024)
-             except (ValueError, TypeError): pass
-         return 0.0
-     except: return 0.0
-
- def extract_org_from_id(model_id):
-     if pd.isna(model_id): return "unaffiliated"
-     model_id_str = str(model_id)
-     return model_id_str.split("/")[0] if "/" in model_id_str else "unaffiliated"
-
- def process_tags_for_series(series_of_tags_values):
-     processed_tags_accumulator = []
-
-     for i, tags_value_from_series in enumerate(tqdm(series_of_tags_values, desc="Standardizing Tags", leave=False, unit="row")):
-         temp_processed_list_for_row = []
-         current_value_for_error_msg = str(tags_value_from_series)[:200] # Truncate for long error messages
 
-         try:
-             # Order of checks is important!
-             # 1. Handle explicit Python lists first
-             if isinstance(tags_value_from_series, list):
-                 current_tags_in_list = []
-                 for idx_tag, tag_item in enumerate(tags_value_from_series):
-                     try:
-                         # Ensure item is not NaN before string conversion if it might be a float NaN in a list
-                         if pd.isna(tag_item): continue
-                         str_tag = str(tag_item)
-                         stripped_tag = str_tag.strip()
-                         if stripped_tag:
-                             current_tags_in_list.append(stripped_tag)
-                     except Exception as e_inner_list_proc:
-                         print(f"ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a list for row {i}. Error: {e_inner_list_proc}. Original list: {current_value_for_error_msg}")
-                 temp_processed_list_for_row = current_tags_in_list
-
-             # 2. Handle NumPy arrays
-             elif isinstance(tags_value_from_series, np.ndarray):
-                 # Convert to list, then process elements, handling potential NaNs within the array
-                 current_tags_in_list = []
-                 for idx_tag, tag_item in enumerate(tags_value_from_series.tolist()): # .tolist() is crucial
-                     try:
-                         if pd.isna(tag_item): continue # Check for NaN after converting to Python type
-                         str_tag = str(tag_item)
-                         stripped_tag = str_tag.strip()
-                         if stripped_tag:
-                             current_tags_in_list.append(stripped_tag)
-                     except Exception as e_inner_array_proc:
-                         print(f"ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a NumPy array for row {i}. Error: {e_inner_array_proc}. Original array: {current_value_for_error_msg}")
-                 temp_processed_list_for_row = current_tags_in_list
-
-             # 3. Handle simple None or pd.NA after lists and arrays (which might contain pd.NA elements handled above)
-             elif tags_value_from_series is None or pd.isna(tags_value_from_series): # Now pd.isna is safe for scalars
-                 temp_processed_list_for_row = []
-
-             # 4. Handle strings (could be JSON-like, list-like, or comma-separated)
-             elif isinstance(tags_value_from_series, str):
-                 processed_str_tags = []
-                 # Attempt ast.literal_eval for strings that look like lists/tuples
-                 if (tags_value_from_series.startswith('[') and tags_value_from_series.endswith(']')) or \
-                    (tags_value_from_series.startswith('(') and tags_value_from_series.endswith(')')):
-                     try:
-                         evaluated_tags = ast.literal_eval(tags_value_from_series)
-                         if isinstance(evaluated_tags, (list, tuple)): # Check if eval result is a list/tuple
-                             # Recursively process this evaluated list/tuple, as its elements could be complex
-                             # For simplicity here, assume elements are simple strings after eval
-                             current_eval_list = []
-                             for tag_item in evaluated_tags:
-                                 if pd.isna(tag_item): continue
-                                 str_tag = str(tag_item).strip()
-                                 if str_tag: current_eval_list.append(str_tag)
-                             processed_str_tags = current_eval_list
-                     except (ValueError, SyntaxError):
-                         pass # If ast.literal_eval fails, let it fall to JSON or comma split
-
-                 # If ast.literal_eval didn't populate, try JSON
-                 if not processed_str_tags:
-                     try:
-                         json_tags = json.loads(tags_value_from_series)
-                         if isinstance(json_tags, list):
-                             # Similar to above, assume elements are simple strings after JSON parsing
-                             current_json_list = []
-                             for tag_item in json_tags:
-                                 if pd.isna(tag_item): continue
-                                 str_tag = str(tag_item).strip()
-                                 if str_tag: current_json_list.append(str_tag)
-                             processed_str_tags = current_json_list
-                     except json.JSONDecodeError:
-                         # If not a valid JSON list, fall back to comma splitting as the final string strategy
-                         processed_str_tags = [tag.strip() for tag in tags_value_from_series.split(',') if tag.strip()]
-                     except Exception as e_json_other:
-                         print(f"ERROR during JSON processing for string '{current_value_for_error_msg}' for row {i}. Error: {e_json_other}")
-                         processed_str_tags = [tag.strip() for tag in tags_value_from_series.split(',') if tag.strip()] # Fallback
-
-                 temp_processed_list_for_row = processed_str_tags
-
-             # 5. Fallback for other scalar types (e.g., int, float that are not NaN)
-             else:
-                 # This path is for non-list, non-ndarray, non-None/NaN, non-string types.
-                 # Or for NaNs that slipped through if they are not None or pd.NA (e.g. float('nan'))
-                 if pd.isna(tags_value_from_series): # Catch any remaining NaNs like float('nan')
-                     temp_processed_list_for_row = []
-                 else:
-                     str_val = str(tags_value_from_series).strip()
-                     temp_processed_list_for_row = [str_val] if str_val else []
-
-             processed_tags_accumulator.append(temp_processed_list_for_row)
-
-         except Exception as e_outer_tag_proc:
-             print(f"CRITICAL UNHANDLED ERROR processing row {i}: value '{current_value_for_error_msg}' (type: {type(tags_value_from_series)}). Error: {e_outer_tag_proc}. Appending [].")
-             processed_tags_accumulator.append([])
-
-     return processed_tags_accumulator
-
- def load_models_data(force_refresh=False, tqdm_cls=None):
-     if tqdm_cls is None: tqdm_cls = tqdm
      overall_start_time = time.time()
-     print(f"Gradio load_models_data called with force_refresh={force_refresh}")
 
-     expected_cols_in_processed_parquet = [
          'id', 'downloads', 'downloadsAllTime', 'likes', 'pipeline_tag', 'tags', 'params',
          'size_category', 'organization', 'has_audio', 'has_speech', 'has_music',
          'has_robot', 'has_bio', 'has_med', 'has_series', 'has_video', 'has_image',
@@ -178,139 +51,44 @@ def load_models_data(force_refresh=False, tqdm_cls=None):
          'data_download_timestamp'
      ]
 
-     if not force_refresh and os.path.exists(PROCESSED_PARQUET_FILE_PATH):
-         print(f"Attempting to load pre-processed data from: {PROCESSED_PARQUET_FILE_PATH}")
-         try:
-             df = pd.read_parquet(PROCESSED_PARQUET_FILE_PATH)
-             elapsed = time.time() - overall_start_time
-             missing_cols = [col for col in expected_cols_in_processed_parquet if col not in df.columns]
-             if missing_cols:
-                 raise ValueError(f"Pre-processed Parquet is missing columns: {missing_cols}. Please run preprocessor or refresh data in app.")
-
-             # --- Diagnostic for 'has_robot' after loading parquet ---
-             if 'has_robot' in df.columns:
-                 robot_count_parquet = df['has_robot'].sum()
-                 print(f"DIAGNOSTIC (App - Parquet Load): 'has_robot' column found. Number of True values: {robot_count_parquet}")
-                 if 0 < robot_count_parquet < 10:
-                     print(f"Sample 'has_robot' models (from parquet): {df[df['has_robot']]['id'].head().tolist()}")
-             else:
-                 print("DIAGNOSTIC (App - Parquet Load): 'has_robot' column NOT FOUND.")
-             # --- End Diagnostic ---
-
-             msg = f"Successfully loaded pre-processed data in {elapsed:.2f}s. Shape: {df.shape}"
-             print(msg)
-             return df, True, msg
-         except Exception as e:
-             print(f"Could not load pre-processed Parquet: {e}. ")
-             if force_refresh: print("Proceeding to fetch fresh data as force_refresh=True.")
-             else:
-                 err_msg = (f"Pre-processed data could not be loaded: {e}. "
-                            "Please use 'Refresh Data from Hugging Face' button.")
-                 return pd.DataFrame(), False, err_msg
-
-     df_raw = None
-     raw_data_source_msg = ""
-     if force_refresh:
-         print("force_refresh=True (Gradio). Fetching fresh data...")
-         fetch_start = time.time()
-         try:
-             query = f"SELECT * FROM read_parquet('{HF_PARQUET_URL}')" # Ensure HF_PARQUET_URL is defined
-             df_raw = duckdb.sql(query).df()
-             if df_raw is None or df_raw.empty: raise ValueError("Fetched data is empty or None.")
-             raw_data_source_msg = f"Fetched by Gradio in {time.time() - fetch_start:.2f}s. Rows: {len(df_raw)}"
-             print(raw_data_source_msg)
-         except Exception as e_hf:
-             return pd.DataFrame(), False, f"Fatal error fetching from Hugging Face (Gradio): {e_hf}"
-     else:
-         err_msg = (f"Pre-processed data '{PROCESSED_PARQUET_FILE_PATH}' not found/invalid. "
-                    "Run preprocessor or use 'Refresh Data' button.")
-         return pd.DataFrame(), False, err_msg
 
-     print(f"Initiating processing for data newly fetched by Gradio. {raw_data_source_msg}")
-     df = pd.DataFrame()
-     proc_start = time.time()
-
-     core_cols = {'id': str, 'downloads': float, 'downloadsAllTime': float, 'likes': float,
-                  'pipeline_tag': str, 'tags': object, 'safetensors': object}
-     for col, dtype in core_cols.items():
-         if col in df_raw.columns:
-             df[col] = df_raw[col]
-             if dtype == float: df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0.0)
-             elif dtype == str: df[col] = df[col].astype(str).fillna('')
          else:
-             if col in ['downloads', 'downloadsAllTime', 'likes']: df[col] = 0.0
-             elif col == 'pipeline_tag': df[col] = ''
-             elif col == 'tags': df[col] = pd.Series([[] for _ in range(len(df_raw))])
-             elif col == 'safetensors': df[col] = None
-             elif col == 'id': return pd.DataFrame(), False, "Critical: 'id' column missing."
-
-     output_filesize_col_name = 'params'
-     if output_filesize_col_name in df_raw.columns and pd.api.types.is_numeric_dtype(df_raw[output_filesize_col_name]):
-         df[output_filesize_col_name] = pd.to_numeric(df_raw[output_filesize_col_name], errors='coerce').fillna(0.0)
-     elif 'safetensors' in df.columns:
-         safetensors_iter = df['safetensors']
-         if tqdm_cls != tqdm :
-             safetensors_iter = tqdm_cls(df['safetensors'], desc="Extracting model sizes (GB)")
-         df[output_filesize_col_name] = [extract_model_size(s) for s in safetensors_iter]
-         df[output_filesize_col_name] = pd.to_numeric(df[output_filesize_col_name], errors='coerce').fillna(0.0)
-     else:
-         df[output_filesize_col_name] = 0.0
-
-     def get_size_category_gradio(size_gb_val):
-         try: numeric_size_gb = float(size_gb_val)
-         except (ValueError, TypeError): numeric_size_gb = 0.0
-         if pd.isna(numeric_size_gb): numeric_size_gb = 0.0
-         if 0 <= numeric_size_gb < 1: return "Small (<1GB)"
-         elif 1 <= numeric_size_gb < 5: return "Medium (1-5GB)"
-         elif 5 <= numeric_size_gb < 20: return "Large (5-20GB)"
-         elif 20 <= numeric_size_gb < 50: return "X-Large (20-50GB)"
-         elif numeric_size_gb >= 50: return "XX-Large (>50GB)"
-         else: return "Small (<1GB)"
-     df['size_category'] = df[output_filesize_col_name].apply(get_size_category_gradio)
-
-     df['tags'] = process_tags_for_series(df['tags'])
-     df['temp_tags_joined'] = df['tags'].apply(
-         lambda tl: '~~~'.join(str(t).lower() for t in tl if pd.notna(t) and str(t).strip()) if isinstance(tl, list) else ''
-     )
-     tag_map = {
-         'has_audio': ['audio'], 'has_speech': ['speech'], 'has_music': ['music'],
-         'has_robot': ['robot', 'robotics'],
-         'has_bio': ['bio'], 'has_med': ['medic', 'medical'],
-         'has_series': ['series', 'time-series', 'timeseries'],
-         'has_video': ['video'], 'has_image': ['image', 'vision'],
-         'has_text': ['text', 'nlp', 'llm']
-     }
-     for col, kws in tag_map.items():
-         pattern = '|'.join(kws)
-         df[col] = df['temp_tags_joined'].str.contains(pattern, na=False, case=False, regex=True)
-     df['has_science'] = (
-         df['temp_tags_joined'].str.contains('science', na=False, case=False, regex=True) &
-         ~df['temp_tags_joined'].str.contains('bigscience', na=False, case=False, regex=True)
-     )
-     del df['temp_tags_joined']
-     df['is_audio_speech'] = (df['has_audio'] | df['has_speech'] |
-                              df['pipeline_tag'].str.contains('audio|speech', case=False, na=False, regex=True))
-     df['is_biomed'] = df['has_bio'] | df['has_med']
-     df['organization'] = df['id'].apply(extract_org_from_id)
-
-     if 'safetensors' in df.columns and \
-        not (output_filesize_col_name in df_raw.columns and pd.api.types.is_numeric_dtype(df_raw[output_filesize_col_name])):
-         df = df.drop(columns=['safetensors'], errors='ignore')
-
-     # --- Diagnostic for 'has_robot' after app-side processing (force_refresh path) ---
-     if force_refresh and 'has_robot' in df.columns:
-         robot_count_app_proc = df['has_robot'].sum()
-         print(f"DIAGNOSTIC (App - Force Refresh Processing): 'has_robot' column processed. Number of True values: {robot_count_app_proc}")
-         if 0 < robot_count_app_proc < 10:
-             print(f"Sample 'has_robot' models (App processed): {df[df['has_robot']]['id'].head().tolist()}")
-     # --- End Diagnostic ---
-
-     print(f"Data processing by Gradio completed in {time.time() - proc_start:.2f}s.")
-
-     total_elapsed = time.time() - overall_start_time
-     final_msg = f"{raw_data_source_msg}. Processing by Gradio took {time.time() - proc_start:.2f}s. Total: {total_elapsed:.2f}s. Shape: {df.shape}"
-     print(final_msg)
-     return df, True, final_msg
 
 
  def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=None, size_filter=None, skip_orgs=None):
@@ -320,49 +98,48 @@ def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=N
                 "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science",
                 "Video": "has_video", "Images": "has_image", "Text": "has_text"}
 
-     # --- Diagnostic within make_treemap_data ---
-     if 'has_robot' in filtered_df.columns:
-         initial_robot_count = filtered_df['has_robot'].sum()
-         print(f"DIAGNOSTIC (make_treemap_data entry): Input df has {initial_robot_count} 'has_robot' models.")
-     else:
-         print("DIAGNOSTIC (make_treemap_data entry): 'has_robot' column NOT in input df.")
-     # --- End Diagnostic ---
-
      if tag_filter and tag_filter in col_map:
          target_col = col_map[tag_filter]
          if target_col in filtered_df.columns:
-             # --- Diagnostic for specific 'Robotics' filter application ---
-             if tag_filter == "Robotics":
-                 count_before_robot_filter = filtered_df[target_col].sum()
-                 print(f"DIAGNOSTIC (make_treemap_data): Applying 'Robotics' filter. Models with '{target_col}'=True before this filter step: {count_before_robot_filter}")
-             # --- End Diagnostic ---
              filtered_df = filtered_df[filtered_df[target_col]]
-             if tag_filter == "Robotics":
-                 print(f"DIAGNOSTIC (make_treemap_data): After 'Robotics' filter ({target_col}), df rows: {len(filtered_df)}")
          else:
              print(f"Warning: Tag filter column '{col_map[tag_filter]}' not found in DataFrame.")
      if pipeline_filter:
          if "pipeline_tag" in filtered_df.columns:
-             filtered_df = filtered_df[filtered_df["pipeline_tag"] == pipeline_filter]
          else:
              print(f"Warning: 'pipeline_tag' column not found for filtering.")
      if size_filter and size_filter != "None" and size_filter in MODEL_SIZE_RANGES.keys():
          if 'size_category' in filtered_df.columns:
              filtered_df = filtered_df[filtered_df['size_category'] == size_filter]
          else:
              print("Warning: 'size_category' column not found for filtering.")
      if skip_orgs and len(skip_orgs) > 0:
          if "organization" in filtered_df.columns:
              filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
          else:
              print("Warning: 'organization' column not found for filtering.")
      if filtered_df.empty: return pd.DataFrame()
-     if count_by not in filtered_df.columns or not pd.api.types.is_numeric_dtype(filtered_df[count_by]):
-         filtered_df[count_by] = pd.to_numeric(filtered_df.get(count_by), errors="coerce").fillna(0.0)
      org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
      top_orgs_list = org_totals.index.tolist()
      treemap_data = filtered_df[filtered_df["organization"].isin(top_orgs_list)][["id", "organization", count_by]].copy()
      treemap_data["root"] = "models"
      treemap_data[count_by] = pd.to_numeric(treemap_data[count_by], errors="coerce").fillna(0.0)
      return treemap_data
 
@@ -395,7 +172,7 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
              top_k_slider = gr.Slider(label="Number of Top Organizations", minimum=5, maximum=50, value=25, step=5)
              skip_orgs_textbox = gr.Textbox(label="Organizations to Skip (comma-separated)", value="TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski")
              generate_plot_button = gr.Button(value="Generate Plot", variant="primary", interactive=False)
-             refresh_data_button = gr.Button(value="Refresh Data from Hugging Face", variant="secondary")
          with gr.Column(scale=3):
              plot_output = gr.Plot()
              status_message_md = gr.Markdown("Initializing...")
@@ -409,28 +186,30 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
          return gr.update(visible=choice == "Tag Filter"), gr.update(visible=choice == "Pipeline Filter")
      filter_choice_radio.change(fn=_toggle_filters_visibility, inputs=filter_choice_radio, outputs=[tag_filter_dropdown, pipeline_filter_dropdown])
 
-     def ui_load_data_controller(force_refresh_ui_trigger=False, progress=gr.Progress(track_tqdm=True)):
-         print(f"ui_load_data_controller called with force_refresh_ui_trigger={force_refresh_ui_trigger}")
          status_msg_ui = "Loading data..."
          data_info_text = ""
          current_df = pd.DataFrame()
          load_success_flag = False
          data_as_of_date_display = "N/A"
          try:
-             current_df, load_success_flag, status_msg_from_load = load_models_data(
-                 force_refresh=force_refresh_ui_trigger, tqdm_cls=progress.tqdm
-             )
              if load_success_flag:
-                 if force_refresh_ui_trigger:
-                     data_as_of_date_display = pd.Timestamp.now(tz='UTC').strftime('%B %d, %Y, %H:%M:%S %Z')
-                 elif 'data_download_timestamp' in current_df.columns and not current_df.empty and pd.notna(current_df['data_download_timestamp'].iloc[0]):
                      timestamp_from_parquet = pd.to_datetime(current_df['data_download_timestamp'].iloc[0])
                      if timestamp_from_parquet.tzinfo is None:
                          timestamp_from_parquet = timestamp_from_parquet.tz_localize('UTC')
                      data_as_of_date_display = timestamp_from_parquet.strftime('%B %d, %Y, %H:%M:%S %Z')
                  else:
                      data_as_of_date_display = "Pre-processed (date unavailable)"
 
                  size_dist_lines = []
                  if 'size_category' in current_df.columns:
                      for cat in MODEL_SIZE_RANGES.keys():
@@ -440,22 +219,12 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
                  size_dist = "\n".join(size_dist_lines)
 
                  data_info_text = (f"### Data Information\n"
                                    f"- Overall Status: {status_msg_from_load}\n"
                                    f"- Total models loaded: {len(current_df):,}\n"
                                    f"- Data as of: {data_as_of_date_display}\n"
                                    f"- Size categories:\n{size_dist}")
 
-                 # # --- MODIFICATION: Add 'has_robot' count to UI data_info_text ---
-                 # if not current_df.empty and 'has_robot' in current_df.columns:
-                 #     robot_true_count = current_df['has_robot'].sum()
-                 #     data_info_text += f"\n- **Models flagged 'has_robot'**: {robot_true_count}"
-                 #     if 0 < robot_true_count <= 10: # If a few are found, list some IDs
-                 #         sample_robot_ids = current_df[current_df['has_robot']]['id'].head(5).tolist()
-                 #         data_info_text += f"\n - Sample 'has_robot' model IDs: `{', '.join(sample_robot_ids)}`"
-                 # elif not current_df.empty:
-                 #     data_info_text += "\n- **Models flagged 'has_robot'**: 'has_robot' column not found in loaded data."
-                 # # --- END MODIFICATION ---
-
                  status_msg_ui = "Data loaded successfully. Ready to generate plot."
              else:
                  data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
@@ -468,46 +237,45 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
          return current_df, load_success_flag, data_info_text, status_msg_ui
 
      def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
-                                     size_choice, k_orgs, skip_orgs_input, df_current_models):
          if df_current_models is None or df_current_models.empty:
              empty_fig = create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded")
-             error_msg = "Model data is not loaded or is empty. Please load or refresh data first."
              gr.Warning(error_msg)
              return empty_fig, error_msg
          tag_to_use = tag_choice if filter_type == "Tag Filter" else None
          pipeline_to_use = pipeline_choice if filter_type == "Pipeline Filter" else None
          size_to_use = size_choice if size_choice != "None" else None
          orgs_to_skip = [org.strip() for org in skip_orgs_input.split(',') if org.strip()] if skip_orgs_input else []
 
-         # --- Diagnostic before calling make_treemap_data ---
-         if 'has_robot' in df_current_models.columns:
-             robot_count_before_treemap = df_current_models['has_robot'].sum()
-             print(f"DIAGNOSTIC (ui_generate_plot_controller): df_current_models entering make_treemap_data has {robot_count_before_treemap} 'has_robot' models.")
-         # --- End Diagnostic ---
 
          treemap_df = make_treemap_data(df_current_models, metric_choice, k_orgs, tag_to_use, pipeline_to_use, size_to_use, orgs_to_skip)
 
          title_labels = {"downloads": "Downloads (last 30 days)", "downloadsAllTime": "Downloads (All Time)", "likes": "Likes"}
          chart_title = f"HuggingFace Models - {title_labels.get(metric_choice, metric_choice)} by Organization"
          plotly_fig = create_treemap(treemap_df, metric_choice, chart_title)
          if treemap_df.empty:
              plot_stats_md = "No data matches the selected filters. Try adjusting your filters."
          else:
              total_items_in_plot = len(treemap_df['id'].unique())
              total_value_in_plot = treemap_df[metric_choice].sum()
              plot_stats_md = (f"## Plot Statistics\n- **Models shown**: {total_items_in_plot:,}\n- **Total {metric_choice}**: {int(total_value_in_plot):,}")
          return plotly_fig, plot_stats_md
 
      demo.load(
-         fn=lambda progress=gr.Progress(track_tqdm=True): ui_load_data_controller(force_refresh_ui_trigger=False, progress=progress),
          inputs=[],
          outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
      )
-     refresh_data_button.click(
-         fn=lambda progress=gr.Progress(track_tqdm=True): ui_load_data_controller(force_refresh_ui_trigger=True, progress=progress),
-         inputs=[],
-         outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
-     )
      generate_plot_button.click(
          fn=ui_generate_plot_controller,
          inputs=[count_by_dropdown, filter_choice_radio, tag_filter_dropdown, pipeline_filter_dropdown,
@@ -516,11 +284,8 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
      )
 
  if __name__ == "__main__":
-     if not os.path.exists(PROCESSED_PARQUET_FILE_PATH):
-         print(f"WARNING: Pre-processed data file '{PROCESSED_PARQUET_FILE_PATH}' not found.")
-         print("It is highly recommended to run the preprocessing script (e.g., preprocess.py) first.") # Corrected script name
-     else:
-         print(f"Found pre-processed data file: '{PROCESSED_PARQUET_FILE_PATH}'.")
-     demo.launch()
 
  # --- END OF FILE app.py ---
1
  # --- START OF FILE app.py ---
 
  import gradio as gr
  import pandas as pd
  import plotly.express as px
  import time
+ from datasets import load_dataset # Import the datasets library
 
  # --- Constants ---
  MODEL_SIZE_RANGES = {
      "Small (<1GB)": (0, 1), "Medium (1-5GB)": (1, 5), "Large (5-20GB)": (5, 20),
      "X-Large (20-50GB)": (20, 50), "XX-Large (>50GB)": (50, float('inf'))
  }
+
+ # The Hugging Face dataset ID to load.
+ HF_DATASET_ID = "evijit/orgstats_daily_data"
 
  TAG_FILTER_CHOICES = [
      "Audio & Speech", "Time series", "Robotics", "Music", "Video", "Images",
 
      'table-question-answering',
  ]
 
 
+ def load_models_data():
+     """
+     Loads the pre-processed models data using the HF datasets library.
+     """
      overall_start_time = time.time()
+     print(f"Attempting to load dataset from Hugging Face Hub: {HF_DATASET_ID}")
 
+     # These are the columns expected to be in the pre-processed dataset.
+     expected_cols = [
          'id', 'downloads', 'downloadsAllTime', 'likes', 'pipeline_tag', 'tags', 'params',
          'size_category', 'organization', 'has_audio', 'has_speech', 'has_music',
          'has_robot', 'has_bio', 'has_med', 'has_series', 'has_video', 'has_image',
          'data_download_timestamp'
      ]
 
+     try:
+         # Load the dataset using the datasets library
+         # It will be cached locally after the first run.
+         dataset_dict = load_dataset(HF_DATASET_ID)
+
+         if not dataset_dict:
+             raise ValueError(f"Dataset '{HF_DATASET_ID}' loaded but appears empty.")
 
+         # Get the name of the first split (e.g., 'train')
+         split_name = list(dataset_dict.keys())[0]
+         print(f"Using dataset split: '{split_name}'. Converting to Pandas.")
+
+         # Convert the dataset object to a Pandas DataFrame
+         df = dataset_dict[split_name].to_pandas()
+
+         elapsed = time.time() - overall_start_time
+
+         # Validate that the loaded data has the columns we expect.
+         missing_cols = [col for col in expected_cols if col not in df.columns]
+         if missing_cols:
+             raise ValueError(f"Loaded dataset is missing expected columns: {missing_cols}.")
+
+         # --- Diagnostic for 'has_robot' after loading ---
+         if 'has_robot' in df.columns:
+             robot_count = df['has_robot'].sum()
+             print(f"DIAGNOSTIC (Dataset Load): 'has_robot' column found. Number of True values: {robot_count}")
          else:
+             print("DIAGNOSTIC (Dataset Load): 'has_robot' column NOT FOUND.")
+         # --- End Diagnostic ---
+
+         msg = f"Successfully loaded dataset '{HF_DATASET_ID}' (split: {split_name}) from HF Hub in {elapsed:.2f}s. Shape: {df.shape}"
+         print(msg)
+         return df, True, msg
+
+     except Exception as e:
+         err_msg = f"Failed to load dataset '{HF_DATASET_ID}' from Hugging Face Hub. Error: {e}"
+         print(err_msg)
+         return pd.DataFrame(), False, err_msg
 
 
  def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=None, size_filter=None, skip_orgs=None):
                 "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science",
                 "Video": "has_video", "Images": "has_image", "Text": "has_text"}
 
      if tag_filter and tag_filter in col_map:
          target_col = col_map[tag_filter]
          if target_col in filtered_df.columns:
              filtered_df = filtered_df[filtered_df[target_col]]
          else:
              print(f"Warning: Tag filter column '{col_map[tag_filter]}' not found in DataFrame.")
+
      if pipeline_filter:
          if "pipeline_tag" in filtered_df.columns:
+             # Ensure the comparison works even if pipeline_tag has NaNs or mixed types
+             filtered_df = filtered_df[filtered_df["pipeline_tag"].astype(str) == pipeline_filter]
          else:
              print(f"Warning: 'pipeline_tag' column not found for filtering.")
+
      if size_filter and size_filter != "None" and size_filter in MODEL_SIZE_RANGES.keys():
          if 'size_category' in filtered_df.columns:
              filtered_df = filtered_df[filtered_df['size_category'] == size_filter]
          else:
              print("Warning: 'size_category' column not found for filtering.")
+
      if skip_orgs and len(skip_orgs) > 0:
          if "organization" in filtered_df.columns:
              filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
          else:
              print("Warning: 'organization' column not found for filtering.")
+
      if filtered_df.empty: return pd.DataFrame()
+
+     # Ensure the metric column is numeric and handle potential missing values
+     if count_by not in filtered_df.columns:
+         print(f"Warning: Metric column '{count_by}' not found. Using 0.")
+         filtered_df[count_by] = 0.0
+     filtered_df[count_by] = pd.to_numeric(filtered_df[count_by], errors="coerce").fillna(0.0)
+
+     # Group and get top organizations
      org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
      top_orgs_list = org_totals.index.tolist()
+
+     # Prepare data for treemap
      treemap_data = filtered_df[filtered_df["organization"].isin(top_orgs_list)][["id", "organization", count_by]].copy()
      treemap_data["root"] = "models"
+     # Ensure numeric again for the final slice
      treemap_data[count_by] = pd.to_numeric(treemap_data[count_by], errors="coerce").fillna(0.0)
      return treemap_data
 
              top_k_slider = gr.Slider(label="Number of Top Organizations", minimum=5, maximum=50, value=25, step=5)
              skip_orgs_textbox = gr.Textbox(label="Organizations to Skip (comma-separated)", value="TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski")
              generate_plot_button = gr.Button(value="Generate Plot", variant="primary", interactive=False)
+
          with gr.Column(scale=3):
              plot_output = gr.Plot()
              status_message_md = gr.Markdown("Initializing...")
 
          return gr.update(visible=choice == "Tag Filter"), gr.update(visible=choice == "Pipeline Filter")
      filter_choice_radio.change(fn=_toggle_filters_visibility, inputs=filter_choice_radio, outputs=[tag_filter_dropdown, pipeline_filter_dropdown])
 
+     def ui_load_data_controller(progress=gr.Progress()):
+         progress(0, desc=f"Loading dataset '{HF_DATASET_ID}' from Hugging Face Hub...")
+         print("ui_load_data_controller called.")
          status_msg_ui = "Loading data..."
          data_info_text = ""
          current_df = pd.DataFrame()
          load_success_flag = False
          data_as_of_date_display = "N/A"
          try:
+             # Call the load function that uses the datasets library.
+             current_df, load_success_flag, status_msg_from_load = load_models_data()
              if load_success_flag:
+                 progress(0.9, desc="Processing loaded data...")
+                 # Get the data timestamp from the loaded file
+                 if 'data_download_timestamp' in current_df.columns and not current_df.empty and pd.notna(current_df['data_download_timestamp'].iloc[0]):
                      timestamp_from_parquet = pd.to_datetime(current_df['data_download_timestamp'].iloc[0])
+                     # Ensure the timestamp is timezone-aware for consistent formatting
                      if timestamp_from_parquet.tzinfo is None:
                          timestamp_from_parquet = timestamp_from_parquet.tz_localize('UTC')
                      data_as_of_date_display = timestamp_from_parquet.strftime('%B %d, %Y, %H:%M:%S %Z')
                  else:
                      data_as_of_date_display = "Pre-processed (date unavailable)"
 
+                 # Create summary text for the UI
                  size_dist_lines = []
                  if 'size_category' in current_df.columns:
                      for cat in MODEL_SIZE_RANGES.keys():
                  size_dist = "\n".join(size_dist_lines)
 
                  data_info_text = (f"### Data Information\n"
+                                   f"- Source: `{HF_DATASET_ID}`\n"
                                    f"- Overall Status: {status_msg_from_load}\n"
                                    f"- Total models loaded: {len(current_df):,}\n"
                                    f"- Data as of: {data_as_of_date_display}\n"
                                    f"- Size categories:\n{size_dist}")
 
                  status_msg_ui = "Data loaded successfully. Ready to generate plot."
              else:
                  data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
 
          return current_df, load_success_flag, data_info_text, status_msg_ui
 
      def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
+                                     size_choice, k_orgs, skip_orgs_input, df_current_models, progress=gr.Progress()):
          if df_current_models is None or df_current_models.empty:
              empty_fig = create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded")
+             error_msg = "Model data is not loaded or is empty. Please wait for data to load."
              gr.Warning(error_msg)
              return empty_fig, error_msg
+
+         progress(0.1, desc="Preparing data for visualization...")
+
          tag_to_use = tag_choice if filter_type == "Tag Filter" else None
          pipeline_to_use = pipeline_choice if filter_type == "Pipeline Filter" else None
          size_to_use = size_choice if size_choice != "None" else None
          orgs_to_skip = [org.strip() for org in skip_orgs_input.split(',') if org.strip()] if skip_orgs_input else []
 
 
          treemap_df = make_treemap_data(df_current_models, metric_choice, k_orgs, tag_to_use, pipeline_to_use, size_to_use, orgs_to_skip)
 
+         progress(0.7, desc="Generating Plotly visualization...")
+
          title_labels = {"downloads": "Downloads (last 30 days)", "downloadsAllTime": "Downloads (All Time)", "likes": "Likes"}
          chart_title = f"HuggingFace Models - {title_labels.get(metric_choice, metric_choice)} by Organization"
          plotly_fig = create_treemap(treemap_df, metric_choice, chart_title)
+
          if treemap_df.empty:
              plot_stats_md = "No data matches the selected filters. Try adjusting your filters."
          else:
              total_items_in_plot = len(treemap_df['id'].unique())
              total_value_in_plot = treemap_df[metric_choice].sum()
              plot_stats_md = (f"## Plot Statistics\n- **Models shown**: {total_items_in_plot:,}\n- **Total {metric_choice}**: {int(total_value_in_plot):,}")
+
          return plotly_fig, plot_stats_md
 
+     # On app load, call the controller to fetch data using the datasets library.
      demo.load(
+         fn=ui_load_data_controller,
          inputs=[],
          outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
      )
+
      generate_plot_button.click(
          fn=ui_generate_plot_controller,
          inputs=[count_by_dropdown, filter_choice_radio, tag_filter_dropdown, pipeline_filter_dropdown,
      )
 
  if __name__ == "__main__":
+     print(f"Application starting. Data will be loaded from Hugging Face dataset: {HF_DATASET_ID}")
+     # Increase the queue size for potentially busy traffic if hosted
+     demo.queue().launch()
 
  # --- END OF FILE app.py ---
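
Illustrative only: the snippet below is a minimal, self-contained sketch of the data path the updated app.py relies on, run outside Gradio. It assumes the evijit/orgstats_daily_data dataset exposes a single split (as the code above does) and includes a hypothetical stand-in for the create_treemap helper, which this commit leaves unchanged and therefore does not show; the real helper may differ.

# sketch.py -- not part of the commit; assumptions noted above
import plotly.express as px
from datasets import load_dataset

# Same loading path as the new load_models_data(): fetch the pre-processed
# dataset from the Hub and convert its first split to a pandas DataFrame.
dataset_dict = load_dataset("evijit/orgstats_daily_data")
split_name = list(dataset_dict.keys())[0]  # e.g. 'train'
df = dataset_dict[split_name].to_pandas()

def create_treemap(treemap_df, count_by, title):
    # Hypothetical stand-in for the app's unchanged helper: a plotly.express
    # treemap rooted at "models", grouped by organization, sized by the metric.
    if treemap_df.empty:
        return px.treemap(names=[], parents=[], values=[], title=title)
    fig = px.treemap(treemap_df, path=["root", "organization", "id"], values=count_by, title=title)
    fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
    return fig

Running the sketch requires the datasets and plotly packages; the treemap call mirrors the columns make_treemap_data() produces above (root, organization, id, and the chosen metric).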