evijit (HF Staff) committed
Commit 4517d15 · verified · 1 Parent(s): 97da54a

Update app.py

Files changed (1):
  1. app.py +82 -171
app.py CHANGED
@@ -1,4 +1,4 @@
-# --- START OF MODIFIED FILE app.py ---
+# --- START OF FULLY CORRECTED FILE app.py ---

 import gradio as gr
 import pandas as pd
@@ -7,158 +7,81 @@ import time
 from datasets import load_dataset # Import the datasets library

 # --- Constants ---
-# REMOVED the old MODEL_SIZE_RANGES dictionary.
-# NEW: Define the discrete steps for the parameter range slider.
 PARAM_CHOICES = ['< 1B', '1B', '5B', '12B', '32B', '64B', '128B', '256B', '> 500B']
-PARAM_CHOICES_DEFAULT = [PARAM_CHOICES[0], PARAM_CHOICES[-1]]
+# --- NEW: Define the default value using indices ---
+PARAM_CHOICES_DEFAULT_INDICES = [0, len(PARAM_CHOICES) - 1]

-# The Hugging Face dataset ID to load.
 HF_DATASET_ID = "evijit/orgstats_daily_data"
-
-TAG_FILTER_CHOICES = [
-    "Audio & Speech", "Time series", "Robotics", "Music", "Video", "Images",
-    "Text", "Biomedical", "Sciences"
-]
-
-PIPELINE_TAGS = [
-    'text-generation', 'text-to-image', 'text-classification', 'text2text-generation',
-    'audio-to-audio', 'feature-extraction', 'image-classification', 'translation',
-    'reinforcement-learning', 'fill-mask', 'text-to-speech', 'automatic-speech-recognition',
-    'image-text-to-text', 'token-classification', 'sentence-similarity', 'question-answering',
-    'image-feature-extraction', 'summarization', 'zero-shot-image-classification',
-    'object-detection', 'image-segmentation', 'image-to-image', 'image-to-text',
-    'audio-classification', 'visual-question-answering', 'text-to-video',
-    'zero-shot-classification', 'depth-estimation', 'text-ranking', 'image-to-video',
-    'multiple-choice', 'unconditional-image-generation', 'video-classification',
-    'text-to-audio', 'time-series-forecasting', 'any-to-any', 'video-text-to-text',
-    'table-question-answering',
-]
-
+TAG_FILTER_CHOICES = [ "Audio & Speech", "Time series", "Robotics", "Music", "Video", "Images", "Text", "Biomedical", "Sciences" ]
+PIPELINE_TAGS = [ 'text-generation', 'text-to-image', 'text-classification', 'text2text-generation', 'audio-to-audio', 'feature-extraction', 'image-classification', 'translation', 'reinforcement-learning', 'fill-mask', 'text-to-speech', 'automatic-speech-recognition', 'image-text-to-text', 'token-classification', 'sentence-similarity', 'question-answering', 'image-feature-extraction', 'summarization', 'zero-shot-image-classification', 'object-detection', 'image-segmentation', 'image-to-image', 'image-to-text', 'audio-classification', 'visual-question-answering', 'text-to-video', 'zero-shot-classification', 'depth-estimation', 'text-ranking', 'image-to-video', 'multiple-choice', 'unconditional-image-generation', 'video-classification', 'text-to-audio', 'time-series-forecasting', 'any-to-any', 'video-text-to-text', 'table-question-answering' ]

 def load_models_data():
-    """
-    Loads the pre-processed models data using the HF datasets library.
-    """
     overall_start_time = time.time()
     print(f"Attempting to load dataset from Hugging Face Hub: {HF_DATASET_ID}")
-
-    expected_cols = [
-        'id', 'downloads', 'downloadsAllTime', 'likes', 'pipeline_tag', 'tags', 'params',
-        'organization', 'has_audio', 'has_speech', 'has_music',
-        'has_robot', 'has_bio', 'has_med', 'has_series', 'has_video', 'has_image',
-        'has_text', 'has_science', 'is_audio_speech', 'is_biomed',
-        'data_download_timestamp'
-    ]
-
+    expected_cols = [ 'id', 'downloads', 'downloadsAllTime', 'likes', 'pipeline_tag', 'tags', 'params', 'organization', 'has_audio', 'has_speech', 'has_music', 'has_robot', 'has_bio', 'has_med', 'has_series', 'has_video', 'has_image', 'has_text', 'has_science', 'is_audio_speech', 'is_biomed', 'data_download_timestamp' ]
     try:
         dataset_dict = load_dataset(HF_DATASET_ID)
-
-        if not dataset_dict:
-            raise ValueError(f"Dataset '{HF_DATASET_ID}' loaded but appears empty.")
-
+        if not dataset_dict: raise ValueError(f"Dataset '{HF_DATASET_ID}' loaded but appears empty.")
         split_name = list(dataset_dict.keys())[0]
         print(f"Using dataset split: '{split_name}'. Converting to Pandas.")
         df = dataset_dict[split_name].to_pandas()
         elapsed = time.time() - overall_start_time
-
         missing_cols = [col for col in expected_cols if col not in df.columns]
         if missing_cols:
-            # The 'params' column is crucial for the new slider.
-            if 'params' in missing_cols:
-                raise ValueError(f"FATAL: Loaded dataset is missing the crucial 'params' column.")
+            if 'params' in missing_cols: raise ValueError(f"FATAL: Loaded dataset is missing the crucial 'params' column.")
             print(f"Warning: Loaded dataset is missing some expected columns: {missing_cols}.")
-
-        # Ensure 'params' column is numeric, coercing errors to NaN and then filling with 0.
-        # This is important for filtering. Assumes params are in billions.
         if 'params' in df.columns:
             df['params'] = pd.to_numeric(df['params'], errors='coerce').fillna(0)
         else:
-            # If 'params' is missing after all, create a dummy column to prevent crashes.
            df['params'] = 0
            print("CRITICAL WARNING: 'params' column not found in data. Parameter filtering will not work.")
-
        msg = f"Successfully loaded dataset '{HF_DATASET_ID}' (split: {split_name}) from HF Hub in {elapsed:.2f}s. Shape: {df.shape}"
        print(msg)
        return df, True, msg
-
    except Exception as e:
        err_msg = f"Failed to load dataset '{HF_DATASET_ID}' from Hugging Face Hub. Error: {e}"
        print(err_msg)
        return pd.DataFrame(), False, err_msg

-# --- NEW: Helper function to parse slider labels into numerical values ---
 def get_param_range_values(param_range_labels):
-    """Converts a list of two string labels from the slider into a numerical min/max tuple."""
-    if not param_range_labels or len(param_range_labels) != 2:
-        return None, None
-
+    if not param_range_labels or len(param_range_labels) != 2: return None, None
     min_label, max_label = param_range_labels
-
-    # Min value logic: '< 1B' becomes 0, otherwise parse the number.
     min_val = 0.0 if '<' in min_label else float(min_label.replace('B', ''))
-
-    # Max value logic: '> 500B' becomes infinity, otherwise parse the number.
     max_val = float('inf') if '>' in max_label else float(max_label.replace('B', ''))
-
     return min_val, max_val

-# --- MODIFIED: Function signature and filtering logic updated for parameter range ---
 def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=None, param_range=None, skip_orgs=None):
     if df is None or df.empty: return pd.DataFrame()
     filtered_df = df.copy()
-    col_map = { "Audio & Speech": "is_audio_speech", "Music": "has_music", "Robotics": "has_robot",
-                "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science",
-                "Video": "has_video", "Images": "has_image", "Text": "has_text"}
-
+    col_map = { "Audio & Speech": "is_audio_speech", "Music": "has_music", "Robotics": "has_robot", "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science", "Video": "has_video", "Images": "has_image", "Text": "has_text" }
     if tag_filter and tag_filter in col_map:
         target_col = col_map[tag_filter]
-        if target_col in filtered_df.columns:
-            filtered_df = filtered_df[filtered_df[target_col]]
-        else:
-            print(f"Warning: Tag filter column '{col_map[tag_filter]}' not found in DataFrame.")
-
+        if target_col in filtered_df.columns: filtered_df = filtered_df[filtered_df[target_col]]
+        else: print(f"Warning: Tag filter column '{col_map[tag_filter]}' not found in DataFrame.")
     if pipeline_filter:
-        if "pipeline_tag" in filtered_df.columns:
-            filtered_df = filtered_df[filtered_df["pipeline_tag"].astype(str) == pipeline_filter]
-        else:
-            print(f"Warning: 'pipeline_tag' column not found for filtering.")
-
-    # --- MODIFIED: Filtering logic now uses the numerical parameter range ---
+        if "pipeline_tag" in filtered_df.columns: filtered_df = filtered_df[filtered_df["pipeline_tag"].astype(str) == pipeline_filter]
+        else: print(f"Warning: 'pipeline_tag' column not found for filtering.")
     if param_range:
         min_params, max_params = get_param_range_values(param_range)
-        is_default_range = (param_range == PARAM_CHOICES_DEFAULT)
-
-        # Only filter if the range is not the default full range
+        # --- MODIFIED: Check against labels, not indices ---
+        is_default_range = (param_range[0] == PARAM_CHOICES[0] and param_range[1] == PARAM_CHOICES[-1])
         if not is_default_range and 'params' in filtered_df.columns:
-            # The 'params' column is in billions, so the values match our slider
-            if min_params is not None:
-                filtered_df = filtered_df[filtered_df['params'] >= min_params]
-            if max_params is not None and max_params != float('inf'):
-                # The upper bound is exclusive, e.g., 5B to 64B is [5, 64)
-                filtered_df = filtered_df[filtered_df['params'] < max_params]
-        elif 'params' not in filtered_df.columns:
-            print("Warning: 'params' column not found for filtering.")
-
-
+            if min_params is not None: filtered_df = filtered_df[filtered_df['params'] >= min_params]
+            if max_params is not None and max_params != float('inf'): filtered_df = filtered_df[filtered_df['params'] < max_params]
+        elif 'params' not in filtered_df.columns: print("Warning: 'params' column not found for filtering.")
     if skip_orgs and len(skip_orgs) > 0:
-        if "organization" in filtered_df.columns:
-            filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
-        else:
-            print("Warning: 'organization' column not found for filtering.")
-
+        if "organization" in filtered_df.columns: filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
+        else: print("Warning: 'organization' column not found for filtering.")
     if filtered_df.empty: return pd.DataFrame()
-
     if count_by not in filtered_df.columns:
-        print(f"Warning: Metric column '{count_by}' not found. Using 0.")
-        filtered_df[count_by] = 0.0
+        print(f"Warning: Metric column '{count_by}' not found. Using 0.")
+        filtered_df[count_by] = 0.0
     filtered_df[count_by] = pd.to_numeric(filtered_df[count_by], errors="coerce").fillna(0.0)
-
     org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
     top_orgs_list = org_totals.index.tolist()
-
     treemap_data = filtered_df[filtered_df["organization"].isin(top_orgs_list)][["id", "organization", count_by]].copy()
     treemap_data["root"] = "models"
-    treemap_data[count_by] = pd.to_numeric(treemap_data[count_by], errors="coerce").fillna(0.0)
+    treemap_data[count_by] = pd.to_numeric(treemap_data[count_by], errors="coerce").fillna(0.0)
     return treemap_data

 def create_treemap(treemap_data, count_by, title=None):
@@ -166,20 +89,16 @@ def create_treemap(treemap_data, count_by, title=None):
         fig = px.treemap(names=["No data matches filters"], parents=[""], values=[1])
         fig.update_layout(title="No data matches the selected filters", margin=dict(t=50, l=25, r=25, b=25))
         return fig
-    fig = px.treemap(
-        treemap_data, path=["root", "organization", "id"], values=count_by,
-        title=title or f"HuggingFace Models - {count_by.capitalize()} by Organization",
-        color_discrete_sequence=px.colors.qualitative.Plotly
-    )
+    fig = px.treemap(treemap_data, path=["root", "organization", "id"], values=count_by, title=title, color_discrete_sequence=px.colors.qualitative.Plotly)
     fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
     fig.update_traces(textinfo="label+value+percent root", hovertemplate="<b>%{label}</b><br>%{value:,} " + count_by + "<br>%{percentRoot:.2%} of total<extra></extra>")
     return fig

-with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
+with gr.Blocks(title="ModelVerse Explorer", fill_width=True) as demo:
     models_data_state = gr.State(pd.DataFrame())
     loading_complete_state = gr.State(False)

-    with gr.Row(): gr.Markdown("# HuggingFace Models TreeMap Visualization")
+    with gr.Row(): gr.Markdown("# 🤗 The Hub Org-Model Atlas")
     with gr.Row():
         with gr.Column(scale=1):
             count_by_dropdown = gr.Dropdown(label="Metric", choices=[("Downloads (last 30 days)", "downloads"), ("Downloads (All Time)", "downloadsAllTime"), ("Likes", "likes")], value="downloads")
@@ -187,15 +106,22 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
             tag_filter_dropdown = gr.Dropdown(label="Select Tag", choices=TAG_FILTER_CHOICES, value=None, visible=False)
             pipeline_filter_dropdown = gr.Dropdown(label="Select Pipeline Tag", choices=PIPELINE_TAGS, value=None, visible=False)

-            # --- MODIFIED: Replaced Dropdown with RangeSlider and a Reset Button ---
+            # --- MODIFIED: Replaced RangeSlider with Slider and a dynamic Markdown label ---
             with gr.Group():
                 with gr.Row():
                     gr.Markdown("<div style='padding-top: 10px; font-weight: 500;'>Parameters</div>")
                     reset_params_button = gr.Button("🔄 Reset", visible=False, size="sm", min_width=80)
-                param_range_slider = gr.RangeSlider(
-                    label=None, # Label is handled by Markdown above
-                    choices=PARAM_CHOICES,
-                    value=PARAM_CHOICES_DEFAULT,
+
+                # This markdown will show the selected text labels
+                param_label_display = gr.Markdown(f"Range: **{PARAM_CHOICES[0]}** to **{PARAM_CHOICES[-1]}**")
+
+                param_slider = gr.Slider(
+                    minimum=0,
+                    maximum=len(PARAM_CHOICES) - 1,
+                    step=1,
+                    value=PARAM_CHOICES_DEFAULT_INDICES,
+                    label=None, # Label is handled by components above
+                    show_label=False
                 )
             # --- END OF MODIFICATION ---

@@ -208,85 +134,76 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
             status_message_md = gr.Markdown("Initializing...")
             data_info_md = gr.Markdown("")

-    # --- NEW: Event handlers for the new parameter slider and reset button ---
-    def _update_reset_button_visibility(current_range):
-        """Shows the reset button only if the slider is not at its default full range."""
-        is_default = (current_range == PARAM_CHOICES_DEFAULT)
-        return gr.update(visible=not is_default)
+    # --- NEW AND MODIFIED: Event handlers for the new Slider ---
+    def _update_slider_ui_elements(current_range_indices):
+        """Updates the label and reset button visibility based on the slider's state."""
+        min_idx, max_idx = int(current_range_indices[0]), int(current_range_indices[1])
+        min_label = PARAM_CHOICES[min_idx]
+        max_label = PARAM_CHOICES[max_idx]
+
+        is_default = (min_idx == PARAM_CHOICES_DEFAULT_INDICES[0] and max_idx == PARAM_CHOICES_DEFAULT_INDICES[1])
+
+        label_md = f"Range: **{min_label}** to **{max_label}**"
+        button_visibility = gr.update(visible=not is_default)
+
+        return label_md, button_visibility

-    def _reset_param_slider_and_button():
-        """Resets the slider to its default value and hides the reset button."""
-        return gr.update(value=PARAM_CHOICES_DEFAULT), gr.update(visible=False)
+    def _reset_param_slider_and_ui():
+        """Resets the slider to default and updates the UI elements accordingly."""
+        default_indices = PARAM_CHOICES_DEFAULT_INDICES
+        default_label = f"Range: **{PARAM_CHOICES[default_indices[0]]}** to **{PARAM_CHOICES[default_indices[1]]}**"
+
+        return gr.update(value=default_indices), default_label, gr.update(visible=False)

-    param_range_slider.release(fn=_update_reset_button_visibility, inputs=param_range_slider, outputs=reset_params_button)
-    reset_params_button.click(fn=_reset_param_slider_and_button, outputs=[param_range_slider, reset_params_button])
+    param_slider.release(fn=_update_slider_ui_elements, inputs=param_slider, outputs=[param_label_display, reset_params_button])
+    reset_params_button.click(fn=_reset_param_slider_and_ui, outputs=[param_slider, param_label_display, reset_params_button])
     # --- END OF NEW EVENT HANDLERS ---

-    def _update_button_interactivity(is_loaded_flag):
-        return gr.update(interactive=is_loaded_flag)
+    def _update_button_interactivity(is_loaded_flag): return gr.update(interactive=is_loaded_flag)
     loading_complete_state.change(fn=_update_button_interactivity, inputs=loading_complete_state, outputs=generate_plot_button)

-    def _toggle_filters_visibility(choice):
-        return gr.update(visible=choice == "Tag Filter"), gr.update(visible=choice == "Pipeline Filter")
+    def _toggle_filters_visibility(choice): return gr.update(visible=choice == "Tag Filter"), gr.update(visible=choice == "Pipeline Filter")
     filter_choice_radio.change(fn=_toggle_filters_visibility, inputs=filter_choice_radio, outputs=[tag_filter_dropdown, pipeline_filter_dropdown])

     def ui_load_data_controller(progress=gr.Progress()):
         progress(0, desc=f"Loading dataset '{HF_DATASET_ID}' from Hugging Face Hub...")
-        print("ui_load_data_controller called.")
-        status_msg_ui = "Loading data..."
-        data_info_text = ""
-        current_df = pd.DataFrame()
-        load_success_flag = False
+        status_msg_ui, data_info_text, load_success_flag = "Loading data...", "", False
         try:
             current_df, load_success_flag, status_msg_from_load = load_models_data()
             if load_success_flag:
                 progress(0.9, desc="Processing loaded data...")
-                if 'data_download_timestamp' in current_df.columns and not current_df.empty and pd.notna(current_df['data_download_timestamp'].iloc[0]):
-                    timestamp_from_parquet = pd.to_datetime(current_df['data_download_timestamp'].iloc[0]).tz_localize('UTC')
-                    data_as_of_date_display = timestamp_from_parquet.strftime('%B %d, %Y, %H:%M:%S %Z')
-                else:
-                    data_as_of_date_display = "Pre-processed (date unavailable)"
-
-                # --- MODIFIED: Removed the old size category distribution text ---
+                ts = pd.to_datetime(current_df['data_download_timestamp'].iloc[0]).tz_localize('UTC') if 'data_download_timestamp' in current_df.columns and not current_df.empty and pd.notna(current_df['data_download_timestamp'].iloc[0]) else None
+                date_display = ts.strftime('%B %d, %Y, %H:%M:%S %Z') if ts else "Pre-processed (date unavailable)"
                 param_count = (current_df['params'] > 0).sum() if 'params' in current_df.columns else 0
-                data_info_text = (f"### Data Information\n"
-                                  f"- Source: `{HF_DATASET_ID}`\n"
-                                  f"- Overall Status: {status_msg_from_load}\n"
-                                  f"- Total models loaded: {len(current_df):,}\n"
-                                  f"- Models with parameter counts: {param_count:,}\n"
-                                  f"- Data as of: {data_as_of_date_display}\n")
-
+                data_info_text = f"### Data Information\n- Source: `{HF_DATASET_ID}`\n- Status: {status_msg_from_load}\n- Total models loaded: {len(current_df):,}\n- Models with parameter counts: {param_count:,}\n- Data as of: {date_display}\n"
                 status_msg_ui = "Data loaded successfully. Ready to generate plot."
-            else:
+            else:
                 data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
-                status_msg_ui = status_msg_from_load
+                status_msg_ui = status_msg_from_load
         except Exception as e:
-            status_msg_ui = f"An unexpected error occurred in ui_load_data_controller: {str(e)}"
+            status_msg_ui = f"An unexpected error occurred: {str(e)}"
             data_info_text = f"### Critical Error\n- {status_msg_ui}"
             print(f"Critical error in ui_load_data_controller: {e}")
-            load_success_flag = False
         return current_df, load_success_flag, data_info_text, status_msg_ui

-    # --- MODIFIED: Updated controller signature and logic to handle new slider ---
+    # --- MODIFIED: Controller now takes slider indices and converts them to labels ---
     def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
-                                    param_range_choice, k_orgs, skip_orgs_input, df_current_models, progress=gr.Progress()):
+                                    param_range_indices, k_orgs, skip_orgs_input, df_current_models, progress=gr.Progress()):
         if df_current_models is None or df_current_models.empty:
-            empty_fig = create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded")
-            error_msg = "Model data is not loaded or is empty. Please wait for data to load."
-            gr.Warning(error_msg)
-            return empty_fig, error_msg
-
+            return create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded"), "Model data is not loaded or is empty."
         progress(0.1, desc="Preparing data for visualization...")
-
         tag_to_use = tag_choice if filter_type == "Tag Filter" else None
         pipeline_to_use = pipeline_choice if filter_type == "Pipeline Filter" else None
         orgs_to_skip = [org.strip() for org in skip_orgs_input.split(',') if org.strip()] if skip_orgs_input else []

-        # Pass the param_range_choice directly to make_treemap_data
-        treemap_df = make_treemap_data(df_current_models, metric_choice, k_orgs, tag_to_use, pipeline_to_use, param_range_choice, orgs_to_skip)
+        # Convert indices from slider back to text labels for filtering logic
+        min_label = PARAM_CHOICES[int(param_range_indices[0])]
+        max_label = PARAM_CHOICES[int(param_range_indices[1])]
+        param_labels_for_filtering = [min_label, max_label]
+
+        treemap_df = make_treemap_data(df_current_models, metric_choice, k_orgs, tag_to_use, pipeline_to_use, param_labels_for_filtering, orgs_to_skip)

         progress(0.7, desc="Generating Plotly visualization...")
-
         title_labels = {"downloads": "Downloads (last 30 days)", "downloadsAllTime": "Downloads (All Time)", "likes": "Likes"}
         chart_title = f"HuggingFace Models - {title_labels.get(metric_choice, metric_choice)} by Organization"
         plotly_fig = create_treemap(treemap_df, metric_choice, chart_title)
@@ -296,21 +213,15 @@ with gr.Blocks(title="HuggingFace Model Explorer", fill_width=True) as demo:
         else:
             total_items_in_plot = len(treemap_df['id'].unique())
             total_value_in_plot = treemap_df[metric_choice].sum()
-            plot_stats_md = (f"## Plot Statistics\n- **Models shown**: {total_items_in_plot:,}\n- **Total {metric_choice}**: {int(total_value_in_plot):,}")
-
+            plot_stats_md = f"## Plot Statistics\n- **Models shown**: {total_items_in_plot:,}\n- **Total {metric_choice}**: {int(total_value_in_plot):,}"
         return plotly_fig, plot_stats_md

-    demo.load(
-        fn=ui_load_data_controller,
-        inputs=[],
-        outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
-    )
+    demo.load(fn=ui_load_data_controller, inputs=[], outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md])

-    # --- MODIFIED: Updated the inputs list for the click event ---
     generate_plot_button.click(
         fn=ui_generate_plot_controller,
         inputs=[count_by_dropdown, filter_choice_radio, tag_filter_dropdown, pipeline_filter_dropdown,
-                param_range_slider, top_k_slider, skip_orgs_textbox, models_data_state],
+                param_slider, top_k_slider, skip_orgs_textbox, models_data_state], # MODIFIED: Use param_slider
         outputs=[plot_output, status_message_md]
     )

@@ -318,4 +229,4 @@ if __name__ == "__main__":
     print(f"Application starting. Data will be loaded from Hugging Face dataset: {HF_DATASET_ID}")
     demo.queue().launch()

-# --- END OF MODIFIED FILE app.py ---
+# --- END OF FULLY CORRECTED FILE app.py ---
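For reviewers, here is a minimal, self-contained sketch of how the new index-based slider values are expected to flow into the existing filtering helper. PARAM_CHOICES and get_param_range_values are copied verbatim from app.py above; the slider indices fed in at the bottom are hypothetical example values, not output captured from the running app.

    # Sketch: slider indices -> text labels -> numeric bounds (in billions of parameters).
    PARAM_CHOICES = ['< 1B', '1B', '5B', '12B', '32B', '64B', '128B', '256B', '> 500B']

    def get_param_range_values(param_range_labels):
        if not param_range_labels or len(param_range_labels) != 2: return None, None
        min_label, max_label = param_range_labels
        min_val = 0.0 if '<' in min_label else float(min_label.replace('B', ''))
        max_val = float('inf') if '>' in max_label else float(max_label.replace('B', ''))
        return min_val, max_val

    # The gr.Slider is configured with minimum=0, maximum=len(PARAM_CHOICES) - 1, step=1,
    # so its values are indices into PARAM_CHOICES; the controller maps them back to labels.
    example_indices = [2, 5]  # hypothetical slider state
    labels = [PARAM_CHOICES[int(i)] for i in example_indices]
    print(labels)                                       # ['5B', '64B']
    print(get_param_range_values(labels))               # (5.0, 64.0): keep 5 <= params < 64
    print(get_param_range_values(['< 1B', '> 500B']))   # (0.0, inf): full default range

Note that the default full range is special-cased in make_treemap_data: when both endpoints sit at PARAM_CHOICES[0] and PARAM_CHOICES[-1], parameter filtering is skipped entirely.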
 
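As a quick way to eyeball the plot layout without loading the Hub dataset, the following sketch reproduces the px.treemap call used by create_treemap on a hand-made DataFrame. The organizations, model ids, and download counts below are invented purely for illustration.

    # Toy data standing in for the filtered Hub statistics; values are invented.
    import pandas as pd
    import plotly.express as px

    treemap_data = pd.DataFrame({
        "id": ["org-a/model-1", "org-a/model-2", "org-b/model-1"],  # hypothetical model ids
        "organization": ["org-a", "org-a", "org-b"],
        "downloads": [120, 80, 45],
        "root": ["models", "models", "models"],  # single synthetic root, as in make_treemap_data
    })

    # Same path/values layout as create_treemap: root -> organization -> model id.
    fig = px.treemap(treemap_data, path=["root", "organization", "id"], values="downloads",
                     title="Toy org/model treemap",
                     color_discrete_sequence=px.colors.qualitative.Plotly)
    fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
    fig.show()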