Update app.py
app.py CHANGED
@@ -1,15 +1,18 @@
-# --- START OF
+# --- START OF FINAL POLISHED FILE app.py ---
 
 import gradio as gr
 import pandas as pd
 import plotly.express as px
 import time
-from datasets import load_dataset
+from datasets import load_dataset
 
 # --- Constants ---
 PARAM_CHOICES = ['< 1B', '1B', '5B', '12B', '32B', '64B', '128B', '256B', '> 500B']
 PARAM_CHOICES_DEFAULT_INDICES = [0, len(PARAM_CHOICES) - 1]
 
+# --- NEW: Define choices for the Top-K dropdown ---
+TOP_K_CHOICES = list(range(5, 51, 5))
+
 HF_DATASET_ID = "evijit/orgstats_daily_data"
 TAG_FILTER_CHOICES = [ "Audio & Speech", "Time series", "Robotics", "Music", "Video", "Images", "Text", "Biomedical", "Sciences" ]
 PIPELINE_TAGS = [ 'text-generation', 'text-to-image', 'text-classification', 'text2text-generation', 'audio-to-audio', 'feature-extraction', 'image-classification', 'translation', 'reinforcement-learning', 'fill-mask', 'text-to-speech', 'automatic-speech-recognition', 'image-text-to-text', 'token-classification', 'sentence-similarity', 'question-answering', 'image-feature-extraction', 'summarization', 'zero-shot-image-classification', 'object-detection', 'image-segmentation', 'image-to-image', 'image-to-text', 'audio-classification', 'visual-question-answering', 'text-to-video', 'zero-shot-classification', 'depth-estimation', 'text-ranking', 'image-to-video', 'multiple-choice', 'unconditional-image-generation', 'video-classification', 'text-to-audio', 'time-series-forecasting', 'any-to-any', 'video-text-to-text', 'table-question-answering' ]
@@ -17,18 +20,12 @@ PIPELINE_TAGS = [ 'text-generation', 'text-to-image', 'text-classification', 'te
 def load_models_data():
     overall_start_time = time.time()
     print(f"Attempting to load dataset from Hugging Face Hub: {HF_DATASET_ID}")
-    expected_cols = [ 'id', 'downloads', 'downloadsAllTime', 'likes', 'pipeline_tag', 'tags', 'params', 'organization', 'has_audio', 'has_speech', 'has_music', 'has_robot', 'has_bio', 'has_med', 'has_series', 'has_video', 'has_image', 'has_text', 'has_science', 'is_audio_speech', 'is_biomed', 'data_download_timestamp' ]
     try:
         dataset_dict = load_dataset(HF_DATASET_ID)
         if not dataset_dict: raise ValueError(f"Dataset '{HF_DATASET_ID}' loaded but appears empty.")
         split_name = list(dataset_dict.keys())[0]
-        print(f"Using dataset split: '{split_name}'. Converting to Pandas.")
         df = dataset_dict[split_name].to_pandas()
         elapsed = time.time() - overall_start_time
-        missing_cols = [col for col in expected_cols if col not in df.columns]
-        if missing_cols:
-            if 'params' in missing_cols: raise ValueError(f"FATAL: Loaded dataset is missing the crucial 'params' column.")
-            print(f"Warning: Loaded dataset is missing some expected columns: {missing_cols}.")
         if 'params' in df.columns:
             df['params'] = pd.to_numeric(df['params'], errors='coerce').fillna(0)
         else:
@@ -53,33 +50,25 @@ def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=N
     if df is None or df.empty: return pd.DataFrame()
     filtered_df = df.copy()
     col_map = { "Audio & Speech": "is_audio_speech", "Music": "has_music", "Robotics": "has_robot", "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science", "Video": "has_video", "Images": "has_image", "Text": "has_text" }
-    if tag_filter and tag_filter in col_map:
-
-
-
-    if pipeline_filter:
-        if "pipeline_tag" in filtered_df.columns: filtered_df = filtered_df[filtered_df["pipeline_tag"].astype(str) == pipeline_filter]
-        else: print(f"Warning: 'pipeline_tag' column not found for filtering.")
+    if tag_filter and tag_filter in col_map and col_map[tag_filter] in filtered_df.columns:
+        filtered_df = filtered_df[filtered_df[col_map[tag_filter]]]
+    if pipeline_filter and "pipeline_tag" in filtered_df.columns:
+        filtered_df = filtered_df[filtered_df["pipeline_tag"].astype(str) == pipeline_filter]
     if param_range:
         min_params, max_params = get_param_range_values(param_range)
         is_default_range = (param_range[0] == PARAM_CHOICES[0] and param_range[1] == PARAM_CHOICES[-1])
         if not is_default_range and 'params' in filtered_df.columns:
             if min_params is not None: filtered_df = filtered_df[filtered_df['params'] >= min_params]
             if max_params is not None and max_params != float('inf'): filtered_df = filtered_df[filtered_df['params'] < max_params]
-
-
-    if "organization" in filtered_df.columns: filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
-    else: print("Warning: 'organization' column not found for filtering.")
+    if skip_orgs and len(skip_orgs) > 0 and "organization" in filtered_df.columns:
+        filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
     if filtered_df.empty: return pd.DataFrame()
-    if count_by not in filtered_df.columns:
-        print(f"Warning: Metric column '{count_by}' not found. Using 0.")
-        filtered_df[count_by] = 0.0
+    if count_by not in filtered_df.columns: filtered_df[count_by] = 0.0
     filtered_df[count_by] = pd.to_numeric(filtered_df[count_by], errors="coerce").fillna(0.0)
     org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
     top_orgs_list = org_totals.index.tolist()
     treemap_data = filtered_df[filtered_df["organization"].isin(top_orgs_list)][["id", "organization", count_by]].copy()
     treemap_data["root"] = "models"
-    treemap_data[count_by] = pd.to_numeric(treemap_data[count_by], errors="coerce").fillna(0.0)
     return treemap_data
 
 def create_treemap(treemap_data, count_by, title=None):
@@ -108,14 +97,19 @@ with gr.Blocks(title="ModelVerse Explorer", fill_width=True) as demo:
     with gr.Row():
         param_label_display = gr.Markdown("<div style='font-weight: 500;'>Parameters</div>")
         reset_params_button = gr.Button("🔄 Reset", visible=False, size="sm", min_width=80)
-
     param_slider = gr.Slider(
         minimum=0, maximum=len(PARAM_CHOICES) - 1, step=1,
         value=PARAM_CHOICES_DEFAULT_INDICES,
-        label="Parameter Range", show_label=False
+        label="Parameter Range", show_label=False
     )
+
+    # --- MODIFIED: Replaced Slider with Dropdown for Top-K selection ---
+    top_k_dropdown = gr.Dropdown(
+        label="Number of Top Organizations",
+        choices=TOP_K_CHOICES,
+        value=25
+    )
 
-    top_k_slider = gr.Slider(label="Number of Top Organizations", minimum=5, maximum=50, value=25, step=5)
     skip_orgs_textbox = gr.Textbox(label="Organizations to Skip (comma-separated)", value="TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski")
     generate_plot_button = gr.Button(value="Generate Plot", variant="primary", interactive=False)
 
@@ -124,31 +118,19 @@ with gr.Blocks(title="ModelVerse Explorer", fill_width=True) as demo:
     status_message_md = gr.Markdown("Initializing...")
     data_info_md = gr.Markdown("")
 
-    # --- MODIFIED: More robust event handlers for the slider ---
     def _update_slider_ui_elements(current_range_indices):
-
-        if not isinstance(current_range_indices, list) or len(current_range_indices) != 2:
-            # This is a defensive check to prevent crashes if the input is malformed.
-            return gr.update(), gr.update()
-
+        if not isinstance(current_range_indices, list) or len(current_range_indices) != 2: return gr.update(), gr.update()
         min_idx, max_idx = int(current_range_indices[0]), int(current_range_indices[1])
         min_label, max_label = PARAM_CHOICES[min_idx], PARAM_CHOICES[max_idx]
-
-        # Using HTML for bold is more reliable in gr.Markdown
         label_md = f"<div style='font-weight: 500;'>Parameters <span style='float: right; font-weight: normal; color: #555;'>{min_label} to {max_label}</span></div>"
-
         is_default = (min_idx == 0 and max_idx == len(PARAM_CHOICES) - 1)
-
-
-        return label_md, button_visibility
+        return label_md, gr.update(visible=not is_default)
 
     def _reset_param_slider_and_ui():
-        """Resets the slider to default and updates the UI elements accordingly."""
        default_label = "<div style='font-weight: 500;'>Parameters</div>"
        return gr.update(value=PARAM_CHOICES_DEFAULT_INDICES), default_label, gr.update(visible=False)
 
-
-    param_slider.change(fn=_update_slider_ui_elements, inputs=param_slider, outputs=[param_label_display, reset_params_button])
+    param_slider.release(fn=_update_slider_ui_elements, inputs=param_slider, outputs=[param_label_display, reset_params_button])
     reset_params_button.click(fn=_reset_param_slider_and_ui, outputs=[param_slider, param_label_display, reset_params_button])
 
     def _update_button_interactivity(is_loaded_flag): return gr.update(interactive=is_loaded_flag)
@@ -157,44 +139,37 @@ with gr.Blocks(title="ModelVerse Explorer", fill_width=True) as demo:
     def _toggle_filters_visibility(choice): return gr.update(visible=choice == "Tag Filter"), gr.update(visible=choice == "Pipeline Filter")
     filter_choice_radio.change(fn=_toggle_filters_visibility, inputs=filter_choice_radio, outputs=[tag_filter_dropdown, pipeline_filter_dropdown])
 
-    # --- MODIFIED: Fixed the timezone handling logic ---
     def ui_load_data_controller(progress=gr.Progress()):
-        progress(0, desc=f"Loading dataset '{HF_DATASET_ID}'
-        status_msg_ui, data_info_text, load_success_flag = "Loading data...", "", False
+        progress(0, desc=f"Loading dataset '{HF_DATASET_ID}'...")
        try:
            current_df, load_success_flag, status_msg_from_load = load_models_data()
            if load_success_flag:
-                progress(0.9, desc="Processing
+                progress(0.9, desc="Processing data...")
                date_display = "Pre-processed (date unavailable)"
-                if 'data_download_timestamp' in current_df.columns and
-
-                    # Check if tz-aware. If so, convert. If not, localize.
-                    if timestamp.tzinfo is None:
-                        ts = timestamp.tz_localize('UTC')
-                    else:
-                        ts = timestamp.tz_convert('UTC')
+                if 'data_download_timestamp' in current_df.columns and pd.notna(current_df['data_download_timestamp'].iloc[0]):
+                    ts = pd.to_datetime(current_df['data_download_timestamp'].iloc[0], utc=True)
                    date_display = ts.strftime('%B %d, %Y, %H:%M:%S %Z')
-
                param_count = (current_df['params'] > 0).sum() if 'params' in current_df.columns else 0
                data_info_text = f"### Data Information\n- Source: `{HF_DATASET_ID}`\n- Status: {status_msg_from_load}\n- Total models loaded: {len(current_df):,}\n- Models with parameter counts: {param_count:,}\n- Data as of: {date_display}\n"
-                status_msg_ui = "Data loaded
+                status_msg_ui = "Data loaded. Ready to generate plot."
            else:
                data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
                status_msg_ui = status_msg_from_load
        except Exception as e:
-            status_msg_ui = f"An unexpected error occurred: {str(e)}"
+            status_msg_ui = f"An unexpected error occurred during data loading: {str(e)}"
            data_info_text = f"### Critical Error\n- {status_msg_ui}"
+            load_success_flag = False
            print(f"Critical error in ui_load_data_controller: {e}")
        return current_df, load_success_flag, data_info_text, status_msg_ui
 
     def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
                                     param_range_indices, k_orgs, skip_orgs_input, df_current_models, progress=gr.Progress()):
        if df_current_models is None or df_current_models.empty:
-            return create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded"), "Model data is not loaded
-        progress(0.1, desc="Preparing data
+            return create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded"), "Model data is not loaded."
+        progress(0.1, desc="Preparing data...")
        tag_to_use = tag_choice if filter_type == "Tag Filter" else None
        pipeline_to_use = pipeline_choice if filter_type == "Pipeline Filter" else None
-        orgs_to_skip = [org.strip() for org in skip_orgs_input.split(',') if org.strip()]
+        orgs_to_skip = [org.strip() for org in skip_orgs_input.split(',') if org.strip()]
 
        min_label = PARAM_CHOICES[int(param_range_indices[0])]
        max_label = PARAM_CHOICES[int(param_range_indices[1])]
@@ -202,25 +177,26 @@ with gr.Blocks(title="ModelVerse Explorer", fill_width=True) as demo:
 
        treemap_df = make_treemap_data(df_current_models, metric_choice, k_orgs, tag_to_use, pipeline_to_use, param_labels_for_filtering, orgs_to_skip)
 
-        progress(0.7, desc="Generating
+        progress(0.7, desc="Generating plot...")
        title_labels = {"downloads": "Downloads (last 30 days)", "downloadsAllTime": "Downloads (All Time)", "likes": "Likes"}
        chart_title = f"HuggingFace Models - {title_labels.get(metric_choice, metric_choice)} by Organization"
        plotly_fig = create_treemap(treemap_df, metric_choice, chart_title)
 
        if treemap_df.empty:
-            plot_stats_md = "No data matches the selected filters.
+            plot_stats_md = "No data matches the selected filters. Please try different options."
        else:
            total_items_in_plot = len(treemap_df['id'].unique())
-            total_value_in_plot = treemap_df[
+            total_value_in_plot = treemap_df[count_by].sum()
            plot_stats_md = f"## Plot Statistics\n- **Models shown**: {total_items_in_plot:,}\n- **Total {metric_choice}**: {int(total_value_in_plot):,}"
        return plotly_fig, plot_stats_md
 
     demo.load(fn=ui_load_data_controller, inputs=[], outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md])
 
+    # --- MODIFIED: The inputs list now uses top_k_dropdown ---
     generate_plot_button.click(
        fn=ui_generate_plot_controller,
        inputs=[count_by_dropdown, filter_choice_radio, tag_filter_dropdown, pipeline_filter_dropdown,
-                param_slider,
+                param_slider, top_k_dropdown, skip_orgs_textbox, models_data_state],
        outputs=[plot_output, status_message_md]
     )
 
@@ -228,4 +204,4 @@ if __name__ == "__main__":
     print(f"Application starting. Data will be loaded from Hugging Face dataset: {HF_DATASET_ID}")
     demo.queue().launch()
 
-# --- END OF
+# --- END OF FINAL POLISHED FILE app.py ---
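
Note on the timestamp change above: the new code relies on pd.to_datetime(..., utc=True) to normalize both naive and tz-aware values to UTC, which is why the old tzinfo branching could be dropped. A minimal standalone sketch of that behavior (the sample timestamps below are invented for illustration and are not part of app.py):

import pandas as pd

naive = pd.Timestamp("2024-01-01 12:00:00")        # no tzinfo
aware = pd.Timestamp("2024-01-01 12:00:00+05:00")  # tz-aware

# utc=True localizes naive values to UTC and converts aware ones to UTC
print(pd.to_datetime(naive, utc=True))  # 2024-01-01 12:00:00+00:00
print(pd.to_datetime(aware, utc=True))  # 2024-01-01 07:00:00+00:00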
|