import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import gradio as gr
import requests
from bs4 import BeautifulSoup
import io
import os
import base64
import zipfile
from PIL import Image
from io import BytesIO
import tempfile
import sys
import subprocess
#############################################
# PART 1: YOUR EXISTING PLOTS & FUNCTIONALITY
#############################################

# For demonstration, assume you have a small data_full for "tiny" benchmarks:
data_full = [
    # your existing data
]
columns = ["Model Configuration", "Model Link", "tinyArc", "tinyHellaswag",
           "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"]
df_full = pd.DataFrame(data_full, columns=columns)
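# df_full holds one row per model: a display name, its Hugging Face link, and
# six tinyBenchmarks scores (columns 2 onward are the numeric scores).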
def plot_average_scores():
    # Work on a copy so the extra "Average Score" column does not leak into the other plots.
    df_avg = df_full.copy()
    df_avg["Average Score"] = df_avg.iloc[:, 2:].mean(axis=1)
    df_avg_sorted = df_avg.sort_values(by="Average Score", ascending=False)

    plt.figure(figsize=(14, 10))
    plt.barh(df_avg_sorted["Model Configuration"], df_avg_sorted["Average Score"])
    plt.title("Average Performance of Models Across Tasks", fontsize=16)
    plt.xlabel("Average Score", fontsize=14)
    plt.ylabel("Model Configuration", fontsize=14)
    plt.gca().invert_yaxis()
    plt.grid(axis='x', linestyle='--', alpha=0.7)
    plt.tight_layout()

    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()

    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
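# NOTE: each plot helper returns a (PIL image, temp-file path) pair so one click
# handler can feed both a gr.Image preview and a gr.File download in the UI below.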
def plot_task_performance():
    df_full_melted = df_full.melt(id_vars=["Model Configuration", "Model Link"],
                                  var_name="Task", value_name="Score")

    plt.figure(figsize=(16, 12))
    for model in df_full["Model Configuration"]:
        model_data = df_full_melted[df_full_melted["Model Configuration"] == model]
        plt.plot(model_data["Task"], model_data["Score"], marker="o", label=model)
    plt.title("Performance of All Models Across Tasks", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.xticks(rotation=45)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=9)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.tight_layout()

    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()

    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_task_specific_top_models():
    top_models = df_full.iloc[:, 2:].idxmax()
    top_scores = df_full.iloc[:, 2:].max()
    results = pd.DataFrame({"Top Model": top_models, "Score": top_scores}).reset_index().rename(columns={"index": "Task"})

    plt.figure(figsize=(14, 8))
    plt.bar(results["Task"], results["Score"])
    plt.title("Task-Specific Top Models", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.grid(axis="y", linestyle="--", alpha=0.7)
    plt.tight_layout()

    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()

    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_heatmap():
    plt.figure(figsize=(14, 10))
    sns.heatmap(df_full.iloc[:, 2:], annot=True, cmap="YlGnBu",
                xticklabels=columns[2:], yticklabels=df_full["Model Configuration"])
    plt.title("Performance Heatmap", fontsize=16)
    plt.tight_layout()

    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()

    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def scrape_mergekit_config(model_name):
    """
    Fetch the model's Hugging Face page and return the first <pre> block,
    which is where the MergeKit YAML configuration is typically published.
    """
    model_link = df_full.loc[df_full["Model Configuration"] == model_name, "Model Link"].values[0]
    response = requests.get(model_link, timeout=30)
    if response.status_code != 200:
        return f"Failed to fetch model page for {model_name}. Please check the link."

    soup = BeautifulSoup(response.text, "html.parser")
    yaml_config = soup.find("pre")
    if yaml_config:
        return yaml_config.text.strip()
    return f"No YAML configuration found for {model_name}."
def download_yaml(yaml_content, model_name):
    # Nothing to save if scraping failed or found no config.
    if "No YAML configuration found" in yaml_content or "Failed to fetch model page" in yaml_content:
        return None
    filename = f"{model_name.replace('/', '_')}_config.yaml"
    filepath = os.path.join(tempfile.gettempdir(), filename)
    with open(filepath, "w", encoding="utf-8") as f:
        f.write(yaml_content)
    return filepath
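# A gr.File output works with a path to a file on disk, so the YAML text is written
# to a temp file named after the model and that path is handed to the download widget.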
def scrape_model_page(model_url):
    try:
        response = requests.get(model_url, timeout=30)
        if response.status_code != 200:
            return f"Error: Unable to fetch the page (Status Code: {response.status_code})"

        soup = BeautifulSoup(response.text, "html.parser")
        yaml_config = soup.find("pre")
        yaml_text = yaml_config.text.strip() if yaml_config else "No YAML configuration found."

        metadata_section = soup.find("div", class_="metadata")
        metadata_text = metadata_section.text.strip() if metadata_section else "No metadata found."

        return f"**YAML Configuration:**\n{yaml_text}\n\n**Metadata:**\n{metadata_text}"
    except Exception as e:
        return f"Error: {str(e)}"
def display_scraped_model_data(model_url):
    return scrape_model_page(model_url)
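# Thin wrapper around scrape_model_page, wired to the live-scraping button below.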
def download_all_data():
    csv_buffer = io.StringIO()
    df_full.to_csv(csv_buffer, index=False)
    csv_data = csv_buffer.getvalue().encode('utf-8')

    average_plot_pil, average_plot_name = plot_average_scores()
    task_plot_pil, task_plot_name = plot_task_performance()
    top_models_plot_pil, top_models_plot_name = plot_task_specific_top_models()
    heatmap_plot_pil, heatmap_plot_name = plot_heatmap()

    plot_dict = {
        "average_performance": (average_plot_pil, average_plot_name),
        "task_performance": (task_plot_pil, task_plot_name),
        "top_models": (top_models_plot_pil, top_models_plot_name),
        "heatmap": (heatmap_plot_pil, heatmap_plot_name)
    }

    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, 'w') as zf:
        zf.writestr("model_scores.csv", csv_data)
        for name, (pil_image, _temp_path) in plot_dict.items():
            image_bytes = io.BytesIO()
            pil_image.save(image_bytes, format='PNG')
            image_bytes.seek(0)
            # Store each plot under a readable name rather than its temp-file path.
            zf.writestr(f"{name}.png", image_bytes.read())
        # Optionally, scrape each model for a YAML config:
        for model_name in df_full["Model Configuration"].to_list():
            yaml_content = scrape_mergekit_config(model_name)
            if ("No YAML configuration found" not in yaml_content) and ("Failed to fetch model page" not in yaml_content):
                zf.writestr(f"{model_name.replace('/', '_')}_config.yaml", yaml_content.encode())
    zip_buffer.seek(0)

    # The gr.File output expects a path, so write the archive to disk and return its location.
    zip_path = os.path.join(tempfile.gettempdir(), "analysis_data.zip")
    with open(zip_path, "wb") as f:
        f.write(zip_buffer.read())
    return zip_path
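# The archive bundles model_scores.csv, one PNG per plot, and any MergeKit YAML
# configs that could be scraped for the listed models.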
#############################################
# PART 2: RUNNING `scrape-leaderboard.py`
#############################################
def run_scrape_leaderboard():
    """
    Run the external script `scrape-leaderboard.py` via subprocess and capture
    whatever it prints to stdout (falling back to stderr if stdout is empty).
    """
    try:
        # 'scrape-leaderboard.py' must sit in the same folder, or be given as a full path.
        result = subprocess.run(
            [sys.executable, "scrape-leaderboard.py"],
            capture_output=True,
            text=True,
        )
        # Prefer stdout; surface stderr if the script produced no regular output.
        return result.stdout if result.stdout else result.stderr
    except Exception as e:
        return f"Error running script: {str(e)}"
###############################
# PART 3: YOUR GRADIO INTERFACE
###############################
with gr.Blocks() as demo:
    gr.Markdown("# Comprehensive Model Performance Analysis with Hugging Face Links")

    with gr.Row():
        btn1 = gr.Button("Show Average Performance")
        img1 = gr.Image(type="pil", label="Average Performance Plot")
        img1_download = gr.File(label="Download Average Performance")
        btn1.click(plot_average_scores, outputs=[img1, img1_download])

    with gr.Row():
        btn2 = gr.Button("Show Task Performance")
        img2 = gr.Image(type="pil", label="Task Performance Plot")
        img2_download = gr.File(label="Download Task Performance")
        btn2.click(plot_task_performance, outputs=[img2, img2_download])

    with gr.Row():
        btn3 = gr.Button("Task-Specific Top Models")
        img3 = gr.Image(type="pil", label="Task-Specific Top Models Plot")
        img3_download = gr.File(label="Download Top Models")
        btn3.click(plot_task_specific_top_models, outputs=[img3, img3_download])

    with gr.Row():
        btn4 = gr.Button("Plot Performance Heatmap")
        heatmap_img = gr.Image(type="pil", label="Performance Heatmap")
        heatmap_download = gr.File(label="Download Heatmap")
        btn4.click(plot_heatmap, outputs=[heatmap_img, heatmap_download])
    # Drop-down to pick a model, scrape for config
    with gr.Row():
        model_selector = gr.Dropdown(choices=df_full["Model Configuration"].tolist(), label="Select a Model")
        with gr.Column():
            scrape_btn = gr.Button("Scrape MergeKit Configuration")
            yaml_output = gr.Textbox(lines=10, placeholder="YAML Configuration will appear here.")
            scrape_btn.click(scrape_mergekit_config, inputs=model_selector, outputs=yaml_output)
        with gr.Column():
            save_yaml_btn = gr.Button("Save MergeKit Configuration")
            yaml_download = gr.File(label="Download MergeKit Configuration")
            save_yaml_btn.click(download_yaml, inputs=[yaml_output, model_selector], outputs=yaml_download)
    # Button to download everything (CSV + plots)
    with gr.Row():
        download_all_btn = gr.Button("Download Everything")
        all_downloads = gr.File(label="Download All Data")
        download_all_btn.click(download_all_data, outputs=all_downloads)

    # Live scraping of any model URL
    gr.Markdown("## Live Scraping Features")
    with gr.Row():
        url_input = gr.Textbox(label="Enter Hugging Face Model URL", placeholder="https://huggingface.co/<model>")
        live_scrape_btn = gr.Button("Scrape Model Page")
        live_scrape_output = gr.Textbox(label="Scraped Data", lines=15)
        live_scrape_btn.click(display_scraped_model_data, inputs=url_input, outputs=live_scrape_output)
    # NEW: Button that runs the external script 'scrape-leaderboard.py'
    gr.Markdown("## Run `scrape-leaderboard.py` Externally")
    with gr.Row():
        run_script_btn = gr.Button("Run 'scrape-leaderboard.py'")
        run_script_output = gr.Textbox(label="Script Output", lines=25)
        run_script_btn.click(fn=run_scrape_leaderboard, outputs=run_script_output)

# Finally, launch the app
demo.launch()
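# Optional tweak: demo.launch(share=True) exposes a temporary public URL if you
# need to share the demo outside this Space.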