"""
Script to generate a Markdown comparison table for new model results in a pull request.
Usage:
gh pr checkout {pr-number}
python scripts/create_pr_results_comment.py [--models MODEL1 MODEL2 ...] [--output OUTPUT_FILE]
Description:
- Compares new model results (added in the current PR) against reference models.
- Outputs a Markdown file with results for each new model and highlights the best scores.
- By default, compares against: intfloat/multilingual-e5-large and google/gemini-embedding-001.
- You can specify reference models with the --models argument.
Arguments:
--reference-models: List of reference models to compare against (default: intfloat/multilingual-e5-large google/gemini-embedding-001)
--output: Output markdown file path (default: model-comparison.md)
Example:
python scripts/create_pr_results_comment.py --models intfloat/multilingual-e5-large myorg/my-new-model
"""
from __future__ import annotations

import argparse
import json
import logging
import os
import subprocess
from collections import defaultdict
from pathlib import Path

import mteb
import pandas as pd

TaskName, ModelName = str, str

# Default reference models to compare against
REFERENCE_MODELS: list[str] = [
    "intfloat/multilingual-e5-large",
    "google/gemini-embedding-001",
]

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

repo_path = Path(__file__).parents[1]
# Point the MTEB cache at this repository's parent directory so that results
# are read from the local checkout.
os.environ["MTEB_CACHE"] = str(repo_path.parent)


def get_diff_from_main() -> list[str]:
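    """Return the paths (relative to the repo root) of files changed compared to origin/main."""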
differences = subprocess.run(
["git", "diff", "--name-only", "origin/main...HEAD"],
cwd=repo_path,
text=True,
capture_output=True,
).stdout.splitlines()
    return differences


def extract_new_models_and_tasks(
    differences: list[str],
) -> dict[ModelName, list[TaskName]]:
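    """Map each model with new results in this PR to the tasks those results cover.

    Only changed .json result files are considered; each is expected to sit next
    to a model_meta.json whose "name" field identifies the model.
    """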
diffs = [repo_path / diff for diff in differences]
result_diffs = filter(
lambda p: p.exists() and p.suffix == ".json" and p.name != "model_meta.json",
diffs,
)
models = defaultdict(list)
for diff in result_diffs:
model_meta = diff.parent / "model_meta.json"
task_name = diff.stem
with model_meta.open("r") as f:
model_name = json.load(f)["name"]
models[model_name].append(task_name)
    return models


def create_comparison_table(
    model: str, tasks: list[str], reference_models: list[str]
) -> pd.DataFrame:
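    """Build a results table comparing `model` against `reference_models` on `tasks`.

    Adds a "Max result" column with the best known score per task across all
    loaded results, and appends an "**Average**" row over all tasks.
    """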
models = [model] + reference_models
max_col_name = "Max result"
task_col_name = "task_name"
results = mteb.load_results(models=models, tasks=tasks, download_latest=False)
results = results.join_revisions()
df = results.to_dataframe()
if df.empty:
raise ValueError(f"No results found for models {models} on tasks {tasks}")
df[max_col_name] = None
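    # Best known score per task across all loaded results (not just the compared
    # models), used to fill the "Max result" column.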
task_results = mteb.load_results(tasks=tasks, download_latest=False)
task_results = task_results.join_revisions()
task_results_df = task_results.to_dataframe(format="long")
# some scores are in percentage, convert them to decimal
task_results_df.loc[task_results_df["score"] > 1, "score"] /= 100
max_dataframe = task_results_df.groupby(task_col_name).max()
if not max_dataframe.empty:
for task_name, row in max_dataframe.iterrows():
df.loc[df[task_col_name] == task_name, max_col_name] = row["score"]
averages: dict[str, float | None] = {}
for col in models + [max_col_name]:
if col not in df.columns:
continue
numeric = pd.to_numeric(df[col], errors="coerce")
avg = numeric.mean()
averages[col] = avg if not pd.isna(avg) else None
avg_row = pd.DataFrame(
{
task_col_name: ["**Average**"],
**{col: [val] for col, val in averages.items()},
}
)
    return pd.concat([df, avg_row], ignore_index=True)


def highlight_max_bold(
    df: pd.DataFrame, exclude_cols: list[str] = ["task_name"]
) -> pd.DataFrame:
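    """Render numeric cells to two decimals and bold the best value in each row.

    Columns listed in `exclude_cols` (the task name column by default) are left as-is.
    """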
result_df = df.copy()
for col in result_df.columns:
if col not in exclude_cols:
result_df[col] = result_df[col].apply(
lambda x: f"{x:.2f}"
if isinstance(x, (int, float)) and pd.notna(x)
else x
)
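    # Bold the largest numeric value in each row for the rendered Markdown table.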
tmp = df.drop(columns=exclude_cols)
for idx in df.index:
row = pd.to_numeric(tmp.loc[idx], errors="coerce")
if row.isna().all():
continue
max_col = row.idxmax()
if pd.notna(row[max_col]):
result_df.at[idx, max_col] = f"**{result_df.at[idx, max_col]}**"
    return result_df


def generate_markdown_content(
    model_tasks: dict[str, list[str]], reference_models: list[str]
) -> str:
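    """Assemble the Markdown report: a summary header plus one comparison table per new model."""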
if not model_tasks:
return "# Model Results Comparison\n\nNo new model results found in this PR."
all_tasks = sorted({t for tasks in model_tasks.values() for t in tasks})
new_models = list(model_tasks.keys())
parts: list[str] = [
"# Model Results Comparison",
"",
f"**Reference models:** {', '.join(f'`{m}`' for m in reference_models)}",
f"**New models evaluated:** {', '.join(f'`{m}`' for m in new_models)}",
f"**Tasks:** {', '.join(f'`{t}`' for t in all_tasks)}",
"",
]
for model_name, tasks in model_tasks.items():
parts.append(f"## Results for `{model_name}`")
df = create_comparison_table(model_name, tasks, reference_models)
bold_df = highlight_max_bold(df)
parts.append(bold_df.to_markdown(index=False))
parts.extend(["", "---", ""])
    return "\n".join(parts)


def create_argparse() -> argparse.ArgumentParser:
"""Create the argument parser for the script."""
parser = argparse.ArgumentParser(
description="Create PR comment with results comparison."
)
parser.add_argument(
"--reference-models",
nargs="+",
default=REFERENCE_MODELS,
help="List of reference models to compare against (default: %(default)s)",
)
parser.add_argument(
"--output",
type=Path,
default=Path("model-comparison.md"),
help="Output markdown file path",
)
    return parser


def main(reference_models: list[str], output_path: Path) -> None:
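    """Diff the branch against origin/main, build the comparison Markdown, and write it to `output_path`."""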
logger.info("Starting to create PR results comment...")
logger.info(f"Using reference models: {', '.join(reference_models)}")
diff = get_diff_from_main()
model_tasks = extract_new_models_and_tasks(diff)
markdown = generate_markdown_content(model_tasks, reference_models)
output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(markdown)


if __name__ == "__main__":
parser = create_argparse()
args = parser.parse_args()
main(args.reference_models, args.output)