eduagarcia committed
Commit 405857a · 1 Parent(s): e6779d8

add top model bar graph

app.py CHANGED
@@ -54,7 +54,8 @@ from src.tools.plots import (
     create_metric_plot_obj,
     create_plot_df,
     create_scores_df,
-    create_lat_score_mem_plot_obj
+    create_lat_score_mem_plot_obj,
+    create_top_n_models_comparison_plot
 )
 
 # Start ephemeral Spaces on PRs (see config in README.md)
@@ -380,6 +381,25 @@ with demo:
             )
 
         with gr.TabItem("📈 Metrics", elem_id="llm-benchmark-tab-table", id=4):
+            with gr.Row():
+                with gr.Column():
+                    size_filter = gr.Dropdown(
+                        choices=["All Sizes"] + list(NUMERIC_INTERVALS.keys()),
+                        label="Filter by Model Size",
+                        value="All Sizes",
+                        interactive=True
+                    )
+                    fig = create_top_n_models_comparison_plot(leaderboard_df, top_n=5)
+                    top_n_plot = gr.components.Plot(value=fig, show_label=False)
+
+                    def update_top_n_plot(size_option):
+                        return create_top_n_models_comparison_plot(leaderboard_df, top_n=5, size_filter=size_option)
+
+                    size_filter.change(
+                        fn=update_top_n_plot,
+                        inputs=[size_filter],
+                        outputs=[top_n_plot]
+                    )
             with gr.Row():
                 with gr.Column():
                     chart = create_metric_plot_obj(
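
Note: the new Metrics-tab block follows a common Gradio pattern: render the figure once, then let the Dropdown's change event re-render it. Below is a minimal, self-contained sketch of that wiring only; the size buckets and the make_plot helper are stand-ins for the leaderboard's NUMERIC_INTERVALS and create_top_n_models_comparison_plot, not the real code.

import gradio as gr
import plotly.express as px

SIZES = ["All Sizes", "<7B", "7B-13B", ">13B"]  # hypothetical buckets, not NUMERIC_INTERVALS

def make_plot(size_option):
    # Stand-in for create_top_n_models_comparison_plot(leaderboard_df, top_n=5, size_filter=size_option)
    return px.bar(x=["task_a", "task_b"], y=[50, 75], title=f"Filter: {size_option}")

with gr.Blocks() as demo:
    size_filter = gr.Dropdown(choices=SIZES, value="All Sizes",
                              label="Filter by Model Size", interactive=True)
    top_n_plot = gr.Plot(value=make_plot("All Sizes"), show_label=False)
    # .change() fires whenever the dropdown value changes and pushes the returned figure to the Plot
    size_filter.change(fn=make_plot, inputs=[size_filter], outputs=[top_n_plot])

if __name__ == "__main__":
    demo.launch()
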
src/display/formatting.py CHANGED
@@ -48,6 +48,21 @@ def make_clickable_model(model_name, json_path=None, revision=None, precision=No
 
     return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑") + " " + posfix
 
+def make_dummy_name(model_name, revision=None, precision=None, num_evals_same_model=1):
+    posfix = ""
+    if revision is not None and revision != "" and revision != "main":
+        if len(revision) > 12:
+            revision = revision[:7]
+        posfix += f" (rev: {revision})"
+    if precision is not None:
+        if num_evals_same_model == 1 and precision in ['float16', 'bfloat16']:
+            pass
+        else:
+            #if precision not in model_name:
+            posfix += f" [{precision}]"
+    posfix = posfix.strip()
+    return f"{model_name} {posfix}"
+
 
 def styled_error(error):
     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
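
A quick usage sketch of the new helper, based on the function body above (the model name and revision strings are made up):

from src.display.formatting import make_dummy_name

# Default branch + default precision adds no suffix (note the trailing space
# left by the final f-string when posfix is empty):
print(make_dummy_name("org/model", revision="main", precision="bfloat16"))
# "org/model "

# A non-main revision is truncated to 7 characters, and a non-default precision
# (or a model evaluated more than once) is appended in brackets:
print(make_dummy_name("org/model", revision="abc1234def567890",
                      precision="float16", num_evals_same_model=2))
# "org/model (rev: abc1234) [float16]"
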
src/display/utils.py CHANGED
@@ -256,13 +256,15 @@ class Language(Enum):
 
 #External models
 external_rows = []
+external_eval_results = [] # Initialize the list to store EvalResult objects
 if os.path.exists('external_models_results.json'):
     with open('external_models_results.json', 'r', encoding='utf8') as f:
         all_models = json.load(f)
     for model_data in all_models:
+        #Create external_rows
         model_row = deepcopy(baseline_row)
         model_row[AutoEvalColumn.model.name] = f'<a target="_blank" href="{model_data["link"]}" style="color: var(--text-color); text-decoration: underline;text-decoration-style: dotted;">{model_data["name"]} [{model_data["date"]}]</a>'
-        model_row[AutoEvalColumn.dummy.name] = model_data['model']
+        model_row[AutoEvalColumn.dummy.name] = model_data['name']
         for task in Tasks:
             model_row[task.value.col_name] = round(model_data['result_metrics'][task.value.benchmark]*100, 2)
         model_row[AutoEvalColumn.average.name] = round(model_data['result_metrics_average']*100, 2)
@@ -277,8 +279,81 @@ if os.path.exists('external_models_results.json'):
         model_row[AutoEvalColumn.params.name] = model_data['params']
 
         model_row[AutoEvalColumn.main_language.name] = model_data['main_language']
+        #convert 2025-04-03 to 2025-04-03T00:00:00Z
         external_rows.append(model_row)
 
+        #Create external_eval_results
+        eval_result = dict(
+            eval_name=f"external_{model_data['model']}",
+            full_model=model_data['name'],
+            org="External", # External models don't have an org in this context
+            model=model_data['name'],
+            # Scale results by 100 to match expected format
+            results={k: v * 100 for k, v in model_data['result_metrics'].items()},
+            model_sha="", # Not available
+            revision="main", # Default
+            precision=Precision.Unknown, # Not available
+            model_type=model_type, # Already determined above
+            weight_type=WeightType.Original, # Assuming original weights
+            main_language=model_data['main_language'],
+            architecture="Unknown", # Not available
+            license="Proprietary" if model_type == ModelType.proprietary else "?",
+            likes=0, # Not available
+            num_params=model_data.get('params', 0), # Use .get() for safety
+            date=model_data['date']+"T00:00:00Z",
+            still_on_hub=True, # Not applicable
+            is_merge=False, # Not applicable
+            flagged=False, # Not applicable
+            status="FINISHED",
+            tags=None, # Not available
+            json_filename='external_models_results.json', # Not applicable
+            eval_time=0.0, # Not available
+            # Scale average by 100
+            original_benchmark_average=None,#model_data.get('result_metrics_average', 0.0) * 100,
+            hidden=False, # Default
+            num_evals_model_rev=1 # Default
+        )
+        """
+        EvalResult(eval_name='01-ai_Yi-1.5-34B_bfloat16',
+                   full_model='01-ai/Yi-1.5-34B',
+                   org='01-ai',
+                   model='Yi-1.5-34B',
+                   results={'enem_challenge': 71.51854443666899,
+                            'bluex': 66.62030598052851,
+                            'oab_exams': 54.89749430523918,
+                            'assin2_rte': 89.76911637262349,
+                            'assin2_sts': 81.48786802023537,
+                            'faquad_nli': 58.5644163957417,
+                            'hatebr_offensive': 83.63023241432246,
+                            'portuguese_hate_speech': 69.62399848962205,
+                            'tweetsentbr': 72.28749707523902},
+                   model_sha='81136a42efdf6f6a63031ac31639a37813fe6e37',
+                   revision='main',
+                   precision=<Precision.bfloat16: ModelDetails(name='bfloat16',
+                             symbol='')>,
+                   model_type=<ModelType.PT: ModelDetails(name='pretrained',
+                              symbol='🟒')>,
+                   weight_type=<WeightType.Original: ModelDetails(name='Original',
+                               symbol='')>,
+                   main_language='English',
+                   architecture='LlamaForCausalLM',
+                   license='?',
+                   likes=0,
+                   num_params=34.39,
+                   date='2024-05-15T17:40:15Z',
+                   still_on_hub=True,
+                   is_merge=False,
+                   flagged=False,
+                   status='FINISHED',
+                   tags=None,
+                   json_filename='results_2024-05-17T10-36-18.336343.json',
+                   eval_time=11545.340715408325,
+                   original_benchmark_average=None,
+                   hidden=False,
+                   num_evals_model_rev=1)
+        """
+        external_eval_results.append(eval_result)
+
 
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn)]
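
The dicts appended to external_eval_results are later expanded with EvalResult(**external_row) in src/tools/plots.py, so their keys have to match EvalResult's constructor exactly. A tiny illustration of that pattern using a hypothetical MiniResult dataclass (not the real EvalResult):

from dataclasses import dataclass

@dataclass
class MiniResult:          # hypothetical stand-in for EvalResult
    eval_name: str
    results: dict
    date: str

row = dict(
    eval_name="external_some-model",
    results={"task_a": 0.715 * 100},      # same x100 scaling as above
    date="2024-05-15" + "T00:00:00Z",     # same date normalization as above
)
result = MiniResult(**row)                # mirrors EvalResult(**external_row) in plots.py
print(result.eval_name, result.results, result.date)
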
src/leaderboard/read_evals.py CHANGED
@@ -11,7 +11,7 @@ import numpy as np
 
 from huggingface_hub import ModelCard
 
-from src.display.formatting import make_clickable_model
+from src.display.formatting import make_clickable_model, make_dummy_name
 from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, Language, WeightType, ORIGINAL_TASKS
 from src.envs import GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS, SHOW_INCOMPLETE_EVALS
 
@@ -191,7 +191,7 @@ class EvalResult:
             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             AutoEvalColumn.architecture.name: self.architecture,
             AutoEvalColumn.model.name: make_clickable_model(self.full_model, self.json_filename, revision=self.revision, precision=self.precision.value.name, num_evals_same_model=self.num_evals_model_rev),
-            AutoEvalColumn.dummy.name: self.full_model,
+            AutoEvalColumn.dummy.name: make_dummy_name(self.full_model, revision=self.revision, precision=self.precision.value.name, num_evals_same_model=self.num_evals_model_rev),
             AutoEvalColumn.revision.name: self.revision,
             AutoEvalColumn.average.name: average,
             AutoEvalColumn.license.name: self.license,
src/tools/plots.py CHANGED
@@ -4,8 +4,9 @@ import plotly.express as px
 from plotly.graph_objs import Figure
 
 from src.leaderboard.filter_models import FLAGGED_MODELS
-from src.display.utils import human_baseline_row as HUMAN_BASELINE, AutoEvalColumn, Tasks, Task, BENCHMARK_COLS
+from src.display.utils import human_baseline_row as HUMAN_BASELINE, AutoEvalColumn, Tasks, Task, BENCHMARK_COLS, external_eval_results, NUMERIC_INTERVALS
 from src.leaderboard.read_evals import EvalResult
+import copy
 
 
 
@@ -19,9 +20,16 @@ def create_scores_df(raw_data: list[EvalResult]) -> pd.DataFrame:
     # Step 1: Ensure 'date' is in datetime format and sort the DataFrame by it
 
     #create dataframe with EvalResult dataclass columns, even if raw_data is empty
+    raw_data = copy.deepcopy(raw_data)
+    for external_row in external_eval_results:
+        raw_data.append(EvalResult(**external_row))
     results_df = pd.DataFrame(raw_data, columns=EvalResult.__dataclass_fields__.keys())
 
     #results_df["date"] = pd.to_datetime(results_df["date"], format="mixed", utc=True)
+    #convert date to datetime
+    results_df["date"] = pd.to_datetime(results_df["date"], format="mixed", utc=True)
+    #convert to simple date string 2025-04-26
+    results_df["date"] = results_df["date"].dt.strftime("%Y-%m-%d")
     results_df.sort_values(by="date", inplace=True)
 
     # Step 2: Initialize the scores dictionary
@@ -129,7 +137,7 @@ def create_metric_plot_obj(
     )
 
     # Update the range of the y-axis
-    fig.update_layout(yaxis_range=[0, 100])
+    #fig.update_layout(yaxis_range=[0, 100])
 
     # Create a dictionary to hold the color mapping for each metric
     metric_color_mapping = {}
@@ -212,6 +220,222 @@ def create_lat_score_mem_plot_obj(leaderboard_df):
 
     return fig
 
-# Example Usage:
-# human_baselines dictionary is defined.
-# chart = create_metric_plot_obj(scores_df, ["ARC", "HellaSwag", "MMLU", "TruthfulQA"], human_baselines, "Graph Title")
+def create_top_n_models_comparison_plot(leaderboard_df: pd.DataFrame, top_n: int = 5, size_filter: str = None) -> Figure:
+    """
+    Creates a grouped bar chart comparing the performance of the top N models across all metrics.
+
+    :param leaderboard_df: DataFrame containing the leaderboard data.
+    :param top_n: The number of top models to include in the comparison (default is 5).
+    :param size_filter: If provided, only include models of this specific size category.
+    :return: A Plotly figure object representing the comparison plot.
+    """
+    # Ensure BENCHMARK_COLS contains the correct metric column names
+    metric_cols = BENCHMARK_COLS
+
+    # Filter out non-model rows (like baseline or human) and select relevant columns
+    models_df = leaderboard_df[~leaderboard_df[AutoEvalColumn.dummy.name].isin(["baseline", "human_baseline"])].copy()
+
+    # Add size group information to the DataFrame
+    models_df['size_group'] = models_df[AutoEvalColumn.params.name].apply(
+        lambda x: next((k for k, v in NUMERIC_INTERVALS.items() if x in v), '?')
+    )
+
+    # Filter by size category if specified
+    if size_filter and size_filter != 'All Sizes':
+        models_df = models_df[models_df['size_group'] == size_filter]
+        if models_df.empty:
+            # If no models match the size filter, return an empty figure with a message
+            fig = px.bar(
+                x=["No Data"],
+                y=[0],
+                title=f"No models found in the {size_filter} size category"
+            )
+            fig.update_layout(
+                xaxis_title="",
+                yaxis_title="",
+                showlegend=False
+            )
+            return fig
+
+    # Sort models by average score and select the top N
+    top_models_df = models_df.nlargest(top_n, AutoEvalColumn.average.name)
+
+    # Select only the necessary columns: model name and metric scores
+    plot_data = top_models_df[[AutoEvalColumn.dummy.name] + metric_cols]
+
+    # Melt the DataFrame to long format suitable for plotting
+    # 'id_vars' specifies the column(s) to keep as identifiers
+    # 'value_vars' specifies the columns to unpivot
+    # 'var_name' is the name for the new column containing the original column names (metrics)
+    # 'value_name' is the name for the new column containing the values (scores)
+    melted_df = pd.melt(
+        plot_data,
+        id_vars=[AutoEvalColumn.dummy.name],
+        value_vars=metric_cols,
+        var_name="Metric",
+        value_name="Score",
+    )
+
+    # Validate and cap scores to ensure they're within a reasonable range (0-100)
+    melted_df['Score'] = melted_df['Score'].apply(lambda x: min(max(x, 0), 100))
+
+    # Create the grouped bar chart
+    fig = px.bar(
+        melted_df,
+        x="Metric",
+        y="Score",
+        color=AutoEvalColumn.dummy.name,  # Group bars by model name
+        barmode="group",  # Display bars side-by-side for each metric
+        title=f"Top {top_n} Models Comparison Across Metrics",
+        labels={AutoEvalColumn.dummy.name: "Model"},  # Rename legend title
+        custom_data=[AutoEvalColumn.dummy.name, "Metric", "Score"],  # Data for hover
+        range_y=[0, 100],  # Force y-axis range to be 0-100
+    )
+
+    # Update hovertemplate
+    fig.update_traces(
+        hovertemplate="<br>".join(
+            [
+                "Model: %{customdata[0]}",
+                "Metric: %{customdata[1]}",
+                "Score: %{customdata[2]:.2f}",  # Format score to 2 decimal places
+                "<extra></extra>",  # Remove the default trace info
+            ]
+        )
+    )
+
+    # Create title with size filter information if applicable
+    title_text = f"Top {top_n} Models Comparison Across Metrics"
+    if size_filter and size_filter != 'All Sizes':
+        title_text += f" ({size_filter} Models)"
+
+    # Calculate appropriate y-axis range based on the data
+    min_score = melted_df['Score'].min()
+    max_score = melted_df['Score'].max()
+
+    # Set y-axis minimum (start at 0 unless all scores are high)
+    y_min = 40 if min_score > 50 else 0
+
+    # Set y-axis maximum (ensure there's room for annotations)
+    y_max = 100 if max_score < 95 else 105
+
+    # Optional: Adjust layout for better readability
+    fig.update_layout(
+        title={
+            "text": title_text,
+            "y": 0.95,
+            "x": 0.5,
+            "xanchor": "center",
+            "yanchor": "top",
+        },
+        xaxis_title="Metric",
+        yaxis_title="Score (%)",
+        legend_title="Model",
+        yaxis=dict(
+            range=[y_min, y_max],  # Set y-axis range dynamically
+            constrain="domain",  # Constrain the axis to the domain
+            constraintoward="top"  # Constrain toward the top
+        ),
+        width=1600,
+        height=450,
+    )
+
+    # Define shape icons for each model
+    shape_icons = {
+        0: "triangle-up",  # First model gets triangle
+        1: "square",       # Second model gets square
+        2: "circle",       # Third model gets circle
+        3: "diamond",      # Fourth model gets diamond
+        4: "star",         # Fifth model gets star
+        5: "pentagon",     # Sixth model gets pentagon
+        6: "hexagon",      # Seventh model gets hexagon
+        7: "cross",        # Eighth model gets cross
+        8: "x",            # Ninth model gets x
+        9: "hourglass",    # Tenth model gets hourglass
+    }
+
+    # Get the average score for each model
+    model_averages = {}
+    for model in top_models_df[AutoEvalColumn.dummy.name].unique():
+        try:
+            model_averages[model] = top_models_df.loc[top_models_df[AutoEvalColumn.dummy.name] == model, AutoEvalColumn.average.name].values[0]
+        except (IndexError, KeyError):
+            # If average score is not available, use None
+            model_averages[model] = None
+
+    # Add shapes to the legend and annotations with icons for each bar
+    for i, bar in enumerate(fig.data):
+        model_name = bar.name
+        model_index = list(top_models_df[AutoEvalColumn.dummy.name].unique()).index(model_name) % len(shape_icons)
+        icon_shape = shape_icons[model_index]
+
+        # Update the name in the legend to include the shape symbol
+        shape_symbol = get_symbol_for_shape(icon_shape)
+        fig.data[i].name = f"{shape_symbol} {model_name}"
+
+        # For each bar in this trace
+        for j, (x, y) in enumerate(zip(bar.x, bar.y)):
+            # Use the actual bar score instead of the average
+            score_text = f"<b>{y:.1f}</b>"
+
+            # Calculate the exact position for the annotation
+            # Plotly's grouped bar charts position bars at specific offsets
+            # We need to match these offsets exactly
+            num_models = len(top_models_df[AutoEvalColumn.dummy.name].unique())
+
+            # The total width allocated for all bars in a group
+            total_group_width = 0.8
+
+            # Width of each individual bar
+            bar_width = total_group_width / num_models
+
+            # Calculate the offset for this specific bar within its group
+            # i represents which model in the group (0 is the first model, etc.)
+            # Center of the group is at x, so we need to adjust from there
+            offset = (i - (num_models-1)/2) * bar_width
+
+            # Add score text directly above its bar
+            fig.add_annotation(
+                x=x,
+                y=y + 2,  # Position slightly above the bar
+                text=score_text,  # Display the actual bar score
+                showarrow=False,
+                font=dict(
+                    size=10,
+                    color=bar.marker.color  # Match the bar color
+                ),
+                opacity=0.9,
+                xshift=offset * 130  # Adjust the multiplier to better center the annotation
+            )
+
+            # Add the shape icon above the score
+            fig.add_annotation(
+                x=x,
+                y=y - 3,  # Position above the score text
+                text=get_symbol_for_shape(icon_shape),  # Convert shape name to symbol
+                showarrow=False,
+                font=dict(
+                    size=14,
+                    color="black"  # Match the bar color
+                ),
+                opacity=0.9,
+                xshift=offset * 130  # Adjust the multiplier to better center the annotation
+            )
+
+    return fig
+
+def get_symbol_for_shape(shape_name):
+    """Convert shape name to a symbol character that can be used in annotations."""
+    symbols = {
+        "triangle-up": "▲",
+        "square": "■",
+        "circle": "●",
+        "diamond": "◆",
+        "star": "★",
+        "pentagon": "⬟",
+        "hexagon": "⬢",
+        "cross": "✚",
+        "x": "✖",
+        "hourglass": "⧗"
+    }
+    return symbols.get(shape_name, "●")  # Default to circle if shape not found
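
For reference, the core of create_top_n_models_comparison_plot is a pandas melt followed by a grouped Plotly bar chart. A toy, self-contained version of just that step is below; the column names are made up, while the real code keys on AutoEvalColumn.dummy.name and BENCHMARK_COLS.

import pandas as pd
import plotly.express as px

df = pd.DataFrame({
    "model": ["model-a", "model-b"],
    "task_1": [71.5, 66.6],
    "task_2": [89.8, 81.5],
})

# Wide -> long: one row per (model, metric) pair
melted = pd.melt(df, id_vars=["model"], value_vars=["task_1", "task_2"],
                 var_name="Metric", value_name="Score")

# One bar per model, grouped by metric, with the y-axis fixed to the 0-100 score range
fig = px.bar(melted, x="Metric", y="Score", color="model",
             barmode="group", range_y=[0, 100])
# fig.show()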