import re

import gradio as gr
import pandas as pd
import plotly
from pandas.api.types import is_numeric_dtype

from pipeline.config import LLMBoardConfig, QueriesConfig
README = """ | |
Projects compares different large language models and their providers for real time applications and mass data processing. | |
While other benchmarks compare LLMs on different human intelligence tasks this bencchmark focus on features related to business and engineering aspects such as response times, pricing and data streaming capabilities. | |
To preform evaluation we chose a task of newspaper articles summarization from DATASET_LINK/NAME as it represents a very standard type of task where model has to understand unstructured natural language text, process it and output text in a specified format. | |
For this version we chose English, Polish and Japanese languages, with Japanese representing languages using logographic alphabets. This enable us also validate the effectiveness of the LLM for different language groups. | |
Each of the models was asked to summarize the text using the following prompt: | |
``` | |
{} | |
``` | |
Where \{language\} stands for original language of the text as we wanted to avoid the model translating the text to English during summarization. | |
LLM was asked to return the output in three formats: markdown, json and function call. Note that currently function calls are only supported by Open AI API. | |
To do that we added following text to the query: | |
{} | |
All of the call were made from the same machine with the same internet connection with usage of the LiteLLM library which may adds some time overhead compared to pure curl calls. Call were made from Poland, UTC +1. | |
Please take a look at the following project and let us know if you have any questions or suggestions. | |
""" | |

time_periods_explanation_df = pd.DataFrame(
    {
        "time_of_day": [
            "early morning",
            "morning",
            "afternoon",
            "late afternoon",
            "evening",
            "late evening",
            "midnight",
            "night",
        ],
        "hour_range": ["6-8", "9-11", "12-14", "15-17", "18-20", "21-23", "0-2", "3-5"],
    }
)
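
# Example reading of the table above: a measurement taken at 10:00 (Central European Time, which
# the time-of-day comparison uses) falls into the "morning" bucket covering hours 9-11.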

queries_config = QueriesConfig()

output_types_df = pd.DataFrame(
    {"Output Type": queries_config.query_template.keys(), "Added text": queries_config.query_template.values()}
)
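
# Illustrative sketch of how one benchmark query could be assembled (an assumption based on the
# README above, not the pipeline's actual code): the base template, rendered for the article's
# original language, is followed by the per-format suffix from `query_template`, e.g.
#
#     prompt = (
#         queries_config.base_query_template.replace("{language}", "Polish")
#         + "\n"
#         + list(queries_config.query_template.values())[0]
#     )
#
# The real prompt assembly happens in the measurement pipeline, not in this app.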

summary_df: pd.DataFrame = pd.read_csv("data/2024-02-05 23:33:22.947120_summary.csv")
time_of_day_comparison_df = pd.read_csv("data/2024-02-06 09:49:19.637072_time_of_day_comparison.csv")
general_plots = pd.read_csv("data/general_plots.csv")
model_costs_df = pd.read_csv("data/2024-02-05 12:03:45.281624_model_costs.csv")
time_of_day_plots = pd.read_csv("data/time_of_day_plots.csv")
output_plots = pd.read_csv("data/output_plots.csv")
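
# Module-level view state; the Gradio callbacks below mutate these and rebuild the tables.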
searched_model_name = "" | |
collapse_languages = False | |
collapse_output_method = False | |


def filter_dataframes(input: str):
    global searched_model_name
    input = input.lower()
    searched_model_name = input
    return dataframes()


def collapse_languages_toggle():
    global collapse_languages
    if collapse_languages:
        collapse_languages = False
        button_text = "Collapse languages"
    else:
        collapse_languages = True
        button_text = "Un-collapse languages"
    return dataframes()[0], button_text


def collapse_output_method_toggle():
    global collapse_output_method
    if collapse_output_method:
        collapse_output_method = False
        button_text = "Collapse output method"
    else:
        collapse_output_method = True
        button_text = "Un-collapse output method"
    return dataframes()[0], button_text
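

# dataframes() returns the three styled tables in a fixed order that must match the
# `outputs=[summary_ui, time_of_day_comparison_ui, models_costs_ui]` wiring at the bottom of
# this file. Filtering is a case-insensitive substring match on the model name.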
def dataframes():
    global collapse_languages, collapse_output_method, searched_model_name, summary_df, time_of_day_comparison_df, model_costs_df
    summary_df_columns = summary_df.columns.to_list()
    group_columns = LLMBoardConfig().group_columns.copy()
    if collapse_languages:
        summary_df_columns.remove("language")
        group_columns.remove("language")
    if collapse_output_method:
        summary_df_columns.remove("template_name")
        group_columns.remove("template_name")
    summary_df_processed = summary_df[summary_df_columns].groupby(by=group_columns).mean().reset_index()
    return (
        dataframe_style(summary_df_processed[summary_df_processed.model.str.lower().str.contains(searched_model_name)]),
        dataframe_style(
            time_of_day_comparison_df[time_of_day_comparison_df.model.str.lower().str.contains(searched_model_name)]
        ),
        dataframe_style(model_costs_df[model_costs_df.model.str.lower().str.contains(searched_model_name)]),
    )


def dataframe_style(df: pd.DataFrame):
    df = df.copy()
    df.columns = [snake_case_to_title(column) for column in df.columns]
    column_formats = {}
    for column in df.columns:
        if is_numeric_dtype(df[column]):
            # columns were just title-cased, so compare against the converted name
            if column == "Execution Time":
                column_formats[column] = "{:.4f}"
            else:
                column_formats[column] = "{:.2f}"
    df = df.style.format(column_formats, na_rep="")
    return df


def snake_case_to_title(text):
    # Convert snake_case to title-case
    words = re.split(r"_", text)
    title_words = [word.capitalize() for word in words]
    return " ".join(title_words)


filter_textbox = gr.Textbox(label="Model name part", scale=2)
filter_button = gr.Button("Filter dataframes by model name", scale=1)
collapse_languages_button = gr.Button("Collapse languages")
collapse_output_method_button = gr.Button("Collapse output method")

last_textbox = 0


with gr.Blocks() as demo:
    gr.HTML("<h1>LLM Board</h1>")
    with gr.Row():
        filter_textbox.render()
        filter_button.render()
    with gr.Tab("About this project"):
        gr.Markdown(
            README.format(
                queries_config.base_query_template.replace("```", "'''"), output_types_df.to_markdown(index=False)
            )
        )
    with gr.Tab("General plots"):
        for index, row in general_plots.iterrows():
            plot = plotly.io.from_json(row["plot_json"])
            plot.update_layout(autosize=True)
            gr.Plot(plot, label=row["header"], scale=1)
            if pd.notna(row["description"]):
                gr.Markdown(str(row["description"]))
with gr.Tab("Output characteristics"): | |
with gr.Row(): | |
collapse_languages_button.render() | |
collapse_output_method_button.render() | |
summary_ui = gr.DataFrame(dataframe_style(summary_df), label="Output characteristics") | |
gr.Markdown( | |
"""\ | |
This table compares output characteristics of different models which include execution time, output size and chunking of the output. Some providers and models don't support output chunking, in this case chunk related fields are left empty. | |
Execution time refers to averaged time needed to execute one query. | |
To count words we split the output string by whitespace `\w` regex character. | |
Chunk sizes are measured in the characters count.""" | |
) | |
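        # Sketch of the word-count rule described above (hypothetical names; the actual counting
        # happens in the measurement pipeline, not in this app):
        #     word_count = len(re.split(r"\s+", output_text.strip()))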
        for index, row in output_plots.iterrows():
            plot = plotly.io.from_json(row["plot_json"])
            plot.update_layout(autosize=True)
            gr.Plot(plot, label=row["header"], scale=1)
with gr.Tab("Preformance by time of the day"): | |
# display only first plot for all models | |
for index, row in time_of_day_plots[0:1].iterrows(): | |
plot = plotly.io.from_json(row["plot_json"]) | |
plot.update_layout(autosize=True) | |
gr.Plot(plot, label=row["header"], scale=1) | |
time_periods_explanation_ui = gr.DataFrame( | |
dataframe_style(time_periods_explanation_df), label="Times of day ranges" | |
) | |
time_of_day_comparison_ui = gr.DataFrame(dataframe_style(time_of_day_comparison_df), label="Time of day") | |
        gr.Markdown(
            """\
These measurements were made by testing the models with the same dataset as in the other comparisons, every hour for 24 hours.

Execution time refers to the average time needed to execute one query.

Hours and times of day in the table and in the plot are based on Central European Time.

Measurements were made during a normal work week.
"""
        )
        # display the rest of the plots
        for index, row in time_of_day_plots[1:].iterrows():
            plot = plotly.io.from_json(row["plot_json"])
            plot.update_layout(autosize=True)
            gr.Plot(plot, label=row["header"], scale=1)
with gr.Tab("Costs comparison"): | |
models_costs_ui = gr.DataFrame(dataframe_style(model_costs_df), label="Costs comparison") | |
gr.Markdown( | |
"""\ | |
Provider pricing column contains pricing from the website of the provider. | |
Hugging Face Inference Endpoints are charged by hour so to compare different providers together, | |
for models hosted this way we calculated "Cost Per Token" column using data collected during the experiment. | |
Note that pause and resume time cost was not included in the "Cost Per Token" column calculation. | |
""" | |
) | |
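        # Rough shape of that per-token estimate for hourly-billed endpoints (a sketch with
        # hypothetical variable names; the published column is computed from the experiment data):
        #     cost_per_token = hourly_rate_usd * endpoint_hours_used / total_tokens_processed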

    filter_button.click(
        fn=filter_dataframes,
        inputs=filter_textbox,
        outputs=[summary_ui, time_of_day_comparison_ui, models_costs_ui],
        api_name="filter_dataframes",
    )
    filter_textbox.submit(
        fn=filter_dataframes,
        inputs=filter_textbox,
        outputs=[summary_ui, time_of_day_comparison_ui, models_costs_ui],
        api_name="filter_dataframes",
    )
    collapse_languages_button.click(
        fn=collapse_languages_toggle,
        outputs=[summary_ui, collapse_languages_button],
        api_name="collapse_languages_toggle",
    )
    collapse_output_method_button.click(
        fn=collapse_output_method_toggle,
        outputs=[summary_ui, collapse_output_method_button],
        api_name="collapse_output_method_toggle",
    )

demo.launch()