from collections import defaultdict
from typing import get_args

import gradio as gr
import numpy as np

from literals import TASK_CONSISTENCY_BUTTON_LABEL, CHECK_MISSING_DATAPOINTS_BUTTON_LABEL
from plot import prepare_plot_data, plot_metric
from viewer.results import (
    fetch_run_results, fetch_run_list, init_input_normalization_runs, select_runs_by_regex,
    select_runs_by_language, init_input_component_values, init_std_dev_runs, render_results_table,
    export_results_csv, check_missing_datapoints,
)
from viewer.stats import generate_and_export_stats, format_statistics, calculate_statistics, smooth_tasks
from viewer.utils import PlotOptions, check_task_hash_consistency, BASELINE_GROUPING_MODE

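# Gradio dashboard for exploring FineWeb multilingual evaluation results: it fetches
# per-run evaluation scores, renders them as a filterable table, and plots metrics
# over training steps with optional normalization, smoothing and summary statistics.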
with gr.Blocks() as demo:
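    # Cross-callback state: the available run list, prepared plot data, computed
    # statistics, and the raw per-run results fetched from the results store.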
    list_of_runs = gr.State([])
    plot_data = gr.State([])
    statistics = gr.State(defaultdict(lambda: np.nan))
    login_button = gr.LoginButton(visible=False)
    run_data = gr.State([])
    gr.Markdown("# FineWeb Multilingual experiments results explorer V2")
    results_uri = gr.Textbox(label="TB HF Repo", value="s3://fineweb-multilingual-v1/evals/results/", visible=True)
    with gr.Column():
        with gr.Row():
            # crop_prefix = gr.Textbox(label="Prefix to crop", value="tb/fineweb-exps-1p82G-")
            steps = gr.Textbox(label="Training steps", value="%500",
                               info="Use \",\" to separate values. Use \"%32000\" for every 32000 steps. Use \"-\" for ranges. "
                                    "You can also combine them: \"1000-5000%1000\" is 1000 to 5000, every 1000 steps.",
                               interactive=True)
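            # e.g. "1000-5000%1000,14000" selects every 1000th step from 1000 to 5000, plus step 14000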
            with gr.Column():
                select_by_language = gr.Dropdown(choices=["ar", "fr", "ru", "hi", "th", "tr", "zh", "sw", "te"],
                                                 interactive=True, label="Select language",
                                                 info="Choose a language preset")
                mcq_type = gr.Radio(choices=["prob_raw", "prob", "acc"], value="prob", label="MCQ agg metric type")
            with gr.Column():
                select_by_regex_text = gr.Textbox(label="Regex to select runs",
                                                  value="1p46G-gemma-fp-.*-{lang}-.*")
                select_by_regex_button = gr.Button("Select matching runs")
        selected_runs = gr.Dropdown(choices=[], interactive=True, multiselect=True, label="Selected runs")
        fetch_res = gr.Button("Fetch results")
        with gr.Column():
            aggregate_score_cols = gr.Dropdown(
                choices=[], interactive=True, multiselect=True,
                value=[],
                label="Aggregate score columns", allow_custom_value=True,
                info="The values from these columns/metrics will be averaged to produce the \"agg_score\""
            )
            metrics_to_show = gr.CheckboxGroup(
                interactive=True,
                value=["agg_score_metrics"],
                choices=["agg_score_metrics"],
                label="Metrics to display",
                info="Results for these metrics will be shown")
            with gr.Row():
                with gr.Column(scale=1):
                    task_averaging = gr.CheckboxGroup(
                        interactive=True,
                        choices=["show averages", "show expanded"],
                        value=["show averages"],
                        label="Task averaging",
                        info="Behaviour for tasks with subsets")

                    std_dev_run = gr.Dropdown(
                        interactive=True,
                        choices=[],
                        label="Run for std_dev",
                        info="Select a run to compute std_devs. Must have multiple seeds."
                    )
                with gr.Column(scale=2):
                    # includes the seed
                    with gr.Row():
                        with gr.Column(scale=1):
                            normalization_runs = gr.Dropdown(
                                interactive=True,
                                value=[], choices=[],
                                multiselect=True,
                                label="Normalization runs",
                                info="Select runs to use for normalization"
                            )
                            normalization_mode = gr.Radio(
                                choices=["No norm", "Rescale", "Z-norm"],
                                value="Z-norm",
                                label="Normalization mode"
                            )
                            clip_scores_checkbox = gr.Checkbox(value=False, label="Clip Scores")
                        with gr.Column(scale=1):
                            baseline_runs = gr.Dropdown(
                                interactive=True,
                                value=[], choices=[],
                                multiselect=True,
                                label="Baseline runs",
                                info="Select runs to use as baseline"
                            )
                            baseline_grouping_mode = gr.Dropdown(
                                choices=list(get_args(BASELINE_GROUPING_MODE)),
                                value="Mean",
                                label="Baseline grouping mode"
                            )
        results_df = gr.Dataframe(interactive=False)

        with gr.Row():
            with gr.Column():
                export_button = gr.Button("Export Results")
                csv = gr.File(interactive=False, visible=False)
            with gr.Column():
                export_stats_button = gr.Button("Export Stats")
                stats_csv = gr.File(interactive=False, visible=False)

            check_missing_checkpoints = gr.Button(CHECK_MISSING_DATAPOINTS_BUTTON_LABEL)
            check_task_consistency_button = gr.Button(TASK_CONSISTENCY_BUTTON_LABEL, visible=True)

        task_consistency_output = gr.JSON(label="Task hash consistency", visible=False)
        missing_list = gr.JSON(label="Missing datapoints", visible=False)
        with gr.Row():
            column_to_plot = gr.Dropdown(
                choices=[], interactive=True,
                value='agg_score_macro',
                label="Task and metric", allow_custom_value=True)
            score_step = gr.Number(
                value=14000,
                label="Step to use for computing benchmark score",
            )
            baseline_window = gr.Number(
                value=5,
                label="Window size for computing variability and randomness",
            )
        with gr.Row():
            with gr.Column():
                gr.Markdown("### Monotonicity - Spearman Rank Correlation (steps vs score)")
                monotonicity_md = gr.Markdown()
            with gr.Column():
                gr.Markdown("### Variability (Windowed)  - std_dev (all steps of std_dev_run) and SNR (last step)")
                variability_md = gr.Markdown()
            with gr.Column():
                gr.Markdown("### Randomness (Windowed) - distance to RB (in std_dev)")
                randomness_md = gr.Markdown()
            with gr.Column():
                gr.Markdown("### Ordering - Kendall Tau (steps vs score)")
                ordering_md = gr.Markdown()
        with gr.Row():
            merge_seeds = gr.Dropdown(
                choices=["none", "min", "max", "mean"],
                value='mean',
                label="Seed merging")
            smoothing_steps = gr.Number(
                value=3,
                label="Smooth every N datapoints (sliding window)",
            )
            stds_to_plot = gr.Number(
                value=0,
                label="plot N stds as error bars",
            )
            with gr.Column():
                interpolate_checkbox = gr.Checkbox(value=False, label="Interpolate missing steps")
                percent_checkbox = gr.Checkbox(value=False, label="%")
                barplot_checkbox = gr.Checkbox(value=False, label="Bar plot")
        plot = gr.Plot()

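    # Event wiring: the callbacks below connect the controls above to the fetch,
    # render and plot helpers imported from viewer and plot.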
    # run selection
    gr.on(
        triggers=[results_uri.change],
        fn=fetch_run_list, inputs=[results_uri], outputs=[list_of_runs, selected_runs]
    )
    gr.on(
        triggers=[select_by_regex_button.click],
        fn=select_runs_by_regex,
        inputs=[list_of_runs, selected_runs, select_by_regex_text, select_by_language], outputs=[selected_runs]
    )
    gr.on(
        triggers=[select_by_language.change, mcq_type.change],
        fn=select_runs_by_language,
        inputs=[list_of_runs, selected_runs, select_by_language, aggregate_score_cols, mcq_type], outputs=[selected_runs, aggregate_score_cols]
    )
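    # Populate the run list once on initial page load.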
    demo.load(fn=fetch_run_list, inputs=[results_uri], outputs=[list_of_runs, selected_runs])

    gr.on(
        triggers=[selected_runs.change],
        fn=init_std_dev_runs,
        inputs=[selected_runs, std_dev_run],
        outputs=[std_dev_run]
    )
    # fetch results
    gr.on(
        triggers=[fetch_res.click],
        fn=fetch_run_results,
        inputs=[results_uri, selected_runs, steps],
        # We also set the plot as an output, since gr.State has no loading indicator
        outputs=[run_data, plot]
    ).then(
        fn=init_input_component_values, inputs=[run_data, normalization_mode, select_by_language],
        outputs=[metrics_to_show, normalization_runs, baseline_runs]
    ).then(
        fn=render_results_table,
        inputs=[run_data, metrics_to_show, task_averaging, normalization_runs, baseline_runs, baseline_grouping_mode, clip_scores_checkbox,
                normalization_mode, aggregate_score_cols, select_by_language, baseline_window, mcq_type],
        outputs=[results_df, aggregate_score_cols, column_to_plot]
    )
    # change results table
    gr.on(
        triggers=[
            metrics_to_show.input,
            task_averaging.input,
            normalization_runs.input,
            baseline_runs.input,
            clip_scores_checkbox.input,
            baseline_grouping_mode.input,
            aggregate_score_cols.input
        ],
        fn=render_results_table,
        inputs=[run_data, metrics_to_show, task_averaging, normalization_runs, baseline_runs, baseline_grouping_mode, clip_scores_checkbox,
                normalization_mode, aggregate_score_cols, select_by_language, baseline_window, mcq_type],
        outputs=[results_df, aggregate_score_cols, column_to_plot]
    )
    
    # On a normalization mode change we first have to re-initialize the dependent components
    gr.on(
        triggers=[normalization_mode.input],
        fn=init_input_normalization_runs,
        inputs=[run_data, normalization_mode],
        outputs=[normalization_runs]
    ).then(
        fn=render_results_table,
        inputs=[run_data, metrics_to_show, task_averaging, normalization_runs, baseline_runs, baseline_grouping_mode, clip_scores_checkbox,
                normalization_mode, aggregate_score_cols, select_by_language, baseline_window, mcq_type],
        outputs=[results_df, aggregate_score_cols, column_to_plot]
    )
    # table actions
    gr.on(
        triggers=[export_button.click],
        fn=export_results_csv, inputs=[results_df], outputs=[csv]
    )
    gr.on(
        triggers=[check_missing_checkpoints.click],
        fn=check_missing_datapoints, inputs=[selected_runs, steps, run_data, check_missing_checkpoints],
        outputs=[missing_list, check_missing_checkpoints]
    )

    gr.on(
        triggers=[check_task_consistency_button.click],
        fn=check_task_hash_consistency, inputs=[run_data, check_task_consistency_button],
        outputs=[task_consistency_output, check_task_consistency_button]
    )
    # plot: prepare the plot data, compute the statistics, draw the plot, then
    # render the formatted statistics
    gr.on(
        triggers=[results_df.change, column_to_plot.input, merge_seeds.input, smoothing_steps.input, stds_to_plot.input,
                  interpolate_checkbox.input, percent_checkbox.input, baseline_window.input, barplot_checkbox.input],
        fn=lambda df, col, merge_seeds, smoothing_steps, interpolate_checkbox, percent_checkbox: prepare_plot_data(
            df,
            col,
            merge_seeds,
            PlotOptions(
                smoothing=smoothing_steps,
                interpolate=interpolate_checkbox,
                pct=percent_checkbox,
                merge_seeds=merge_seeds,
            ),
        ),
        inputs=[results_df, column_to_plot, merge_seeds, smoothing_steps, interpolate_checkbox, percent_checkbox],
        outputs=[plot_data]
    ).then(
        fn=lambda df, std_dev_run_name, column_name, score_s, variance_window, smoothing_steps: calculate_statistics(
            smooth_tasks(df, smoothing_steps), std_dev_run_name, column_name, score_s, variance_window
        ),
        inputs=[results_df, std_dev_run, column_to_plot, score_step, baseline_window, smoothing_steps],
        outputs=[statistics]
    ).then(
        fn=plot_metric,
        inputs=[plot_data, column_to_plot, merge_seeds, percent_checkbox, statistics, stds_to_plot, select_by_language, barplot_checkbox],
        outputs=[plot]
    ).then(
        fn=format_statistics,
        inputs=[statistics],
        outputs=[monotonicity_md, variability_md, randomness_md, ordering_md]
    )

    gr.on(
        triggers=[export_stats_button.click],
        fn=generate_and_export_stats,
        inputs=[run_data, std_dev_run, baseline_runs, baseline_grouping_mode,
                score_step, baseline_window],
        outputs=[stats_csv]
    )

if __name__ == "__main__":
    demo.launch()