import gradio as gr
from evaluation_logic import run_evaluation, AVAILABLE_PROMPT_FORMATS
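
# Assumed interface of evaluation_logic (inferred from usage below):
# - run_evaluation(inference_api, model_name, prompt_format) is a generator
#   that yields human-readable progress/result strings one at a time.
# - AVAILABLE_PROMPT_FORMATS presumably lists every supported prompt format;
#   the dropdowns below expose only a hardcoded subset.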

def gradio_run_evaluation(inference_api, model_name, prompt_format):
    """Stream evaluation results to the UI, one accumulated transcript per yield."""
    output = []
    for result in run_evaluation(inference_api, str(model_name).strip(), prompt_format):
        output.append(result)
        # Yield the full transcript so far; Gradio replaces the textbox
        # contents with each yielded value.
        yield "\n".join(output)
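
# For quick local testing without the real backend, a minimal stand-in for
# run_evaluation (hypothetical; it only mirrors the generator interface
# assumed above) could look like:
#
#   def run_evaluation(inference_api, model_name, prompt_format):
#       yield f"Evaluating {model_name} via {inference_api} ({prompt_format})..."
#       yield "Done."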

with gr.Blocks() as demo:
    gr.Markdown("# DuckDB SQL Evaluation App")

    inference_api = gr.Dropdown(
        label="Inference API",
        choices=['openrouter', 'hf_inference_api'],
        value="openrouter"
    )
    model_name = gr.Textbox(label="Model Name (e.g., qwen/qwen-2.5-72b-instruct)")
    prompt_format = gr.Dropdown(
        label="Prompt Format",
        # Hardcoded subset; AVAILABLE_PROMPT_FORMATS holds the full list.
        choices=['duckdbinst', 'duckdbinstgraniteshort'],
        value="duckdbinstgraniteshort"
    )
    start_btn = gr.Button("Start Evaluation")
    output = gr.Textbox(label="Output", lines=20)

    # Stream the generator's yields into the output textbox.
    start_btn.click(
        fn=gradio_run_evaluation,
        inputs=[inference_api, model_name, prompt_format],
        outputs=output,
    )

# queue() enables Gradio's event queue, which is needed so the generator's
# intermediate yields stream to the client as they are produced.
demo.queue().launch()
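
# Running locally (assuming this file is saved as app.py and evaluation_logic
# is importable from the same directory):
#   $ pip install gradio
#   $ python app.py
# then open the printed local URL (Gradio defaults to http://127.0.0.1:7860).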