"""Gradio front-end for running BigCodeBench evaluations.

Builds a `bigcodebench.evaluate` command line from form inputs, runs it as a
subprocess, and streams its combined stdout/stderr back into the UI.
"""

import os
import shlex
import subprocess
import sys
import threading
import time
import uuid

import gradio as gr


class Logger:
    """Tee-style writer: duplicates everything written to it to both the
    real terminal and a log file (suitable for rebinding sys.stdout)."""

    def __init__(self, filename):
        self.terminal = sys.stdout
        self.log = open(filename, "w")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
        self.log.flush()

    def flush(self):
        self.terminal.flush()
        self.log.flush()

    def isatty(self):
        # Some libraries probe stdout for TTY capabilities; always say no.
        return False


default_command = "bigcodebench.evaluate"
# NOTE(review): plain boolean guard, not thread-safe under concurrent Gradio
# requests — two near-simultaneous submits can both pass the check. A
# threading.Lock would close the race; kept as a flag to preserve behavior.
is_running = False


def generate_command(
    jsonl_file, split, subset, save_pass_rate, parallel,
    min_time_limit, max_as_limit, max_data_limit, max_stack_limit,
    check_gt_only, no_gt
):
    """Assemble the `bigcodebench.evaluate` command line from UI values.

    Args:
        jsonl_file: Gradio file object for the uploaded samples (or None).
        split: "complete" or "instruct".
        subset: "full" or "hard".
        save_pass_rate: include --save_pass_rate when True.
        parallel: worker count; omitted when None or 0.
        min_time_limit / max_as_limit / max_data_limit / max_stack_limit:
            numeric resource limits forwarded as CLI options.
        check_gt_only / no_gt: boolean CLI switches.

    Returns:
        The full command as a single space-joined string.
    """
    command = [default_command]
    if jsonl_file is not None:
        # Only the basename of the uploaded temp file is forwarded;
        # presumably the evaluator resolves it itself — TODO confirm.
        samples = os.path.basename(jsonl_file.name)
        command.extend(["--samples", samples])
    command.extend(["--split", split, "--subset", subset])
    if save_pass_rate:
        command.append("--save_pass_rate")
    if parallel is not None and parallel != 0:
        command.extend(["--parallel", str(int(parallel))])
    command.extend([
        "--min-time-limit", str(min_time_limit),
        "--max-as-limit", str(int(max_as_limit)),
        "--max-data-limit", str(int(max_data_limit)),
        "--max-stack-limit", str(int(max_stack_limit)),
    ])
    if check_gt_only:
        command.append("--check-gt-only")
    if no_gt:
        command.append("--no-gt")
    return " ".join(command)


def run_bigcodebench(command):
    """Run *command* as a subprocess, yielding its output line by line.

    Sets the module-level `is_running` flag for the duration of the run and
    always clears it (try/finally), even if Popen fails or the consumer
    abandons the generator — previously a failure here wedged the app.

    Yields:
        Progress/log lines (each already newline-terminated).
    """
    global is_running
    is_running = True
    try:
        yield f"Executing command: {command}\n"
        # shlex.split (not str.split) so quoted arguments / paths with
        # spaces survive the round-trip through the command string.
        with subprocess.Popen(
            shlex.split(command),
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        ) as process:
            for line in process.stdout:
                yield line
            process.wait()
            if process.returncode != 0:
                yield f"Error: Command exited with status {process.returncode}\n"
        # Best-effort cleanup of stray evaluator processes.
        # WARNING(review): `rm -rf /tmp/*` wipes ALL of /tmp for this user,
        # not just evaluator scratch space — confirm this is intentional.
        cleanup_command = "pids=$(ps -u $(id -u) -o pid,comm | grep 'bigcodebench' | awk '{print $1}'); if [ -n \"$pids\" ]; then echo $pids | xargs -r kill; fi; rm -rf /tmp/*"
        subprocess.run(cleanup_command, shell=True,
                       stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        yield "Evaluation completed.\n"
    finally:
        is_running = False


def stream_logs(command):
    """Gradio generator callback: stream accumulated logs into the textbox."""
    global is_running
    if is_running:
        yield "A command is already running. Please wait for it to finish.\n"
        return
    log_content = []
    for log_line in run_bigcodebench(command):
        log_content.append(log_line)
        # Yield the whole accumulated log so the textbox shows full history.
        yield "".join(log_content)


def read_logs(log_file):
    """Return the contents of *log_file*, or "" if it does not exist."""
    if os.path.exists(log_file):
        with open(log_file, "r") as f:
            return f.read()
    return ""


with gr.Blocks() as demo:
    gr.Markdown("# BigCodeBench Evaluator")

    with gr.Row():
        jsonl_file = gr.File(label="Upload JSONL file", file_types=[".jsonl"])
        split = gr.Dropdown(choices=["complete", "instruct"], label="Split", value="complete")
        subset = gr.Dropdown(choices=["full", "hard"], label="Subset", value="hard")

    with gr.Row():
        save_pass_rate = gr.Checkbox(label="Save Pass Rate")
        parallel = gr.Number(label="Parallel (optional)", precision=0)
        min_time_limit = gr.Number(label="Min Time Limit", value=1, precision=1)
        max_as_limit = gr.Number(label="Max AS Limit", value=200 * 1024, precision=0)

    with gr.Row():
        max_data_limit = gr.Number(label="Max Data Limit", value=10 * 1024, precision=0)
        max_stack_limit = gr.Number(label="Max Stack Limit", value=5, precision=0)
        check_gt_only = gr.Checkbox(label="Check GT Only")
        no_gt = gr.Checkbox(label="No GT")

    command_output = gr.Textbox(label="Command", value=default_command, interactive=False)
    submit_btn = gr.Button("Run Evaluation")
    log_output = gr.Textbox(label="Execution Logs", lines=10)

    # Hidden component to store the unique log file path
    session_log_file = gr.State("")

    def update_command(*args):
        """Regenerate the displayed command whenever any input changes."""
        return generate_command(*args)

    input_components = [
        jsonl_file, split, subset, save_pass_rate, parallel,
        min_time_limit, max_as_limit, max_data_limit, max_stack_limit,
        check_gt_only, no_gt,
    ]

    for component in input_components:
        component.change(update_command, inputs=input_components, outputs=command_output)

    def on_submit(command):
        """Alternative submit handler (currently unused; submit is wired to
        stream_logs below). Kept for parity with stream_logs."""
        global is_running
        if is_running:
            yield "A command is already running. Please wait for it to finish."
            return
        log_accumulator = []
        for log_line in run_bigcodebench(command):
            log_accumulator.append(log_line)
            # "".join, not "\n".join: lines are already newline-terminated,
            # and this matches stream_logs (the old "\n".join double-spaced).
            yield "".join(log_accumulator)

    submit_btn.click(stream_logs, inputs=[command_output], outputs=[log_output])

    # def update_logs(session_log_file):
    #     return read_logs(session_log_file)
    # demo.load(update_logs, inputs=[session_log_file], outputs=[log_output], every=1)

if __name__ == "__main__":
    demo.queue(max_size=300).launch(server_name="0.0.0.0", server_port=7860)