"""Gradio demo: build POS / Chunking / Parsing prompts under three strategies.

Loads a fixed uniform sample of Penn Treebank sentence indices plus the PTB
records themselves, then exposes a small UI that, for a chosen model and
task, constructs the prompt for each of the three prompting strategies.
"""
import json

import gradio as gr
from tqdm import tqdm

from run_llm import (
    template_all,
    prompt2_pos,
    prompt2_chunk,
    prompt2_parse,
    demon_pos,
    demon_chunk,
    demon_parse,
    model_mapping,
)

theme = gr.themes.Soft()

# Indices of the uniformly sampled sentences to query, one per line.
with open('sample_uniform_1k_2.txt', 'r') as f:
    selected_idx = [int(line.strip()) for line in f]

# BUG FIX: the original code assigned selected_idx[0] (a bare int) and then
# iterated over it, raising TypeError.  A one-element slice keeps the same
# "first sample only" behavior while remaining iterable.
# NOTE(review): the original `#[s:e]` comment suggests a configurable range
# was intended — confirm the desired slice bounds.
gid_list = selected_idx[:1]

# Penn Treebank records: one JSON object per line of ptb.jsonl.
ptb = []
with open('ptb.jsonl', 'r') as f:
    for line in f:
        ptb.append(json.loads(line))


def process_text(model_name, task, text=""):
    """Build the three strategy prompts for *task* and query *model_name*.

    Parameters
    ----------
    model_name : str
        A key of ``model_mapping`` selecting which model to run.
    task : str
        One of ``'POS'``, ``'Chunking'`` or ``'Parsing'``.
    text : str, optional
        Kept for backward compatibility; the sentence text is actually
        taken from the sampled PTB records, so the UI need not supply it.

    Returns
    -------
    tuple[str, str, str]
        One result string per strategy, matching the interface's three
        output textboxes (Strategy 1, 2 and 3 in order).
    """
    result1 = result2 = result3 = "No data processed"
    for gid in tqdm(gid_list, desc='Query'):
        text = ptb[gid]['text']

        # Strategy 1 is task-independent; strategies 2 and 3 pick a
        # task-specific prompt (or an error string for an unknown task).
        prompt1 = template_all.format(text)
        prompt2 = {
            'POS': prompt2_pos.format(text),
            'Chunking': prompt2_chunk.format(text),
            'Parsing': prompt2_parse.format(text),
        }.get(task, "Invalid Task Selection for Strategy 2")
        prompt3 = {
            'POS': demon_pos,
            'Chunking': demon_chunk,
            'Parsing': demon_parse,
        }.get(task, "Invalid Task Selection for Strategy 3")

        # TODO: feed each prompt to model_mapping[model_name] and collect
        # the real completions; placeholders are returned for now.
        result1 = "Processed Result"  # placeholder for Strategy 1 output
        result2 = "Processed Result"  # placeholder for Strategy 2 output
        result3 = "Processed Result"  # placeholder for Strategy 3 output

    # BUG FIX: the original returned a single string while the interface
    # declares three output textboxes; return one value per output.
    return result1, result2, result3


# Dropdown options for model and task selection.
model_options = list(model_mapping.keys())
task_options = ['POS', 'Chunking', 'Parsing']

# Gradio interface: two inputs (model, task) feed process_text; the `text`
# parameter now has a default, so the 2-input / 3-parameter mismatch in the
# original no longer raises at call time.
iface = gr.Interface(
    fn=process_text,
    inputs=[
        gr.Dropdown(model_options, label="Select Model"),
        gr.Dropdown(task_options, label="Select Task"),
    ],
    outputs=[
        gr.Textbox(label="Strategy 1 QA Result"),
        gr.Textbox(label="Strategy 2 Instruction Result"),
        gr.Textbox(label="Strategy 3 Structured Prompting Result"),
    ],
    theme=theme,
    live=False,
)

iface.launch()