import gradio as gr
import requests
import os

# Maximum number of factor textboxes allowed
MAX_FACTORS = 10
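
# Gradio Blocks declares its components when the UI is built, so all
# MAX_FACTORS textboxes are created up front and rows are "added" by
# toggling visibility with gr.update rather than by creating new widgets.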

def add_factor(num_factors):
    """
    Increase the number of visible factor rows.

    Inputs:
        1. num_factors: current number of visible factor rows
    Outputs:
        1. Updated number of visible rows
        2. Updated visibility for each factor textbox (list of gr.update objects)
    """
    new_num = min(num_factors + 1, MAX_FACTORS)
    # Show a textbox if its index is below new_num; hide it otherwise.
    updates = [gr.update(visible=(i < new_num)) for i in range(MAX_FACTORS)]
    return new_num, *updates
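
# For example, add_factor(2) returns (3, *updates) where updates holds ten
# gr.update objects: the first three with visible=True, the rest hidden.
# This shape matches the outputs list [num_factors_state] + factor_textboxes
# wired up in the click handler below.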

def generate_factors(problem_statement, *factors):
    """
    Call the Hugging Face Inference API to generate additional factors.

    Inputs:
        1. problem_statement: The problem statement provided by the user.
        2. factors: Factor inputs (only non-empty ones are used).
    Output:
        1. A string containing additional factor suggestions.
    """
    # Filter out empty or whitespace-only factor entries.
    factor_list = [f for f in factors if f and f.strip()]

    # Build the prompt text for the LLM.
    factors_text = "\n".join(f"- {factor}" for factor in factor_list)
    prompt = (
        "You are an expert problem solver. Given the following problem statement:\n"
        f"{problem_statement}\n\n"
        "And the following user-provided factors:\n"
        f"{factors_text}\n\n"
        "Please suggest additional factors that would complete a MECE "
        "(Mutually Exclusive, Collectively Exhaustive) set of factors "
        "responsible for solving the problem. Provide your suggestions as a bullet list."
    )

    # Call the Hugging Face Inference API (GPT-2 as an example; swap in an
    # instruction-tuned model for better suggestions).
    API_URL = "https://api-inference.huggingface.co/models/gpt2"
    # Read the token from the HF_API_TOKEN environment variable (e.g. a Space
    # secret); anonymous requests are heavily rate-limited.
    token = os.environ.get("HF_API_TOKEN", "")
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    try:
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=60)
    except requests.RequestException as e:
        return f"Request failed: {e}"

    if response.status_code == 200:
        result = response.json()
        if isinstance(result, list) and result and "generated_text" in result[0]:
            generated = result[0]["generated_text"]
            # The API echoes the prompt; strip it to return only the suggestions.
            return generated[len(prompt):].strip()
        return "Unexpected response format."
    return f"Error: {response.status_code} - {response.text}"
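
# Optional sketch (not wired into the UI): the hosted Inference API answers
# with HTTP 503 while a model is cold-starting. The documented
# "options.wait_for_model" flag asks the API to block until the model is
# ready; api_url and headers are assumed to match generate_factors above.
def query_when_ready(prompt, api_url, headers, timeout=120):
    payload = {"inputs": prompt, "options": {"wait_for_model": True}}
    return requests.post(api_url, headers=headers, json=payload, timeout=timeout)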

with gr.Blocks() as demo:
    # State variable tracking how many factor rows are currently visible.
    num_factors_state = gr.State(value=1)

    # Layout: four columns, one per level.
    with gr.Row():
        # Level 1: Problem statement
        with gr.Column():
            problem_statement = gr.Textbox(
                label="Level 1: Problem Statement",
                placeholder="Enter your problem statement here",
            )
        # Level 2: Factor inputs and an "Add Factor Row" button
        with gr.Column():
            factor_textboxes = []
            # Pre-create MAX_FACTORS textboxes; only the first
            # num_factors_state of them are visible.
            for i in range(MAX_FACTORS):
                tb = gr.Textbox(label=f"Factor {i+1}", visible=(i == 0), placeholder="Enter a factor")
                factor_textboxes.append(tb)
            add_factor_btn = gr.Button("Add Factor Row")
        # Level 3: "Generate More Factors" button
        with gr.Column():
            generate_btn = gr.Button("Generate More Factors")
        # Level 4: LLM suggestions display
        with gr.Column():
            llm_output = gr.Textbox(
                label="Level 4: LLM Suggestions",
                interactive=False,
                placeholder="LLM suggestions will appear here",
            )

    # Clicking "Add Factor Row" updates the state and each textbox's visibility.
    add_factor_btn.click(
        fn=add_factor,
        inputs=num_factors_state,
        outputs=[num_factors_state] + factor_textboxes,
    )

    # Clicking "Generate More Factors" calls the LLM generation function.
    generate_btn.click(
        fn=generate_factors,
        inputs=[problem_statement] + factor_textboxes,
        outputs=llm_output,
    )

demo.launch()
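
# Note: on Hugging Face Spaces the host and port are configured automatically;
# when running elsewhere (e.g. inside a container) you may need
# demo.launch(server_name="0.0.0.0", server_port=7860) instead.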