import gradio as gr
import requests
import os

# Maximum number of factor textboxes allowed
MAX_FACTORS = 10

def add_factor(num_factors):
    """
    Increase the number of visible factor rows.
    
    Inputs:
    1. num_factors: current number of visible factor rows
    
    Outputs:
    1. Updated number of visible rows
    2. Updated visibility for each factor textbox (list of gr.update objects)
    """
    new_num = num_factors + 1 if num_factors < MAX_FACTORS else num_factors
    # Prepare update list for each textbox: show textbox if its index is less than new_num, hide otherwise.
    updates = [gr.update(visible=True) if i < new_num else gr.update(visible=False) for i in range(MAX_FACTORS)]
    return new_num, *updates

def generate_factors(problem_statement, *factors):
    """
    Call the Hugging Face inference API to generate additional factors.
    
    Inputs:
    1. problem_statement: The problem statement provided by the user.
    2. factors: A list of factor inputs (only non-empty ones will be used).
    
    Output:
    1. A string containing additional factor suggestions.
    """
    # Filter out empty or whitespace-only factor entries
    factor_list = [f for f in factors if f and f.strip() != ""]
    
    # Prepare the prompt text for the LLM
    factors_text = "\n".join([f"- {factor}" for factor in factor_list])
    prompt = (
        f"You are an expert problem solver. Given the following problem statement:\n"
        f"{problem_statement}\n\n"
        f"And the following user-provided factors:\n"
        f"{factors_text}\n\n"
        f"Please suggest additional factors that would complete a MECE (Mutually Exclusive, Collectively Exhaustive) "
        f"set of factors responsible for solving the problem. Provide your suggestions as a bullet list."
    )
    
    # Call the Hugging Face inference API (using GPT-2 as an example; change the model as needed)
    API_URL = "https://api-inference.huggingface.co/models/gpt2"
    token = os.environ.get("HF_API_TOKEN", "")
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    
    try:
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=60)
    except requests.exceptions.RequestException as e:
        return f"Request failed: {e}"

    if response.status_code == 200:
        result = response.json()
        # Text-generation models typically return a list like [{"generated_text": "<prompt + continuation>"}].
        if isinstance(result, list) and result and "generated_text" in result[0]:
            generated = result[0]["generated_text"]
            # GPT-2 style models echo the prompt, so strip it to return only the suggestions.
            suggestions = generated[len(prompt):].strip()
            return suggestions
        else:
            return "Unexpected response format."
    else:
        return f"Error: {response.status_code} - {response.text}"

with gr.Blocks() as demo:
    # State variable to keep track of the current number of factor rows visible.
    num_factors_state = gr.State(value=1)
    
    # Define the layout with 4 columns for 4 levels.
    with gr.Row():
        # Level 1: Problem Statement
        with gr.Column():
            problem_statement = gr.Textbox(label="Level 1: Problem Statement", placeholder="Enter your problem statement here")
        
        # Level 2: Factor inputs and an Add Factor Row button
        with gr.Column():
            factor_textboxes = []
            # Pre-create MAX_FACTORS textboxes. Only the first 'num_factors_state' will be visible.
            for i in range(MAX_FACTORS):
                tb = gr.Textbox(label=f"Factor {i+1}", visible=(i == 0), placeholder="Enter a factor")
                factor_textboxes.append(tb)
            add_factor_btn = gr.Button("Add Factor Row")
        
        # Level 3: Generate More Factors button
        with gr.Column():
            generate_btn = gr.Button("Generate More Factors")
        
        # Level 4: LLM suggestions display
        with gr.Column():
            llm_output = gr.Textbox(label="Level 4: LLM Suggestions", interactive=False, placeholder="LLM suggestions will appear here")
    
    # When Add Factor Row is clicked, update the state and the visibility of each factor textbox.
    add_factor_btn.click(
        fn=add_factor,
        inputs=num_factors_state,
        outputs=[num_factors_state] + factor_textboxes
    )
    
    # When Generate More Factors is clicked, call the LLM generation function.
    generate_btn.click(
        fn=generate_factors,
        inputs=[problem_statement] + factor_textboxes,
        outputs=llm_output
    )

demo.launch()
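
# Usage note (a sketch, not part of the original app; the file name below is an assumption):
# setting HF_API_TOKEN before launching authenticates the inference API call and reduces
# the chance of rate limiting, e.g.
#   HF_API_TOKEN=hf_xxx python app.py
# Without a token the request is sent anonymously to the public inference endpoint.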