saadfarhad committed on
Commit
c95ddc9
·
verified ·
1 Parent(s): 1682ce7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -64
app.py CHANGED
@@ -1,9 +1,42 @@
1
- import streamlit as st
2
  import requests
 
3
 
4
- def generate_more_factors(problem_statement, user_factors):
5
- # 1. Prepare prompt text
6
- factors_text = "\n".join([f"- {factor}" for factor in user_factors if factor.strip() != ""])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  prompt = (
8
  f"You are an expert problem solver. Given the following problem statement:\n"
9
  f"{problem_statement}\n\n"
@@ -12,18 +45,18 @@ def generate_more_factors(problem_statement, user_factors):
12
  f"Please suggest additional factors that would complete a MECE (Mutually Exclusive, Collectively Exhaustive) "
13
  f"set of factors responsible for solving the problem. Provide your suggestions as a bullet list."
14
  )
15
-
16
- # 2. Call Hugging Face inference API (using a sample model; replace as needed)
17
  API_URL = "https://api-inference.huggingface.co/models/gpt2"
18
- token = st.secrets.get("HF_API_TOKEN", "")
19
  headers = {"Authorization": f"Bearer {token}"} if token else {}
20
-
21
  response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
22
  if response.status_code == 200:
23
  result = response.json()
24
  if isinstance(result, list) and result and "generated_text" in result[0]:
25
  generated = result[0]["generated_text"]
26
- # Remove the prompt from the generated output
27
  suggestions = generated[len(prompt):].strip()
28
  return suggestions
29
  else:
@@ -31,59 +64,45 @@ def generate_more_factors(problem_statement, user_factors):
31
  else:
32
  return f"Error: {response.status_code} - {response.text}"
33
 
34
- def main():
35
- st.title("Problem Statement and Factor Guess")
36
- st.write("Enter your problem statement and factors. The UI is organized into four horizontal levels:")
37
-
38
- # Initialize session state variables
39
- if "factor_rows" not in st.session_state:
40
- st.session_state.factor_rows = [""]
41
- if "llm_suggestions" not in st.session_state:
42
- st.session_state.llm_suggestions = None
43
-
44
- # Create four columns for the four levels
45
- col1, col2, col3, col4 = st.columns(4)
46
-
47
- # Level 1: Problem Statement (Left-most column)
48
- with col1:
49
- st.header("Level 1: Problem Statement")
50
- problem_statement = st.text_input("Enter your problem statement:")
51
-
52
- # Level 2: User-Provided Factors
53
- with col2:
54
- st.header("Level 2: Your Factors")
55
- factor_inputs = []
56
- for i in range(len(st.session_state.factor_rows)):
57
- key = f"factor_{i}"
58
- value = st.text_input(f"Factor {i+1}", value=st.session_state.factor_rows[i], key=key)
59
- factor_inputs.append(value)
60
- st.session_state.factor_rows[i] = value
61
-
62
- if st.button("Add Factor Row", key="add_row"):
63
- st.session_state.factor_rows.append("")
64
- try:
65
- st.experimental_rerun()
66
- except AttributeError:
67
- st.write("Row added. Please refresh the page to see the new row.")
68
-
69
- # Level 3: Generate More Factors Button
70
- with col3:
71
- st.header("Level 3: Generate More Factors")
72
- if st.button("Generate More Factors", key="generate_factors"):
73
- if not problem_statement.strip():
74
- st.error("Please enter a problem statement before generating factors.")
75
- else:
76
- with st.spinner("Generating more factors..."):
77
- suggestions = generate_more_factors(problem_statement, factor_inputs)
78
- st.session_state.llm_suggestions = suggestions
79
-
80
- # Level 4: LLM Suggestions Display
81
- with col4:
82
- st.header("Level 4: LLM Suggestions")
83
- if st.session_state.llm_suggestions:
84
- st.write(st.session_state.llm_suggestions)
85
- else:
86
- st.write("LLM suggestions will appear here after you click 'Generate More Factors'.")
87
 
88
- if __name__ == "__main__":
89
- main()
 
1
+ import gradio as gr
2
  import requests
3
+ import os
4
 
5
# Maximum number of factor textboxes allowed
MAX_FACTORS = 10


def add_factor(num_factors):
    """
    Reveal one more factor row, capped at MAX_FACTORS.

    Inputs:
        num_factors: current number of visible factor rows.

    Outputs:
        1. The updated row count.
        2. One gr.update per textbox: the first `count` boxes are made
           visible, the remainder hidden.
    """
    # Guard clause: never grow past the fixed pool of textboxes.
    if num_factors < MAX_FACTORS:
        visible_count = num_factors + 1
    else:
        visible_count = num_factors

    # Build one visibility update per pre-created textbox.
    visibility_updates = []
    for index in range(MAX_FACTORS):
        visibility_updates.append(gr.update(visible=index < visible_count))

    return (visible_count, *visibility_updates)
23
+
24
def generate_factors(problem_statement, *factors):
    """
    Call the Hugging Face inference API to generate additional factors.

    Inputs:
        problem_statement: The problem statement provided by the user.
        *factors: Factor inputs from the UI (only non-empty ones are used).

    Returns:
        A string containing additional factor suggestions, or an
        "Error: ..." string describing what went wrong.
    """
    # Filter out empty or whitespace-only factor entries.
    factor_list = [f for f in factors if f and f.strip() != ""]

    # Prepare the prompt text for the LLM.
    # NOTE(review): the two middle prompt lines were elided in the diff view
    # and are reconstructed here from context — verify against the full file.
    factors_text = "\n".join([f"- {factor}" for factor in factor_list])
    prompt = (
        f"You are an expert problem solver. Given the following problem statement:\n"
        f"{problem_statement}\n\n"
        f"And the following factors already identified by the user:\n"
        f"{factors_text}\n\n"
        f"Please suggest additional factors that would complete a MECE (Mutually Exclusive, Collectively Exhaustive) "
        f"set of factors responsible for solving the problem. Provide your suggestions as a bullet list."
    )

    # Call the Hugging Face inference API (using GPT-2 as an example; change the model as needed).
    API_URL = "https://api-inference.huggingface.co/models/gpt2"
    token = os.environ.get("HF_API_TOKEN", "")
    # Only attach an Authorization header when a token is actually configured.
    headers = {"Authorization": f"Bearer {token}"} if token else {}

    # Fix: the original call had no timeout, so a stalled API would hang the
    # UI indefinitely. Network failures are reported in the same error-string
    # style this function already uses for HTTP errors.
    try:
        response = requests.post(
            API_URL, headers=headers, json={"inputs": prompt}, timeout=60
        )
    except requests.RequestException as exc:
        return f"Error: request to the inference API failed ({exc})"

    if response.status_code == 200:
        result = response.json()
        if isinstance(result, list) and result and "generated_text" in result[0]:
            generated = result[0]["generated_text"]
            # Remove the echoed prompt so only the new suggestions remain.
            suggestions = generated[len(prompt):].strip()
            return suggestions
        else:
            # NOTE(review): this branch's original message was hidden in the
            # diff view; wording reconstructed — confirm against the full file.
            return "Error: no generated text found in the API response."
    else:
        return f"Error: {response.status_code} - {response.text}"
66
 
67
with gr.Blocks() as demo:
    # Tracks how many factor rows are currently visible.
    visible_rows = gr.State(value=1)

    # One row with four columns — one column per workflow "level".
    with gr.Row():
        # Level 1: the problem statement input.
        with gr.Column():
            problem_box = gr.Textbox(label="Level 1: Problem Statement", placeholder="Enter your problem statement here")

        # Level 2: factor inputs plus a button that reveals another row.
        with gr.Column():
            # All MAX_FACTORS textboxes exist up front; only the first one
            # starts out visible, the rest are toggled on via add_factor.
            factor_boxes = [
                gr.Textbox(label=f"Factor {i+1}", visible=(i == 0), placeholder="Enter a factor")
                for i in range(MAX_FACTORS)
            ]
            add_row_button = gr.Button("Add Factor Row")

        # Level 3: button that triggers the LLM call.
        with gr.Column():
            generate_button = gr.Button("Generate More Factors")

        # Level 4: read-only display for the LLM's suggestions.
        with gr.Column():
            suggestions_box = gr.Textbox(label="Level 4: LLM Suggestions", interactive=False, placeholder="LLM suggestions will appear here")

    # "Add Factor Row" bumps the counter and recomputes every textbox's visibility.
    add_row_button.click(
        fn=add_factor,
        inputs=visible_rows,
        outputs=[visible_rows] + factor_boxes,
    )

    # "Generate More Factors" sends the problem statement and all factor
    # values (visible or not; blanks are filtered inside the helper) to the LLM.
    generate_button.click(
        fn=generate_factors,
        inputs=[problem_box] + factor_boxes,
        outputs=suggestions_box,
    )


demo.launch()