# Streamlit app: MECE factor brainstorming assistant.
# Collects a problem statement plus user factors and asks a Hugging Face
# hosted model to suggest additional factors toward a MECE set.
import requests
import streamlit as st
def generate_more_factors(problem_statement, user_factors):
    """Ask a Hugging Face hosted model for additional MECE factors.

    Args:
        problem_statement: Free-text description of the problem to solve.
        user_factors: List of factor strings; blank entries are ignored.

    Returns:
        A string: either the model's suggested factors (bullet list), or a
        human-readable error message if the request fails or the response
        has an unexpected shape. Never raises on network failure.
    """
    # 1. Prepare prompt text from the non-blank user factors.
    factors_text = "\n".join(
        f"- {factor}" for factor in user_factors if factor.strip()
    )
    prompt = (
        f"You are an expert problem solver. Given the following problem statement:\n"
        f"{problem_statement}\n\n"
        f"And the following user-provided factors:\n"
        f"{factors_text}\n\n"
        f"Please suggest additional factors that would complete a MECE (Mutually Exclusive, Collectively Exhaustive) "
        f"set of factors responsible for solving the problem. Provide your suggestions as a bullet list."
    )
    # 2. Call Hugging Face inference API (using a sample model; replace as needed)
    API_URL = "https://api-inference.huggingface.co/models/gpt2"
    token = st.secrets.get("HF_API_TOKEN", "")
    # Anonymous calls are allowed (rate-limited); only send the header when a
    # token is configured.
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    try:
        # Timeout keeps a slow/unreachable endpoint from hanging the
        # Streamlit session indefinitely (original call had no timeout).
        response = requests.post(
            API_URL, headers=headers, json={"inputs": prompt}, timeout=30
        )
    except requests.RequestException as exc:
        # Connection/timeout errors are reported like HTTP errors instead of
        # surfacing a traceback in the UI.
        return f"Error: request failed - {exc}"
    if response.status_code != 200:
        return f"Error: {response.status_code} - {response.text}"
    result = response.json()
    if isinstance(result, list) and result and "generated_text" in result[0]:
        generated = result[0]["generated_text"]
        # Text-generation endpoints echo the prompt; strip it from the output.
        return generated[len(prompt):].strip()
    return "Unexpected response format."
def main():
    """Render the four-column Streamlit UI for factor brainstorming.

    Level 1 takes the problem statement, Level 2 collects user factors
    (dynamically growable rows held in ``st.session_state.factor_rows``),
    Level 3 triggers the LLM call, and Level 4 displays the suggestions
    cached in ``st.session_state.llm_suggestions``.
    """
    st.title("Problem Statement and Factor Guess")
    st.write("Enter your problem statement and factors. The UI is organized into four horizontal levels:")
    # Initialize session state variables (survive Streamlit's rerun cycle).
    if "factor_rows" not in st.session_state:
        st.session_state.factor_rows = [""]
    if "llm_suggestions" not in st.session_state:
        st.session_state.llm_suggestions = None
    # Create four columns for the four levels
    col1, col2, col3, col4 = st.columns(4)
    # Level 1: Problem Statement (Left-most column)
    with col1:
        st.header("Level 1: Problem Statement")
        problem_statement = st.text_input("Enter your problem statement:")
    # Level 2: User-Provided Factors
    with col2:
        st.header("Level 2: Your Factors")
        factor_inputs = []
        for i in range(len(st.session_state.factor_rows)):
            key = f"factor_{i}"
            value = st.text_input(f"Factor {i+1}", value=st.session_state.factor_rows[i], key=key)
            factor_inputs.append(value)
            # Persist the edited value so it survives the next rerun.
            st.session_state.factor_rows[i] = value
        if st.button("Add Factor Row", key="add_row"):
            st.session_state.factor_rows.append("")
            # st.rerun() replaced st.experimental_rerun() in Streamlit 1.27;
            # support both so the new row appears immediately on any version.
            rerun = getattr(st, "rerun", None) or getattr(st, "experimental_rerun", None)
            if rerun is not None:
                rerun()
            else:
                st.write("Row added. Please refresh the page to see the new row.")
    # Level 3: Generate More Factors Button
    with col3:
        st.header("Level 3: Generate More Factors")
        if st.button("Generate More Factors", key="generate_factors"):
            if not problem_statement.strip():
                st.error("Please enter a problem statement before generating factors.")
            else:
                with st.spinner("Generating more factors..."):
                    suggestions = generate_more_factors(problem_statement, factor_inputs)
                    st.session_state.llm_suggestions = suggestions
    # Level 4: LLM Suggestions Display
    with col4:
        st.header("Level 4: LLM Suggestions")
        if st.session_state.llm_suggestions:
            st.write(st.session_state.llm_suggestions)
        else:
            st.write("LLM suggestions will appear here after you click 'Generate More Factors'.")


if __name__ == "__main__":
    main()