Update app.py
app.py CHANGED
@@ -10,6 +10,7 @@ load_dotenv()
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 # Patch the OpenAI client with Instructor
 client = instructor.from_openai(OpenAI(api_key=os.environ['OPENAI_API_KEY']))
@@ -116,7 +117,6 @@ def format_template(template, **kwargs):
 
 # Define functions
 def llm_call(user_prompt, system_prompt=None):
-
     messages = [
         {"role": "user", "content": user_prompt}
     ] if system_prompt is None else [
@@ -125,62 +125,72 @@ def llm_call(user_prompt, system_prompt=None):
     ]
 
     try:
-
+        response = client.chat.completions.create(
             model="gpt-4-turbo-preview",
             response_model=None,
             messages=messages,
         )
+        return response
     except Exception as e:
+        logger.error(f"Error during LLM call: {e}")
         return None
 
-# Define the Chainlit message handler
 @cl.on_chat_start
 async def start():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        # Welcome message
+        wel_msg = cl.Message(content="Welcome to Build Advisor!\n\nBuild Advisor creates plan, production requirement spec and implementation for your AI application idea.\nQuickly create a PoC so you can determine whether an idea is worth starting, worth investing time and/or money in.")
+        await wel_msg.send()
+
+        # Ask user for AI application / business idea
+        res = await cl.AskUserMessage(content="What is your AI application/business idea?", timeout=30).send()
+        if res:
+            await wel_msg.remove()
+            await cl.Message(
+                content=f"User Proposal: {res['output']}.\n\nStarting...",
+            ).send()
+
+            user_proposal = res['output']
+
+            prd_sys1 = format_template(PRD_PROMPT_TEMPLATE, user_proposal=user_proposal)  # system message to create PRD
+            prd_response = llm_call(user_prompt=user_proposal, system_prompt=prd_sys1)
+
+            if prd_response:
+                prd_response_raw = prd_response.choices[0].message.content
+
+                # send PRD output to UI
+                prd_msg = cl.Message(content=prd_response_raw)
+                await prd_msg.send()
+
+                prd_json = json.dumps({
+                    "objective_goal": "Develop a chatbot...",
+                    "features": [],
+                    "ux_flow_design_notes": "...",
+                    "system_environment_requirements": "...",
+                    "assumptions": [],
+                    "constraints": [],
+                    "dependencies": [],
+                    "prompt_engineering_practices": "...",
+                    "task_composability": "...",
+                    "review_approval_process": "..."
+                })
+                designer_prompt = format_template(DESIGNER_PROMPT_TEMPLATE, prd_response_raw=prd_response_raw, prd_json=prd_json, user_proposal=user_proposal)
+                designer_response = llm_call(designer_prompt)
+
+                if designer_response:
+                    designer_output = designer_response.choices[0].message.content
+
+                    designer_output_msg = cl.Message(content=designer_output)
+                    await designer_output_msg.send()
+
+                    # update outputs in UI
+                    for secs in [1, 5, 10, 20]:
+                        await cl.sleep(secs)
+                        await prd_msg.update()
+                        await designer_output_msg.update()
+                else:
+                    await cl.Message(content="Error generating designer output. Please try again.").send()
             else:
-                await cl.Message(content="Error generating
-
-
-
-# Load the starters ... overrided by on_chat_start
-import starters
+                await cl.Message(content="Error generating PRD. Please try again.").send()
+    except Exception as e:
+        logger.error(f"Error during chat start: {e}")
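Note on the llm_call change: the old body appears to have issued the chat-completion request without assigning the result, and it had no return statement, so callers always received None. The fix captures the response and returns it. Because the client is patched with Instructor, passing response_model=None tells Instructor to skip schema extraction and return the raw OpenAI ChatCompletion, which is why downstream code reads response.choices[0].message.content. A minimal sketch of the resulting call pattern (model name and prompt are illustrative only):

import instructor
from openai import OpenAI

# Instructor wraps the OpenAI client; response_model controls what create() returns
client = instructor.from_openai(OpenAI())

response = client.chat.completions.create(
    model="gpt-4-turbo-preview",
    response_model=None,  # None: return the raw ChatCompletion, no validation
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)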
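The helper format_template appears here only in a hunk header, so its body is unchanged by this commit and not shown. Judging from the call sites above (keyword arguments such as user_proposal and prd_json), it is presumably a thin wrapper over str.format; the following sketch is an assumption about that helper, not code from this repository:

def format_template(template, **kwargs):
    # Fill named placeholders like {user_proposal} in a prompt template.
    return template.format(**kwargs)

# Hypothetical usage mirroring the call sites in start()
prd_sys1 = format_template("Create a PRD for this idea: {user_proposal}",
                           user_proposal="a build advisor chatbot")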
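The if res: guard in start() is load-bearing: cl.AskUserMessage(...).send() resolves to None when the user does not answer within the timeout (30 seconds here), so indexing res['output'] unguarded would raise a TypeError. A stripped-down version of the pattern, with the handler body shortened for illustration:

import chainlit as cl

@cl.on_chat_start
async def start():
    res = await cl.AskUserMessage(content="What is your idea?", timeout=30).send()
    if res:
        await cl.Message(content=f"Got it: {res['output']}").send()
    else:
        # res is None after a timeout, so there is nothing to index into
        await cl.Message(content="No answer received in time. Please try again.").send()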