danielrosehill commited on
Commit
221cb88
·
1 Parent(s): 93fada4
.streamlit/config.toml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ [theme]
2
+ primaryColor = "#FF4B4B"
3
+ backgroundColor = "#FFFFFF"
4
+ secondaryBackgroundColor = "#F0F2F6"
5
+ textColor = "#262730"
6
+ font = "sans serif"
7
+
8
+ [server]
9
+ enableXsrfProtection = true
10
+ enableCORS = false
.vscode/settings.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "window.title": "${dirty}${activeEditorShort}${separator}${rootName}${separator}${profileName}${separator}${appName}${separator}[Branch: main]"
3
+ }
README.md CHANGED
@@ -10,4 +10,44 @@ pinned: false
10
  short_description: Model workflow using agents to proactively develop context
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  short_description: Model workflow using agents to proactively develop context
11
  ---
12
 
13
+ # Context Extraction Demo
14
+
15
+ [![Made with Streamlit](https://img.shields.io/badge/Made%20with-Streamlit-FF4B4B.svg)](https://www.streamlit.io)
16
+ [![Python Version](https://img.shields.io/badge/python-3.8%2B-blue)](https://www.python.org)
17
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
18
+
19
+ This project demonstrates an innovative approach to enhancing personalized Large Language Model (LLM) experiences through agentic workflow-based context extraction. The system showcases how AI agents can proactively generate and collect contextual data to improve the quality and relevance of LLM interactions.
20
+
21
+ ## Purpose
22
+
23
+ The primary goal of this project is to illustrate how an agent-driven workflow can:
24
+ - Proactively identify and extract relevant contextual information
25
+ - Generate meaningful data that enhances LLM understanding
26
+ - Create more personalized and context-aware AI interactions
27
+ - Demonstrate practical implementation of agentic workflows in LLM systems
28
+
29
+ ## Screenshots
30
+
31
+ ![Interactive interview chat interface](screenshots/1.png)
32
+
33
+ ![Context focus selection screen](screenshots/2.png)
34
+
35
+ ![Generated context summary output](screenshots/3.png)
36
+
37
+ ## About
38
+
39
+ This project was developed through collaboration between [Daniel Rosehill](https://danielrosehill.com) and Claude (Anthropic). It serves as a practical demonstration of how AI systems can be designed to actively participate in context generation and enhancement, leading to more effective and personalized LLM experiences.
40
+
41
+ ## Implementation
42
+
43
+ The system implements an agentic workflow that enables:
44
+ - Automated context extraction from user interactions
45
+ - Proactive generation of contextual metadata
46
+ - Integration of extracted context into LLM inference processes
47
+ - Enhanced personalization through accumulated contextual understanding
48
+
49
+ ## Attribution
50
+
51
+ Development: Claude (Anthropic)
52
+ Project Direction and Implementation: Daniel Rosehill
53
+
app.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import openai
3
+ import json
4
+ from datetime import datetime
5
+
6
# Page configuration
# Wide layout gives the two-column interview UI (tab2) more horizontal room.
st.set_page_config(
    page_title="Agent Interview Context Generation Demo",
    layout="wide"
)

# Custom CSS for chat bubbles and Font Awesome
# Injected once per run via st.markdown(unsafe_allow_html=True); the classes
# below are referenced by the raw-HTML chat rendering in the Interview tab.
# NOTE(review): Font Awesome is @imported from a CDN at runtime — the robot
# icon silently disappears when the app runs offline; confirm this is intended.
st.markdown("""
    <style>
    @import url('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css');
    /* Shared bubble shape for both speakers. */
    .chat-bubble {
        padding: 15px;
        border-radius: 15px;
        margin: 5px 0;
        max-width: 80%;
        position: relative;
    }

    /* Bot messages: light grey, hugging the left edge. */
    .bot-bubble {
        background-color: #F0F2F6;
        margin-right: auto;
        margin-left: 10px;
        border-bottom-left-radius: 5px;
    }

    /* User messages: green, hugging the right edge. */
    .user-bubble {
        background-color: #4CAF50;
        color: white;
        margin-left: auto;
        margin-right: 10px;
        border-bottom-right-radius: 5px;
    }

    /* Column that stacks the bubbles into a chat transcript. */
    .chat-container {
        display: flex;
        flex-direction: column;
        gap: 10px;
        padding: 10px;
        background-color: white;
        border-radius: 10px;
        box-shadow: 0 2px 5px rgba(0,0,0,0.1);
    }

    /* Round off Streamlit's own text area and buttons to match the bubbles. */
    .stTextArea textarea {
        border-radius: 20px;
        padding: 10px 15px;
        font-size: 16px;
    }

    .stButton button {
        border-radius: 20px;
        padding: 5px 20px;
    }
    </style>
    """, unsafe_allow_html=True)
61
+
62
# Initialize session state variables
# Each key is seeded exactly once per browser session; subsequent reruns
# keep whatever is already stored in st.session_state.
_SESSION_DEFAULTS = {
    'messages': [],                # transcript: "Q: ..." bot entries + raw user answers
    'interview_complete': False,   # set True when "End Interview" is clicked
    'context_data': "",            # markdown summary produced by extract_context()
    'interview_started': False,    # set True once the first question is asked
    'context_focus': None,         # chosen focus area, or None until selected
}
for _state_key, _initial in _SESSION_DEFAULTS.items():
    if _state_key not in st.session_state:
        st.session_state[_state_key] = _initial
73
+
74
def get_random_question(api_key, focus_area=None):
    """Ask OpenAI for one random, open-ended interview question.

    When *focus_area* is provided and is not ``"general"``, the system
    prompt steers the question toward that topic; otherwise the question
    may be about anything. Returns the question text, or a string
    beginning with ``"Error: "`` if the API call fails.
    """
    try:
        client = openai.OpenAI(api_key=api_key)

        # Assemble the interviewer persona, optionally narrowed to a topic.
        prompt_parts = ["You are an interviewer gathering context about the user. "]
        if focus_area and focus_area != "general":
            prompt_parts.append(f"Focus specifically on questions about their {focus_area}. ")
        prompt_parts.append(
            "Ask one random, open-ended question that reveals meaningful "
            "information about the user. Be creative and never repeat questions. "
            "Each response should be just one engaging question."
        )
        system_prompt = "".join(prompt_parts)

        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": "Please ask me a random question."},
            ],
        )

        # Defensive extraction: fall back to str() if the message object
        # has no .content attribute.
        reply = completion.choices[0].message
        if hasattr(reply, 'content'):
            return reply.content
        return str(reply)
    except Exception as exc:
        return "Error: " + str(exc)
99
+
100
def extract_context(api_key, conversation):
    """Distill the interview transcript into a markdown context summary.

    *conversation* is the raw ``st.session_state.messages`` list: bot
    questions are stored with a ``"Q: "`` prefix, user answers verbatim.
    Returns the model's markdown summary (third-person, grouped under
    headings), or a string beginning with ``"Error: "`` on failure.
    """
    try:
        client = openai.OpenAI(api_key=api_key)

        # Label speakers by the "Q: " prefix the app puts on bot questions
        # (the same test the Interview tab uses for rendering), instead of
        # assuming strict Bot/User alternation by index parity — parity
        # silently mislabels every later message if the transcript ever
        # contains two consecutive entries from the same side. The prefix
        # is stripped so the model sees clean question text.
        labeled = []
        for msg in conversation:
            if msg.startswith('Q:'):
                labeled.append(f"Bot: {msg[3:]}")
            else:
                labeled.append(f"User: {msg}")
        conversation_text = "\n".join(labeled)

        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "system",
                    "content": "Analyze the following conversation and extract key information about the user. Create a well-organized summary in markdown format, grouping similar information under appropriate headings. Write in third person perspective."
                },
                {
                    "role": "user",
                    "content": f"Please analyze this conversation and create a context summary:\n\n{conversation_text}"
                }
            ]
        )
        # Defensive extraction, mirroring get_random_question.
        message = response.choices[0].message
        return message.content if hasattr(message, 'content') else str(message)
    except Exception as e:
        return f"Error: {str(e)}"
122
+
123
# Sidebar for API key and controls
with st.sidebar:
    st.title("Settings")
    # Key is re-read from the widget on every rerun; it is never persisted.
    api_key = st.text_input("Enter OpenAI API Key", type="password")

    # One-click reset: restore every piece of interview state to its
    # initial value, then rerun so the UI reflects the fresh state.
    if st.button("Clear/Reset"):
        for state_key, initial in (
            ('messages', []),
            ('interview_complete', False),
            ('context_data', ""),
            ('interview_started', False),
            ('context_focus', None),
        ):
            st.session_state[state_key] = initial
        st.rerun()
135
+
136
# Main content
st.title("Agent Interview Context Generation Demo")
# Intro blurb rendered above the tab strip.
st.markdown("""
This project demonstrates how AI agents can proactively gather and generate rich contextual data
through intelligent interviewing. By focusing on specific areas of interest, the agent builds a comprehensive
understanding that enhances AI-human interactions and enables more personalized experiences.
""")

# Create tabs for different sections
# tab1: static instructions; tab2: the live interview; tab3: screenshots;
# tab4: the generated context summary and export controls.
tab1, tab2, tab3, tab4 = st.tabs(["Instructions", "Interview", "Gallery", "Generated Context"])
146
+
147
with tab1:
    st.header("How it Works")
    # Static instructions page — everything below is one markdown literal.
    st.markdown("""
    This application helps gather and extract contextual information about you through an interactive interview process.

    Created by [Daniel Rosehill](https://danielrosehill.com) and Claude (Anthropic).

    View the source code on [GitHub](https://github.com/danielrosehill/Context-Extraction-Demo).

    ### Process:
    1. Enter your OpenAI API key in the sidebar
    2. Choose your preferred context focus area
    3. Click the "Start Interview" button in the Interview tab
    4. The AI interviewer will ask targeted questions based on your chosen focus
    5. Answer each question naturally - you can type or use voice input
    6. Click "Submit Answer" after each response
    7. Continue the conversation until you're ready to end
    8. Click "End Interview" to generate your context summary
    9. Review the extracted context and export it as needed

    ### Features:
    - **Focus Areas**: Choose to focus on specific aspects like professional background, technical skills, or keep it general
    - **Voice Input**: Use Chrome's built-in speech-to-text by clicking the microphone icon
    - **Targeted Questions**: The AI asks questions relevant to your chosen focus area
    - **Context Extraction**: Automatically organizes your information into a structured summary
    - **Export Options**: Copy or download your context data in markdown format

    ### Tips:
    - Provide detailed, honest answers for better context extraction
    - Use voice input to make the process faster and more natural
    - Take your time with each response
    - You can reset and start over at any time using the Clear/Reset button
    """)
180
+
181
with tab2:
    # Create two columns for the main interface:
    # left (2/3) shows the transcript, right (1/3) holds the input controls.
    col1, col2 = st.columns([2, 1])

    with col1:
        st.subheader("Conversation")
        # Display conversation history with chat bubbles.
        # Bot questions are stored with a "Q: " prefix; anything else is a
        # user answer. The prefix is stripped before rendering.
        st.markdown('<div class="chat-container">', unsafe_allow_html=True)
        for msg in st.session_state.messages:
            is_bot = msg.startswith('Q:')
            bubble_class = "bot-bubble" if is_bot else "user-bubble"
            message_content = msg[3:] if is_bot else msg  # drop the "Q: " prefix
            bot_icon = '<i class="fas fa-robot" style="margin-right: 8px;"></i>' if is_bot else ''
            # NOTE(review): message text is interpolated into raw HTML with
            # unsafe_allow_html=True — user input is not escaped here.
            st.markdown(
                f'<div class="chat-bubble {bubble_class}">{bot_icon}{message_content}</div>',
                unsafe_allow_html=True
            )
        st.markdown('</div>', unsafe_allow_html=True)

    with col2:
        st.subheader("Your Response")
        # Add microphone icon and voice input instructions
        st.markdown("""
        💡 **Voice Input Tip**:
        - Click the microphone icon in Chrome
        - Or use built-in speech-to-text
        """)

        # Show interview interface — gated on an API key being present.
        if api_key:
            if not st.session_state.interview_started and not st.session_state.interview_complete:
                # Context focus selection happens once, before the first question.
                if st.session_state.context_focus is None:
                    st.write("Before we begin, would you like to focus on a specific area or keep the questions general?")
                    focus_options = ["general", "professional background", "personal interests", "technical skills", "life experiences"]
                    selected_focus = st.selectbox("Choose focus area:", focus_options)
                    if st.button("Set Focus"):
                        st.session_state.context_focus = selected_focus
                        st.rerun()
                else:
                    # Focus chosen; kick off the interview with the first question.
                    if st.button("Start Interview", type="primary", use_container_width=True):
                        st.session_state.interview_started = True
                        question = get_random_question(api_key, st.session_state.context_focus)
                        st.session_state.messages.append(f"Q: {question}")
                        st.rerun()

            elif st.session_state.interview_started and not st.session_state.interview_complete:
                # Get last message
                last_message = st.session_state.messages[-1]

                # If last message was an answer, get next question.
                # (Submitting an answer triggers a rerun; this branch then
                # fetches the follow-up question before rendering the input.)
                if not last_message.startswith('Q:'):
                    question = get_random_question(api_key, st.session_state.context_focus)
                    st.session_state.messages.append(f"Q: {question}")
                    st.rerun()

                # User input
                user_answer = st.text_area("Your answer:", height=100)

                # Submit answer button — empty answers are silently ignored.
                if st.button("Submit Answer"):
                    if user_answer:
                        st.session_state.messages.append(user_answer)
                        st.rerun()

                # End interview button
                if st.button("End Interview"):
                    if len(st.session_state.messages) > 1:  # Ensure there's at least one Q&A pair
                        st.session_state.interview_complete = True
                        # Extract context from the full transcript in one call.
                        st.session_state.context_data = extract_context(api_key, st.session_state.messages)
                        st.rerun()
        else:
            st.warning("Please enter your OpenAI API key in the sidebar to begin.")
255
+
256
with tab3:
    st.header("Feature Gallery")
    # Caption/screenshot pairs, rendered in order.
    gallery = (
        ("### Interactive Interview Process", "screenshots/1.png"),
        ("### Context Focus Selection", "screenshots/2.png"),
        ("### Generated Context Summary", "screenshots/3.png"),
    )
    for caption, image_path in gallery:
        st.markdown(caption)
        st.image(image_path, use_column_width=True)
264
+
265
with tab4:
    if st.session_state.interview_complete and st.session_state.context_data:
        st.header("Generated Context")
        st.markdown("""
        Below is the AI-generated context summary based on your interview responses.
        This structured data can be used to enhance future AI interactions and create
        more personalized experiences.
        """)
        st.markdown(st.session_state.context_data)

        # Export options in columns
        st.subheader("Export Options")
        col3, col4 = st.columns(2)
        with col3:
            # NOTE(review): this button does not actually touch the clipboard;
            # it shows the text in a st.code block (which has its own copy
            # icon). The message is optimistic.
            if st.button("Copy to Clipboard", type="secondary", use_container_width=True):
                st.write("Context copied to clipboard!")
                st.code(st.session_state.context_data)

        with col4:
            # Timestamped filename for the browser download.
            # st.download_button serves `data=` directly, so nothing needs to
            # be written on the server. (The original also wrote the context
            # to a file in the app's working directory on every render of
            # this tab — an unnecessary side effect that leaked interview
            # data to server disk; that write has been removed.)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"context_data_{timestamp}.md"
            st.download_button(
                label="Download as Markdown",
                data=st.session_state.context_data,
                file_name=filename,
                mime="text/markdown",
                use_container_width=True
            )
    else:
        st.info("Complete the interview to generate your context summary.")
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ streamlit>=1.29.0
2
+ openai>=1.6.0
screenshots/1.png ADDED
screenshots/2.png ADDED
screenshots/3.png ADDED