Update app.py
app.py
CHANGED
@@ -1,253 +1,86 @@
 import streamlit as st
 import json
 from typing import Dict, List, Any
-import re
 
-def format_project_response(project: dict
-    """Format project details with
-
-
-    response = [f"{indent}• {project['name']}"]
-    response.append(f"{indent}  {project['description']}")
 
     if 'skills_used' in project:
-        response.append(f"
 
     if 'status' in project:
-
-        if '
-            response.append(f"
-    if 'confidentiality_note' in project:
-        response.append(f"{indent}  Note: {project['confidentiality_note']}")
 
-    return '\n'.join(response) + '\n'
 
-def
-    """
-
-
-    categories = {
-        'Machine Learning & AI': ['core', 'frameworks', 'focus_areas'],
-        'Programming': ['primary', 'libraries', 'tools'],
-        'Data & Analytics': ['databases', 'visualization', 'processing']
-    }
-
-    for category, subcategories in categories.items():
-        response.append(f"• {category}")
-        for subcat in subcategories:
-            if subcat in skills['machine_learning']:
-                items = skills['machine_learning'][subcat]
-                response.append(f"  - {subcat.title()}: {', '.join(items)}")
-        response.append("")  # Add spacing between categories
 
-
 
-
-
-
-
-        'technical_tools': set(),
-        'soft_skills': set(),
-        'responsibilities': set()
-    }
-
-    # Common technical tools and skills
-    tech_keywords = {
-        'data science', 'analytics', 'visualization', 'tableau', 'python',
-        'machine learning', 'modeling', 'automation', 'sql', 'data analysis'
-    }
-
-    # Common soft skills
-    soft_keywords = {
-        'collaborate', 'communicate', 'analyze', 'design', 'implement',
-        'produce insights', 'improve', 'support'
-    }
-
-    text_lower = text.lower()
-
-    # Extract company name if present
-    companies = ['rbc', 'shopify', 'google', 'microsoft', 'amazon']
-    company_name = next((company.upper() for company in companies if company in text_lower), None)
-
-    # Extract requirements
-    for word in tech_keywords:
-        if word in text_lower:
-            requirements['technical_tools'].add(word)
-
-    for word in soft_keywords:
-        if word in text_lower:
-            requirements['soft_skills'].add(word)
-
-    # Build response
-    response_parts = []
-
-    # Company-specific introduction if applicable
-    if company_name:
-        response_parts.append(f"Here's how I align with {company_name}'s requirements:\n")
-    else:
-        response_parts.append("Based on the job requirements, here's how I align:\n")
 
-
-
-
-
-        my_relevant_skills.append("  - Proficient in Tableau and data visualization (used in multiple projects)")
-    if 'data analysis' in requirements['technical_tools']:
-        my_relevant_skills.append("  - Strong data analysis skills demonstrated in projects like LoanTap Credit Assessment")
-    if 'machine learning' in requirements['technical_tools'] or 'modeling' in requirements['technical_tools']:
-        my_relevant_skills.append("  - Experienced in building ML models from scratch (demonstrated in algorithm practice projects)")
-
-    response_parts.extend(my_relevant_skills)
-    response_parts.append("")  # Add spacing
-
-    # Business Understanding
-    response_parts.append("• Business Acumen:")
-    response_parts.append("  - Commerce background provides strong understanding of business requirements")
-    response_parts.append("  - Experience in translating business needs into technical solutions")
-    response_parts.append("  - Proven ability to communicate technical findings to business stakeholders")
-    response_parts.append("")  # Add spacing
-
-    # Project Experience
-    response_parts.append("• Relevant Project Experience:")
-    relevant_projects = []
-    if 'automation' in requirements['technical_tools']:
-        relevant_projects.append("  - Developed AI-powered POS system with automated operations")
-    if 'data analysis' in requirements['technical_tools']:
-        relevant_projects.append("  - Built credit assessment model for LoanTap using comprehensive data analysis")
-    if 'machine learning' in requirements['technical_tools']:
-        relevant_projects.append("  - Created multiple ML models from scratch, including predictive analytics for Ola")
-
-    response_parts.extend(relevant_projects)
-    response_parts.append("")  # Add spacing
-
-    # Education and Additional Qualifications
-    response_parts.append("• Additional Strengths:")
-    response_parts.append("  - Currently pursuing advanced AI/ML education in Canada")
-    response_parts.append("  - Strong foundation in both technical implementation and business analysis")
-    response_parts.append("  - Experience in end-to-end project delivery and deployment")
-
-    return '\n'.join(response_parts)
 
-
-    """Format background story with proper structure"""
-    response_parts = ["My Journey from Commerce to ML/AI:\n"]
-
-    # Education Background
-    response_parts.append("• Education Background:")
-    response_parts.append(f"  - Commerce degree from {knowledge_base['education']['undergraduate']['institution']}")
-    response_parts.append(f"  - Currently at {knowledge_base['education']['postgraduate'][0]['institution']}")
-    response_parts.append(f"  - Also enrolled at {knowledge_base['education']['postgraduate'][1]['institution']}")
-    response_parts.append("")  # Add spacing
-
-    # Career Transition
-    response_parts.append("• Career Transition:")
-    transition = next((qa['answer'] for qa in knowledge_base['frequently_asked_questions']
-                       if 'transition' in qa['question'].lower()), '')
-    response_parts.append(f"  - {transition[:200]}...")  # Truncate for readability
-    response_parts.append("")  # Add spacing
-
-    # Current Focus
-    response_parts.append("• Current Focus:")
-    response_parts.append("  - Building practical ML projects")
-    response_parts.append("  - Advancing AI/ML education in Canada")
-    response_parts.append("")  # Add spacing
-
-    # Goals
-    response_parts.append("• Future Goals:")
-    response_parts.append("  - Secure ML Engineering role in Canada")
-    response_parts.append("  - Develop innovative AI solutions")
-    response_parts.append("  - Contribute to cutting-edge ML projects")
-
-    return '\n'.join(response_parts)
 
-
-
-
-
-
-
-
-
-
-
-    links.append(f"\nConnect with me: {knowledge_base['personal_details']['online_presence']['linkedin']}")
-
-    for post in knowledge_base['personal_details']['online_presence']['blog_posts']:
-        if 'link' in post and any(word in query_lower for word in post['title'].lower().split()):
-            links.append(f"\nRelated blog post: {post['link']}")
-            break
-
-    if links:
-        response += '\n' + '\n'.join(links)
-
-    return response
 
 def generate_response(query: str, knowledge_base: dict) -> str:
-    """
     query_lower = query.lower()
 
     # Handle project listing requests
     if any(word in query_lower for word in ['list', 'project', 'portfolio', 'built', 'created', 'developed']):
-        response_parts = ["Here are my key projects
 
-        # Major Projects
-        response_parts.append("
         for project in knowledge_base['projects']['major_projects']:
-            response_parts.append(format_project_response(project
 
-        # Algorithm
-        response_parts.append("
         for project in knowledge_base['projects']['algorithm_practice_projects']:
-            response_parts.append(format_project_response(project
 
         response = '\n'.join(response_parts)
-        return add_relevant_links(response, query, knowledge_base)
-
-    # Handle job description analysis
-    elif len(query.split()) > 20 and any(phrase in query_lower for phrase in
-        ['requirements', 'qualifications', 'looking for', 'job description']):
-        return analyze_job_description(query, knowledge_base)
-
-    # Handle background/story queries
-    elif any(word in query_lower for word in ['background', 'journey', 'story', 'transition']):
-        return format_story_response(knowledge_base)
-
-    # Handle skill-specific queries
-    elif any(word in query_lower for word in ['skill', 'know', 'technology', 'stack']):
-        return format_skills_response(knowledge_base['skills']['technical_skills'])
-
-    # Handle standout/unique qualities queries
-    elif any(word in query_lower for word in ['stand out', 'unique', 'different', 'special']):
-        response_parts = ["What Makes Me Stand Out:\n"]
-        response_parts.append("• Unique Background:")
-        response_parts.append("  - Successfully transitioned from commerce to tech")
-        response_parts.append("  - Blend of business acumen and technical expertise")
-        response_parts.append("")
 
-
-
-
-        response_parts.append("")
 
-
-        response_parts.append("  - Strong foundation in ML/AI principles")
-        response_parts.append("  - Experience with end-to-end project implementation")
-        response_parts.append("")
-
-        response_parts.append("• Innovation Focus:")
-        response_parts.append("  - Developing novel solutions in ML/AI")
-        response_parts.append("  - Emphasis on practical impact")
-
-        return '\n'.join(response_parts)
 
-    #
-    return (f"I'm {knowledge_base['personal_details']['professional_summary']}\n\n"
-            "You can ask me about:\n"
-            "• My projects and portfolio\n"
-            "• My journey from commerce to ML/AI\n"
-            "• My technical skills and experience\n"
-            "• My fit for ML/AI roles\n"
-            "Or paste a job description to see how my profile matches!")
 
 def main():
     st.title("💬 Chat with Manyue's Portfolio")
@@ -257,7 +90,7 @@ def main():
         st.session_state.messages = []
     if "knowledge_base" not in st.session_state:
         try:
-            with open('
                 st.session_state.knowledge_base = json.load(f)
         except FileNotFoundError:
             st.error("Knowledge base file not found.")
@@ -273,31 +106,33 @@ def main():
         - You can also paste job descriptions to see how my profile matches!
         """)
         st.session_state.displayed_welcome = True
-
-    # Create two columns
     col1, col2 = st.columns([3, 1])
 
     with col1:
-        #
-
-
-
 
         # Chat input
-        if prompt := st.chat_input("Ask me anything or paste a job description..."):
             # Add user message
             st.session_state.messages.append({"role": "user", "content": prompt})
 
-
-
-
                 response = generate_response(prompt, st.session_state.knowledge_base)
                 st.markdown(response)
                 st.session_state.messages.append({"role": "assistant", "content": response})
-
-
-
-            st.rerun()
 
     with col2:
         st.subheader("Quick Questions")
@@ -306,16 +141,27 @@ def main():
            "What are your technical skills?",
            "What makes you stand out?",
            "What's your journey into ML?",
-           "
        ]
 
-
-
-
            st.rerun()
 
        st.markdown("---")
-       if st.button("Clear Chat"):
           st.session_state.messages = []
           st.rerun()
 
app.py (updated):

 import streamlit as st
 import json
 from typing import Dict, List, Any
 
+def format_project_response(project: dict) -> str:
+    """Format project details with clear separation"""
+    response = [f"\n• {project['name']}:"]
+    response.append(f"  Description: {project['description']}")
 
     if 'skills_used' in project:
+        response.append(f"\n  Technologies Used:")
+        response.append(f"  {', '.join(project['skills_used'])}")
 
     if 'status' in project:
+        response.append(f"\n  Current Status: {project['status']}")
+    if 'confidentiality_note' in project:
+        response.append(f"  Note: {project['confidentiality_note']}")
 
+    return '\n'.join(response) + '\n'
 
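For illustration, a minimal sketch of how the new formatter behaves; the sample project dict below is invented and is not an entry from the real knowledge base.

    sample_project = {
        "name": "LoanTap Credit Assessment",  # project named elsewhere in the portfolio; details below are invented
        "description": "Credit risk scoring model built on applicant data.",
        "skills_used": ["Python", "SQL"],
        "status": "Completed",
    }

    # Produces a bulleted block (name, description, technologies, status); the embedded
    # "\n" prefixes supply the blank-line separation between sections.
    print(format_project_response(sample_project))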
+def get_philosophical_response(query: str, knowledge_base: dict) -> str:
+    """Handle philosophical or market-related queries"""
+    query_lower = query.lower()
 
+    # Market-related response
+    if any(word in query_lower for word in ['market', 'job', 'opportunity', 'down']):
+        return """I believe success in any market comes down to quality of effort and preparation. While the market may have cycles, I focus on:
+
+• Continuous Skill Development:
+- Building practical projects that solve real problems
+- Staying updated with latest ML/AI trends
+- Enhancing my technical portfolio
+
+• Value Proposition:
+- Unique combination of business and technical skills
+- Focus on practical implementation
+- Strong problem-solving approach
+
+I see this period as an opportunity to strengthen my skills and build more impactful projects."""
 
+    # Off-topic response
+    if any(word in query_lower for word in ['weather', 'temperature', 'climate']):
+        return """I'm focused on discussing my ML/AI journey and projects. For weather information, I'd recommend checking local weather services.
+
+Would you like to know about:
+• My ML projects and technical skills?
+• My journey from commerce to tech?
+• My approach to the current job market?"""
+
+    return None  # Return None if not a philosophical query
 
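A small sketch of the routing contract this helper sets up, assuming it is in scope: market-flavoured queries get the canned answer, anything else falls through as None so generate_response can continue to its other handlers.

    # "job"/"market" wording triggers the canned market answer; knowledge_base is not consulted here.
    reply = get_philosophical_response("How is the job market right now?", {})
    assert reply is not None and "success in any market" in reply

    # Unrelated queries return None, so generate_response falls through to its other handlers.
    assert get_philosophical_response("Tell me about your portfolio", {}) is None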
 def generate_response(query: str, knowledge_base: dict) -> str:
+    """Enhanced response generation with better handling of various queries"""
     query_lower = query.lower()
 
+    # First check for philosophical/off-topic queries
+    philosophical_response = get_philosophical_response(query, knowledge_base)
+    if philosophical_response:
+        return philosophical_response
+
     # Handle project listing requests
     if any(word in query_lower for word in ['list', 'project', 'portfolio', 'built', 'created', 'developed']):
+        response_parts = ["Here are my key projects:"]
 
+        # Major Projects
+        response_parts.append("\nMajor Projects (In Development):")
         for project in knowledge_base['projects']['major_projects']:
+            response_parts.append(format_project_response(project))
 
+        # Algorithm Projects
+        response_parts.append("\nCompleted Algorithm Implementation Projects:")
         for project in knowledge_base['projects']['algorithm_practice_projects']:
+            response_parts.append(format_project_response(project))
 
         response = '\n'.join(response_parts)
 
+        # Add relevant links
+        if 'online_presence' in knowledge_base.get('personal_details', {}):
+            response += f"\n\nView my complete portfolio: {knowledge_base['personal_details']['online_presence']['portfolio']}"
 
+        return response
 
+    # [Rest of your existing response handlers]
 
 def main():
     st.title("💬 Chat with Manyue's Portfolio")
@@ -257,7 +90,7 @@ def main():
         st.session_state.messages = []
     if "knowledge_base" not in st.session_state:
         try:
+            with open('manny_knowledge_base.json', 'r', encoding='utf-8') as f:
                 st.session_state.knowledge_base = json.load(f)
         except FileNotFoundError:
             st.error("Knowledge base file not found.")
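For orientation, a minimal shape of manny_knowledge_base.json that the handlers above read from; every value here is a placeholder, and the real file evidently carries more sections (education, skills, FAQs) used by the remaining handlers.

    # Assumed minimal structure; all values are placeholders, not real data.
    minimal_knowledge_base = {
        "personal_details": {
            "online_presence": {"portfolio": "https://example.com/portfolio"},
        },
        "projects": {
            "major_projects": [
                {
                    "name": "AI-powered POS system",
                    "description": "Placeholder description.",
                    "skills_used": ["Python"],
                    "status": "In development",
                    "confidentiality_note": "Details available on request.",
                },
            ],
            "algorithm_practice_projects": [
                {"name": "LoanTap Credit Assessment", "description": "Placeholder description."},
            ],
        },
    }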
@@ -273,31 +106,33 @@ def main():
         - You can also paste job descriptions to see how my profile matches!
         """)
         st.session_state.displayed_welcome = True
+
+    # Create two columns with proper sizing
     col1, col2 = st.columns([3, 1])
 
     with col1:
+        # Chat container for better scrolling
+        chat_container = st.container()
+        with chat_container:
+            for message in st.session_state.messages:
+                with st.chat_message(message["role"]):
+                    st.markdown(message["content"])
 
         # Chat input
+        if prompt := st.chat_input("Ask me anything or paste a job description...", key="chat_input"):
             # Add user message
+            with st.chat_message("user"):
+                st.markdown(prompt)
             st.session_state.messages.append({"role": "user", "content": prompt})
 
+            # Generate and display response
+            with st.chat_message("assistant"):
+                try:
                     response = generate_response(prompt, st.session_state.knowledge_base)
                     st.markdown(response)
                     st.session_state.messages.append({"role": "assistant", "content": response})
+                except Exception as e:
+                    st.error(f"An error occurred: {str(e)}")
 
     with col2:
         st.subheader("Quick Questions")
@@ -306,16 +141,27 @@ def main():
            "What are your technical skills?",
            "What makes you stand out?",
            "What's your journey into ML?",
+           "Your view on the current market?"
        ]
 
+       # Handle quick questions with proper keys
+       for i, question in enumerate(example_questions):
+           if st.button(question, key=f"btn_{i}"):
+               with st.chat_message("user"):
+                   st.markdown(question)
+               st.session_state.messages.append({"role": "user", "content": question})
+
+               with st.chat_message("assistant"):
+                   try:
+                       response = generate_response(question, st.session_state.knowledge_base)
+                       st.markdown(response)
+                       st.session_state.messages.append({"role": "assistant", "content": response})
+                   except Exception as e:
+                       st.error(f"An error occurred: {str(e)}")
               st.rerun()
 
        st.markdown("---")
+       if st.button("Clear Chat", key="clear_chat"):
           st.session_state.messages = []
           st.rerun()
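Finally, a rough smoke test of the new routing; it assumes app.py keeps main() behind an if __name__ == "__main__" guard so the import has no side effects, and the knowledge base below is a made-up stub.

    from app import generate_response

    stub_kb = {
        "personal_details": {"online_presence": {"portfolio": "https://example.com/portfolio"}},
        "projects": {
            "major_projects": [{"name": "Demo project", "description": "Placeholder."}],
            "algorithm_practice_projects": [],
        },
    }

    # Project-style queries should produce the project listing plus the portfolio link.
    out = generate_response("Can you list the projects you built?", stub_kb)
    assert "Major Projects (In Development):" in out
    assert "View my complete portfolio:" in out

    # Market-style queries should be answered by get_philosophical_response instead.
    assert "opportunity" in generate_response("Your view on the current market?", stub_kb)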