Update app.py
app.py
CHANGED
@@ -1,86 +1,253 @@

(Removed in this hunk: the previous version of the helpers and generate_response(). The diff viewer truncated most of the deleted lines; the legible fragments show a simpler format_project_response(project) with no indentation handling, a get_philosophical_response(query, knowledge_base) check at the top of generate_response(), and a project-listing branch that appended the formatted major and algorithm-practice projects without section headings or follow-up links.)

Updated code in this hunk (new lines 1-253):
import streamlit as st
import json
from typing import Dict, List, Any
import re

def format_project_response(project: dict, indent_level: int = 0) -> str:
    """Format project details with proper indentation and spacing"""
    indent = " " * indent_level

    response = [f"{indent}• {project['name']}"]
    response.append(f"{indent} {project['description']}")

    if 'skills_used' in project:
        response.append(f"{indent} Technologies: {', '.join(project['skills_used'])}")

    if 'status' in project:
        status = project['status']
        if 'development' in status.lower() or 'progress' in status.lower():
            response.append(f"{indent} Status: {status}")
        if 'confidentiality_note' in project:
            response.append(f"{indent} Note: {project['confidentiality_note']}")

    return '\n'.join(response) + '\n'  # Add extra newline for spacing

def format_skills_response(skills: dict) -> str:
    """Format skills with proper hierarchy and spacing"""
    response = ["My Technical Expertise:\n"]

    categories = {
        'Machine Learning & AI': ['core', 'frameworks', 'focus_areas'],
        'Programming': ['primary', 'libraries', 'tools'],
        'Data & Analytics': ['databases', 'visualization', 'processing']
    }

    for category, subcategories in categories.items():
        response.append(f"• {category}")
        for subcat in subcategories:
            if subcat in skills['machine_learning']:
                items = skills['machine_learning'][subcat]
                response.append(f" - {subcat.title()}: {', '.join(items)}")
        response.append("")  # Add spacing between categories

    return '\n'.join(response)

def analyze_job_description(text: str, knowledge_base: dict) -> str:
    """Analyze job description and provide detailed alignment"""
    # Extract key requirements
    requirements = {
        'technical_tools': set(),
        'soft_skills': set(),
        'responsibilities': set()
    }

    # Common technical tools and skills
    tech_keywords = {
        'data science', 'analytics', 'visualization', 'tableau', 'python',
        'machine learning', 'modeling', 'automation', 'sql', 'data analysis'
    }

    # Common soft skills
    soft_keywords = {
        'collaborate', 'communicate', 'analyze', 'design', 'implement',
        'produce insights', 'improve', 'support'
    }

    text_lower = text.lower()

    # Extract company name if present
    companies = ['rbc', 'shopify', 'google', 'microsoft', 'amazon']
    company_name = next((company.upper() for company in companies if company in text_lower), None)

    # Extract requirements
    for word in tech_keywords:
        if word in text_lower:
            requirements['technical_tools'].add(word)

    for word in soft_keywords:
        if word in text_lower:
            requirements['soft_skills'].add(word)

    # Build response
    response_parts = []

    # Company-specific introduction if applicable
    if company_name:
        response_parts.append(f"Here's how I align with {company_name}'s requirements:\n")
    else:
        response_parts.append("Based on the job requirements, here's how I align:\n")

    # Technical Skills Alignment
    response_parts.append("• Technical Skills Match:")
    my_relevant_skills = []
    if 'visualization' in requirements['technical_tools'] or 'tableau' in requirements['technical_tools']:
        my_relevant_skills.append(" - Proficient in Tableau and data visualization (used in multiple projects)")
    if 'data analysis' in requirements['technical_tools']:
        my_relevant_skills.append(" - Strong data analysis skills demonstrated in projects like LoanTap Credit Assessment")
    if 'machine learning' in requirements['technical_tools'] or 'modeling' in requirements['technical_tools']:
        my_relevant_skills.append(" - Experienced in building ML models from scratch (demonstrated in algorithm practice projects)")

    response_parts.extend(my_relevant_skills)
    response_parts.append("")  # Add spacing

    # Business Understanding
    response_parts.append("• Business Acumen:")
    response_parts.append(" - Commerce background provides strong understanding of business requirements")
    response_parts.append(" - Experience in translating business needs into technical solutions")
    response_parts.append(" - Proven ability to communicate technical findings to business stakeholders")
    response_parts.append("")  # Add spacing

    # Project Experience
    response_parts.append("• Relevant Project Experience:")
    relevant_projects = []
    if 'automation' in requirements['technical_tools']:
        relevant_projects.append(" - Developed AI-powered POS system with automated operations")
    if 'data analysis' in requirements['technical_tools']:
        relevant_projects.append(" - Built credit assessment model for LoanTap using comprehensive data analysis")
    if 'machine learning' in requirements['technical_tools']:
        relevant_projects.append(" - Created multiple ML models from scratch, including predictive analytics for Ola")

    response_parts.extend(relevant_projects)
    response_parts.append("")  # Add spacing

    # Education and Additional Qualifications
    response_parts.append("• Additional Strengths:")
    response_parts.append(" - Currently pursuing advanced AI/ML education in Canada")
    response_parts.append(" - Strong foundation in both technical implementation and business analysis")
    response_parts.append(" - Experience in end-to-end project delivery and deployment")

    return '\n'.join(response_parts)

def format_story_response(knowledge_base: dict) -> str:
    """Format background story with proper structure"""
    response_parts = ["My Journey from Commerce to ML/AI:\n"]

    # Education Background
    response_parts.append("• Education Background:")
    response_parts.append(f" - Commerce degree from {knowledge_base['education']['undergraduate']['institution']}")
    response_parts.append(f" - Currently at {knowledge_base['education']['postgraduate'][0]['institution']}")
    response_parts.append(f" - Also enrolled at {knowledge_base['education']['postgraduate'][1]['institution']}")
    response_parts.append("")  # Add spacing

    # Career Transition
    response_parts.append("• Career Transition:")
    transition = next((qa['answer'] for qa in knowledge_base['frequently_asked_questions']
                       if 'transition' in qa['question'].lower()), '')
    response_parts.append(f" - {transition[:200]}...")  # Truncate for readability
    response_parts.append("")  # Add spacing

    # Current Focus
    response_parts.append("• Current Focus:")
    response_parts.append(" - Building practical ML projects")
    response_parts.append(" - Advancing AI/ML education in Canada")
    response_parts.append("")  # Add spacing

    # Goals
    response_parts.append("• Future Goals:")
    response_parts.append(" - Secure ML Engineering role in Canada")
    response_parts.append(" - Develop innovative AI solutions")
    response_parts.append(" - Contribute to cutting-edge ML projects")

    return '\n'.join(response_parts)

def add_relevant_links(response: str, query: str, knowledge_base: dict) -> str:
    """Add relevant links based on query context"""
    query_lower = query.lower()
    links = []

    # Add links strategically based on context
    if any(word in query_lower for word in ['project', 'portfolio', 'work']):
        links.append(f"\nView my complete portfolio: {knowledge_base['personal_details']['online_presence']['portfolio']}")

    if any(word in query_lower for word in ['background', 'experience', 'work']):
        links.append(f"\nConnect with me: {knowledge_base['personal_details']['online_presence']['linkedin']}")

    for post in knowledge_base['personal_details']['online_presence']['blog_posts']:
        if 'link' in post and any(word in query_lower for word in post['title'].lower().split()):
            links.append(f"\nRelated blog post: {post['link']}")
            break

    if links:
        response += '\n' + '\n'.join(links)

    return response

def generate_response(query: str, knowledge_base: dict) -> str:
    """Generate enhanced responses using the knowledge base"""
    query_lower = query.lower()

    # Handle project listing requests
    if any(word in query_lower for word in ['list', 'project', 'portfolio', 'built', 'created', 'developed']):
        response_parts = ["Here are my key projects:\n"]

        # Major Projects (under development)
        response_parts.append("Major Projects (In Development):")
        for project in knowledge_base['projects']['major_projects']:
            response_parts.append(format_project_response(project, indent_level=1))

        # Algorithm Implementation Projects
        response_parts.append("Completed Algorithm Implementation Projects:")
        for project in knowledge_base['projects']['algorithm_practice_projects']:
            response_parts.append(format_project_response(project, indent_level=1))

        response = '\n'.join(response_parts)
        return add_relevant_links(response, query, knowledge_base)

    # Handle job description analysis
    elif len(query.split()) > 20 and any(phrase in query_lower for phrase in
            ['requirements', 'qualifications', 'looking for', 'job description']):
        return analyze_job_description(query, knowledge_base)

    # Handle background/story queries
    elif any(word in query_lower for word in ['background', 'journey', 'story', 'transition']):
        return format_story_response(knowledge_base)

    # Handle skill-specific queries
    elif any(word in query_lower for word in ['skill', 'know', 'technology', 'stack']):
        return format_skills_response(knowledge_base['skills']['technical_skills'])

    # Handle standout/unique qualities queries
    elif any(word in query_lower for word in ['stand out', 'unique', 'different', 'special']):
        response_parts = ["What Makes Me Stand Out:\n"]
        response_parts.append("• Unique Background:")
        response_parts.append(" - Successfully transitioned from commerce to tech")
        response_parts.append(" - Blend of business acumen and technical expertise")
        response_parts.append("")

        response_parts.append("• Practical Experience:")
        response_parts.append(" - Built multiple ML projects from scratch")
        response_parts.append(" - Focus on real-world applications")
        response_parts.append("")

        response_parts.append("• Technical Depth:")
        response_parts.append(" - Strong foundation in ML/AI principles")
        response_parts.append(" - Experience with end-to-end project implementation")
        response_parts.append("")

        response_parts.append("• Innovation Focus:")
        response_parts.append(" - Developing novel solutions in ML/AI")
        response_parts.append(" - Emphasis on practical impact")

        return '\n'.join(response_parts)

    # Default response
    return (f"I'm {knowledge_base['personal_details']['professional_summary']}\n\n"
            "You can ask me about:\n"
            "• My projects and portfolio\n"
            "• My journey from commerce to ML/AI\n"
            "• My technical skills and experience\n"
            "• My fit for ML/AI roles\n"
            "Or paste a job description to see how my profile matches!")
def main():
    st.title("💬 Chat with Manyue's Portfolio")

@@ -106,33 +273,31 @@ def main():

(Removed in this hunk: the legible deleted lines show the previous chat handler echoing the user's prompt with st.chat_message("user") / st.markdown(prompt) before appending it; the remaining deleted lines were truncated by the diff viewer.)

Updated code in this hunk (new lines 273-303):

        - You can also paste job descriptions to see how my profile matches!
        """)
        st.session_state.displayed_welcome = True

    # Create two columns
    col1, col2 = st.columns([3, 1])

    with col1:
        # Display chat messages
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # Chat input
        if prompt := st.chat_input("Ask me anything or paste a job description..."):
            # Add user message
            st.session_state.messages.append({"role": "user", "content": prompt})

            try:
                # Generate and display response
                with st.chat_message("assistant"):
                    response = generate_response(prompt, st.session_state.knowledge_base)
                    st.markdown(response)
                    st.session_state.messages.append({"role": "assistant", "content": response})
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")

            st.rerun()

    with col2:
        st.subheader("Quick Questions")

@@ -141,27 +306,16 @@ def main():

(Removed in this hunk: the previous Quick Questions handler, which displayed each clicked question and generated its answer inline inside a try/except before rerunning, plus a different fourth canned question that the viewer truncated.)

Updated code in this hunk (new lines 306-321):

            "What are your technical skills?",
            "What makes you stand out?",
            "What's your journey into ML?",
            "Paste a job description to see how I match!"
        ]

        for question in example_questions:
            if st.button(question):
                st.session_state.messages.append({"role": "user", "content": question})
                st.rerun()

        st.markdown("---")
        if st.button("Clear Chat"):
            st.session_state.messages = []
            st.rerun()
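
For context, the helpers above read a fixed set of nested keys from st.session_state.knowledge_base, but this diff never shows where that dictionary is loaded. Below is a minimal, hypothetical sketch of a knowledge base that satisfies those lookups — the key names are taken from the code, while every value (names, URLs, institutions) is an illustrative placeholder rather than the real portfolio data — followed by a quick routing check that could be run in the same module as the functions above.

# Hypothetical knowledge base sketch: key names mirror the lookups in app.py,
# values are placeholders only.
sample_knowledge_base = {
    "personal_details": {
        "professional_summary": "an ML engineer in training with a commerce background.",
        "online_presence": {
            "portfolio": "https://example.com/portfolio",   # placeholder URL
            "linkedin": "https://example.com/linkedin",      # placeholder URL
            "blog_posts": [
                {"title": "From commerce to ML", "link": "https://example.com/post"},
            ],
        },
    },
    "projects": {
        "major_projects": [
            {
                "name": "AI-powered POS system",
                "description": "Point-of-sale system with automated operations.",
                "skills_used": ["Python", "scikit-learn"],
                "status": "In development",
                "confidentiality_note": "Details limited while in development.",
            },
        ],
        "algorithm_practice_projects": [
            {
                "name": "LoanTap Credit Assessment",
                "description": "Credit-risk model built from scratch.",
                "skills_used": ["pandas", "logistic regression"],
            },
        ],
    },
    "skills": {
        "technical_skills": {
            # format_skills_response() only reads subkeys of 'machine_learning'.
            "machine_learning": {
                "core": ["regression", "classification"],
                "frameworks": ["scikit-learn", "TensorFlow"],
                "primary": ["Python"],
                "databases": ["SQL"],
            },
        },
    },
    "education": {
        "undergraduate": {"institution": "Example University"},
        "postgraduate": [
            {"institution": "Example College A"},
            {"institution": "Example College B"},
        ],
    },
    "frequently_asked_questions": [
        {"question": "Why did you transition into ML?",
         "answer": "I moved from commerce into ML because ..."},
    ],
}

# Quick routing check (assumes generate_response() from above is in scope):
print(generate_response("Can you list your projects?", sample_knowledge_base))
print(generate_response("Tell me about your background and journey", sample_knowledge_base))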