Update app.py
Browse files
app.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import streamlit as st
|
2 |
import json
|
3 |
-
import
|
|
|
4 |
|
5 |
# Initialize Streamlit page configuration
|
6 |
st.set_page_config(
|
@@ -9,113 +10,156 @@ st.set_page_config(
|
|
9 |
layout="wide"
|
10 |
)
|
11 |
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
# Load and cache knowledge base
|
27 |
@st.cache_data
|
28 |
def load_knowledge_base():
|
29 |
try:
|
30 |
-
with open('
|
31 |
return json.load(f)
|
32 |
except FileNotFoundError:
|
33 |
st.error("Knowledge base file not found.")
|
34 |
return {}
|
35 |
|
36 |
-
def get_context(query: str, knowledge_base: dict) -> str:
    """Build a newline-joined context string relevant to *query*.

    The query is routed to exactly one topic bucket (projects, skills,
    role fit, or background) by keyword matching; when no bucket matches
    or the matched bucket yields nothing, a short personal introduction
    is returned instead.
    """
    lowered = query.lower()

    def mentions(*keywords: str) -> bool:
        # True when any keyword occurs anywhere in the lowered query.
        return any(k in lowered for k in keywords)

    snippets = []

    if mentions("project", "build", "develop", "create"):
        # Project-related queries
        if "projects" in knowledge_base:
            snippets += [
                f"Project - {name}: {details.get('description', '')}"
                for name, details in knowledge_base["projects"].items()
            ]
    elif mentions("skill", "know", "experience", "expert"):
        # Skills and expertise
        if "skills" in knowledge_base.get("personal_details", {}):
            for skill, desc in knowledge_base["personal_details"]["skills"].items():
                snippets.append(f"Skill - {skill}: {desc}")
    elif mentions("role", "fit", "job", "position", "career"):
        # Role fit and career
        snippets.append(knowledge_base.get("professional_journey", {}).get("mindset", ""))
        snippets += knowledge_base.get("goals_and_aspirations", {}).get("short_term", [])
    elif mentions("background", "journey", "story"):
        # Background and journey: pull matching FAQ answers.
        for qa in knowledge_base.get("frequently_asked_questions", []):
            if "background" in qa["question"].lower():
                snippets.append(qa["answer"])

    if not snippets:
        # Default context: a minimal self-introduction.
        personal = knowledge_base.get('personal_details', {})
        snippets = [
            f"I am {personal.get('full_name', 'Manyue')}",
            personal.get('professional_summary', ''),
        ]

    return "\n".join(snippets)
|
75 |
-
|
76 |
-
def generate_openai_prompt(query: str, context: str) -> str:
    """Assemble the instruction prompt sent to the OpenAI chat model.

    The prompt embeds the retrieved *context* and the user's *query*,
    followed by fixed persona/tone instructions.
    """
    template = (
        "You are Manyue's AI assistant. Use this context to answer questions about Manyue:\n"
        "\n"
        "Context:\n"
        "{context}\n"
        "\n"
        "Question: {query}\n"
        "\n"
        "Instructions:\n"
        "- Use information from the context\n"
        "- Speak in first person as Manyue\n"
        "- Be specific about technical details and achievements\n"
        "- Keep responses concise but informative\n"
        "- Focus on relevant experience and skills\n"
        "- Maintain a professional tone"
    )
    # str.format substitutes values literally; braces inside the
    # user-supplied text are not re-processed.
    return template.format(context=context, query=query)
|
92 |
-
|
93 |
-
def get_chat_response(query: str, knowledge_base: dict) -> str:
    """Get response from OpenAI API.

    Builds a context block for *query* from the knowledge base, wraps it
    in an instruction prompt, and sends it to the chat model. On any
    failure the error is shown in the Streamlit UI and a fixed apology
    string is returned instead of raising.

    NOTE(review): relies on a module-level `openai` import and the legacy
    pre-1.0 `openai.ChatCompletion` API — confirm the installed SDK version.
    """
    try:
        # Get context relevant to the query from the knowledge base
        context = get_context(query, knowledge_base)

        # Generate prompt embedding context + persona instructions
        prompt = generate_openai_prompt(query, context)

        # Generate response (single-turn call; temperature 0.7, capped at 200 tokens)
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=200,
            temperature=0.7
        )

        # Extract and clean response (legacy dict-style response payload)
        response_text = response['choices'][0]['message']['content'].strip()

        return response_text

    except Exception as e:
        # Broad catch is deliberate: any API/network error becomes a
        # user-visible message rather than a crashed Streamlit session.
        st.error(f"Error generating response: {str(e)}")
        return "I apologize, but I encountered an error. Please try asking again."
|
118 |
-
|
119 |
def initialize_session_state():
|
120 |
"""Initialize session state variables"""
|
121 |
if "messages" not in st.session_state:
|
@@ -123,15 +167,15 @@ def initialize_session_state():
|
|
123 |
if "knowledge_base" not in st.session_state:
|
124 |
st.session_state.knowledge_base = load_knowledge_base()
|
125 |
|
126 |
-
def display_chat_interface():
    """Display main chat interface.

    Replays the conversation history stored in st.session_state.messages,
    rendering each entry in a chat bubble for its original role.
    """
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            # Content is rendered as Markdown, matching how it was stored.
            st.markdown(message["content"])
|
131 |
-
|
132 |
def main():
|
133 |
st.title("💬 Chat with Manyue's Portfolio")
|
134 |
-
st.write("
|
|
|
|
|
|
|
|
|
|
|
|
|
135 |
|
136 |
# Initialize session state
|
137 |
initialize_session_state()
|
@@ -140,34 +184,32 @@ def main():
|
|
140 |
col1, col2 = st.columns([3, 1])
|
141 |
|
142 |
with col1:
|
143 |
-
# Display chat
|
144 |
-
|
|
|
|
|
145 |
|
146 |
# Chat input
|
147 |
-
if prompt := st.chat_input("
|
148 |
# Add user message
|
149 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
150 |
-
|
151 |
-
# Display user message
|
152 |
with st.chat_message("user"):
|
153 |
st.markdown(prompt)
|
154 |
|
155 |
# Generate and display response
|
156 |
with st.chat_message("assistant"):
|
157 |
-
|
158 |
-
response = get_chat_response(prompt, st.session_state.knowledge_base)
|
159 |
st.markdown(response)
|
160 |
-
|
161 |
-
# Add assistant response to history
|
162 |
-
st.session_state.messages.append({"role": "assistant", "content": response})
|
163 |
|
164 |
with col2:
|
165 |
st.subheader("Quick Questions")
|
166 |
example_questions = [
|
167 |
"Tell me about your ML projects",
|
168 |
"What are your technical skills?",
|
169 |
-
"Why
|
170 |
-
"What
|
|
|
171 |
]
|
172 |
|
173 |
for question in example_questions:
|
@@ -181,4 +223,4 @@ def main():
|
|
181 |
st.experimental_rerun()
|
182 |
|
183 |
if __name__ == "__main__":
|
184 |
-
main()
|
|
|
1 |
import streamlit as st
|
2 |
import json
|
3 |
+
from typing import Dict, List, Any
|
4 |
+
import re
|
5 |
|
6 |
# Initialize Streamlit page configuration
|
7 |
st.set_page_config(
|
|
|
10 |
layout="wide"
|
11 |
)
|
12 |
|
13 |
+
def extract_key_requirements(text: str) -> Dict[str, List[str]]:
    """Scan *text* for known requirement keywords, grouped by category.

    Returns a dict mapping each category name ('technical_skills',
    'soft_skills', 'education', 'experience') to the keywords — in
    catalog order — that occur as substrings of the lowercased text.
    Categories with no hits map to an empty list.
    """
    haystack = text.lower()

    keyword_catalog = {
        'technical_skills': [
            'python', 'machine learning', 'deep learning', 'nlp', 'neural networks',
            'data science', 'sql', 'tensorflow', 'pytorch', 'scikit-learn', 'data analysis'
        ],
        'soft_skills': [
            'communication', 'teamwork', 'leadership', 'problem solving', 'analytical',
            'collaborative', 'independent', 'innovative'
        ],
        'education': [
            'master', 'phd', 'bachelor', 'degree', 'computer science', 'statistics',
            'mathematics', 'post graduate', 'certification'
        ],
        'experience': [
            'year', 'experience', 'background', 'industry', 'startup', 'enterprise'
        ]
    }

    # Substring containment check per keyword; a dict comprehension keeps
    # one entry per category even when nothing matched.
    return {
        group: [term for term in terms if term in haystack]
        for group, terms in keyword_catalog.items()
    }
|
42 |
+
|
43 |
+
def analyze_profile_match(requirements: Dict[str, List[str]], knowledge_base: dict) -> Dict[str, Any]:
    """Analyze how well the profile matches the extracted requirements.

    Args:
        requirements: output of extract_key_requirements() — keyword lists
            keyed by category.
        knowledge_base: parsed knowledge-base JSON. May be empty:
            load_knowledge_base() returns {} when the file is missing, so
            every lookup here uses .get() defaults instead of crashing
            with KeyError/IndexError.

    Returns:
        dict with 'matching_tech_skills', 'matching_soft_skills',
        'relevant_projects' (at most 2), 'education_matches', and
        'background_story' (first FAQ answer, the transition story,
        or '' when no FAQ entries exist).
    """
    skills_section = knowledge_base.get('skills', {})
    my_skills = {s.lower() for s in skills_section.get('technical_skills', [])}
    my_soft_skills = {s.lower() for s in skills_section.get('soft_skills', [])}

    def _overlap(wanted: List[str], owned: set) -> List[str]:
        # Substring match in either direction so "ml" matches "ml engineering".
        return [skill for skill in wanted
                if any(mine in skill or skill in mine for mine in owned)]

    matching_tech_skills = _overlap(requirements.get('technical_skills', []), my_skills)
    matching_soft_skills = _overlap(requirements.get('soft_skills', []), my_soft_skills)

    # Find relevant projects. The joined requirements string is loop-invariant,
    # so build it once instead of re-joining per project.
    tech_blob = ' '.join(requirements.get('technical_skills', []))
    relevant_projects = []
    for project in knowledge_base.get('professional_experience', {}).get('projects', []):
        project_skills = {s.lower() for s in project.get('skills_used', [])}
        if any(skill in tech_blob for skill in project_skills):
            relevant_projects.append(project)

    # Check education match against requested education keywords.
    education_matches = []
    for edu in knowledge_base.get('education', {}).get('postgraduate', []):
        course = edu.get('course_name', '').lower()
        if any(req in course for req in requirements.get('education', [])):
            education_matches.append(edu)

    faq = knowledge_base.get('frequently_asked_questions', [])
    background_story = faq[0].get('answer', '') if faq else ''  # Transition story

    return {
        'matching_tech_skills': matching_tech_skills,
        'matching_soft_skills': matching_soft_skills,
        'relevant_projects': relevant_projects[:2],  # cap at the two best
        'education_matches': education_matches,
        'background_story': background_story
    }
|
78 |
+
|
79 |
+
def generate_response(query: str, knowledge_base: dict) -> str:
    """Generate enhanced responses using the knowledge base.

    Routes *query* through keyword-driven branches, in priority order:
    1. long queries (>20 words) or job-description phrases -> requirement
       extraction + profile-match summary;
    2. role/company queries -> pitch, with a naive capitalized-word
       heuristic for the company name;
    3. skill queries -> technical-stack summary;
    4. background queries -> transition story excerpt;
    5. otherwise a default introduction.

    NOTE(review): branches 2-5 index directly into the knowledge base
    (e.g. knowledge_base['skills'], projects[0]); an empty dict from
    load_knowledge_base() would raise KeyError/IndexError here — confirm
    callers only reach this with a populated knowledge base.
    """
    query_lower = query.lower()

    # Handle job descriptions or role requirements
    if len(query.split()) > 20 or any(phrase in query_lower for phrase in
            ['requirements', 'qualifications', 'looking for', 'job description', 'responsibilities']):

        requirements = extract_key_requirements(query)
        match_analysis = analyze_profile_match(requirements, knowledge_base)

        response_parts = []

        # Start with unique background if it's an ML role
        if any(skill in query_lower for skill in ['machine learning', 'ml', 'ai', 'data science']):
            transition_story = match_analysis['background_story']
            response_parts.append(f"With my unique transition from commerce to ML/AI, {transition_story[:200]}...")

        # Add technical alignment
        if match_analysis['matching_tech_skills']:
            response_parts.append(f"I have hands-on experience with key technical requirements including {', '.join(match_analysis['matching_tech_skills'])}.")

        # Highlight relevant project
        if match_analysis['relevant_projects']:
            project = match_analysis['relevant_projects'][0]
            response_parts.append(f"My project '{project['name']}' demonstrates my capabilities as {project['description']}")

        # Add education and Canadian context
        response_parts.append("I'm completing advanced AI/ML education in Canada through Georgian College and George Brown College, gaining cutting-edge knowledge in ML engineering and practical implementation.")

        # Add forward-looking statement
        response_parts.append("I'm actively expanding my ML expertise through hands-on projects and am ready to contribute to innovative ML solutions in the Canadian tech industry.")

        return ' '.join(response_parts)

    # Handle specific company/role queries
    elif any(word in query_lower for word in ['role', 'fit', 'job', 'position', 'company']):
        # Heuristic: first capitalized word that isn't a known acronym is
        # treated as the company name.
        company_name = None
        words = query.split()
        for word in words:
            if word[0].isupper() and word.lower() not in ['i', 'ml', 'ai', 'nlp']:
                company_name = word
                break

        projects = knowledge_base['professional_experience']['projects']
        skills = knowledge_base['skills']['technical_skills']
        goals = knowledge_base['goals_and_aspirations']['short_term']

        response = [
            f"{'As a candidate for ' + company_name if company_name else 'As an ML engineer candidate'}, I bring a unique combination of technical expertise and business understanding from my commerce background.",
            f"My strongest project is my {projects[0]['name']}, where {projects[0]['description']}",
            f"I've developed expertise in {', '.join(skills[:3])}, applying these skills in real-world projects.",
            "With my Canadian AI/ML education and practical project experience, I'm well-prepared to contribute to innovative ML solutions.",
            f"I'm actively {goals[0].lower()} and expanding my portfolio with industry-relevant projects."
        ]

        return ' '.join(response)

    # Handle specific skill queries
    elif any(word in query_lower for word in ['skill', 'know', 'experience', 'expert']):
        tech_skills = knowledge_base['skills']['technical_skills']
        projects = knowledge_base['professional_experience']['projects']

        return f"My core technical stack includes {', '.join(tech_skills[:5])}. I've applied these skills in real-world projects like my {projects[0]['name']}, which {projects[0]['description']}. I'm currently enhancing my ML expertise through advanced studies in Canada and practical project implementation."

    # Handle background/journey queries
    elif any(word in query_lower for word in ['background', 'journey', 'story']):
        # First FAQ entry whose question mentions 'transition'; '' if none.
        transition = next((qa['answer'] for qa in knowledge_base['frequently_asked_questions']
                           if 'transition' in qa['question'].lower()), '')
        return f"{transition[:300]}... This unique journey gives me both technical expertise and business understanding, valuable for ML engineering roles."

    # Default response
    return f"I'm {knowledge_base['personal_details']['full_name']}, a Machine Learning Engineer candidate with a unique background in commerce and technology. {knowledge_base['personal_details']['professional_summary']}"
|
152 |
|
153 |
# Load and cache knowledge base
|
154 |
@st.cache_data
def load_knowledge_base() -> dict:
    """Load the portfolio knowledge base from disk (cached by Streamlit).

    Returns:
        The parsed JSON dict, or {} — with a visible st.error — when the
        file is missing or contains invalid JSON, so the app degrades
        gracefully instead of crashing.
    """
    try:
        with open('manny_knowledge_base.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        st.error("Knowledge base file not found.")
        return {}
    except json.JSONDecodeError as e:
        # A corrupted file should degrade the same way as a missing one,
        # not raise an uncaught exception into the Streamlit run loop.
        st.error(f"Knowledge base file is not valid JSON: {e}")
        return {}
|
162 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
163 |
def initialize_session_state():
|
164 |
"""Initialize session state variables"""
|
165 |
if "messages" not in st.session_state:
|
|
|
167 |
if "knowledge_base" not in st.session_state:
|
168 |
st.session_state.knowledge_base = load_knowledge_base()
|
169 |
|
|
|
|
|
|
|
|
|
|
|
|
|
170 |
def main():
|
171 |
st.title("💬 Chat with Manyue's Portfolio")
|
172 |
+
st.write("""
|
173 |
+
Hi! I'm Manyue's AI assistant. I can tell you about:
|
174 |
+
- My journey from commerce to ML/AI
|
175 |
+
- My technical skills and projects
|
176 |
+
- My fit for ML/AI roles
|
177 |
+
- You can also paste job descriptions, and I'll show how my profile matches!
|
178 |
+
""")
|
179 |
|
180 |
# Initialize session state
|
181 |
initialize_session_state()
|
|
|
184 |
col1, col2 = st.columns([3, 1])
|
185 |
|
186 |
with col1:
|
187 |
+
# Display chat messages
|
188 |
+
for message in st.session_state.messages:
|
189 |
+
with st.chat_message(message["role"]):
|
190 |
+
st.markdown(message["content"])
|
191 |
|
192 |
# Chat input
|
193 |
+
if prompt := st.chat_input("Ask me anything about Manyue's experience or paste a job description..."):
|
194 |
# Add user message
|
195 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
|
|
|
|
196 |
with st.chat_message("user"):
|
197 |
st.markdown(prompt)
|
198 |
|
199 |
# Generate and display response
|
200 |
with st.chat_message("assistant"):
|
201 |
+
response = generate_response(prompt, st.session_state.knowledge_base)
|
|
|
202 |
st.markdown(response)
|
203 |
+
st.session_state.messages.append({"role": "assistant", "content": response})
|
|
|
|
|
204 |
|
205 |
with col2:
|
206 |
st.subheader("Quick Questions")
|
207 |
example_questions = [
|
208 |
"Tell me about your ML projects",
|
209 |
"What are your technical skills?",
|
210 |
+
"Why should we hire you as an ML Engineer?",
|
211 |
+
"What's your journey into ML?",
|
212 |
+
"Paste a job description to see how I match!"
|
213 |
]
|
214 |
|
215 |
for question in example_questions:
|
|
|
223 |
st.experimental_rerun()
|
224 |
|
225 |
if __name__ == "__main__":
|
226 |
+
main()
|