Manyue-DataScientist committed on
Commit a05bcb9 · verified · 1 Parent(s): f73c0d1

Update app.py

Files changed (1)
  1. app.py +142 -169
app.py CHANGED
@@ -1,189 +1,170 @@
  import streamlit as st
  import json
  import time

- # Page configuration
  st.set_page_config(
      page_title="Manyue's Portfolio Chatbot",
      page_icon="🤖",
-     layout="wide",
-     initial_sidebar_state="collapsed"
  )

- # Custom CSS for better UI
- st.markdown("""
- <style>
- .chat-message {
-     padding: 1.5rem;
-     border-radius: 0.5rem;
-     margin-bottom: 1rem;
- }
- .user-message {
-     background-color: #e9ecef;
- }
- .bot-message {
-     background-color: #f8f9fa;
- }
- .stButton>button {
-     width: 100%;
- }
- </style>
- """, unsafe_allow_html=True)

- # Initialize session state
- if 'messages' not in st.session_state:
-     st.session_state.messages = []
- if 'knowledge_base' not in st.session_state:
-     st.session_state.knowledge_base = None

  def load_knowledge_base():
-     """Load the knowledge base from JSON file"""
-     if st.session_state.knowledge_base is None:
-         try:
-             with open('knowledge_base.json', 'r', encoding='utf-8') as f:
-                 st.session_state.knowledge_base = json.load(f)
-         except Exception as e:
-             st.error(f"Error loading knowledge base: {str(e)}")
-             st.session_state.knowledge_base = {}
-     return st.session_state.knowledge_base

- def get_enhanced_context(query: str, knowledge_base: dict) -> dict:
-     """Get relevant context with improved retrieval"""
      query_lower = query.lower()
-     context = {
-         "type": "general",
-         "content": [],
-         "relevant_sections": []
-     }

      # Project-related queries
-     if any(word in query_lower for word in ["project", "build", "develop", "create", "make", "portfolio"]):
-         context["type"] = "project"
          if "projects" in knowledge_base:
-             for name, project in knowledge_base["projects"].items():
-                 context["content"].append({
-                     "title": name,
-                     "description": project.get("description", ""),
-                     "skills_used": project.get("skills_used", []),
-                     "status": project.get("status", "")
-                 })

-     # Skills and experience
-     elif any(word in query_lower for word in ["skill", "experience", "know", "capable", "ability", "expert"]):
-         context["type"] = "skill"
          if "skills" in knowledge_base.get("personal_details", {}):
-             context["content"] = knowledge_base["personal_details"]["skills"]
-
-     # Educational background
-     elif any(word in query_lower for word in ["education", "study", "learn", "degree", "college", "university"]):
-         context["type"] = "education"
-         context["content"] = knowledge_base.get("education", {})
-
-     # Career and goals
-     elif any(word in query_lower for word in ["goal", "plan", "future", "career", "aspiration"]):
-         context["type"] = "career"
-         context["content"] = {
-             "short_term": knowledge_base.get("goals_and_aspirations", {}).get("short_term", []),
-             "long_term": knowledge_base.get("goals_and_aspirations", {}).get("long_term", [])
-         }
-
-     # Personal background
-     elif any(word in query_lower for word in ["background", "journey", "story", "transition"]):
-         context["type"] = "background"
-         context["content"] = knowledge_base.get("personal_journey", {})

-     # Add FAQ matches if available
-     for qa in knowledge_base.get("frequently_asked_questions", []):
-         if any(word in qa["question"].lower() for word in query_lower.split()):
-             context["relevant_sections"].append({
-                 "type": "faq",
-                 "question": qa["question"],
-                 "answer": qa["answer"]
-             })
-
-     return context

- def generate_response(query: str, context: dict) -> str:
-     """Generate natural response based on context"""
-     response_parts = []
-
-     # Handle different types of queries
-     if context["type"] == "project":
-         response_parts.append("Let me tell you about my projects.")
-         for project in context["content"]:
-             response_parts.append(f"\n\n**{project['title']}**")
-             response_parts.append(f"{project['description']}")
-             if project['skills_used']:
-                 response_parts.append(f"\nSkills used: {', '.join(project['skills_used'])}")
-             if project['status']:
-                 response_parts.append(f"\nStatus: {project['status']}")
-
-     elif context["type"] == "skill":
-         response_parts.append("Here are my key skills and experiences:")
-         for skill, desc in context["content"].items():
-             response_parts.append(f"\n\n**{skill}**:\n{desc}")
-
-     elif context["type"] == "education":
-         response_parts.append("Regarding my educational background:")
-         if "academic_background" in context["content"]:
-             response_parts.append(context["content"]["academic_background"])
-         if "academic_achievements" in context["content"]:
-             response_parts.append("\n\nAchievements:")
-             for achievement in context["content"]["academic_achievements"]:
-                 response_parts.append(f"- {achievement}")
-
-     elif context["type"] == "career":
-         response_parts.append("Let me share my career goals:")
-         response_parts.append("\n\n**Short-term goals:**")
-         for goal in context["content"]["short_term"]:
-             response_parts.append(f"- {goal}")
-         response_parts.append("\n\n**Long-term goals:**")
-         for goal in context["content"]["long_term"]:
-             response_parts.append(f"- {goal}")
-
-     elif context["type"] == "background":
-         response_parts.append(context["content"].get("mindset", ""))
-         response_parts.append("\n\n" + context["content"].get("motto_or_vision", ""))
-
-     # Add any relevant FAQ information
-     if context["relevant_sections"]:
-         for section in context["relevant_sections"]:
-             if section["type"] == "faq":
-                 response_parts.append(f"\n\n{section['answer']}")
-
-     # Default response if no specific context matched
-     if not response_parts:
-         response_parts = ["I am Manyue, an aspiring AI/ML engineer. I can tell you about my projects, skills, education, or career goals. What would you like to know?"]
-
-     return "\n".join(response_parts)

  def main():
      st.title("💬 Chat with Manyue's Portfolio")
-     st.write("Ask me about my skills, projects, education, or career goals!")

-     # Load knowledge base
-     knowledge_base = load_knowledge_base()

-     # Create two columns for layout
      col1, col2 = st.columns([3, 1])

      with col1:
-         # Display chat messages
-         for message in st.session_state.messages:
-             with st.chat_message(message["role"]):
-                 st.markdown(message["content"])

          # Chat input
          if prompt := st.chat_input("What would you like to know?"):
              # Add user message
              st.session_state.messages.append({"role": "user", "content": prompt})

-             # Get context and generate response
-             context = get_enhanced_context(prompt, knowledge_base)
-             response = generate_response(prompt, context)

-             # Display response with typing effect
              with st.chat_message("assistant"):
                  st.markdown(response)

              # Add assistant response to history
@@ -191,28 +172,20 @@ def main():

      with col2:
          st.subheader("Quick Questions")
-         if st.button("📊 Tell me about your projects"):
-             prompt = "What projects have you worked on?"
-             st.session_state.messages.append({"role": "user", "content": prompt})
-             st.experimental_rerun()
-
-         if st.button("💻 What are your technical skills?"):
-             prompt = "What are your main technical skills?"
-             st.session_state.messages.append({"role": "user", "content": prompt})
-             st.experimental_rerun()
-
-         if st.button("🎓 Educational background?"):
-             prompt = "Tell me about your education"
-             st.session_state.messages.append({"role": "user", "content": prompt})
-             st.experimental_rerun()
-
-         if st.button("🎯 What are your career goals?"):
-             prompt = "What are your career goals?"
-             st.session_state.messages.append({"role": "user", "content": prompt})
-             st.experimental_rerun()

          st.markdown("---")
-         if st.button("🗑️ Clear Chat"):
              st.session_state.messages = []
              st.experimental_rerun()
 
  import streamlit as st
  import json
+ from huggingface_hub import InferenceClient
  import time

+ # Initialize Streamlit page configuration
  st.set_page_config(
      page_title="Manyue's Portfolio Chatbot",
      page_icon="🤖",
+     layout="wide"
  )

+ # Secret management
+ def get_hf_api_token():
+     # In production, use Streamlit secrets
+     return st.secrets["HUGGINGFACE_API_TOKEN"]

+ # Initialize HF client
+ @st.cache_resource
+ def get_hf_client():
+     client = InferenceClient(
+         model="meta-llama/Llama-2-7b-chat-hf",
+         token=get_hf_api_token()
+     )
+     return client

+ # Load and cache knowledge base
+ @st.cache_data
  def load_knowledge_base():
+     try:
+         with open('knowledge_base.json', 'r', encoding='utf-8') as f:
+             return json.load(f)
+     except Exception as e:
+         st.error(f"Error loading knowledge base: {str(e)}")
+         return {}

+ def get_context(query: str, knowledge_base: dict) -> str:
+     """Get relevant context for the query"""
      query_lower = query.lower()
+     contexts = []

      # Project-related queries
+     if any(word in query_lower for word in ["project", "build", "develop", "create"]):
          if "projects" in knowledge_base:
+             for name, details in knowledge_base["projects"].items():
+                 contexts.append(f"Project - {name}: {details.get('description', '')}")

+     # Skills and expertise
+     elif any(word in query_lower for word in ["skill", "know", "experience", "expert"]):
          if "skills" in knowledge_base.get("personal_details", {}):
+             contexts.extend([
+                 f"Skill - {skill}: {desc}"
+                 for skill, desc in knowledge_base["personal_details"]["skills"].items()
+             ])
+
+     # Role fit and career
+     elif any(word in query_lower for word in ["role", "fit", "job", "position", "career"]):
+         contexts.append(knowledge_base.get("professional_journey", {}).get("mindset", ""))
+         contexts.extend(knowledge_base.get("goals_and_aspirations", {}).get("short_term", []))

+     # Background and journey
+     elif any(word in query_lower for word in ["background", "journey", "story"]):
+         faq = knowledge_base.get("frequently_asked_questions", [])
+         for qa in faq:
+             if "background" in qa["question"].lower():
+                 contexts.append(qa["answer"])
+
+     # Default context
+     if not contexts:
+         contexts = [
+             f"I am {knowledge_base.get('personal_details', {}).get('full_name', 'Manyue')}",
+             knowledge_base.get('personal_details', {}).get('professional_summary', '')
+         ]
+
+     return "\n".join(contexts)

+ def generate_llama_prompt(query: str, context: str) -> str:
+     """Generate prompt for Llama model"""
+     return f"""[INST] You are Manyue's AI assistant. Use this context to answer questions about Manyue:
+
+ Context:
+ {context}
+
+ Question: {query}
+
+ Instructions:
+ - Use information from the context
+ - Speak in first person as Manyue
+ - Be specific about technical details and achievements
+ - Keep responses concise but informative
+ - Focus on relevant experience and skills
+ - Maintain a professional tone
+
+ Your response: [/INST]"""
+
+ def get_chat_response(query: str, knowledge_base: dict) -> str:
+     """Get response from Llama model via HF API"""
+     try:
+         # Get context
+         context = get_context(query, knowledge_base)
+
+         # Generate prompt
+         prompt = generate_llama_prompt(query, context)
+
+         # Get client
+         client = get_hf_client()
+
+         # Generate response
+         response = client.text_generation(
+             prompt,
+             max_new_tokens=200,
+             temperature=0.7,
+             top_p=0.95,
+             repetition_penalty=1.1
+         )
+
+         # Clean response
+         response = response.strip()
+         response = response.split("[/INST]")[-1].strip()
+
+         return response
+
+     except Exception as e:
+         st.error(f"Error generating response: {str(e)}")
+         return "I apologize, but I encountered an error. Please try asking again."
+
+ def initialize_session_state():
+     """Initialize session state variables"""
+     if "messages" not in st.session_state:
+         st.session_state.messages = []
+     if "knowledge_base" not in st.session_state:
+         st.session_state.knowledge_base = load_knowledge_base()
+
+ def display_chat_interface():
+     """Display main chat interface"""
+     # Display chat messages
+     for message in st.session_state.messages:
+         with st.chat_message(message["role"]):
+             st.markdown(message["content"])

  def main():
      st.title("💬 Chat with Manyue's Portfolio")
+     st.write("Ask me about my skills, projects, experience, or career goals!")

+     # Initialize session state
+     initialize_session_state()

+     # Create two columns
      col1, col2 = st.columns([3, 1])

      with col1:
+         # Display chat interface
+         display_chat_interface()

          # Chat input
          if prompt := st.chat_input("What would you like to know?"):
              # Add user message
              st.session_state.messages.append({"role": "user", "content": prompt})

+             # Display user message
+             with st.chat_message("user"):
+                 st.markdown(prompt)

+             # Generate and display response
              with st.chat_message("assistant"):
+                 with st.spinner("Thinking..."):
+                     response = get_chat_response(prompt, st.session_state.knowledge_base)
                  st.markdown(response)

              # Add assistant response to history

      with col2:
          st.subheader("Quick Questions")
+         example_questions = [
+             "Tell me about your ML projects",
+             "What are your technical skills?",
+             "Why are you suitable for ML roles?",
+             "What is your educational background?"
+         ]
+
+         for question in example_questions:
+             if st.button(question):
+                 st.session_state.messages.append({"role": "user", "content": question})
+                 st.experimental_rerun()

          st.markdown("---")
+         if st.button("Clear Chat"):
              st.session_state.messages = []
              st.experimental_rerun()
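
Configuration note (not part of the diff itself): the new get_hf_api_token() helper assumes a HUGGINGFACE_API_TOKEN entry in Streamlit secrets. A minimal local sketch, assuming the standard .streamlit/secrets.toml location and a placeholder token value:

    # .streamlit/secrets.toml would contain a single entry such as:
    #
    #     HUGGINGFACE_API_TOKEN = "hf_xxxxxxxx"   # placeholder, not a real token
    #
    # With that file in place, the committed helper resolves the token like this:
    import streamlit as st

    token = st.secrets["HUGGINGFACE_API_TOKEN"]  # raises an error if the secret is missing

On a Hugging Face Space, the same key is typically added as a Space secret; depending on the setup it may be exposed as an environment variable rather than through st.secrets, in which case an os.environ fallback would be needed.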