# Manyue's Portfolio Chatbot — Streamlit app
import json
import time

import openai
import streamlit as st
# Page configuration — must run before any other Streamlit UI call.
st.set_page_config(
    page_title="Manyue's Portfolio Chatbot",
    page_icon="🤖",
    layout="wide",
)
# Secret management
def get_openai_api_token():
    """Return the OpenAI API key from Streamlit's secrets store."""
    # In production the key lives in .streamlit/secrets.toml / Spaces secrets.
    secrets = st.secrets
    return secrets["OPENAI_API_KEY"]
# Initialize OpenAI client
def get_openai_client():
    """Set the API key on the module-level openai client and return it.

    NOTE(review): this is the legacy (pre-1.0) openai module interface —
    confirm the pinned openai package version supports it.
    """
    token = get_openai_api_token()
    openai.api_key = token
    return openai
# Load and cache knowledge base
def load_knowledge_base():
    """Read knowledge_base.json from the working directory.

    Returns the parsed dict, or an empty dict (after surfacing the
    error in the UI) when the file is missing or malformed.
    """
    try:
        with open('knowledge_base.json', 'r', encoding='utf-8') as fh:
            data = json.load(fh)
    except Exception as exc:
        # Best-effort: show the problem in the app rather than crash.
        st.error(f"Error loading knowledge base: {str(exc)}")
        return {}
    return data
def get_context(query: str, knowledge_base: dict) -> str:
    """Select knowledge-base snippets relevant to *query*.

    Matching is keyword-based: the first category whose trigger words
    appear in the query wins; later categories are not checked. Falls
    back to a short personal summary when nothing matches.

    Args:
        query: The user's free-text question.
        knowledge_base: Parsed knowledge_base.json contents.

    Returns:
        Newline-joined context lines for the LLM prompt.
    """
    query_lower = query.lower()
    contexts = []
    # Project-related queries
    if any(word in query_lower for word in ["project", "build", "develop", "create"]):
        for name, details in knowledge_base.get("projects", {}).items():
            contexts.append(f"Project - {name}: {details.get('description', '')}")
    # Skills and expertise
    elif any(word in query_lower for word in ["skill", "know", "experience", "expert"]):
        skills = knowledge_base.get("personal_details", {}).get("skills", {})
        contexts.extend(f"Skill - {skill}: {desc}" for skill, desc in skills.items())
    # Role fit and career
    elif any(word in query_lower for word in ["role", "fit", "job", "position", "career"]):
        contexts.append(knowledge_base.get("professional_journey", {}).get("mindset", ""))
        contexts.extend(knowledge_base.get("goals_and_aspirations", {}).get("short_term", []))
    # Background and journey
    elif any(word in query_lower for word in ["background", "journey", "story"]):
        for qa in knowledge_base.get("frequently_asked_questions", []):
            # .get() avoids a KeyError on malformed FAQ entries that are
            # missing "question" or "answer" keys (previous code indexed
            # them directly and crashed).
            if "background" in qa.get("question", "").lower():
                contexts.append(qa.get("answer", ""))
    # Default context when no category matched
    if not contexts:
        contexts = [
            f"I am {knowledge_base.get('personal_details', {}).get('full_name', 'Manyue')}",
            knowledge_base.get('personal_details', {}).get('professional_summary', ''),
        ]
    return "\n".join(contexts)
def generate_openai_prompt(query: str, context: str) -> str:
    """Assemble the system prompt: persona, retrieved context, question, rules."""
    rules = "\n".join([
        "- Use information from the context",
        "- Speak in first person as Manyue",
        "- Be specific about technical details and achievements",
        "- Keep responses concise but informative",
        "- Focus on relevant experience and skills",
        "- Maintain a professional tone",
    ])
    return (
        "You are Manyue's AI assistant. Use this context to answer questions about Manyue:\n"
        f"Context:\n{context}\n"
        f"Question: {query}\n"
        f"Instructions:\n{rules}"
    )
def get_chat_response(query: str, knowledge_base: dict) -> str:
    """Answer *query* via the OpenAI chat API, grounded in the knowledge base.

    Args:
        query: The user's free-text question.
        knowledge_base: Parsed knowledge_base.json contents.

    Returns:
        The model's reply, or a canned apology string if anything
        (context building, API call) raises.
    """
    try:
        # Build the grounding context and fold it into the system prompt.
        context = get_context(query, knowledge_base)
        prompt = generate_openai_prompt(query, context)
        client = get_openai_client()
        # Legacy (pre-1.0) openai chat-completions call, matching
        # get_openai_client()'s module-level-key setup.
        response = client.ChatCompletion.create(
            model="gpt-4o-mini",  # fixed: "gpt-4-mini" is not a valid model id
            messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": query},
            ],
            max_tokens=200,
            temperature=0.7,
        )
        # Extract and clean the model reply.
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Surface the failure in the UI but keep the chat usable.
        st.error(f"Error generating response: {str(e)}")
        return "I apologize, but I encountered an error. Please try asking again."
def initialize_session_state():
    """Create the session-state slots the app relies on, once per session."""
    st.session_state.setdefault("messages", [])
    if "knowledge_base" not in st.session_state:
        st.session_state.knowledge_base = load_knowledge_base()
def display_chat_interface():
    """Render the stored conversation history as chat bubbles."""
    for entry in st.session_state.messages:
        role, text = entry["role"], entry["content"]
        with st.chat_message(role):
            st.markdown(text)
def main():
    """Entry point: render the chat UI, handle input, and route quick questions."""
    st.title("💬 Chat with Manyue's Portfolio")
    st.write("Ask me about my skills, projects, experience, or career goals!")

    # Initialize session state
    initialize_session_state()

    # Main chat column (left) and quick-questions column (right).
    col1, col2 = st.columns([3, 1])

    with col1:
        # Display chat interface
        display_chat_interface()

        # Chat input
        if prompt := st.chat_input("What would you like to know?"):
            # Record and echo the user message.
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)
            # Generate and display the assistant response.
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response = get_chat_response(prompt, st.session_state.knowledge_base)
                    st.markdown(response)
            # Add assistant response to history.
            st.session_state.messages.append({"role": "assistant", "content": response})

    with col2:
        st.subheader("Quick Questions")
        example_questions = [
            "Tell me about your ML projects",
            "What are your technical skills?",
            "Why are you suitable for ML roles?",
            "What is your educational background?"
        ]
        # Fixed: the loop previously iterated the undefined name
        # `example_question` (NameError on first render of this column).
        for question in example_questions:
            if st.button(question):
                st.session_state.messages.append({"role": "user", "content": question})
                # Fixed: also generate the answer before rerunning; previously
                # the rerun left the quick question permanently unanswered.
                answer = get_chat_response(question, st.session_state.knowledge_base)
                st.session_state.messages.append({"role": "assistant", "content": answer})
                st.experimental_rerun()

        st.markdown("---")
        if st.button("Clear Chat"):
            st.session_state.messages = []
            st.experimental_rerun()


if __name__ == "__main__":
    main()