import streamlit as st
import json
from huggingface_hub import InferenceClient

# Initialize Streamlit page configuration
st.set_page_config(
    page_title="Manyue's Portfolio Chatbot",
    page_icon="🤖",
    layout="wide"
)
# Secret management
def get_hf_api_token():
    # In production, use Streamlit secrets
    return st.secrets["HUGGINGFACE_API_TOKEN"]
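
# Locally, the token can be supplied via .streamlit/secrets.toml, e.g.:
#   HUGGINGFACE_API_TOKEN = "hf_..."
# On Streamlit Community Cloud or Hugging Face Spaces, set it through the
# app's secrets settings instead of committing it to the repo.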

# Initialize HF client
def get_hf_client():
    # Note: meta-llama models are gated; the token must belong to an
    # account with approved access to Llama-2.
    client = InferenceClient(
        model="meta-llama/Llama-2-7b-chat-hf",
        token=get_hf_api_token()
    )
    return client

# Load and cache knowledge base
@st.cache_data
def load_knowledge_base():
    try:
        with open('knowledge_base.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        st.error(f"Error loading knowledge base: {str(e)}")
        return {}
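
# Illustrative sketch of the knowledge_base.json shape this app reads.
# The field names below are exactly the ones accessed in get_context();
# the values are placeholders, not the real data:
# {
#   "personal_details": {
#     "full_name": "...",
#     "professional_summary": "...",
#     "skills": {"Python": "...", "PyTorch": "..."}
#   },
#   "projects": {"Some Project": {"description": "..."}},
#   "professional_journey": {"mindset": "..."},
#   "goals_and_aspirations": {"short_term": ["..."]},
#   "frequently_asked_questions": [{"question": "...", "answer": "..."}]
# }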

def get_context(query: str, knowledge_base: dict) -> str:
    """Get relevant context for the query."""
    query_lower = query.lower()
    contexts = []

    # Project-related queries
    if any(word in query_lower for word in ["project", "build", "develop", "create"]):
        for name, details in knowledge_base.get("projects", {}).items():
            contexts.append(f"Project - {name}: {details.get('description', '')}")

    # Skills and expertise
    elif any(word in query_lower for word in ["skill", "know", "experience", "expert"]):
        skills = knowledge_base.get("personal_details", {}).get("skills", {})
        contexts.extend(f"Skill - {skill}: {desc}" for skill, desc in skills.items())

    # Role fit and career
    elif any(word in query_lower for word in ["role", "fit", "job", "position", "career"]):
        contexts.append(knowledge_base.get("professional_journey", {}).get("mindset", ""))
        contexts.extend(knowledge_base.get("goals_and_aspirations", {}).get("short_term", []))

    # Background and journey
    elif any(word in query_lower for word in ["background", "journey", "story"]):
        for qa in knowledge_base.get("frequently_asked_questions", []):
            # Guard against malformed FAQ entries with missing keys
            if "background" in qa.get("question", "").lower():
                contexts.append(qa.get("answer", ""))

    # Default context
    if not contexts:
        personal = knowledge_base.get("personal_details", {})
        contexts = [
            f"I am {personal.get('full_name', 'Manyue')}",
            personal.get("professional_summary", "")
        ]

    return "\n".join(contexts)

def generate_llama_prompt(query: str, context: str) -> str:
    """Generate an [INST]-formatted prompt for the Llama-2 chat model."""
    return f"""[INST] You are Manyue's AI assistant. Use this context to answer questions about Manyue:

Context:
{context}

Question: {query}

Instructions:
- Use information from the context
- Speak in first person as Manyue
- Be specific about technical details and achievements
- Keep responses concise but informative
- Focus on relevant experience and skills
- Maintain a professional tone

Your response: [/INST]"""

def get_chat_response(query: str, knowledge_base: dict) -> str:
    """Get response from Llama model via HF API."""
    try:
        # Get context
        context = get_context(query, knowledge_base)

        # Generate prompt
        prompt = generate_llama_prompt(query, context)

        # Get client
        client = get_hf_client()

        # Generate response
        response = client.text_generation(
            prompt,
            max_new_tokens=200,
            temperature=0.7,
            top_p=0.95,
            repetition_penalty=1.1
        )

        # Clean response: drop whitespace and any echoed prompt
        response = response.strip()
        response = response.split("[/INST]")[-1].strip()

        return response
    except Exception as e:
        st.error(f"Error generating response: {str(e)}")
        return "I apologize, but I encountered an error. Please try asking again."

def initialize_session_state():
    """Initialize session state variables."""
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "knowledge_base" not in st.session_state:
        st.session_state.knowledge_base = load_knowledge_base()

def display_chat_interface():
    """Display main chat interface."""
    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

def main():
    st.title("💬 Chat with Manyue's Portfolio")
    st.write("Ask me about my skills, projects, experience, or career goals!")

    # Initialize session state
    initialize_session_state()

    # Create two columns
    col1, col2 = st.columns([3, 1])

    with col1:
        # Display chat history
        display_chat_interface()

        # Answer the newest user message if it has no reply yet. This also
        # covers the quick-question buttons, which append a message and
        # rerun without generating a response themselves.
        if st.session_state.messages and st.session_state.messages[-1]["role"] == "user":
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response = get_chat_response(
                        st.session_state.messages[-1]["content"],
                        st.session_state.knowledge_base
                    )
                    st.markdown(response)
            # Add assistant response to history
            st.session_state.messages.append({"role": "assistant", "content": response})

        # Chat input: append the message and rerun; the block above
        # generates the reply on the next run
        if prompt := st.chat_input("What would you like to know?"):
            st.session_state.messages.append({"role": "user", "content": prompt})
            st.rerun()
    with col2:
        st.subheader("Quick Questions")
        example_questions = [
            "Tell me about your ML projects",
            "What are your technical skills?",
            "Why are you suitable for ML roles?",
            "What is your educational background?"
        ]
        for question in example_questions:
            if st.button(question):
                st.session_state.messages.append({"role": "user", "content": question})
                st.rerun()  # st.experimental_rerun was deprecated; st.rerun is the stable API

        st.markdown("---")
        if st.button("Clear Chat"):
            st.session_state.messages = []
            st.rerun()

if __name__ == "__main__":
    main()