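"""Streamlit chatbot that answers questions about Manyue's portfolio, backed by
a Llama 2 chat model served through the Hugging Face Inference API."""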
import streamlit as st
import json
from huggingface_hub import InferenceClient

# Initialize Streamlit page configuration
st.set_page_config(
    page_title="Manyue's Portfolio Chatbot",
    page_icon="🤖",
    layout="wide"
)

# Secret management
def get_hf_api_token():
    # Read the token from Streamlit secrets rather than hardcoding it
    return st.secrets["HUGGINGFACE_API_TOKEN"]
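
# The token is expected in .streamlit/secrets.toml; the value below is a
# placeholder, not a real token:
#
#   HUGGINGFACE_API_TOKEN = "hf_xxxxxxxxxxxxxxxxxxxx"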

# Initialize HF client
@st.cache_resource
def get_hf_client():
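    # Note: meta-llama/Llama-2-7b-chat-hf is a gated model on the Hub, so the
    # token must belong to an account that has been granted access to it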
    client = InferenceClient(
        model="meta-llama/Llama-2-7b-chat-hf",
        token=get_hf_api_token()
    )
    return client

# Load and cache knowledge base
@st.cache_data
def load_knowledge_base():
    try:
        with open('knowledge_base.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        st.error(f"Error loading knowledge base: {str(e)}")
        return {}
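
# Sketch of the knowledge_base.json shape this app reads, inferred from the
# key lookups in get_context() below (all values are illustrative):
#
#   {
#     "personal_details": {
#       "full_name": "...",
#       "professional_summary": "...",
#       "skills": {"<skill>": "<description>"}
#     },
#     "projects": {"<name>": {"description": "..."}},
#     "professional_journey": {"mindset": "..."},
#     "goals_and_aspirations": {"short_term": ["..."]},
#     "frequently_asked_questions": [{"question": "...", "answer": "..."}]
#   }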

def get_context(query: str, knowledge_base: dict) -> str:
    """Get relevant context for the query"""
    query_lower = query.lower()
    contexts = []
    
    # Project-related queries
    if any(word in query_lower for word in ["project", "build", "develop", "create"]):
        if "projects" in knowledge_base:
            for name, details in knowledge_base["projects"].items():
                contexts.append(f"Project - {name}: {details.get('description', '')}")
    
    # Skills and expertise
    elif any(word in query_lower for word in ["skill", "know", "experience", "expert"]):
        if "skills" in knowledge_base.get("personal_details", {}):
            contexts.extend([
                f"Skill - {skill}: {desc}"
                for skill, desc in knowledge_base["personal_details"]["skills"].items()
            ])
    
    # Role fit and career
    elif any(word in query_lower for word in ["role", "fit", "job", "position", "career"]):
        contexts.append(knowledge_base.get("professional_journey", {}).get("mindset", ""))
        contexts.extend(knowledge_base.get("goals_and_aspirations", {}).get("short_term", []))
        
    # Background and journey
    elif any(word in query_lower for word in ["background", "journey", "story"]):
        faq = knowledge_base.get("frequently_asked_questions", [])
        for qa in faq:
            if "background" in qa.get("question", "").lower():
                contexts.append(qa.get("answer", ""))
    
    # Default context
    if not contexts:
        contexts = [
            f"I am {knowledge_base.get('personal_details', {}).get('full_name', 'Manyue')}",
            knowledge_base.get('personal_details', {}).get('professional_summary', '')
        ]
    
    return "\n".join(contexts)

def generate_llama_prompt(query: str, context: str) -> str:
    """Generate prompt for Llama model"""
    return f"""[INST] You are Manyue's AI assistant. Use this context to answer questions about Manyue:

Context:
{context}

Question: {query}

Instructions:
- Use information from the context
- Speak in first person as Manyue
- Be specific about technical details and achievements
- Keep responses concise but informative
- Focus on relevant experience and skills
- Maintain a professional tone

Your response: [/INST]"""

def get_chat_response(query: str, knowledge_base: dict) -> str:
    """Get response from Llama model via HF API"""
    try:
        # Get context
        context = get_context(query, knowledge_base)
        
        # Generate prompt
        prompt = generate_llama_prompt(query, context)
        
        # Get client
        client = get_hf_client()
        
        # Generate response
        response = client.text_generation(
            prompt,
            max_new_tokens=200,
            temperature=0.7,
            top_p=0.95,
            repetition_penalty=1.1
        )
        
        # Clean up: trim whitespace and, if the server echoed the prompt,
        # keep only the text after the closing [/INST] tag
        response = response.strip()
        response = response.split("[/INST]")[-1].strip()
        
        return response
        
    except Exception as e:
        st.error(f"Error generating response: {str(e)}")
        return "I apologize, but I encountered an error. Please try asking again."

def initialize_session_state():
    """Initialize session state variables"""
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "knowledge_base" not in st.session_state:
        st.session_state.knowledge_base = load_knowledge_base()

def display_chat_interface():
    """Display main chat interface"""
    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

def main():
    st.title("💬 Chat with Manyue's Portfolio")
    st.write("Ask me about my skills, projects, experience, or career goals!")
    
    # Initialize session state
    initialize_session_state()
    
    # Create two columns
    col1, col2 = st.columns([3, 1])
    
    with col1:
        # Display chat interface
        display_chat_interface()
        
        # Chat input
        if prompt := st.chat_input("What would you like to know?"):
            # Add user message
            st.session_state.messages.append({"role": "user", "content": prompt})
            
            # Display user message
            with st.chat_message("user"):
                st.markdown(prompt)
            
            # Generate and display response
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response = get_chat_response(prompt, st.session_state.knowledge_base)
                st.markdown(response)
            
            # Add assistant response to history
            st.session_state.messages.append({"role": "assistant", "content": response})
    
    with col2:
        st.subheader("Quick Questions")
        example_questions = [
            "Tell me about your ML projects",
            "What are your technical skills?",
            "Why are you suitable for ML roles?",
            "What is your educational background?"
        ]
        
        for question in example_questions:
            if st.button(question):
                # Generate the answer here too: the rerun below bypasses the
                # chat-input handler in col1, which would otherwise leave the
                # question unanswered
                st.session_state.messages.append({"role": "user", "content": question})
                response = get_chat_response(question, st.session_state.knowledge_base)
                st.session_state.messages.append({"role": "assistant", "content": response})
                st.rerun()
        
        st.markdown("---")
        if st.button("Clear Chat"):
            st.session_state.messages = []
            st.rerun()

if __name__ == "__main__":
    main()