import json

import streamlit as st
from huggingface_hub import InferenceClient

st.set_page_config(
    page_title="Manyue's Portfolio Chatbot",
    page_icon="🤖",
    layout="wide",
)


def get_hf_api_token() -> str:
    """Read the Hugging Face API token from Streamlit secrets."""
    return st.secrets["HUGGINGFACE_API_TOKEN"]
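
# st.secrets reads from .streamlit/secrets.toml; a minimal example entry
# (placeholder value, not a real token):
#
#   HUGGINGFACE_API_TOKEN = "hf_xxxxxxxxxxxxxxxxxxxx"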


@st.cache_resource
def get_hf_client() -> InferenceClient:
    """Build the Inference API client once and reuse it across reruns."""
    return InferenceClient(
        model="meta-llama/Llama-2-7b-chat-hf",
        token=get_hf_api_token(),
    )
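
# Note: meta-llama models are gated on the Hugging Face Hub, so the account
# behind the token must have been granted access to Llama-2 for the calls
# made through this client to succeed.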


@st.cache_data
def load_knowledge_base() -> dict:
    """Load the portfolio knowledge base from disk, caching the parsed JSON."""
    try:
        with open("knowledge_base.json", "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception as e:
        st.error(f"Error loading knowledge base: {e}")
        return {}
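
# Illustrative shape of knowledge_base.json, inferred from the keys that
# get_context() reads below (all field values are placeholders):
#
# {
#     "personal_details": {
#         "full_name": "...",
#         "professional_summary": "...",
#         "skills": {"<skill>": "<description>"}
#     },
#     "projects": {"<project name>": {"description": "..."}},
#     "professional_journey": {"mindset": "..."},
#     "goals_and_aspirations": {"short_term": ["..."]},
#     "frequently_asked_questions": [{"question": "...", "answer": "..."}]
# }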


def get_context(query: str, knowledge_base: dict) -> str:
    """Pick knowledge-base entries relevant to the query via keyword matching."""
    query_lower = query.lower()
    contexts = []

    if any(word in query_lower for word in ["project", "build", "develop", "create"]):
        if "projects" in knowledge_base:
            for name, details in knowledge_base["projects"].items():
                contexts.append(f"Project - {name}: {details.get('description', '')}")

    elif any(word in query_lower for word in ["skill", "know", "experience", "expert"]):
        if "skills" in knowledge_base.get("personal_details", {}):
            contexts.extend([
                f"Skill - {skill}: {desc}"
                for skill, desc in knowledge_base["personal_details"]["skills"].items()
            ])

    elif any(word in query_lower for word in ["role", "fit", "job", "position", "career"]):
        contexts.append(knowledge_base.get("professional_journey", {}).get("mindset", ""))
        contexts.extend(knowledge_base.get("goals_and_aspirations", {}).get("short_term", []))

    elif any(word in query_lower for word in ["background", "journey", "story"]):
        for qa in knowledge_base.get("frequently_asked_questions", []):
            if "background" in qa.get("question", "").lower():
                contexts.append(qa.get("answer", ""))

    # Fall back to a generic self-introduction when no keyword matches.
    if not contexts:
        contexts = [
            f"I am {knowledge_base.get('personal_details', {}).get('full_name', 'Manyue')}",
            knowledge_base.get('personal_details', {}).get('professional_summary', ''),
        ]

    return "\n".join(contexts)
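
# Example: get_context("Tell me about your ML projects", kb) takes the
# "project" branch above and returns one "Project - <name>: <description>"
# line per entry under kb["projects"].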


def generate_llama_prompt(query: str, context: str) -> str:
    """Generate prompt for Llama model"""
    return f"""[INST] You are Manyue's AI assistant. Use this context to answer questions about Manyue:

Context:
{context}

Question: {query}

Instructions:
- Use information from the context
- Speak in first person as Manyue
- Be specific about technical details and achievements
- Keep responses concise but informative
- Focus on relevant experience and skills
- Maintain a professional tone

Your response: [/INST]"""
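
# The [INST] ... [/INST] wrapper is the single-turn instruction format that
# Llama-2-chat models were fine-tuned on; get_chat_response() below also
# strips anything up to the closing tag in case the backend echoes the prompt.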


def get_chat_response(query: str, knowledge_base: dict) -> str:
    """Get a response from the Llama model via the HF Inference API."""
    try:
        context = get_context(query, knowledge_base)
        prompt = generate_llama_prompt(query, context)
        client = get_hf_client()

        # With the default settings, text_generation returns only the
        # generated continuation as a plain string.
        response = client.text_generation(
            prompt,
            max_new_tokens=200,
            temperature=0.7,
            top_p=0.95,
            repetition_penalty=1.1,
        )

        # Defensive cleanup: if the backend echoes the prompt, keep only
        # the text after the closing instruction tag.
        response = response.strip()
        return response.split("[/INST]")[-1].strip()

    except Exception as e:
        st.error(f"Error generating response: {e}")
        return "I apologize, but I encountered an error. Please try asking again."


def initialize_session_state():
    """Initialize session state variables"""
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "knowledge_base" not in st.session_state:
        st.session_state.knowledge_base = load_knowledge_base()


def display_chat_interface():
    """Display the chat history"""
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])


def main():
    st.title("💬 Chat with Manyue's Portfolio")
    st.write("Ask me about my skills, projects, experience, or career goals!")

    initialize_session_state()

    col1, col2 = st.columns([3, 1])

    with col1:
        display_chat_interface()

        # Answer any user message that is still waiting for a reply, e.g. one
        # queued by a quick-question button before its rerun.
        if st.session_state.messages and st.session_state.messages[-1]["role"] == "user":
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response = get_chat_response(
                        st.session_state.messages[-1]["content"],
                        st.session_state.knowledge_base,
                    )
                    st.markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})

        if prompt := st.chat_input("What would you like to know?"):
            st.session_state.messages.append({"role": "user", "content": prompt})

            with st.chat_message("user"):
                st.markdown(prompt)

            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response = get_chat_response(prompt, st.session_state.knowledge_base)
                    st.markdown(response)

            st.session_state.messages.append({"role": "assistant", "content": response})

    with col2:
        st.subheader("Quick Questions")
        example_questions = [
            "Tell me about your ML projects",
            "What are your technical skills?",
            "Why are you suitable for ML roles?",
            "What is your educational background?",
        ]

        for question in example_questions:
            if st.button(question):
                # Queue the question; after the rerun, col1 renders and answers it.
                st.session_state.messages.append({"role": "user", "content": question})
                st.rerun()

        st.markdown("---")
        if st.button("Clear Chat"):
            st.session_state.messages = []
            st.rerun()


if __name__ == "__main__":
    main()