import streamlit as st
import json
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
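# Launch with: streamlit run app.py  (assuming this script is saved as app.py)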
# Page configuration
st.set_page_config(
    page_title="Portfolio Chatbot Test",
    page_icon="🤖",
    layout="wide"
)
# Initialize chat history in session state
if 'messages' not in st.session_state:
    st.session_state.messages = []
def load_knowledge_base():
    """Load the knowledge base from JSON file"""
    try:
        with open('knowledge_base.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        st.error(f"Error loading knowledge base: {str(e)}")
        return {}
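# Illustrative sketch of the knowledge_base.json shape this app reads
# (the keys below are the ones accessed in get_context(); values are made up):
# {
#     "personal_details": {
#         "full_name": "Manyue",
#         "skills": {"Python": "scripting, data pipelines, ML prototyping"}
#     },
#     "projects": {"Project name": "One-line description"}
# }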
def get_context(query: str, knowledge_base: dict) -> str:
    """Get relevant context from knowledge base based on query"""
    query_lower = query.lower()
    contexts = []

    # Project context
    if "project" in query_lower:
        if "projects" in knowledge_base:
            contexts.extend([
                f"{name}: {desc}"
                for name, desc in knowledge_base["projects"].items()
            ])
    # Skills context
    elif any(keyword in query_lower for keyword in ["skill", "experience", "capability"]):
        if "personal_details" in knowledge_base and "skills" in knowledge_base["personal_details"]:
            contexts.extend([
                f"{skill}: {desc}"
                for skill, desc in knowledge_base["personal_details"]["skills"].items()
            ])
    # Default context
    else:
        contexts = [
            f"Name: {knowledge_base.get('personal_details', {}).get('full_name', 'Manyue')}",
            "Summary: I am an aspiring AI/ML engineer with experience in Python, Machine Learning, and Data Analysis."
        ]

    return "\n".join(contexts)
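# Example: get_context("tell me about your projects", kb) yields one
# "name: description" line per entry in kb["projects"]; queries without a
# recognized keyword fall through to the default name/summary context.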
@st.cache_resource  # cache so the model is loaded only once per server process
def initialize_model():
    """Initialize the model and tokenizer"""
    try:
        # Note: meta-llama/Llama-2-7b-chat-hf is a gated model that requires
        # approved access and a Hugging Face token; swap in a smaller open
        # model for quick local testing.
        model_name = "meta-llama/Llama-2-7b-chat-hf"  # adjust as needed
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto"
        )
        return tokenizer, model
    except Exception as e:
        st.error(f"Error initializing model: {str(e)}")
        return None, None
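# Minimal sketch of how the placeholder echo in main() could be replaced with
# a real model call once initialize_model() is wired in. generate_response is
# a hypothetical helper; the prompt template below is illustrative, not
# Llama-2's official chat format.
def generate_response(tokenizer, model, context: str, query: str) -> str:
    """Generate a reply from the loaded model, conditioned on the context."""
    prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7
    )
    # Decode only the newly generated tokens, skipping the echoed prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)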
def main():
    st.title("Portfolio Chatbot Testing Interface")
    st.write("Test the chatbot's responses and interaction patterns")

    # Load knowledge base
    knowledge_base = load_knowledge_base()

    # Create two columns for layout
    col1, col2 = st.columns([2, 1])
    with col1:
        st.subheader("Chat Interface")

        # Display chat messages from history
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # Accept user input
        if prompt := st.chat_input("What would you like to know?"):
            # Add user message to chat history and render it immediately
            # (the history loop above ran before this message existed)
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            # Get context for the query
            context = get_context(prompt, knowledge_base)

            # Placeholder response that surfaces the retrieved context
            # (replace with an actual model response later)
            response = f"Test Response: here is what I found in my knowledge base:\n\n{context}"

            # Display assistant response in chat message container
            with st.chat_message("assistant"):
                st.markdown(response)
            # Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": response})
    with col2:
        st.subheader("Testing Tools")
        if st.button("Clear Chat History"):
            st.session_state.messages = []
            st.rerun()  # st.experimental_rerun() is deprecated

        # Sample-question buttons inject a user message into the history;
        # they do not trigger a generated response by themselves.
        st.subheader("Sample Questions")
        if st.button("Tell me about your ML projects"):
            st.session_state.messages.append({
                "role": "user",
                "content": "Tell me about your ML projects"
            })
            st.rerun()
        if st.button("What are your Python skills?"):
            st.session_state.messages.append({
                "role": "user",
                "content": "What are your Python skills?"
            })
            st.rerun()
if __name__ == "__main__":
    main()