|
import streamlit as st |
|
import json |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
import torch |
|
import time |
|
|
|
|
|
# Basic page chrome for the Streamlit app (tab title, icon, wide layout).
st.set_page_config(

    page_title="Portfolio Chatbot Test",

    page_icon="🤖",

    layout="wide"

)




# Chat history must survive Streamlit's rerun-on-interaction model, so it is
# kept in session_state and initialized exactly once per browser session.
if 'messages' not in st.session_state:

    st.session_state.messages = []
|
|
|
def load_knowledge_base():
    """Load the knowledge base from knowledge_base.json.

    Returns:
        dict: the parsed knowledge base, or an empty dict when the file is
        missing, unreadable, or not valid JSON (the error is surfaced in the
        Streamlit UI via st.error).
    """
    try:
        with open('knowledge_base.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    # Narrowed from a bare `except Exception`: only I/O and parse failures are
    # expected here; anything else (e.g. a programming error) should propagate.
    except (OSError, json.JSONDecodeError) as e:
        st.error(f"Error loading knowledge base: {str(e)}")
        return {}
|
|
|
def get_context(query: str, knowledge_base: dict) -> str:
    """Select knowledge-base lines relevant to *query*.

    Routes by keyword: project questions get one "name: description" line per
    project, skill/experience/capability questions get one line per skill, and
    anything else falls back to a short default bio. Project questions take
    precedence over skill questions. Returns the lines joined by newlines.
    """
    q = query.lower()
    lines = []

    if "project" in q:
        # One line per project entry, if the section exists.
        if "projects" in knowledge_base:
            for name, desc in knowledge_base["projects"].items():
                lines.append(f"{name}: {desc}")

    elif "skill" in q or "experience" in q or "capability" in q:
        # Skills are nested under the personal_details section.
        if "personal_details" in knowledge_base and "skills" in knowledge_base["personal_details"]:
            for skill, desc in knowledge_base["personal_details"]["skills"].items():
                lines.append(f"{skill}: {desc}")

    else:
        # Fallback: a minimal self-introduction.
        lines = [
            f"Name: {knowledge_base.get('personal_details', {}).get('full_name', 'Manyue')}",
            "Summary: I am an aspiring AI/ML engineer with experience in Python, Machine Learning, and Data Analysis.",
        ]

    return "\n".join(lines)
|
|
|
def initialize_model():
    """Load the Llama-2 chat tokenizer and model.

    Returns:
        tuple: (tokenizer, model) on success, or (None, None) when loading
        fails — the failure is surfaced in the Streamlit UI via st.error.
    """
    # Plain string assignment cannot raise, so it sits outside the try block.
    model_name = "meta-llama/Llama-2-7b-chat-hf"
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        # fp16 weights + automatic device placement to fit the 7B model.
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
        )
    except Exception as e:
        st.error(f"Error initializing model: {str(e)}")
        return None, None
    return tokenizer, model
|
|
|
def main():
    """Render the chatbot testing UI: a chat pane plus a testing-tools column."""
    st.title("Portfolio Chatbot Testing Interface")
    st.write("Test the chatbot's responses and interaction patterns")

    knowledge_base = load_knowledge_base()

    # Two-column layout: chat on the left (2/3 width), tools on the right (1/3).
    col1, col2 = st.columns([2, 1])

    with col1:
        st.subheader("Chat Interface")

        # Replay the stored conversation on every Streamlit rerun.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        if prompt := st.chat_input("What would you like to know?"):
            st.session_state.messages.append({"role": "user", "content": prompt})
            # Bug fix: render the user's message in this run instead of only
            # appending it to history (which would not show until the next rerun).
            with st.chat_message("user"):
                st.markdown(prompt)

            # Retrieved context is not yet wired into response generation;
            # kept so the retrieval path is exercised by this test harness.
            context = get_context(prompt, knowledge_base)

            # Canned stub reply (no f-string needed: there are no placeholders).
            response = "Test Response: Let me tell you about that based on my experience..."

            with st.chat_message("assistant"):
                st.markdown(response)

            st.session_state.messages.append({"role": "assistant", "content": response})

    with col2:
        st.subheader("Testing Tools")
        if st.button("Clear Chat History"):
            st.session_state.messages = []
            # NOTE(review): st.experimental_rerun is deprecated in recent
            # Streamlit in favor of st.rerun — confirm the installed version
            # before switching.
            st.experimental_rerun()

        st.subheader("Sample Questions")
        # One button per canned question; clicking queues it as a user turn
        # and reruns so the chat pane picks it up. (Deduplicated from two
        # copy-pasted button stanzas.)
        for question in ("Tell me about your ML projects", "What are your Python skills?"):
            if st.button(question):
                st.session_state.messages.append({"role": "user", "content": question})
                st.experimental_rerun()
|
|
|
# Script entry point: build the Streamlit test UI when run via `streamlit run`.
if __name__ == "__main__":

    main()