tushar-r-pawar's picture
Upload 2 files
b52e376 verified
raw
history blame
2.04 kB
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import streamlit as st
import airllm
# Load GEMMA 2B model and tokenizer
# Hugging Face model id for the base (non-instruct) Gemma 2B checkpoint.
# NOTE(review): google/gemma-2b is a gated repo — presumably an authenticated
# HF token is available in the environment; confirm for deployment.
model_name = "google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load the base version of the model
# NOTE(review): this loads the full model into memory up front, which appears
# to defeat AirLLM's layer-by-layer loading; also, `airllm.AirLLM(model,
# tokenizer)` and the `generate_response` call used later do not match the
# published airllm API (airllm.AutoModel.from_pretrained / .generate) —
# verify this runs against the pinned airllm version. TODO confirm.
model = AutoModelForCausalLM.from_pretrained(model_name)
# Initialize AirLLM
# Wrapper object used below to produce chat responses.
air_llm = airllm.AirLLM(model, tokenizer)
# --- Streamlit page configuration --------------------------------------------
# Must run before any other st.* call in the script.
# FIX: the page icon was mojibake ("πŸ€–" — the UTF-8 bytes of the robot emoji
# mis-decoded as cp1252); restored to the intended emoji.
st.set_page_config(
    page_title="Chatbot with GEMMA 2B and AirLLM",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Main heading shown above the chat widgets.
st.title("Conversational Chatbot with GEMMA 2B and AirLLM")

# Sidebar: theme picker whose selection drives the CSS override further down.
st.sidebar.header("Chatbot Configuration")
theme = st.sidebar.selectbox("Choose a theme", ["Default", "Dark", "Light"])
# Set theme based on user selection
# --- Theme CSS override ------------------------------------------------------
# Dispatch table mapping each non-default theme to the raw <style> block that
# gets injected into the page. "Default" (or any unknown value) injects nothing.
_THEME_CSS = {
    "Dark": """
    <style>
    .reportview-container {
        background: #2E2E2E;
        color: #FFFFFF;
    }
    .sidebar .sidebar-content {
        background: #333333;
    }
    </style>
    """,
    "Light": """
    <style>
    .reportview-container {
        background: #FFFFFF;
        color: #000000;
    }
    .sidebar .sidebar-content {
        background: #F5F5F5;
    }
    </style>
    """,
}

selected_css = _THEME_CSS.get(theme)
if selected_css is not None:
    # unsafe_allow_html is required for raw <style> tag injection.
    st.markdown(selected_css, unsafe_allow_html=True)
# --- Chat round-trip ----------------------------------------------------------
# Free-text prompt box plus a Send button; the model is only invoked when the
# button is pressed with a non-empty message.
user_input = st.text_input("You: ", "")
send_clicked = st.button("Send")
if send_clicked:
    if not user_input:
        # Nothing typed — nudge the user instead of calling the model.
        st.warning("Please enter a message.")
    else:
        # Generate response using AirLLM and render it read-only below.
        response = air_llm.generate_response(user_input)
        st.text_area("Bot:", value=response, height=200, max_chars=None)
# --- Sidebar footer -----------------------------------------------------------
# Static "About" blurb rendered at the bottom of the sidebar.
_ABOUT_MD = """
### About
This is a conversational chatbot built using the base version of the GEMMA 2B model and AirLLM.
"""
st.sidebar.markdown(_ABOUT_MD)