File size: 2,035 Bytes
b52e376
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import streamlit as st
import airllm

# Load GEMMA 2B model and tokenizer.
model_name = "google/gemma-2b"


# Streamlit re-runs this entire script on every widget interaction
# (every button click / text change).  Without caching, that would
# re-load the multi-gigabyte model on each message.  `st.cache_resource`
# keeps a single shared copy for the lifetime of the server process.
# `show_spinner=False` because no Streamlit UI element may run before
# `st.set_page_config()` below.
@st.cache_resource(show_spinner=False)
def _load_chat_resources(name):
    """Load tokenizer, base causal-LM model, and AirLLM wrapper once.

    Returns:
        (tokenizer, model, air_llm) tuple for the given HF model name.
    """
    tok = AutoTokenizer.from_pretrained(name)
    base_model = AutoModelForCausalLM.from_pretrained(name)
    # NOTE(review): kept from the original — confirm the pinned airllm
    # version actually exposes `airllm.AirLLM(model, tokenizer)`; recent
    # releases expose `AutoModel` / model-family classes instead.
    wrapper = airllm.AirLLM(base_model, tok)
    return tok, base_model, wrapper


# Preserve the original module-level names for any downstream code.
tokenizer, model, air_llm = _load_chat_resources(model_name)

# Streamlit app configuration (must be the first Streamlit command).
st.set_page_config(
    page_title="Chatbot with GEMMA 2B and AirLLM",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)

# App title
st.title("Conversational Chatbot with GEMMA 2B and AirLLM")

# Sidebar configuration
st.sidebar.header("Chatbot Configuration")
theme = st.sidebar.selectbox("Choose a theme", ["Default", "Dark", "Light"])

# Per-theme colors: (page background, page text, sidebar background).
# "Default" is deliberately absent — it injects no CSS override, exactly
# as before.  This table replaces two copy-pasted <style> blocks that
# differed only in these three values.
_THEME_COLORS = {
    "Dark": ("#2E2E2E", "#FFFFFF", "#333333"),
    "Light": ("#FFFFFF", "#000000", "#F5F5F5"),
}

if theme in _THEME_COLORS:
    page_bg, page_fg, sidebar_bg = _THEME_COLORS[theme]
    st.markdown(
        f"""
        <style>
        .reportview-container {{
            background: {page_bg};
            color: {page_fg};
        }}
        .sidebar .sidebar-content {{
            background: {sidebar_bg};
        }}
        </style>
        """,
        unsafe_allow_html=True,
    )

# Chat input and output: one-shot request/response, no history kept.
user_input = st.text_input("You: ", "")
if st.button("Send"):
    if not user_input:
        # Guard clause: nothing typed, nothing to send.
        st.warning("Please enter a message.")
    else:
        # Ask the AirLLM wrapper for a reply and show it read-only.
        bot_reply = air_llm.generate_response(user_input)
        st.text_area("Bot:", value=bot_reply, height=200, max_chars=None)

# Footer: "About" box in the sidebar.
# The original string indented every Markdown line by 4 spaces, which
# standard Markdown treats as a preformatted code block — so the
# "### About" heading rendered as literal monospace text instead of a
# heading.  Dedenting the string makes it render as intended.
st.sidebar.markdown(
    """
### About

This is a conversational chatbot built using the base version of the GEMMA 2B model and AirLLM.
"""
)