import os

import streamlit as st
from airllm import AutoModel
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Retrieve the API token from the environment variables
api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
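# Optional sanity check: the token is only needed for gated Hub repos, and
# internlm/internlm2_5-7b is public, so a missing token is not fatal here.
if api_token is None:
    print("HUGGINGFACEHUB_API_TOKEN is not set; gated Hub models will be unavailable.")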
# Initialize the model using the AutoModel from AirLLM (the tokenizer is
# exposed as model.tokenizer). Note this loads the base model; the chat-tuned
# weights the app title refers to live at "internlm/internlm2_5-7b-chat".
MAX_LENGTH = 128
model = AutoModel.from_pretrained("internlm/internlm2_5-7b", hf_token=api_token)
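# Note: AirLLM does not hold the full model in VRAM. It splits the checkpoint
# into per-layer shards and streams one layer at a time through the GPU during
# generation, which is how a 7B model fits on a few GB of VRAM.
# Because Streamlit reruns this script on every interaction, a common pattern
# (a sketch, using the same load call as above) is to cache the load:
#
#   @st.cache_resource
#   def load_model():
#       return AutoModel.from_pretrained("internlm/internlm2_5-7b",
#                                        hf_token=api_token)
#
#   model = load_model()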
# Streamlit app configuration
st.set_page_config(
    page_title="Conversational Chatbot with internlm2_5-7b-chat and AirLLM",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)
# App title
st.title("Conversational Chatbot with internlm2_5-7b-chat and AirLLM")
# Sidebar configuration
st.sidebar.header("Chatbot Configuration")
theme = st.sidebar.selectbox("Choose a theme", ["Default", "Dark", "Light"])
# Set theme based on user selection
if theme == "Dark":
    st.markdown(
        """
        <style>
        .reportview-container {
            background: #2E2E2E;
            color: #FFFFFF;
        }
        .sidebar .sidebar-content {
            background: #333333;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
elif theme == "Light":
    st.markdown(
        """
        <style>
        .reportview-container {
            background: #FFFFFF;
            color: #000000;
        }
        .sidebar .sidebar-content {
            background: #F5F5F5;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
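# Compatibility note (an assumption about current Streamlit releases): the
# .reportview-container and .sidebar class names come from older Streamlit DOM
# structures; on recent versions the stable hooks are .stApp and
# [data-testid="stSidebar"], so the CSS above may need those selectors instead.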
# Chat input and output
user_input = st.text_input("You: ", "")
if st.button("Send"):
    if user_input:
        # Tokenize the user input
        input_tokens = model.tokenizer(
            user_input,
            return_tensors="pt",
            return_attention_mask=False,
            truncation=True,
            max_length=MAX_LENGTH,
            padding=False,
        )
        # Generate a response; AirLLM runs inference on the GPU, so the
        # input ids are moved to the CUDA device
        generation_output = model.generate(
            input_tokens["input_ids"].cuda(),
            max_new_tokens=20,
            use_cache=True,
            return_dict_in_generate=True,
        )
        # Decode only the newly generated tokens so the prompt is not
        # echoed back in the reply
        prompt_len = input_tokens["input_ids"].shape[1]
        response = model.tokenizer.decode(
            generation_output.sequences[0][prompt_len:],
            skip_special_tokens=True,
        )
        st.text_area("Bot:", value=response, height=200, max_chars=None)
    else:
        st.warning("Please enter a message.")
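# Optional multi-turn memory (a sketch; the app keeps no history between
# turns): Streamlit forgets local variables on every rerun, so a chat history
# has to live in st.session_state, e.g.
#
#   if "history" not in st.session_state:
#       st.session_state["history"] = []
#   st.session_state["history"].append(("user", user_input))
#   st.session_state["history"].append(("bot", response))
#
# and the accumulated turns would then be concatenated into the prompt.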
# Footer
st.sidebar.markdown(
"""
### About
This is a conversational chatbot built using the internlm2_5-7b-chat model and AirLLM.
"""
)
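# To run (assuming the script is saved as app.py and the dependencies are
# installed with `pip install airllm streamlit python-dotenv`):
#
#   streamlit run app.py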