# chat-bot / app.py
# NOTE: earlier version of this app that loaded GPT-2 locally; kept commented
# out for reference. The active implementation below uses a transformers
# pipeline with DialoGPT instead.
# import streamlit as st
# from transformers import GPT2LMHeadModel, GPT2Tokenizer
# # Load the GPT-2 model and tokenizer
# @st.cache_resource
# def load_model():
#     model_name = "gpt2"
#     tokenizer = GPT2Tokenizer.from_pretrained(model_name)
#     model = GPT2LMHeadModel.from_pretrained(model_name)
#     return model, tokenizer
# # Function to generate a response from GPT-2
# def generate_response(input_text, model, tokenizer):
#     inputs = tokenizer.encode(input_text, return_tensors="pt")
#     outputs = model.generate(inputs, max_length=150, do_sample=True, top_p=0.9, top_k=50)
#     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
#     return response
# # Streamlit UI setup
# def main():
#     st.title("GPT-2 Chatbot")
#     # Chat history
#     if 'history' not in st.session_state:
#         st.session_state['history'] = []
#     user_input = st.text_input("You:", "")
#     # Generate and display response
#     if user_input:
#         model, tokenizer = load_model()
#         response = generate_response(user_input, model, tokenizer)
#         st.session_state['history'].append({"user": user_input, "bot": response})
#     # Display chat history
#     for chat in st.session_state['history']:
#         st.write(f"You: {chat['user']}")
#         st.write(f"Bot: {chat['bot']}")
# if __name__ == "__main__":
#     main()

import streamlit as st
from transformers import pipeline

# Hugging Face access token, read from Streamlit secrets.
# (DialoGPT-medium is a public checkpoint, so the token mainly matters for
# gated/private models; it is kept here as in the original setup.)
HUGGINGFACE_API_KEY = st.secrets['huggingface_api_key']

# Initialize the text-generation pipeline with DialoGPT-medium.
# The Hub access token is passed via `token=`; pipeline() does not take an
# `api_key` argument.
chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium", token=HUGGINGFACE_API_KEY)

# Get a reply from the model for a single user message
def get_chatbot_response(user_input):
    try:
        # The text-generation pipeline returns the prompt followed by the
        # continuation, so 'generated_text' starts with the user's own message.
        response = chatbot(user_input, max_length=1000, pad_token_id=50256)  # generate a response
        return response[0]['generated_text']  # extract the generated text
    except Exception as e:
        return f"Error: {str(e)}"
# Streamlit interface
st.set_page_config(page_title="Smart ChatBot", layout="centered")
# Custom CSS for chat bubbles with full width and emojis
st.markdown("""
<style>
.chat-container {
display: flex;
flex-direction: column;
width: 100%;
}
.chat-bubble {
width: 100%;
padding: 15px;
margin: 10px 0;
border-radius: 10px;
font-size: 18px;
color: white;
display: inline-block;
line-height: 1.5;
}
.user-bubble {
background: #6a82fb; /* Soft blue */
align-self: flex-end;
border-radius: 10px 10px 10px 10px;
}
.bot-bubble {
background: #fc5c7d; /* Soft pink */
align-self: flex-start;
border-radius: 10px 10px 10px 10px;
}
.chat-header {
# text-align: center;
font-size: 35px;
font-weight: bold;
margin-bottom: 20px;
color: #3d3d3d;
}
.emoji {
font-size: 22px;
margin-right: 10px;
}
</style>
""", unsafe_allow_html=True)
st.markdown('<div class="chat-header">Hugging Face Chatbot-Your AI Companion πŸ’»</div>', unsafe_allow_html=True)
st.write("Powered by Hugging Face for smart, engaging conversations. πŸ€–")
if "history" not in st.session_state:
st.session_state["history"] = []
with st.form(key="chat_form", clear_on_submit=True):
user_input = st.text_input("Your message here... ✍️", max_chars=2000, label_visibility="collapsed")
submit_button = st.form_submit_button("Send πŸš€")
if submit_button:
    if user_input:
        response = get_chatbot_response(user_input)
        st.session_state.history.append((user_input, response))
    else:
        st.warning("Please enter a prompt 😅")
if st.session_state["history"]:
st.markdown('<div class="chat-container">', unsafe_allow_html=True)
for user_input, response in st.session_state["history"]:
st.markdown(f'<div class="chat-bubble user-bubble"><span class="emoji">πŸ‘€</span>You: {user_input}</div>', unsafe_allow_html=True)
st.markdown(f'<div class="chat-bubble bot-bubble"><span class="emoji">πŸ€–</span>Bot: {response}</div>', unsafe_allow_html=True)
st.markdown('</div>', unsafe_allow_html=True)
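
# Usage note (not part of the original file): when running locally, st.secrets
# reads from .streamlit/secrets.toml, so the token used above would be supplied
# there, e.g.:
#
#   huggingface_api_key = "hf_..."   # placeholder value
#
# and the app started with:
#
#   streamlit run app.py
#
# On Hugging Face Spaces, the same key is configured in the Space's settings.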