# Previous version of this app (DialoGPT-medium with a manual generate call),
# kept commented out for reference:
#
# import streamlit as st
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# # Load the model and tokenizer
# @st.cache_resource
# def load_model_and_tokenizer():
#     model_name = "microsoft/DialoGPT-medium"  # Replace with your chosen model
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#     model = AutoModelForCausalLM.from_pretrained(model_name)
#     return tokenizer, model
#
# tokenizer, model = load_model_and_tokenizer()
#
# # Streamlit App
# st.title("General Chatbot")
# st.write("A chatbot powered by an open-source model from Hugging Face.")
#
# # Initialize the conversation
# if "conversation_history" not in st.session_state:
#     st.session_state["conversation_history"] = []
#
# # Input box for user query
# user_input = st.text_input("You:", placeholder="Ask me anything...", key="user_input")
#
# if st.button("Send") and user_input:
#     # Append user input to history
#     st.session_state["conversation_history"].append({"role": "user", "content": user_input})
#
#     # Tokenize and generate response
#     input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
#     chat_history_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
#     response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
#
#     # Append model response to history
#     st.session_state["conversation_history"].append({"role": "assistant", "content": response})
#
# # Display the conversation
# for message in st.session_state["conversation_history"]:
#     if message["role"] == "user":
#         st.write(f"**You:** {message['content']}")
#     else:
#         st.write(f"**Bot:** {message['content']}")

import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
st.title("🤖 Smart Chatbot")

@st.cache_resource
def load_model():
    model_name = "facebook/blenderbot-3B"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return pipeline("text2text-generation", model=model, tokenizer=tokenizer)

chatbot = load_model()
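
# Note: blenderbot-3B is a ~3B-parameter checkpoint and needs several GB of
# memory to load; on a constrained Space, a smaller checkpoint may be a
# better fit. A minimal swap, assuming the distilled checkpoint is acceptable
# for your use case:
#
#     model_name = "facebook/blenderbot-400M-distill"
#
# @st.cache_resource caches the returned pipeline across reruns, so the model
# loads once per process rather than on every Streamlit interaction.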
if "conversation" not in st.session_state:
st.session_state.conversation = []
# Display chat history
for msg in st.session_state.conversation:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

if prompt := st.chat_input("Ask me anything"):
    # Add user message
    st.session_state.conversation.append({"role": "user", "content": prompt})

    # Format the last few turns as plain-text context for the model
    context = "\n".join([f"{msg['role']}: {msg['content']}" for msg in st.session_state.conversation[-3:]])

    try:
        with st.spinner("Thinking..."):
            response = chatbot(
                context,
                max_length=200,
                do_sample=True,  # without this, temperature/top_k/top_p are ignored
                temperature=0.9,
                top_k=60,
                top_p=0.9,
                num_beams=5,
                no_repeat_ngram_size=3,
            )[0]["generated_text"]
        # Keep only the text after the last "assistant:" marker, in case the
        # model echoed part of the context back
        response = response.split("assistant:")[-1].strip()
        # Fall back to a clarifying question on empty or non-committal answers
        if not response or response.lower() in ["i don't know", "i'm not sure"]:
            response = "I need to learn more about that. Could you clarify?"
    except Exception:
        response = "Let me check my knowledge sources and get back to you on that."

    st.session_state.conversation.append({"role": "assistant", "content": response})
    st.rerun()
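
# To try the app locally (assuming this file is saved as app.py and that
# streamlit, transformers, and torch are installed):
#
#   streamlit run app.py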