import streamlit as st
from transformers import pipeline, Conversation, BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer
import torch
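# Log whether a GPU is visible; 4-bit bitsandbytes quantization requires CUDA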
print(torch.cuda.is_available())

# Cache the heavy model/tokenizer load so Streamlit does not repeat it on every rerun
@st.cache_resource
def load_chatbot():
    tokenizer_name = "tiiuae/falcon-7b-instruct"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, trust_remote_code=True)
    # Falcon ships without a pad token; reuse the EOS token for padding
    tokenizer.pad_token = tokenizer.eos_token

    # Load the Hugging Face model for the chatbot with 4-bit NF4 quantization
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True,
    )
    model = AutoModelForCausalLM.from_pretrained(
        "rawkintrevo/hf-sme-falcon-7b",
        revision="v0.0.1",
        quantization_config=bnb_config,
        torch_dtype=torch.float16,
        trust_remote_code=True,
    )
    return pipeline("conversational", model=model, tokenizer=tokenizer)

chatbot = load_chatbot()

# Streamlit app title
st.title("Hugging Face Chatbot")
# User input for chat
user_input = st.text_input("You:", "")
if st.button("Ask"):
    if user_input:
        # Generate a response from the chatbot model. The conversational
        # pipeline expects a Conversation object and returns it with the
        # model's reply appended to generated_responses.
        conversation = chatbot(Conversation(user_input))
        response = conversation.generated_responses[-1]
        st.text("Chatbot:")
        st.write(response)
# Example conversation
st.subheader("Example Conversation:")
st.write("You: Hi, how are you?")
st.write("Chatbot: I'm good, how can I help you today?")
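
# Run locally with `streamlit run <this file>` (on Spaces the entry point is
# conventionally app.py). A possible extension, not in the original code: keep
# a multi-turn chat by storing the Conversation in Streamlit's session state:
#   if "conv" not in st.session_state:
#       st.session_state["conv"] = Conversation()
#   st.session_state["conv"].add_user_input(user_input)
#   chatbot(st.session_state["conv"])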