# Hugging Face Space — "AI Chatbot" (Streamlit app)
import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Streamlit page configuration.
# NOTE: set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="AI Chatbot", layout="centered")
# Load the model pipeline.
# @st.cache_resource caches the returned pipeline for the lifetime of the
# server process, so the 8B-parameter model is downloaded/loaded once rather
# than on every Streamlit rerun (every widget interaction re-executes the script).
@st.cache_resource
def load_pipeline():
    """Build and return a cached text-generation pipeline.

    Returns:
        transformers.Pipeline: a "text-generation" pipeline wrapping the
        Llama-3.1-8B-Lexi-Uncensored-V2 model and its tokenizer.
    """
    model_name = "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2"
    # Load tokenizer and model weights from the Hugging Face Hub.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="auto",   # shard/place weights on GPU(s) if available
        # NOTE(review): passing rope_scaling=None overrides the value in the
        # model config to work around a rope_scaling parsing issue — confirm
        # this is still needed with the installed transformers version.
        rope_scaling=None,
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
# Instantiate (or fetch from cache) the generation pipeline.
pipe = load_pipeline()

# Streamlit App UI
st.title("🤖 AI Chatbot")
st.markdown(
    """
    Welcome to the **AI Chatbot** powered by Hugging Face's **Llama-3.1-8B-Lexi-Uncensored-V2** model.
    Type your message below and interact with the AI!
    """
)

# User input area
user_input = st.text_area(
    "Your Message",
    placeholder="Type your message here...",
    height=100,
)
# Button to generate a response. Guard clause first: warn and bail out on
# empty/whitespace-only input instead of nesting the happy path.
if st.button("Generate Response"):
    if not user_input.strip():
        st.warning("Please enter a message before clicking the button.")
    else:
        with st.spinner("Generating response..."):
            try:
                # NOTE(review): max_length counts prompt + completion tokens;
                # max_new_tokens may be the intended bound — confirm.
                response = pipe(user_input, max_length=150, num_return_sequences=1)
                # generated_text includes the original prompt followed by the completion.
                st.text_area("Response", value=response[0]["generated_text"], height=200)
            except Exception as e:
                # Top-level UI boundary: surface the failure to the user
                # rather than crashing the app.
                st.error(f"An error occurred: {e}")
# Footer
st.markdown("---")
st.markdown("Made with ❤️ using [Streamlit](https://streamlit.io) and [Hugging Face](https://huggingface.co).")