import streamlit as st
from transformers import pipeline

model_name = "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2"

# Load the text-generation pipeline once and cache it across Streamlit reruns,
# so the 8B model is not reloaded on every interaction. The pipeline loads the
# tokenizer and model internally, so there is no need to load them a second
# time with AutoTokenizer/AutoModelForCausalLM.
@st.cache_resource
def load_pipeline():
    return pipeline("text-generation", model=model_name)

pipe = load_pipeline()
def generate_response(prompt):
    """Generate a response from the model given a prompt."""
    # max_new_tokens bounds only the generated continuation; the original
    # max_length=100 also counted the prompt tokens and could truncate the reply.
    response = pipe(prompt, max_new_tokens=100, num_return_sequences=1)
    return response[0]["generated_text"]
# Streamlit interface
st.title("AI Chatbot using Hugging Face")
st.markdown("This app uses the Llama-3.1-8B-Lexi-Uncensored-V2 model to generate responses.")

user_input = st.text_input("Enter your message:", placeholder="Type something here...")

if st.button("Generate Response"):
    if user_input:
        response = generate_response(user_input)
        st.text_area("Response:", value=response, height=200)
    else:
        st.warning("Please enter a message before clicking the button.")
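
Note that a text-generation pipeline echoes the prompt at the start of generated_text. If you want only the model's reply in the text area, a minimal variant sketch using the pipeline's return_full_text flag (the helper name generate_reply_only is illustrative, not part of the original app):

def generate_reply_only(prompt):
    """Return only the model's continuation, without echoing the prompt."""
    response = pipe(prompt, max_new_tokens=100, return_full_text=False)
    return response[0]["generated_text"]

To try the app, save the script (for example as app.py) and launch it with streamlit run app.py; the first run downloads the 8B checkpoint, which needs roughly 16 GB of memory in half precision, so a GPU is strongly recommended.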