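"""Streamlit chat app for the Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2 model.

Patches the model's config.json (its rope_scaling block) before loading so the
checkpoint works on transformers versions that predate Llama 3.1 support, then
serves a single-turn text-generation chatbot.
"""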
import streamlit as st
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, pipeline
import json
import requests
import os
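
# Assumed dependencies (inferred from the imports and the device_map usage
# below): streamlit, transformers, torch, accelerate, requests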
# Streamlit app configuration
st.set_page_config(page_title="AI Chatbot", layout="centered")
# Patch the model's rope_scaling configuration before loading
def fix_model_config(model_name):
    # Download the configuration file from the model repository
    config_url = f"https://huggingface.co/{model_name}/resolve/main/config.json"
    config_path = "config.json"
    if not os.path.exists(config_path):
        response = requests.get(config_url)
        response.raise_for_status()  # Raise an error if the request fails
        with open(config_path, "w") as f:
            f.write(response.text)

    # Load the configuration and rewrite rope_scaling if necessary
    with open(config_path, "r") as f:
        config = json.load(f)

    if "rope_scaling" in config:
        # Replace the extended rope_scaling block (which older transformers
        # releases reject) with plain linear scaling, keeping the factor
        config["rope_scaling"] = {
            "type": "linear",
            "factor": config["rope_scaling"].get("factor", 1.0),
        }

    # Save the modified configuration
    with open(config_path, "w") as f:
        json.dump(config, f)

    return config_path
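
# Background: Llama 3.1 checkpoints ship a rope_scaling block with extra keys
# (e.g. "rope_type": "llama3") that transformers releases before roughly v4.43
# reject during config validation; downgrading to plain linear scaling lets
# those versions load the model, likely at some cost to long-context quality.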
# Load the model pipeline
@st.cache_resource
def load_pipeline():
    model_name = "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2"

    # Fix the model configuration and load it as a config object;
    # AutoModelForCausalLM expects a PretrainedConfig here, and a bare file
    # path would be silently ignored, discarding the rope_scaling patch
    fixed_config_path = fix_model_config(model_name)
    config = AutoConfig.from_pretrained(fixed_config_path)

    # Load tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        config=config,
        device_map="auto",  # Use GPU if available (requires accelerate)
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
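
# Build the pipeline once; st.cache_resource reuses it across Streamlit
# reruns so the 8B model is not reloaded on every interaction.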
pipe = load_pipeline()
# Streamlit App UI
st.title("🤖 AI Chatbot")
st.markdown(
    """
    Welcome to the **AI Chatbot** powered by the **Llama-3.1-8B-Lexi-Uncensored-V2**
    model hosted on Hugging Face. Type your message below and interact with the AI!
    """
)
# User input area
user_input = st.text_area(
    "Your Message",
    placeholder="Type your message here...",
    height=100,
)
# Button to generate response
if st.button("Generate Response"):
    if user_input.strip():
        with st.spinner("Generating response..."):
            try:
                # max_new_tokens caps the reply length without counting the
                # prompt tokens, unlike the original max_length argument
                response = pipe(user_input, max_new_tokens=150, num_return_sequences=1)
                st.text_area("Response", value=response[0]["generated_text"], height=200)
            except Exception as e:
                st.error(f"An error occurred: {e}")
    else:
        st.warning("Please enter a message before clicking the button.")
# Footer
st.markdown("---")
st.markdown("Made with ❤️ using [Streamlit](https://streamlit.io) and [Hugging Face](https://huggingface.co).")