"""Streamlit chatbot UI backed by the Hugging Face Inference API (DeepSeek models)."""

import streamlit as st
import requests
import logging
import time

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Set page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - NextGenWebAI",
    page_icon="🤖",
    layout="wide"
)

# Custom CSS for UI Enhancements
# NOTE(review): the <style> rules appear to have been stripped from this
# literal in transit — restore the original CSS here if available.
st.markdown("""
""", unsafe_allow_html=True)

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar Configuration
with st.sidebar:
    st.image("https://huggingface.co/front/thumbnails/hf-logo.png", width=150)
    st.header("⚙️ Model Configuration")
    st.markdown("[🔑 Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Model selection
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    ]
    selected_model = st.selectbox("🔍 Select AI Model", model_options, index=0)

    # System prompt
    system_message = st.text_area(
        "📜 System Instructions",
        value="You are a friendly chatbot. Provide clear and engaging responses.",
        height=80
    )

    # Chat settings
    max_tokens = st.slider("🔢 Max Tokens", 10, 4000, 300)
    temperature = st.slider("🔥 Temperature", 0.1, 2.0, 0.7)
    top_p = st.slider("🎯 Top-p", 0.1, 1.0, 0.9)


# Function to query the Hugging Face API
def query(payload, api_url):
    """POST ``payload`` as JSON to the Hugging Face inference ``api_url``.

    Authenticates with the ``HF_TOKEN`` Streamlit secret. Returns the decoded
    JSON response, or ``None`` on any connection/timeout/HTTP error (the error
    is logged, not raised, so the UI can show a friendly message).
    """
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    try:
        # timeout= prevents a stalled inference endpoint from hanging the
        # Streamlit script run indefinitely.
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()  # Raise HTTP errors if any
        return response.json()
    except requests.exceptions.RequestException as e:
        logger.error("Request Error: %s", e)
        return None


# Main Chat Interface
st.title("🤖 DeepSeek Chatbot")
st.caption("🚀 AI-powered chatbot using Hugging Face API")

# Display chat history with enhanced UI
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        class_name = "user" if message["role"] == "user" else "assistant"
        # NOTE(review): the original HTML wrapper was lost when the file was
        # mangled (class_name was computed but the markup vanished); a plain
        # role-classed <div> is reconstructed here to match the CSS block above
        # — confirm against the original source.
        st.markdown(
            f'<div class="{class_name}">{message["content"]}</div>',
            unsafe_allow_html=True,
        )

# Handle user input
if prompt := st.chat_input("💬 Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        # NOTE(review): reconstructed wrapper — see history-rendering note above.
        st.markdown(f'<div class="user">{prompt}</div>', unsafe_allow_html=True)

    try:
        with st.spinner("🤖 Thinking..."):
            time.sleep(1)  # Simulate processing time
            # Flatten the system prompt + latest user turn into a plain
            # text-generation prompt (no multi-turn history is sent).
            full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
            payload = {
                "inputs": full_prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }
            api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            output = query(payload, api_url)

            # The text-generation endpoint returns a list of
            # {"generated_text": ...} dicts on success.
            if output and isinstance(output, list) and 'generated_text' in output[0]:
                assistant_response = output[0]['generated_text'].strip()
                with st.chat_message("assistant"):
                    st.markdown(
                        f'<div class="assistant">{assistant_response}</div>',
                        unsafe_allow_html=True,
                    )
                st.session_state.messages.append(
                    {"role": "assistant", "content": assistant_response}
                )
            else:
                st.error("⚠️ Unable to generate a response. Please try again.")
    except Exception as e:
        # Top-level boundary: log with traceback and surface the error in the UI.
        logger.error("Application Error: %s", e, exc_info=True)
        st.error(f"⚠️ Error: {str(e)}")