File size: 4,315 Bytes
7cc1093
 
 
23bfc7b
7cc1093
 
 
 
 
23bfc7b
7cc1093
 
 
23bfc7b
7cc1093
 
23bfc7b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7cc1093
 
 
 
23bfc7b
7cc1093
23bfc7b
 
 
7cc1093
23bfc7b
7cc1093
 
 
23bfc7b
7cc1093
23bfc7b
7cc1093
23bfc7b
 
 
7cc1093
 
23bfc7b
 
 
 
7cc1093
 
 
 
 
23bfc7b
 
7cc1093
23bfc7b
 
7cc1093
 
23bfc7b
7cc1093
23bfc7b
7cc1093
23bfc7b
7cc1093
 
23bfc7b
 
7cc1093
23bfc7b
 
7cc1093
 
 
23bfc7b
7cc1093
 
23bfc7b
 
7cc1093
 
 
 
 
 
 
 
 
 
 
 
 
 
23bfc7b
 
 
 
 
7cc1093
23bfc7b
7cc1093
 
23bfc7b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
import streamlit as st
import requests
import logging
import time

# Configure logging
# Module-level logger so request/application errors below surface in the
# server logs rather than only in the browser UI.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Set page configuration
# st.set_page_config must be the first Streamlit command executed in the
# script (a Streamlit requirement), hence its position at the top.
st.set_page_config(
    page_title="DeepSeek Chatbot - NextGenWebAI",
    page_icon="πŸ€–",
    layout="wide"
)

# Custom CSS for UI Enhancements
# Injected on every rerun; styles the chat bubbles (.user / .assistant)
# that are rendered below via st.markdown(..., unsafe_allow_html=True).
# The CSS itself is a runtime string and must stay exactly as written.
st.markdown("""
    <style>
    body {
        font-family: 'Arial', sans-serif;
    }
    .stChatMessage {
        border-radius: 10px;
        padding: 10px;
        margin: 5px 0;
    }
    .user {
        background-color: #007BFF;
        color: white;
        text-align: right;
        border-radius: 12px 12px 0px 12px;
    }
    .assistant {
        background-color: #F1F1F1;
        color: black;
        text-align: left;
        border-radius: 12px 12px 12px 0px;
    }
    .sidebar .sidebar-content {
        background-color: #222;
        color: white;
    }
    </style>
""", unsafe_allow_html=True)

# Initialize session state for chat history
# st.session_state persists across Streamlit reruns; seed the message list
# only once so the transcript survives each user interaction.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar Configuration
# All widgets below bind module-level variables (selected_model,
# system_message, max_tokens, temperature, top_p) that the request-building
# code further down reads on every rerun.
with st.sidebar:
    st.image("https://huggingface.co/front/thumbnails/hf-logo.png", width=150)
    st.header("βš™οΈ Model Configuration")
    st.markdown("[πŸ”‘ Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Model selection
    # Single-entry list for now; add model ids here to expose more choices.
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    ]
    selected_model = st.selectbox("πŸ” Select AI Model", model_options, index=0)

    # System prompt
    # Prepended verbatim to every request (see full_prompt construction below).
    system_message = st.text_area(
        "πŸ“œ System Instructions",
        value="You are a friendly chatbot. Provide clear and engaging responses.",
        height=80
    )

    # Chat settings
    # Slider signatures are (label, min, max, default).
    max_tokens = st.slider("πŸ”’ Max Tokens", 10, 4000, 300)
    temperature = st.slider("πŸ”₯ Temperature", 0.1, 2.0, 0.7)
    top_p = st.slider("🎯 Top-p", 0.1, 1.0, 0.9)

# Function to query the Hugging Face API
def query(payload, api_url):
    """POST `payload` (JSON) to the HF Inference API endpoint `api_url`.

    Reads the bearer token from st.secrets['HF_TOKEN'].

    Returns:
        The decoded JSON response on success, or None on any request,
        HTTP-status, or timeout error (errors are logged, not raised).
    """
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    try:
        # timeout prevents the app from hanging forever on a stalled
        # endpoint; the original call had no timeout at all.
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()  # Raise HTTP errors if any
        return response.json()
    except requests.exceptions.RequestException as e:
        # Timeout is a subclass of RequestException, so it is covered here.
        logger.error("Request Error: %s", e)
        return None

# Main Chat Interface
st.title("πŸ€– DeepSeek Chatbot")
st.caption("πŸš€ AI-powered chatbot using Hugging Face API")

# Display chat history with enhanced UI
# The whole transcript is replayed from session_state on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        class_name = "user" if message["role"] == "user" else "assistant"
        st.markdown(f"<div class='{class_name}'>{message['content']}</div>", unsafe_allow_html=True)

# Handle user input
if prompt := st.chat_input("πŸ’¬ Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        # NOTE(review): user text is interpolated into HTML rendered with
        # unsafe_allow_html=True, so any markup the user types is rendered
        # as-is. Consider html.escape() if this app faces untrusted users.
        st.markdown(f"<div class='user'>{prompt}</div>", unsafe_allow_html=True)

    try:
        with st.spinner("πŸ€– Thinking..."):
            # Build a single-turn prompt: sidebar system message + latest
            # user message. (Earlier turns are not sent to the model.)
            full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
            payload = {
                "inputs": full_prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            output = query(payload, api_url)

            # Validate the response shape before indexing: query() returns
            # None on failure, and the API may return an empty list or a
            # non-dict element — the original `'generated_text' in output[0]`
            # raised IndexError/TypeError in those cases.
            if (
                isinstance(output, list)
                and output
                and isinstance(output[0], dict)
                and 'generated_text' in output[0]
            ):
                assistant_response = output[0]['generated_text'].strip()
                with st.chat_message("assistant"):
                    st.markdown(f"<div class='assistant'>{assistant_response}</div>", unsafe_allow_html=True)
                st.session_state.messages.append({"role": "assistant", "content": assistant_response})
            else:
                st.error("⚠️ Unable to generate a response. Please try again.")
    except Exception as e:
        # Top-level boundary: log with traceback, show a friendly error.
        logger.error("Application Error: %s", e, exc_info=True)
        st.error(f"⚠️ Error: {str(e)}")