File size: 3,146 Bytes
3b55d4b
9ab0176
e72ed81
51e9476
e72ed81
 
 
 
 
51e9476
 
e72ed81
 
 
 
 
 
 
1ed9ba1
e72ed81
1ed9ba1
 
 
 
 
 
 
 
 
 
e72ed81
1ed9ba1
e72ed81
 
1ed9ba1
 
e72ed81
 
 
 
 
 
1ed9ba1
e72ed81
1ed9ba1
 
e72ed81
 
 
 
 
1ed9ba1
e72ed81
 
 
 
 
 
 
1ed9ba1
e72ed81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1ed9ba1
e72ed81
 
 
1ed9ba1
e72ed81
 
1ed9ba1
e72ed81
 
 
 
1ed9ba1
e72ed81
 
 
 
1ed9ba1
e72ed81
 
51e9476
1ed9ba1
e72ed81
 
 
1ed9ba1
e72ed81
 
1ed9ba1
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# app.py
# Third-party
import gradio as gr  # required: gr.load(...) is called below but was never imported (NameError)
import streamlit as st

# Local
from models import demo

# Page configuration
# NOTE(review): per Streamlit's docs, set_page_config must be the first
# Streamlit command executed in the script -- keep it ahead of any st.* call.
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)

# Initialize session state for chat history
# st.session_state persists across Streamlit reruns, so the transcript
# survives each interaction; seed the list only on the first run.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar for model selection and generation parameters.
# Defines: selected_model_name, selected_model, system_message,
# max_new_tokens, temperature, top_p (all read by the chat handler below).
with st.sidebar:
    st.header("Model Configuration")

    # Hugging Face repo ids the chatbot can target.
    model_names = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-R1-Zero",
    ]

    # Model selection
    selected_model_name = st.selectbox(
        "Choose Model",
        model_names,
        index=0
    )

    @st.cache_resource
    def _load_model(repo_id: str):
        """Return a (process-cached) Gradio client for the given HF repo.

        The original code eagerly built ``gr.load()`` clients for all
        three models on every Streamlit rerun; caching and loading only
        the selected model avoids repeating that expensive remote setup
        on each user interaction.
        """
        return gr.load(name=repo_id, src="huggingface")

    selected_model = _load_model(selected_model_name)

    # System message steering the assistant's persona.
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    # Generation parameters
    max_new_tokens = st.slider(
        "Max New Tokens",
        min_value=1,
        max_value=4000,
        value=512,
        step=10
    )

    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=4.0,
        value=0.7,
        step=0.1
    )

    top_p = st.slider(
        "Top-p (nucleus sampling)",
        min_value=0.1,
        max_value=1.0,
        value=0.9,
        step=0.1
    )

# Main chat interface: page heading plus a replay of the stored transcript.
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by ruslanmv.com - Choose your model and parameters in the sidebar")

# Re-render every message kept in session state (Streamlit reruns the
# whole script on each interaction, so the transcript must be redrawn).
for msg in st.session_state.messages:
    role, content = msg["role"], msg["content"]
    with st.chat_message(role):
        st.markdown(content)

# Chat input
# st.chat_input returns None until the user submits text; the walrus
# operator binds the submitted string for the rest of the branch.
if prompt := st.chat_input("Type your message..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepare conversation history in the required format
    # NOTE(review): despite the comment below, only the system message and
    # the *latest* user turn are sent -- earlier entries in
    # st.session_state.messages are not included, so the model has no
    # multi-turn memory. Confirm whether that is intentional.
    full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"

    try:
        # Generate response using selected model
        with st.spinner("Generating response..."):
            # The model expects a single text input with the conversation history
            # NOTE(review): calling .fn(prompt, {params...}) assumes a specific
            # signature on the object returned by gr.load() -- verify against
            # the gradio version actually in use.
            response = selected_model.fn(
                full_prompt,  # Pass the full conversation history
                {
                    "temperature": temperature,
                    "top_p": top_p,
                    "max_new_tokens": max_new_tokens,
                    "repetition_penalty": 1.0
                }
            )

        # Display assistant response
        with st.chat_message("assistant"):
            st.markdown(response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})

    # Error-handler body continues past this chunk of the file.
    except Exception as e: