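"""Streamlit chat app for the DeepSeek-R1 model, served through the Hugging Face
InferenceClient with the Together AI provider. An optionally uploaded text file
is prepended to the user message as extra context.

Requires the TOGETHER_API_KEY environment variable (e.g., a Space secret).
"""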
import streamlit as st
from huggingface_hub import InferenceClient
import os
from typing import Iterator


API_KEY = os.getenv("TOGETHER_API_KEY")
if not API_KEY:
    raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the environment (e.g., as a Space secret).")


# Initialize the client with the Together AI provider.
# st.cache_resource creates the client once and reuses it across Streamlit reruns.
@st.cache_resource
def get_client():
    return InferenceClient(
        provider="together",
        api_key=API_KEY,
    )

def process_file(file) -> str:
    """Process uploaded file and return its content"""
    if file is None:
        return ""
    
    try:
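        # Assumes a UTF-8 text file; non-text uploads fall through to the
        # error branch below, and the error string is sent as file content.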
        content = file.getvalue().decode('utf-8')
        return content
    except Exception as e:
        return f"Error reading file: {str(e)}"

def generate_response(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    file=None
) -> Iterator[str]:
    """Generate streaming response from the model"""
    client = get_client()
    
    # Process file if uploaded
    file_content = process_file(file) if file else ""
    
    # If there's file content, append it to the message
    if file_content:
        message = f"File content:\n{file_content}\n\nUser message:\n{message}"

    messages = [{"role": "system", "content": system_message}]

    # Add conversation history
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Add current message
    messages.append({"role": "user", "content": message})

    try:
        stream = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-R1",
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        
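        # Yield only non-empty content deltas from the streaming response.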
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
                
    except Exception as e:
        yield f"Error: {str(e)}"

def main():
    st.set_page_config(page_title="DeepSeek Chat", page_icon="💭", layout="wide")
    
    # Initialize session state for chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    st.title("DeepSeek Chat with File Upload")
    st.markdown("Chat with the DeepSeek-R1 model. You can optionally upload files for the model to analyze.")

    # Sidebar for parameters
    with st.sidebar:
        st.header("Settings")
        system_message = st.text_area(
            "System Message",
            value="You are a friendly Chatbot.",
            height=100
        )
        max_tokens = st.slider(
            "Max Tokens",
            min_value=1,
            max_value=8192,
            value=8192,
            step=1
        )
        temperature = st.slider(
            "Temperature",
            min_value=0.0,  # Streamlit requires min_value <= value; the default below is 0.0
            max_value=4.0,
            value=0.0,
            step=0.1
        )
        top_p = st.slider(
            "Top-p (nucleus sampling)",
            min_value=0.1,
            max_value=1.0,
            value=0.95,
            step=0.05
        )
        uploaded_file = st.file_uploader(
            "Upload File (optional)",
            type=['txt', 'py', 'md', 'swift', 'java', 'js', 'ts', 'rb', 'go',
                  'php', 'c', 'cpp', 'h', 'hpp', 'cs', 'html', 'css', 'kt']
        )

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # Chat input
    if prompt := st.chat_input("What would you like to know?"):
        # Record the user message and display it
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)

        # Generate and display assistant response
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            full_response = ""
            
            # Rebuild (user, assistant) pairs from the flat message list;
            # zip() truncates at the shorter slice, so the just-appended
            # prompt (which has no reply yet) is excluded from history.
            history = [(msg["content"], next_msg["content"])
                       for msg, next_msg in zip(st.session_state.messages[::2],
                                                st.session_state.messages[1::2])]
            
            # Stream the response
            for response_chunk in generate_response(
                prompt,
                history,
                system_message,
                max_tokens,
                temperature,
                top_p,
                uploaded_file
            ):
                full_response += response_chunk
                response_placeholder.markdown(full_response + "▌")
            
            response_placeholder.markdown(full_response)
        
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})

if __name__ == "__main__":
    main()