import base64
import os
from typing import Iterator

import streamlit as st
from PyPDF2 import PdfReader
from together import Together
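# The Together.ai API key is read once at startup; on Hugging Face Spaces it
# should be configured under the Space's Secrets.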
API_KEY = os.getenv("TOGETHER_API_KEY")
if not API_KEY:
raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the Secrets.")
# Initialize the Together.ai client (cached so it is created once per session)
@st.cache_resource
def get_client():
    """Return a cached Together.ai client authenticated with TOGETHER_API_KEY."""
    return Together(api_key=API_KEY)
def process_file(file) -> str:
"""Process uploaded file and return its content"""
if file is None:
return ""
try:
# Handle PDF files
if file.type == "application/pdf":
text = ""
pdf_reader = PdfReader(file)
for page in pdf_reader.pages:
page_text = page.extract_text()
if page_text:
text += page_text + "\n"
return text
        # Handle image files: return base64 so the bytes can be embedded in a
        # data: URL for the vision model
        elif file.type.startswith("image/"):
            return base64.b64encode(file.getvalue()).decode("utf-8")
# Handle text files
else:
return file.getvalue().decode('utf-8')
except Exception as e:
return f"Error processing file: {str(e)}"
def generate_response(
message: str,
history: list[tuple[str, str]],
system_message: str,
max_tokens: int,
temperature: float,
top_p: float,
files=None
) -> Iterator[str]:
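    """Stream model output chunk by chunk.

    Routes to a Together.ai vision model when an image is attached; otherwise
    sends the system message, prior turns, and any file contents to DeepSeek-R1.
    """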
client = get_client()
has_images = False
content_blocks = []
image_content = None # To store image data
image_mime_type = None # To store MIME type
if files:
for file in files:
content = process_file(file)
            if file.type.startswith("image/"):
                has_images = True
                image_content = content        # already base64 encoded
                image_mime_type = file.type    # e.g. "image/png"; only the last image is kept
else:
content_blocks.append({
"type": "text",
"text": f"File content:\n{content}"
})
# Build messages
messages = [{"role": "system", "content": system_message}]
# Add history
for user_msg, assistant_msg in history:
messages.append({"role": "user", "content": user_msg})
messages.append({"role": "assistant", "content": assistant_msg})
try:
if has_images:
# Vision model request
vision_messages = [{
"role": "user",
"content": [
{"type": "text", "text": message},
{
"type": "image_url",
"image_url": {
"url": f"data:{image_mime_type};base64,{image_content}",
},
},
]
}]
            # Note: only the current turn is sent to the vision model; the
            # system message, chat history, and text-file blocks are not forwarded.
            stream = client.chat.completions.create(
                model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
                messages=vision_messages,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                stream=True,
            )
else:
# Text-only model request
current_message = {
"role": "user",
"content": [{"type": "text", "text": message}] + content_blocks
}
messages.append(current_message)
stream = client.chat.completions.create(
model="deepseek-ai/DeepSeek-R1",
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
stream=True
)
# Stream response
for chunk in stream:
if chunk.choices and chunk.choices[0].delta.content:
yield chunk.choices[0].delta.content
except Exception as e:
yield f"Error: {str(e)}"
def main():
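    """Streamlit entry point: configure the page, sidebar settings, and chat loop."""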
st.set_page_config(page_title="DeepSeek Chat", page_icon="💭", layout="wide")
# Initialize session state for chat history
if "messages" not in st.session_state:
st.session_state.messages = []
st.title("DeepSeek Chat with File Upload")
st.markdown("Chat with DeepSeek AI model. You can optionally upload files for the model to analyze.")
# Sidebar for parameters
with st.sidebar:
st.header("Settings")
system_message = st.text_area(
"System Message",
value="You are a friendly Chatbot.",
height=100
)
max_tokens = st.slider(
"Max Tokens",
min_value=1,
max_value=8192,
value=8192,
step=1
)
        temperature = st.slider(
            "Temperature",
            min_value=0.0,  # must not exceed the default value below, or Streamlit raises an error
            max_value=4.0,
            value=0.0,
            step=0.1
        )
top_p = st.slider(
"Top-p (nucleus sampling)",
min_value=0.1,
max_value=1.0,
value=0.95,
step=0.05
)
        uploaded_files = st.file_uploader(
            "Upload Files (optional)",
            type=['txt', 'py', 'md', 'swift', 'java', 'js', 'ts', 'rb', 'go',
                  'php', 'c', 'cpp', 'h', 'hpp', 'cs', 'html', 'css', 'kt', 'svelte',
                  'pdf', 'png', 'jpg', 'jpeg'],
            accept_multiple_files=True  # returns a list of files
        )
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
# Chat input
if prompt := st.chat_input("What would you like to know?"):
# Display user message
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
# Generate and display assistant response
with st.chat_message("assistant"):
response_placeholder = st.empty()
full_response = ""
            # Rebuild (user, assistant) pairs from past turns; zip() drops the
            # just-appended prompt, which has no assistant reply yet
            history = [(user_msg["content"], assistant_msg["content"])
                       for user_msg, assistant_msg in zip(st.session_state.messages[::2],
                                                          st.session_state.messages[1::2])]
# Stream the response
for response_chunk in generate_response(
prompt,
history,
system_message,
max_tokens,
temperature,
top_p,
                uploaded_files
):
                full_response += response_chunk
                response_placeholder.markdown(full_response + "▌")
response_placeholder.markdown(full_response)
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": full_response})
if __name__ == "__main__":
    main()
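
# To run locally (assuming this file is saved as app.py and streamlit,
# PyPDF2, and together are installed):
#   export TOGETHER_API_KEY=<your key>
#   streamlit run app.py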