Spaces:
Running
Running
File size: 6,618 Bytes
21cbcb5 915a848 21cbcb5 d6bbfd2 915a848 fbbdd25 21cbcb5 915a848 21cbcb5 ddc57e1 21cbcb5 d6bbfd2 915a848 d6bbfd2 21cbcb5 fbbdd25 915a848 21cbcb5 ee30c79 fbbdd25 ee30c79 fbbdd25 ee30c79 fbbdd25 ee30c79 21cbcb5 ee30c79 21cbcb5 c6e2c6b 21cbcb5 915a848 21cbcb5 fbbdd25 ddc57e1 915a848 fbbdd25 ddc57e1 ee30c79 ddc57e1 fbbdd25 ee30c79 915a848 fbbdd25 915a848 fbbdd25 915a848 45e761b 915a848 45e761b ee30c79 fbbdd25 ddc57e1 fbbdd25 915a848 a7c8ed0 915a848 ddc57e1 915a848 fbbdd25 915a848 fbbdd25 915a848 21cbcb5 fbbdd25 915a848 21cbcb5 fbbdd25 21cbcb5 fbbdd25 21cbcb5 fbbdd25 21cbcb5 fbbdd25 21cbcb5 fbbdd25 21cbcb5 fbbdd25 a7c8ed0 21cbcb5 fbbdd25 915a848 d6bbfd2 21cbcb5 fbbdd25 21cbcb5 fbbdd25 21cbcb5 ddc57e1 21cbcb5 fbbdd25 ee30c79 a7c8ed0 21cbcb5 ddc57e1 21cbcb5 fbbdd25 21cbcb5 fbbdd25 21cbcb5 ee30c79 21cbcb5 fbbdd25 ee30c79 21cbcb5 45e761b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 |
import streamlit as st
from together import Together
import os
from typing import Iterator
from PIL import Image
import base64
from PyPDF2 import PdfReader
import json # For debugging
# Together API key comes from the environment (Secrets); fail fast at
# import time so the app never starts half-configured.
API_KEY = os.getenv("TOGETHER_API_KEY")
if API_KEY is None or API_KEY == "":
    raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the Secrets.")
@st.cache_resource
def get_client():
    """Create the Together API client (cached once per Streamlit process)."""
    client = Together(api_key=API_KEY)
    return client
def process_file(file) -> str:
    """Extract usable text content from an uploaded file.

    PDFs are converted to their concatenated page text, images to a
    base64 string, and anything else is decoded as UTF-8 text.

    Returns "" when *file* is None or when processing fails (the error
    is shown to the user via st.error instead of raising).
    """
    if file is None:
        return ""
    try:
        if file.type == "application/pdf":
            text = ""
            pdf_reader = PdfReader(file)
            for page in pdf_reader.pages:
                # extract_text() may return None for image-only pages;
                # guard so the concatenation never raises TypeError.
                text += (page.extract_text() or "") + "\n"
            return text
        elif file.type.startswith("image/"):
            # Images are forwarded as base64-encoded bytes.
            return base64.b64encode(file.getvalue()).decode("utf-8")
        else:
            # Treat everything else (txt/py/md, ...) as UTF-8 text.
            return file.getvalue().decode('utf-8')
    except Exception as e:
        st.error(f"Error processing file: {str(e)}")
        return ""
def format_message(role: str, content: str) -> dict:
    """Build one chat message in the shape the chat-completions API expects."""
    return dict(role=role, content=content)
def get_formatted_history(messages: list) -> list:
    """Convert conversation history to the API message format."""
    normalized = []
    for entry in messages:
        # Skip anything that is not a well-formed message dict.
        if not (isinstance(entry, dict) and "role" in entry and "content" in entry):
            continue
        role = entry["role"]
        # Coerce non-standard roles onto the API's accepted set:
        # "human" maps to "user", any other unknown role to "assistant".
        if role not in ("system", "user", "assistant"):
            role = "user" if role == "human" else "assistant"
        normalized.append({"role": role, "content": entry["content"]})
    return normalized
def generate_response(
    message: str,
    history: list,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    files=None
) -> Iterator[str]:
    """Stream the model's reply for the current conversation.

    The caller appends the current user prompt to *history* before
    invoking this function, so *message* is not re-appended here.

    Yields chunks of the assistant's reply as they arrive; on failure
    yields a single user-facing error string instead of raising.
    """
    client = get_client()
    try:
        # Build the request message list: optional system prompt first.
        messages = []
        if system_message.strip():
            messages.append(format_message("system", system_message))
        # Normalize the stored conversation (latest user turn included).
        formatted_history = get_formatted_history(history)
        # Attach any uploaded-file content to the most recent user
        # message, or as a new user message if the history does not
        # end with one.
        if files:
            file_contents = []
            for file in files:
                content = process_file(file)
                if content:
                    file_contents.append(f"File content:\n{content}")
            if file_contents:
                if formatted_history and formatted_history[-1]["role"] == "user":
                    formatted_history[-1]["content"] += "\n\n" + "\n\n".join(file_contents)
                else:
                    formatted_history.append(format_message("user", "\n\n".join(file_contents)))
        messages.extend(formatted_history)
        # NOTE: the previous leftover debug st.write() that dumped the
        # full API request (including file contents) into the user-facing
        # UI on every turn has been removed.
        try:
            stream = client.chat.completions.create(
                model="deepseek-ai/DeepSeek-R1",
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                stream=True
            )
            for chunk in stream:
                delta = chunk.choices[0].delta
                if hasattr(delta, 'content') and delta.content:
                    yield delta.content
        except Exception as e:
            # Surface rate limiting distinctly; everything else gets a
            # generic apology plus a detailed error in the UI.
            if "rate limit" in str(e).lower():
                yield "API call rate limit reached. Please try again later."
            else:
                st.error(f"Detailed API error: {str(e)}")
                yield "Sorry, please try again later."
    except Exception as e:
        st.error(f"Detailed error: {str(e)}")
        yield "An error occurred, please try again later."
def main():
    """Render the Streamlit chat UI and drive one chat turn per rerun."""
    st.set_page_config(page_title="DeepSeek Chat", page_icon="💭", layout="wide")

    # Conversation history persists across Streamlit reruns.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    st.title("DeepSeek Chat")
    st.markdown("Chat with the DeepSeek AI model. You can upload files if needed.")

    # Sidebar: generation settings and optional file uploads.
    with st.sidebar:
        st.header("Settings")
        system_message = st.text_area(
            "System Message",
            value="You are a deeply thoughtful AI. Consider problems thoroughly and derive correct solutions through systematic reasoning. Please answer in English.",
            height=100
        )
        max_tokens = st.slider("Max Tokens", 1, 4096, 2048)
        temperature = st.slider("Temperature", 0.0, 2.0, 0.7, 0.1)
        top_p = st.slider("Top-p", 0.0, 1.0, 0.7, 0.1)
        uploaded_file = st.file_uploader(
            "File Upload (Optional)",
            type=['txt', 'py', 'md', 'pdf', 'png', 'jpg', 'jpeg'],
            accept_multiple_files=True
        )
        st.markdown("Join our Discord community: [https://discord.gg/openfreeai](https://discord.gg/openfreeai)")

    # Replay the stored conversation so it survives reruns.
    for past in st.session_state.messages:
        with st.chat_message(past["role"]):
            st.markdown(past["content"])

    prompt = st.chat_input("What would you like to know?")
    if prompt:
        # Record and echo the user's turn before generating a reply.
        st.session_state.messages.append(format_message("user", prompt))
        with st.chat_message("user"):
            st.markdown(prompt)

        # Stream the assistant's reply, updating a placeholder live
        # with a cursor marker while chunks arrive.
        with st.chat_message("assistant"):
            placeholder = st.empty()
            reply = ""
            for piece in generate_response(
                prompt,
                st.session_state.messages,
                system_message,
                max_tokens,
                temperature,
                top_p,
                uploaded_file
            ):
                reply += piece
                placeholder.markdown(reply + "▌")
            placeholder.markdown(reply)

        # Persist the assistant's turn for the next rerun.
        st.session_state.messages.append(format_message("assistant", reply))
# Start the Streamlit app only when this file is executed directly.
if __name__ == "__main__":
    main()
|