# deepseek_r1_API / app.py — Hugging Face Space app (commit ddc57e1, "Update app.py" by fantos)
import streamlit as st
from together import Together
import os
from typing import Iterator
from PIL import Image
import base64
from PyPDF2 import PdfReader
# Fail fast at import time: the app cannot make any API call without a key,
# so a missing TOGETHER_API_KEY secret should stop startup immediately.
API_KEY = os.getenv("TOGETHER_API_KEY")
if not API_KEY:
    raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the Secrets.")
@st.cache_resource
def get_client():
    """Build the Together API client once and reuse it across reruns.

    Streamlit's ``cache_resource`` keeps a single client instance alive for
    the whole process, so repeated calls are cheap.
    """
    client = Together(api_key=API_KEY)
    return client
def process_file(file) -> str:
    """Extract text content from an uploaded file.

    PDFs are flattened to plain text page by page, images are returned as a
    base64-encoded string, and any other file is decoded as UTF-8 text.

    Args:
        file: A Streamlit UploadedFile (exposes ``.type`` and ``.getvalue()``),
            or None.

    Returns:
        The extracted text (base64 string for images), or "" when ``file`` is
        None or processing fails.
    """
    if file is None:
        return ""
    try:
        if file.type == "application/pdf":
            pdf_reader = PdfReader(file)
            # BUG FIX: extract_text() returns None for pages with no text
            # layer; the old ``text += page.extract_text() + "\n"`` raised
            # TypeError on such pages. Fall back to "" and join once.
            return "".join(
                (page.extract_text() or "") + "\n" for page in pdf_reader.pages
            )
        elif file.type.startswith("image/"):
            # NOTE(review): this base64 payload is later spliced into the chat
            # prompt as plain text — the model sees a string, not an image
            # attachment; confirm this is the intended behavior.
            return base64.b64encode(file.getvalue()).decode("utf-8")
        else:
            return file.getvalue().decode('utf-8')
    except Exception as e:
        # Surface the problem in the UI but keep the chat usable.
        st.error(f"파일 처리 쀑 였λ₯˜ λ°œμƒ: {str(e)}")
        return ""
def generate_response(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    files=None
) -> Iterator[str]:
    """Stream an assistant reply from the Together chat API.

    Assembles the message list (optional system prompt, prior turns, then the
    current message with any uploaded-file contents appended) and yields
    response text chunks as they arrive. API failures are yielded as
    user-facing Korean messages rather than raised.
    """
    client = get_client()
    try:
        # Build the chat transcript for the API call.
        conversation = []
        if system_message.strip():
            conversation.append({"role": "system", "content": system_message})
        for user_turn, assistant_turn in history:
            conversation.append({"role": "user", "content": user_turn})
            conversation.append({"role": "assistant", "content": assistant_turn})

        # Fold uploaded-file contents into the current user message, if any.
        user_content = message
        if files:
            attachments = [
                f"파일 λ‚΄μš©:\n{text}"
                for text in (process_file(f) for f in files)
                if text
            ]
            if attachments:
                user_content = user_content + "\n\n" + "\n\n".join(attachments)
        conversation.append({"role": "user", "content": user_content})

        try:
            stream = client.chat.completions.create(
                model="deepseek-ai/DeepSeek-R1",
                messages=conversation,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                stream=True,
            )
            for chunk in stream:
                delta = chunk.choices[0].delta
                if hasattr(delta, 'content') and delta.content:
                    yield delta.content
        except Exception as e:
            if "rate limit" in str(e).lower():
                yield "API 호좜 ν•œλ„μ— λ„λ‹¬ν–ˆμŠ΅λ‹ˆλ‹€. μž μ‹œ ν›„ λ‹€μ‹œ μ‹œλ„ν•΄μ£Όμ„Έμš”."
            else:
                error_message = str(e)
                # Map known Together.ai error responses to friendlier text.
                if "Input validation error" in error_message:
                    yield "μž…λ ₯ ν˜•μ‹μ΄ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€. μ‹œμŠ€ν…œ κ΄€λ¦¬μžμ—κ²Œ λ¬Έμ˜ν•΄μ£Όμ„Έμš”."
                else:
                    yield f"API 호좜 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {error_message}"
    except Exception as e:
        yield f"였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {str(e)}"
def main():
    """Render the Streamlit chat UI and drive one request/response turn per rerun."""
    st.set_page_config(page_title="DeepSeek μ±„νŒ…", page_icon="πŸ’­", layout="wide")
    # Persist the transcript across Streamlit reruns.
    if "messages" not in st.session_state:
        st.session_state.messages = []
    st.title("DeepSeek μ±„νŒ…")
    st.markdown("DeepSeek AI λͺ¨λΈκ³Ό λŒ€ν™”ν•˜μ„Έμš”. ν•„μš”ν•œ 경우 νŒŒμΌμ„ μ—…λ‘œλ“œν•  수 μžˆμŠ΅λ‹ˆλ‹€.")
    with st.sidebar:
        st.header("μ„€μ •")
        system_message = st.text_area(
            "μ‹œμŠ€ν…œ λ©”μ‹œμ§€",
            value="당신은 깊이 있게 μƒκ°ν•˜λŠ” AIμž…λ‹ˆλ‹€. 문제λ₯Ό 깊이 κ³ λ €ν•˜κ³  체계적인 μΆ”λ‘  과정을 톡해 μ˜¬λ°”λ₯Έ 해결책을 λ„μΆœν•˜μ„Έμš”. λ°˜λ“œμ‹œ ν•œκΈ€λ‘œ λ‹΅λ³€ν•˜μ„Έμš”.",
            height=100
        )
        max_tokens = st.slider("μ΅œλŒ€ 토큰 수", 1, 4096, 2048)  # token-limit range
        temperature = st.slider("μ˜¨λ„", 0.0, 2.0, 0.7, 0.1)  # sampling temperature range
        top_p = st.slider("Top-p", 0.0, 1.0, 0.7, 0.1)  # nucleus-sampling range
        # NOTE: despite the singular name, this is a LIST of files
        # (accept_multiple_files=True).
        uploaded_file = st.file_uploader(
            "파일 μ—…λ‘œλ“œ (선택사항)",
            type=['txt', 'py', 'md', 'pdf', 'png', 'jpg', 'jpeg'],
            accept_multiple_files=True
        )
    # Replay the stored transcript on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    if prompt := st.chat_input("무엇을 μ•Œκ³  μ‹ΆμœΌμ‹ κ°€μš”?"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            full_response = ""
            # Pair alternating user/assistant messages into (user, assistant)
            # tuples; the just-appended prompt has no assistant partner yet,
            # so zip drops it — it is passed separately as `prompt` below.
            history = [(msg["content"], next_msg["content"])
                      for msg, next_msg in zip(st.session_state.messages[::2], st.session_state.messages[1::2])]
            # Stream the reply, updating the placeholder with a cursor glyph.
            for response_chunk in generate_response(
                prompt,
                history,
                system_message,
                max_tokens,
                temperature,
                top_p,
                uploaded_file
            ):
                full_response += response_chunk
                response_placeholder.markdown(full_response + "β–Œ")
            response_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
# Script entry point.
if __name__ == "__main__":
    main()