fantos committed on
Commit
915a848
·
verified ·
1 Parent(s): 5920dc1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -126
app.py CHANGED
@@ -1,26 +1,19 @@
1
  import streamlit as st
2
- from huggingface_hub import InferenceClient
3
  import os
4
  from typing import Iterator
5
  from PIL import Image
6
- import pytesseract
7
- from PyPDF2 import PdfReader
8
  import base64
9
- from together import Together
10
 
11
  API_KEY = os.getenv("TOGETHER_API_KEY")
12
  if not API_KEY:
13
  raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the Secrets.")
14
 
15
-
16
- # Initialize the client with Together AI provider
17
  @st.cache_resource
18
  def get_client():
19
- #return InferenceClient(
20
- # provider="together",
21
- # api_key=API_KEY
22
- #)
23
- return Together(api_key=API_KEY) # Use Together.ai's official client
24
 
25
  def process_file(file) -> str:
26
  """Process uploaded file and return its content"""
@@ -28,25 +21,19 @@ def process_file(file) -> str:
28
  return ""
29
 
30
  try:
31
- # Handle PDF files
32
  if file.type == "application/pdf":
33
  text = ""
34
  pdf_reader = PdfReader(file)
35
  for page in pdf_reader.pages:
36
- page_text = page.extract_text()
37
- if page_text:
38
- text += page_text + "\n"
39
  return text
40
-
41
- # Handle image files
42
  elif file.type.startswith("image/"):
43
  return base64.b64encode(file.getvalue()).decode("utf-8")
44
-
45
- # Handle text files
46
  else:
47
  return file.getvalue().decode('utf-8')
48
  except Exception as e:
49
- return f"Error processing file: {str(e)}"
 
50
 
51
  def generate_response(
52
  message: str,
@@ -58,63 +45,31 @@ def generate_response(
58
  files=None
59
  ) -> Iterator[str]:
60
  client = get_client()
61
-
62
- has_images = False
63
- content_blocks = []
64
- image_content = None # To store image data
65
- image_mime_type = None # To store MIME type
66
-
67
- if files:
68
- for file in files:
69
- content = process_file(file)
70
- if file.type.startswith("image/"):
71
- has_images = True
72
- image_content = content # Already base64 encoded
73
- image_mime_type = file.type # Store MIME type
74
- else:
75
- content_blocks.append({
76
- "type": "text",
77
- "text": f"File content:\n{content}"
78
- })
79
-
80
- # Build messages
81
- messages = [{"role": "system", "content": system_message}]
82
-
83
- # Add history
84
- for user_msg, assistant_msg in history:
85
- messages.append({"role": "user", "content": user_msg})
86
- messages.append({"role": "assistant", "content": assistant_msg})
87
-
88
  try:
89
- if has_images:
90
- # Vision model request
91
- vision_messages = [{
92
- "role": "user",
93
- "content": [
94
- {"type": "text", "text": message},
95
- {
96
- "type": "image_url",
97
- "image_url": {
98
- "url": f"data:{image_mime_type};base64,{image_content}",
99
- },
100
- },
101
- ]
102
- }]
103
-
104
- stream = client.chat.completions.create(
105
- model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
106
- messages=vision_messages,
107
- stream=True,
108
- )
109
-
110
- else:
111
- # Text-only model request
112
- current_message = {
113
- "role": "user",
114
- "content": [{"type": "text", "text": message}] + content_blocks
115
- }
116
- messages.append(current_message)
117
-
118
  stream = client.chat.completions.create(
119
  model="deepseek-ai/DeepSeek-R1",
120
  messages=messages,
@@ -123,84 +78,61 @@ def generate_response(
123
  top_p=top_p,
124
  stream=True
125
  )
126
-
127
- # Stream response
128
- for chunk in stream:
129
- if chunk.choices and chunk.choices[0].delta.content:
130
- yield chunk.choices[0].delta.content
131
-
 
 
 
 
 
132
  except Exception as e:
133
- yield f"Error: {str(e)}"
 
134
  def main():
135
- st.set_page_config(page_title="DeepSeek Chat", page_icon="πŸ’­", layout="wide")
136
 
137
- # Initialize session state for chat history
138
  if "messages" not in st.session_state:
139
  st.session_state.messages = []
140
 
141
- st.title("DeepSeek/Llama Vision Chat with File Upload")
142
- st.markdown("Chat with DeepSeek AI model. You can optionally upload files for the model to analyze.")
143
- st.markdown("Feel free to upload images too, in this case Llama Vision will be used")
144
 
145
- # Sidebar for parameters
146
  with st.sidebar:
147
- st.header("Settings")
148
  system_message = st.text_area(
149
- "System Message",
150
- value="You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. λ°˜λ“œμ‹œ ν•œκΈ€λ‘œ λ‹΅λ³€ν•˜λΌ.",
151
  height=100
152
  )
153
- max_tokens = st.slider(
154
- "Max Tokens",
155
- min_value=1,
156
- max_value=8192,
157
- value=8192,
158
- step=1
159
- )
160
- temperature = st.slider(
161
- "Temperature",
162
- min_value=0.1,
163
- max_value=4.0,
164
- value=0.0,
165
- step=0.1
166
- )
167
- top_p = st.slider(
168
- "Top-p (nucleus sampling)",
169
- min_value=0.1,
170
- max_value=1.0,
171
- value=0.95,
172
- step=0.05
173
- )
174
  uploaded_file = st.file_uploader(
175
- "Upload File (optional)",
176
- type=['txt', 'py', 'md', 'swift', 'java', 'js', 'ts', 'rb', 'go',
177
- 'php', 'c', 'cpp', 'h', 'hpp', 'cs', 'html', 'css', 'kt', 'svelte',
178
- 'pdf', 'png', 'jpg', 'jpeg'], # Added file types
179
  accept_multiple_files=True
180
  )
181
 
182
- # Display chat messages
183
  for message in st.session_state.messages:
184
  with st.chat_message(message["role"]):
185
  st.write(message["content"])
186
 
187
- # Chat input
188
- if prompt := st.chat_input("What would you like to know?"):
189
- # Display user message
190
  st.session_state.messages.append({"role": "user", "content": prompt})
191
  with st.chat_message("user"):
192
  st.write(prompt)
193
 
194
- # Generate and display assistant response
195
  with st.chat_message("assistant"):
196
  response_placeholder = st.empty()
197
  full_response = ""
198
 
199
- # Get message history for context
200
  history = [(msg["content"], next_msg["content"])
201
  for msg, next_msg in zip(st.session_state.messages[::2], st.session_state.messages[1::2])]
202
 
203
- # Stream the response
204
  for response_chunk in generate_response(
205
  prompt,
206
  history,
@@ -211,12 +143,10 @@ def main():
211
  uploaded_file
212
  ):
213
  full_response += response_chunk
214
- print(full_response)
215
  response_placeholder.markdown(full_response + "β–Œ")
216
 
217
  response_placeholder.markdown(full_response)
218
 
219
- # Add assistant response to chat history
220
  st.session_state.messages.append({"role": "assistant", "content": full_response})
221
 
222
  if __name__ == "__main__":
 
1
  import streamlit as st
2
+ from together import Together
3
  import os
4
  from typing import Iterator
5
  from PIL import Image
 
 
6
  import base64
7
+ from PyPDF2 import PdfReader
8
 
9
  API_KEY = os.getenv("TOGETHER_API_KEY")
10
  if not API_KEY:
11
  raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the Secrets.")
12
 
13
+ # Initialize the Together client
 
14
  @st.cache_resource
15
  def get_client():
16
+ return Together(api_key=API_KEY)
 
 
 
 
17
 
18
  def process_file(file) -> str:
19
  """Process uploaded file and return its content"""
 
21
  return ""
22
 
23
  try:
 
24
  if file.type == "application/pdf":
25
  text = ""
26
  pdf_reader = PdfReader(file)
27
  for page in pdf_reader.pages:
28
+ text += page.extract_text() + "\n"
 
 
29
  return text
 
 
30
  elif file.type.startswith("image/"):
31
  return base64.b64encode(file.getvalue()).decode("utf-8")
 
 
32
  else:
33
  return file.getvalue().decode('utf-8')
34
  except Exception as e:
35
+ st.error(f"파일 처리 쀑 였λ₯˜ λ°œμƒ: {str(e)}")
36
+ return ""
37
 
38
  def generate_response(
39
  message: str,
 
45
  files=None
46
  ) -> Iterator[str]:
47
  client = get_client()
48
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  try:
50
+ # λ©”μ‹œμ§€ ν˜•μ‹ μˆ˜μ •
51
+ messages = [{"role": "system", "content": system_message}]
52
+
53
+ # νžˆμŠ€ν† λ¦¬ μΆ”κ°€
54
+ for user_msg, assistant_msg in history:
55
+ messages.append({"role": "user", "content": user_msg})
56
+ messages.append({"role": "assistant", "content": assistant_msg})
57
+
58
+ # ν˜„μž¬ λ©”μ‹œμ§€μ™€ 파일 λ‚΄μš© μΆ”κ°€
59
+ current_content = message
60
+ if files:
61
+ file_contents = []
62
+ for file in files:
63
+ content = process_file(file)
64
+ if content:
65
+ file_contents.append(f"파일 λ‚΄μš©:\n{content}")
66
+ if file_contents:
67
+ current_content = current_content + "\n\n" + "\n\n".join(file_contents)
68
+
69
+ messages.append({"role": "user", "content": current_content})
70
+
71
+ # API 호좜 μ‹œλ„
72
+ try:
 
 
 
 
 
 
73
  stream = client.chat.completions.create(
74
  model="deepseek-ai/DeepSeek-R1",
75
  messages=messages,
 
78
  top_p=top_p,
79
  stream=True
80
  )
81
+
82
+ for chunk in stream:
83
+ if chunk.choices and chunk.choices[0].delta.content:
84
+ yield chunk.choices[0].delta.content
85
+
86
+ except Exception as e:
87
+ if "rate limit" in str(e).lower():
88
+ yield "μ£„μ†‘ν•©λ‹ˆλ‹€. API 호좜 ν•œλ„μ— λ„λ‹¬ν–ˆμŠ΅λ‹ˆλ‹€. μž μ‹œ ν›„ λ‹€μ‹œ μ‹œλ„ν•΄μ£Όμ„Έμš”."
89
+ else:
90
+ yield f"API 호좜 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {str(e)}"
91
+
92
  except Exception as e:
93
+ yield f"였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {str(e)}"
94
+
95
  def main():
96
+ st.set_page_config(page_title="DeepSeek μ±„νŒ…", page_icon="πŸ’­", layout="wide")
97
 
 
98
  if "messages" not in st.session_state:
99
  st.session_state.messages = []
100
 
101
+ st.title("DeepSeek μ±„νŒ…")
102
+ st.markdown("DeepSeek AI λͺ¨λΈκ³Ό λŒ€ν™”ν•˜μ„Έμš”. ν•„μš”ν•œ 경우 νŒŒμΌμ„ μ—…λ‘œλ“œν•  수 μžˆμŠ΅λ‹ˆλ‹€.")
 
103
 
 
104
  with st.sidebar:
105
+ st.header("μ„€μ •")
106
  system_message = st.text_area(
107
+ "μ‹œμŠ€ν…œ λ©”μ‹œμ§€",
108
+ value="당신은 깊이 있게 μƒκ°ν•˜λŠ” AIμž…λ‹ˆλ‹€. 문제λ₯Ό 깊이 κ³ λ €ν•˜κ³  체계적인 μΆ”λ‘  과정을 톡해 μ˜¬λ°”λ₯Έ 해결책을 λ„μΆœν•˜κΈ° μœ„ν•΄ 맀우 κΈ΄ 사고 체인을 μ‚¬μš©ν•  수 μžˆμŠ΅λ‹ˆλ‹€. λ°˜λ“œμ‹œ ν•œκΈ€λ‘œ λ‹΅λ³€ν•˜μ„Έμš”.",
109
  height=100
110
  )
111
+ max_tokens = st.slider("μ΅œλŒ€ 토큰 수", 1, 8192, 8192)
112
+ temperature = st.slider("μ˜¨λ„", 0.1, 4.0, 0.0, 0.1)
113
+ top_p = st.slider("Top-p", 0.1, 1.0, 0.95, 0.05)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  uploaded_file = st.file_uploader(
115
+ "파일 μ—…λ‘œλ“œ (선택사항)",
116
+ type=['txt', 'py', 'md', 'pdf', 'png', 'jpg', 'jpeg'],
 
 
117
  accept_multiple_files=True
118
  )
119
 
 
120
  for message in st.session_state.messages:
121
  with st.chat_message(message["role"]):
122
  st.write(message["content"])
123
 
124
+ if prompt := st.chat_input("무엇을 μ•Œκ³  μ‹ΆμœΌμ‹ κ°€μš”?"):
 
 
125
  st.session_state.messages.append({"role": "user", "content": prompt})
126
  with st.chat_message("user"):
127
  st.write(prompt)
128
 
 
129
  with st.chat_message("assistant"):
130
  response_placeholder = st.empty()
131
  full_response = ""
132
 
 
133
  history = [(msg["content"], next_msg["content"])
134
  for msg, next_msg in zip(st.session_state.messages[::2], st.session_state.messages[1::2])]
135
 
 
136
  for response_chunk in generate_response(
137
  prompt,
138
  history,
 
143
  uploaded_file
144
  ):
145
  full_response += response_chunk
 
146
  response_placeholder.markdown(full_response + "β–Œ")
147
 
148
  response_placeholder.markdown(full_response)
149
 
 
150
  st.session_state.messages.append({"role": "assistant", "content": full_response})
151
 
152
  if __name__ == "__main__":