Update app.py
app.py (CHANGED)
@@ -151,6 +151,7 @@ def deepseek_respond(
     message,
     chat_history,
     system_message,
+    deepseek_model_choice,
     max_tokens,
     temperature,
     top_p,
@@ -158,11 +159,14 @@ def deepseek_respond(
     """
     DeepSeek model response function.
     The DeepSeek token is read inside the function via os.environ.
+    Chooses deepseek-chat or deepseek-reasoner based on deepseek_model_choice
+    and streams the response back.
     """
     deepseek_token = os.environ.get("DEEPSEEK_TOKEN")
     if not deepseek_token:
         chat_history.append((message, "DeepSeek API 토큰이 필요합니다. (환경변수 DEEPSEEK_TOKEN 미설정)"))
-
+        yield chat_history
+        return

     openai.api_key = deepseek_token
     openai.api_base = "https://api.deepseek.com/v1"
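Because the handler now contains `yield`, Python treats `deepseek_respond` as a generator: the missing-token branch has to yield the updated history and then exit with a bare `return`, since returning a value the old way would never reach the Gradio chatbot. A minimal standalone sketch of that pattern (not part of the commit, with a stubbed-out token lookup):

# Minimal sketch: generator-style error path.
# A bare `return` simply ends the generator after the error state is yielded.
def respond_stub(message, chat_history):
    token = None  # stands in for os.environ.get("DEEPSEEK_TOKEN") being unset
    if not token:
        chat_history.append((message, "missing token"))
        yield chat_history  # push the error message to the UI
        return              # stop; no further chunks will follow

history = []
for state in respond_stub("hi", history):
    print(state)  # [('hi', 'missing token')]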
@@ -173,21 +177,36 @@ def deepseek_respond(
     messages.append({"role": "assistant", "content": assistant})
     messages.append({"role": "user", "content": message})

+    # Model selection: default is deepseek-chat
+    if deepseek_model_choice == "R1(deepseek-reasoner)":
+        model = "deepseek-reasoner"
+    else:
+        model = "deepseek-chat"
+
     try:
         response = openai.ChatCompletion.create(
-            model=
+            model=model,
             messages=messages,
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
+            stream=True
         )
-        assistant_message =
+        assistant_message = ""
+        # Append a new chat entry and keep updating it while streaming
         chat_history.append((message, assistant_message))
-
+        yield chat_history
+        for chunk in response:
+            delta = chunk.choices[0].delta.get("content", "")
+            assistant_message += delta
+            chat_history[-1] = (message, assistant_message)
+            yield chat_history
+        return
     except Exception as e:
         error_message = f"오류가 발생했습니다: {str(e)}"
         chat_history.append((message, error_message))
-
+        yield chat_history
+        return

 def clear_conversation():
     return []
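The hunk above streams through the legacy openai 0.x interface (`openai.api_base`, `openai.ChatCompletion.create`, dict-style `delta`). For reference, here is a hedged sketch of the same streaming loop against the 1.x client; the `OpenAI(base_url=...)` usage is the 1.x API, the DeepSeek endpoint and parameters are taken from the diff, and none of this is part of the commit itself:

# Hypothetical equivalent of the streaming call using openai>=1.0
# (the commit itself uses the legacy openai.ChatCompletion API).
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ["DEEPSEEK_TOKEN"],
    base_url="https://api.deepseek.com/v1",
)

def stream_reply(messages, model="deepseek-chat",
                 max_tokens=2000, temperature=0.7, top_p=0.9):
    """Yield the assistant text as it grows, one chunk at a time."""
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    )
    text = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content or ""  # delta.content can be None
        text += delta
        yield text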
@@ -331,6 +350,11 @@ with gr.Blocks() as demo:
         label="System Message",
         lines=3
     )
+    deepseek_model_choice = gr.Radio(
+        choices=["V3(deepseek-chat)", "R1(deepseek-reasoner)"],
+        value="V3(deepseek-chat)",
+        label="모델 선택"
+    )
     deepseek_max_tokens = gr.Slider(minimum=100, maximum=8000, value=2000, step=100, label="Max Tokens")
     deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
     deepseek_top_p = gr.Slider(
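The radio labels double as lookup keys inside `deepseek_respond` (the `== "R1(deepseek-reasoner)"` comparison in the earlier hunk), so renaming a label would silently fall back to deepseek-chat. One way to keep the two in sync, sketched here as a hypothetical refactor rather than something in the commit, is a single label-to-model mapping:

# Hypothetical refactor: derive both the Radio choices and the resolved
# model id from one mapping.
import gradio as gr

MODEL_CHOICES = {
    "V3(deepseek-chat)": "deepseek-chat",
    "R1(deepseek-reasoner)": "deepseek-reasoner",
}

with gr.Blocks() as demo:
    deepseek_model_choice = gr.Radio(
        choices=list(MODEL_CHOICES),
        value="V3(deepseek-chat)",
        label="모델 선택",
    )

# Inside deepseek_respond, the if/else chain then collapses to:
# model = MODEL_CHOICES.get(deepseek_model_choice, "deepseek-chat")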
@@ -351,12 +375,13 @@
         deepseek_msg,
         deepseek_chatbot,
         deepseek_system_message,
+        deepseek_model_choice,
         deepseek_max_tokens,
         deepseek_temperature,
         deepseek_top_p
     ]
-    deepseek_msg.submit(deepseek_respond, inputs_for_deepseek, deepseek_chatbot)
-    deepseek_submit_button.click(deepseek_respond, inputs_for_deepseek, deepseek_chatbot)
+    deepseek_msg.submit(deepseek_respond, inputs_for_deepseek, deepseek_chatbot, stream=True)
+    deepseek_submit_button.click(deepseek_respond, inputs_for_deepseek, deepseek_chatbot, stream=True)
     deepseek_clear_button.click(clear_conversation, outputs=deepseek_chatbot, queue=False)

 if __name__ == "__main__":
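One caveat on the last two added lines: as far as I know, Gradio's `.submit()` and `.click()` event listeners do not define a `stream` keyword, so passing `stream=True` would likely raise a TypeError. Streaming already happens because `deepseek_respond` is a generator that yields the chatbot state. A sketch of the same wiring without the extra argument, assuming the surrounding `gr.Blocks` context from the app:

# Sketch: generator handlers stream on their own in Gradio, so the event
# wiring should not need a stream=True keyword argument.
deepseek_msg.submit(deepseek_respond, inputs_for_deepseek, deepseek_chatbot)
deepseek_submit_button.click(deepseek_respond, inputs_for_deepseek, deepseek_chatbot)
deepseek_clear_button.click(clear_conversation, outputs=deepseek_chatbot, queue=False)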