Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 import openai
+import anthropic
 import os

 # Exclude the removed models from the MODELS dict
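The new import implies two deployment details the diff itself does not show: the anthropic package has to be available in the Space (for example via requirements.txt), and the handler added further down reads ANTHROPIC_API_KEY from the environment, so it should be configured as a Space secret. A minimal startup check, offered as a sketch rather than as part of this commit:

# Sketch only, not part of this commit: warn at startup if the key the new
# Claude handler depends on is missing, instead of only surfacing the error
# inside the chat tab.
import os

if not os.getenv("ANTHROPIC_API_KEY"):
    print("Warning: ANTHROPIC_API_KEY is not set; the Claude tab will only return an error message.")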
@@ -109,8 +110,6 @@ def cohere_respond(

     messages.append({"role": "user", "content": message})

-    response = ""
-
     try:
         # Non-streaming handling for the Cohere Command R+ model
         response_full = client.chat_completion(
@@ -162,6 +161,53 @@ def chatgpt_respond(
         chat_history.append((message, error_message))
         return chat_history

+def claude_respond(
+    message,
+    chat_history,
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not anthropic_api_key:
+        chat_history.append((message, "ANTHROPIC_API_KEY 환경 변수가 필요합니다."))
+        return chat_history
+
+    client = anthropic.Anthropic(
+        api_key=anthropic_api_key,
+    )
+
+    messages = [
+        {"role": "system", "content": system_message}
+    ]
+
+    for human, assistant in chat_history:
+        if human:
+            messages.append({"role": "user", "content": human})
+        if assistant:
+            messages.append({"role": "assistant", "content": assistant})
+
+    messages.append({"role": "user", "content": message})
+
+    try:
+        response = client.completions.create(
+            model="claude-3-haiku-20240307",
+            max_tokens_to_sample=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            messages=[
+                {"role": "user", "content": message}
+            ],
+        )
+        assistant_message = response.completion
+        chat_history.append((message, assistant_message))
+        return chat_history
+    except Exception as e:
+        error_message = f"오류가 발생했습니다: {str(e)}"
+        chat_history.append((message, error_message))
+        return chat_history
+
 def clear_conversation():
     return []

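A note on the added claude_respond (this note is not part of the diff): the function assembles a messages list containing a "system" entry and the prior chat history, but the API call never uses it. client.completions.create belongs to Anthropic's legacy Completions API, which expects a prompt string and max_tokens_to_sample, while messages= and the claude-3-haiku-20240307 model belong to the newer Messages API, so as written the call will most likely raise and the handler will return its error string ("오류가 발생했습니다" means "an error occurred"). Below is a sketch of the same handler written against the Messages API, keeping the names and Korean strings from the diff; client.messages.create, the system= parameter, and response.content[0].text reflect the current anthropic SDK, and the os/anthropic imports at the top of app.py are assumed.

# Sketch, not part of this commit: claude_respond rewritten against the
# Anthropic Messages API so that the system prompt and chat history are
# actually sent to the model.
def claude_respond(message, chat_history, system_message, max_tokens, temperature, top_p):
    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
    if not anthropic_api_key:
        chat_history.append((message, "ANTHROPIC_API_KEY 환경 변수가 필요합니다."))
        return chat_history

    client = anthropic.Anthropic(api_key=anthropic_api_key)

    # The Messages API takes the system prompt as a separate `system` argument,
    # not as a {"role": "system"} entry inside `messages`.
    messages = []
    for human, assistant in chat_history:
        if human:
            messages.append({"role": "user", "content": human})
        if assistant:
            messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    try:
        response = client.messages.create(
            model="claude-3-haiku-20240307",
            max_tokens=int(max_tokens),  # Gradio sliders may deliver floats
            temperature=temperature,
            top_p=top_p,
            system=system_message,
            messages=messages,
        )
        assistant_message = response.content[0].text
        chat_history.append((message, assistant_message))
    except Exception as e:
        chat_history.append((message, f"오류가 발생했습니다: {str(e)}"))
    return chat_history

One further caveat: the Temperature slider in the new tab goes up to 2.0, but Anthropic documents temperature as a 0 to 1 range, so values above 1 may be rejected by the API.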
@@ -275,6 +321,44 @@ with gr.Blocks() as demo:
             chatgpt_chatbot
         )
         chatgpt_clear_button.click(clear_conversation, outputs=chatgpt_chatbot, queue=False)
+
+    with gr.Tab("Claude"):
+        with gr.Row():
+            claude_system_message = gr.Textbox(
+                value="""반드시 한글로 답변할 것.
+너는 Claude, Anthropic에서 개발한 언어 모델이다.
+내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
+""",
+                label="System Message",
+                lines=3
+            )
+            claude_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max Tokens")
+            claude_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
+            claude_top_p = gr.Slider(
+                minimum=0.1,
+                maximum=1.0,
+                value=0.95,
+                step=0.05,
+                label="Top-P",
+            )
+
+        claude_chatbot = gr.Chatbot(height=600)
+        claude_msg = gr.Textbox(label="메세지를 입력하세요")
+        with gr.Row():
+            claude_submit_button = gr.Button("전송")
+            claude_clear_button = gr.Button("대화 내역 지우기")
+
+        claude_msg.submit(
+            claude_respond,
+            [claude_msg, claude_chatbot, claude_system_message, claude_max_tokens, claude_temperature, claude_top_p],
+            claude_chatbot
+        )
+        claude_submit_button.click(
+            claude_respond,
+            [claude_msg, claude_chatbot, claude_system_message, claude_max_tokens, claude_temperature, claude_top_p],
+            claude_chatbot
+        )
+        claude_clear_button.click(clear_conversation, outputs=claude_chatbot, queue=False)

 if __name__ == "__main__":
     demo.launch()
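The new tab wires claude_msg.submit and claude_submit_button.click to claude_respond with the six inputs in the same positional order as the function's parameters, mirroring the existing ChatGPT tab. A hypothetical way to exercise the handler outside the UI (not part of the commit; on failure claude_respond appends an error string to the history rather than raising):

# Hypothetical smoke test, not part of the commit: call the new handler
# directly with an empty history. It returns the updated list of
# (message, reply) pairs, so the print below shows either Claude's reply
# or the error string the handler appended.
history = claude_respond(
    "안녕하세요",                # message
    [],                          # chat_history
    "반드시 한글로 답변할 것.",  # system_message (first line of the tab's default)
    512,                         # max_tokens
    0.7,                         # temperature
    0.95,                        # top_p
)
print(history[-1][1])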