Update app.py
app.py CHANGED
@@ -4,7 +4,6 @@ import openai
 import anthropic
 import os
 
-# Exclude the removed models from the MODELS dict
 MODELS = {
     "Zephyr 7B Beta": "HuggingFaceH4/zephyr-7b-beta",
     "Meta Llama 3.1 8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
@@ -15,7 +14,6 @@ MODELS = {
     "Aya-23-35B": "CohereForAI/aya-23-35B"
 }
 
-# Define the Cohere Command R+ model ID
 COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
 
 def get_client(model_name):
@@ -30,6 +28,7 @@ def get_client(model_name):
         raise ValueError("유효하지 않은 모델 이름입니다.")
     return InferenceClient(model_id, token=hf_token)
 
+
 def respond(
     message,
     chat_history,
@@ -39,12 +38,16 @@ def respond(
     top_p,
     system_message,
 ):
+    """
+    Handles the standard models (Zephyr, Meta Llama, etc.) and Cohere Command R+.
+    """
     try:
         client = get_client(model_name)
     except ValueError as e:
         chat_history.append((message, str(e)))
         return chat_history
 
+    # The history format maintained by Gradio
     messages = [{"role": "system", "content": system_message}]
     for human, assistant in chat_history:
         messages.append({"role": "user", "content": human})
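
Note: the loop above is the usual conversion from Gradio's (user, assistant) tuple history into role-tagged chat messages. A minimal self-contained sketch of that conversion, with hypothetical sample strings:

chat_history = [("Hi", "Hello! How can I help?")]  # Gradio's (user, assistant) tuples
messages = [{"role": "system", "content": "You are a helpful assistant."}]
for human, assistant in chat_history:
    messages.append({"role": "user", "content": human})            # user turn
    messages.append({"role": "assistant", "content": assistant})   # model turn
messages.append({"role": "user", "content": "What's new?"})        # the new message
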
@@ -52,8 +55,8 @@ def respond(
     messages.append({"role": "user", "content": message})
 
     try:
+        # Distinguish Cohere (non-streaming) from the other models (streaming)
         if model_name == "Cohere Command R+":
-            # Non-streaming handling for the Cohere Command R+ model
             response = client.chat_completion(
                 messages,
                 max_tokens=max_tokens,
@@ -64,7 +67,6 @@ def respond(
             chat_history.append((message, assistant_message))
             return chat_history
         else:
-            # Streaming handling for the other models
             stream = client.chat_completion(
                 messages,
                 max_tokens=max_tokens,
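
Note: the else branch streams tokens back. A sketch of how such a stream is typically consumed with huggingface_hub's InferenceClient (assumed dependency of this Space; the model, token, and accumulation variable are illustrative):

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token="hf_...")  # placeholder token
messages = [{"role": "user", "content": "Hello!"}]

partial = ""
for chunk in client.chat_completion(messages, max_tokens=256, stream=True):
    delta = chunk.choices[0].delta.content
    if delta:
        partial += delta  # accumulate, then yield the updated history to Gradio
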
@@ -86,6 +88,7 @@ def respond(
         chat_history.append((message, error_message))
         yield chat_history
 
+
 def cohere_respond(
     message,
     chat_history,
@@ -94,6 +97,9 @@ def cohere_respond(
     temperature,
     top_p,
 ):
+    """
+    Response function dedicated to Cohere Command R+.
+    """
     model_name = "Cohere Command R+"
     try:
         client = get_client(model_name)
@@ -101,7 +107,11 @@ def cohere_respond(
         chat_history.append((message, str(e)))
         return chat_history
 
-    messages = [{"role": "system", "content": system_message}]
+    messages = []
+    # It is fine to apply system_message as below rather than adding it
+    # inside the initial list; this keeps the existing structure
+    messages.append({"role": "system", "content": system_message})
+
     for human, assistant in chat_history:
         if human:
             messages.append({"role": "user", "content": human})
@@ -111,7 +121,6 @@ def cohere_respond(
     messages.append({"role": "user", "content": message})
 
     try:
-        # Non-streaming handling for the Cohere Command R+ model
         response_full = client.chat_completion(
             messages,
             max_tokens=max_tokens,
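
Note: the non-streaming Cohere path reads the reply from the completed response instead of from deltas. A sketch under the same InferenceClient assumption (model and token are placeholders):

from huggingface_hub import InferenceClient

client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token="hf_...")  # placeholder token
messages = [{"role": "user", "content": "Hello!"}]

response_full = client.chat_completion(messages, max_tokens=256)  # stream defaults to False
assistant_message = response_full.choices[0].message.content
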
@@ -126,6 +135,7 @@ def cohere_respond(
         chat_history.append((message, error_message))
         return chat_history
 
+
 def chatgpt_respond(
     message,
     chat_history,
@@ -134,11 +144,15 @@ def chatgpt_respond(
     temperature,
     top_p,
 ):
+    """
+    Response function dedicated to ChatGPT (OpenAI).
+    """
     openai.api_key = os.getenv("OPENAI_API_KEY")
     if not openai.api_key:
         chat_history.append((message, "OPENAI_API_KEY 환경 변수가 필요합니다."))
         return chat_history
 
+    # ChatGPT accepts the system role inside messages, so the current approach is fine
     messages = [{"role": "system", "content": system_message}]
     for human, assistant in chat_history:
         messages.append({"role": "user", "content": human})
@@ -147,7 +161,7 @@ def chatgpt_respond(
 
     try:
         response = openai.ChatCompletion.create(
-            model="gpt-4",  #
+            model="gpt-4",  # use an appropriate model ID
             messages=messages,
             max_tokens=max_tokens,
             temperature=temperature,
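
Note: openai.ChatCompletion.create is the legacy pre-1.0 openai-python API, so this file assumes openai<1.0. On openai>=1.0 the equivalent call would look roughly like this sketch (parameter values illustrative):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=256,
    temperature=0.7,
    top_p=0.95,
)
assistant_message = response.choices[0].message.content

The diff keeps the legacy call, so this only matters if the Space's openai pin is ever bumped.
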
@@ -161,6 +175,7 @@ def chatgpt_respond(
         chat_history.append((message, error_message))
         return chat_history
 
+
 def claude_respond(
     message,
     chat_history,
@@ -169,33 +184,39 @@ def claude_respond(
     temperature,
     top_p,
 ):
+    """
+    Response function dedicated to Anthropic Claude (Messages API).
+    - Do not put the "system" role in the messages list;
+      it must be passed as the top-level system argument.
+    """
     anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
     if not anthropic_api_key:
         chat_history.append((message, "ANTHROPIC_API_KEY 환경 변수가 필요합니다."))
         return chat_history
 
-    client = anthropic.Anthropic(
-        api_key=anthropic_api_key,
-    )
+    client = anthropic.Anthropic(api_key=anthropic_api_key)
 
-    #
-    messages = [{"role": "system", "content": system_message}]
+    # Per the Anthropic Messages API, the "system" role goes in a top-level
+    # parameter; only user/assistant turns belong in messages
+    # (a "system" role inside messages raises an error)
+    anthro_messages = []
     for human, assistant in chat_history:
         if human:
-            messages.append({"role": "user", "content": human})
+            anthro_messages.append({"role": "user", "content": human})
         if assistant:
-            messages.append({"role": "assistant", "content": assistant})
-    messages.append({"role": "user", "content": message})
+            anthro_messages.append({"role": "assistant", "content": assistant})
+    anthro_messages.append({"role": "user", "content": message})
 
     try:
         response = client.messages.create(
-            model="claude-3-haiku-20240307",  #
-            max_tokens=max_tokens,
+            model="claude-3-haiku-20240307",  # example Claude model (check availability)
+            system=system_message,  # pass system_message here
+            messages=anthro_messages,  # user/assistant turns only
+            max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
-            messages=messages,
        )
-        assistant_message = response["completion"]
+        assistant_message = response.content[0].text
         chat_history.append((message, assistant_message))
         return chat_history
     except Exception as e:
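
Note: as a standalone check of the call shape the new docstring describes, a minimal Messages API sketch against the anthropic SDK (model and prompt are illustrative):

import anthropic

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
response = client.messages.create(
    model="claude-3-haiku-20240307",
    system="You are a helpful assistant.",             # system prompt is a top-level argument
    messages=[{"role": "user", "content": "Hello!"}],  # user/assistant turns only
    max_tokens=256,
)
assistant_message = response.content[0].text  # reply text lives in content blocks
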
@@ -203,13 +224,15 @@ def claude_respond(
         chat_history.append((message, error_message))
         return chat_history
 
+
 def clear_conversation():
     return []
 
+
 with gr.Blocks() as demo:
     gr.Markdown("# Prompting AI Chatbot")
     gr.Markdown("언어모델별 프롬프트 테스트 챗봇입니다.")
-
+
     with gr.Tab("일반 모델"):
         with gr.Row():
             with gr.Column(scale=1):
@@ -240,7 +263,7 @@ with gr.Blocks() as demo:
         msg.submit(respond, [msg, chatbot, model_name, max_tokens, temperature, top_p, system_message], chatbot)
         submit_button.click(respond, [msg, chatbot, model_name, max_tokens, temperature, top_p, system_message], chatbot)
         clear_button.click(clear_conversation, outputs=chatbot, queue=False)
-
+
     with gr.Tab("Cohere Command R+"):
         with gr.Row():
             cohere_system_message = gr.Textbox(
@@ -260,7 +283,7 @@
                 step=0.05,
                 label="Top-P",
             )
-
+
         cohere_chatbot = gr.Chatbot(height=600)
         cohere_msg = gr.Textbox(label="메세지를 입력하세요")
         with gr.Row():
@@ -278,7 +301,7 @@
             cohere_chatbot
         )
         cohere_clear_button.click(clear_conversation, outputs=cohere_chatbot, queue=False)
-
+
     with gr.Tab("ChatGPT"):
         with gr.Row():
             chatgpt_system_message = gr.Textbox(
@@ -298,7 +321,7 @@
                 step=0.05,
                 label="Top-P",
             )
-
+
         chatgpt_chatbot = gr.Chatbot(height=600)
         chatgpt_msg = gr.Textbox(label="메세지를 입력하세요")
         with gr.Row():
@@ -316,7 +339,7 @@
             chatgpt_chatbot
         )
         chatgpt_clear_button.click(clear_conversation, outputs=chatgpt_chatbot, queue=False)
-
+
     with gr.Tab("Claude"):
         with gr.Row():
             claude_system_message = gr.Textbox(
@@ -336,13 +359,14 @@
                 step=0.05,
                 label="Top-P",
             )
-
+
         claude_chatbot = gr.Chatbot(height=600)
        claude_msg = gr.Textbox(label="메세지를 입력하세요")
         with gr.Row():
             claude_submit_button = gr.Button("전송")
             claude_clear_button = gr.Button("대화 내역 지우기")
-
+
+        # Wire up the Claude-specific function
         claude_msg.submit(
             claude_respond,
             [claude_msg, claude_chatbot, claude_system_message, claude_max_tokens, claude_temperature, claude_top_p],
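
Note: every tab repeats the same wiring: a Chatbot, a Textbox, submit/click events bound to that tab's respond function, and a clear button. A minimal sketch of the pattern with a hypothetical echo handler:

import gradio as gr

def echo_respond(message, chat_history):
    chat_history.append((message, f"echo: {message}"))
    return chat_history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=600)
    msg = gr.Textbox(label="메세지를 입력하세요")
    with gr.Row():
        submit_button = gr.Button("전송")
        clear_button = gr.Button("대화 내역 지우기")

    msg.submit(echo_respond, [msg, chatbot], chatbot)
    submit_button.click(echo_respond, [msg, chatbot], chatbot)
    clear_button.click(lambda: [], outputs=chatbot, queue=False)

demo.launch()

The real handlers differ only in which respond function and which slider components they pass as inputs.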