Update app.py
app.py CHANGED
@@ -1,10 +1,9 @@
-###################### (start of original code) ######################
 import gradio as gr
 from huggingface_hub import InferenceClient
 import openai
-import anthropic
 import os
 
+# Models to remove are excluded from the MODELS dict
 MODELS = {
     "Zephyr 7B Beta": "HuggingFaceH4/zephyr-7b-beta",
     "Meta Llama 3.1 8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
@@ -15,6 +14,7 @@ MODELS = {
     "Aya-23-35B": "CohereForAI/aya-23-35B"
 }
 
+# Define the Cohere Command R+ model ID
 COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
 
 def get_client(model_name):
@@ -29,7 +29,6 @@ def get_client(model_name):
         raise ValueError("유효하지 않은 모델 이름입니다.")
     return InferenceClient(model_id, token=hf_token)
 
-
 def respond(
     message,
     chat_history,
@@ -39,16 +38,12 @@ def respond(
     top_p,
     system_message,
 ):
-    """
-    Function handling the existing models (Zephyr, Meta Llama, etc.) and the Cohere Command R+ model
-    """
     try:
         client = get_client(model_name)
     except ValueError as e:
         chat_history.append((message, str(e)))
         return chat_history
 
-    # The message format previously maintained in Gradio
     messages = [{"role": "system", "content": system_message}]
     for human, assistant in chat_history:
         messages.append({"role": "user", "content": human})
@@ -56,8 +51,8 @@ def respond(
     messages.append({"role": "user", "content": message})
 
     try:
-        # Distinguish the Cohere model (non-streaming) from the other models (streaming)
         if model_name == "Cohere Command R+":
+            # Non-streaming handling for the Cohere Command R+ model
             response = client.chat_completion(
                 messages,
                 max_tokens=max_tokens,
@@ -68,6 +63,7 @@
             chat_history.append((message, assistant_message))
             return chat_history
         else:
+            # Streaming handling for the other models
             stream = client.chat_completion(
                 messages,
                 max_tokens=max_tokens,
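Note: the loop that consumes the stream also falls outside the context lines. With huggingface_hub's InferenceClient, a `stream=True` chat_completion yields chunks whose text is at `chunk.choices[0].delta.content`, so the `yield chat_history` lines visible in the next hunk imply a loop roughly like this sketch (variable names assumed):

```python
# Minimal sketch of the streaming branch; respond() is a generator,
# so Gradio re-renders the chatbot on every yield.
stream = client.chat_completion(
    messages,
    max_tokens=max_tokens,
    temperature=temperature,
    top_p=top_p,
    stream=True,
)
assistant_message = ""
chat_history.append((message, assistant_message))
for chunk in stream:
    token = chunk.choices[0].delta.content or ""
    assistant_message += token
    chat_history[-1] = (message, assistant_message)
    yield chat_history
```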
@@ -89,7 +85,6 @@
         chat_history.append((message, error_message))
         yield chat_history
 
-
 def cohere_respond(
     message,
     chat_history,
@@ -98,9 +93,6 @@ def cohere_respond(
     temperature,
     top_p,
 ):
-    """
-    Response function dedicated to Cohere Command R+
-    """
     model_name = "Cohere Command R+"
     try:
         client = get_client(model_name)
@@ -108,11 +100,7 @@
         chat_history.append((message, str(e)))
         return chat_history
 
-    messages = []
-    # It is also fine to apply system_message as below rather than adding it under a system key inside messages
-    # This part keeps the existing structure
-    messages.append({"role": "system", "content": system_message})
-
+    messages = [{"role": "system", "content": system_message}]
     for human, assistant in chat_history:
         if human:
             messages.append({"role": "user", "content": human})
@@ -121,7 +109,10 @@
 
     messages.append({"role": "user", "content": message})
 
+    response = ""
+
     try:
+        # Non-streaming handling for the Cohere Command R+ model
         response_full = client.chat_completion(
             messages,
             max_tokens=max_tokens,
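Note: how assistant_message is pulled out of response_full is likewise elided. For a non-streaming chat_completion, InferenceClient returns the reply at `choices[0].message.content`, so the hidden lines presumably amount to:

```python
# Assumed extraction of the reply from the non-streaming response object
assistant_message = response_full.choices[0].message.content
chat_history.append((message, assistant_message))
return chat_history
```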
@@ -136,7 +127,6 @@
         chat_history.append((message, error_message))
         return chat_history
 
-
 def chatgpt_respond(
     message,
     chat_history,
@@ -145,15 +135,11 @@ def chatgpt_respond(
     temperature,
     top_p,
 ):
-    """
-    Response function dedicated to ChatGPT (OpenAI)
-    """
     openai.api_key = os.getenv("OPENAI_API_KEY")
     if not openai.api_key:
         chat_history.append((message, "OPENAI_API_KEY 환경 변수가 필요합니다."))
         return chat_history
 
-    # ChatGPT allows the system role inside messages, so the current approach is OK
     messages = [{"role": "system", "content": system_message}]
     for human, assistant in chat_history:
         messages.append({"role": "user", "content": human})
@@ -162,7 +148,7 @@
 
     try:
         response = openai.ChatCompletion.create(
-            model="gpt-
+            model="gpt-4o-mini",  # or use another model ID
             messages=messages,
             max_tokens=max_tokens,
             temperature=temperature,
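Note: `openai.ChatCompletion.create` is the legacy module-level API that was removed in openai>=1.0, so this code assumes a pinned openai<1.0. If the Space were moved to a current openai release, the equivalent call would look roughly like this sketch:

```python
# Equivalent call under openai>=1.0 (sketch; the diff itself keeps the legacy API)
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=messages,
    max_tokens=max_tokens,
    temperature=temperature,
    top_p=top_p,
)
assistant_message = response.choices[0].message.content
```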
@@ -176,63 +162,13 @@ def chatgpt_respond(
         chat_history.append((message, error_message))
         return chat_history
 
-
-def claude_respond(
-    message,
-    chat_history,
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    """
-    Response function dedicated to Anthropic Claude (Messages API)
-    - The "system" role must not go into the messages list;
-      it must be passed as the top-level system argument.
-    """
-    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
-    if not anthropic_api_key:
-        chat_history.append((message, "ANTHROPIC_API_KEY 환경 변수가 필요합니다."))
-        return chat_history
-
-    client = anthropic.Anthropic(api_key=anthropic_api_key)
-
-    # Per the Anthropic Messages API, the "system" role goes in a top-level parameter,
-    # and only user/assistant turns go into messages
-    anthro_messages = []
-    for human, assistant in chat_history:
-        if human:
-            anthro_messages.append({"role": "user", "content": human})
-        if assistant:
-            anthro_messages.append({"role": "assistant", "content": assistant})
-    anthro_messages.append({"role": "user", "content": message})
-
-    try:
-        response = client.messages.create(
-            model="claude-3-haiku-20240307",  # example Claude model (check available models)
-            system=system_message,  # ← system_message is passed here
-            messages=anthro_messages,  # contains only user/assistant turns
-            max_tokens_to_sample=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-        )
-        assistant_message = response["completion"]
-        chat_history.append((message, assistant_message))
-        return chat_history
-    except Exception as e:
-        error_message = f"오류가 발생했습니다: {str(e)}"
-        chat_history.append((message, error_message))
-        return chat_history
-
-
 def clear_conversation():
     return []
 
-
 with gr.Blocks() as demo:
     gr.Markdown("# Prompting AI Chatbot")
     gr.Markdown("언어모델별 프롬프트 테스트 챗봇입니다.")
-
+
     with gr.Tab("일반 모델"):
         with gr.Row():
             with gr.Column(scale=1):
@@ -263,7 +199,7 @@ with gr.Blocks() as demo:
         msg.submit(respond, [msg, chatbot, model_name, max_tokens, temperature, top_p, system_message], chatbot)
         submit_button.click(respond, [msg, chatbot, model_name, max_tokens, temperature, top_p, system_message], chatbot)
         clear_button.click(clear_conversation, outputs=chatbot, queue=False)
-
+
     with gr.Tab("Cohere Command R+"):
         with gr.Row():
             cohere_system_message = gr.Textbox(
@@ -283,7 +219,7 @@ with gr.Blocks() as demo:
                 step=0.05,
                 label="Top-P",
             )
-
+
         cohere_chatbot = gr.Chatbot(height=600)
         cohere_msg = gr.Textbox(label="메세지를 입력하세요")
         with gr.Row():
@@ -301,7 +237,7 @@ with gr.Blocks() as demo:
             cohere_chatbot
         )
         cohere_clear_button.click(clear_conversation, outputs=cohere_chatbot, queue=False)
-
+
     with gr.Tab("ChatGPT"):
         with gr.Row():
             chatgpt_system_message = gr.Textbox(
@@ -321,7 +257,7 @@ with gr.Blocks() as demo:
                 step=0.05,
                 label="Top-P",
             )
-
+
         chatgpt_chatbot = gr.Chatbot(height=600)
         chatgpt_msg = gr.Textbox(label="메세지를 입력하세요")
         with gr.Row():
@@ -340,55 +276,5 @@ with gr.Blocks() as demo:
         )
         chatgpt_clear_button.click(clear_conversation, outputs=chatgpt_chatbot, queue=False)
 
-###################### (end of original code) ######################
-
-
-# ------------------------- The "Claude-only tab" is added from here -------------------------
-# - The original code already has a claude_respond function, so it is reused as-is
-# - Only a tab is added to build the UI
-
-with demo:  # "with gr.Blocks() as demo:" was already opened in the original code
-    with gr.Tab("Claude"):
-        with gr.Row():
-            claude_system_message = gr.Textbox(
-                value="""반드시 한글로 답변할 것.
-너는 Claude, Anthropic에서 개발한 언어 모델이다.
-내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
-""",
-                label="System Message",
-                lines=3
-            )
-            claude_max_tokens = gr.Slider(
-                minimum=1, maximum=2048, value=512, step=1, label="Max Tokens"
-            )
-            claude_temperature = gr.Slider(
-                minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"
-            )
-            claude_top_p = gr.Slider(
-                minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P",
-            )
-
-        claude_chatbot = gr.Chatbot(height=600)
-        claude_msg = gr.Textbox(label="메세지를 입력하세요")
-
-        with gr.Row():
-            claude_submit_button = gr.Button("전송")
-            claude_clear_button = gr.Button("대화 내역 지우기")
-
-        # Use the already-defined claude_respond function as-is
-        claude_msg.submit(
-            claude_respond,
-            [claude_msg, claude_chatbot, claude_system_message, claude_max_tokens, claude_temperature, claude_top_p],
-            claude_chatbot
-        )
-        claude_submit_button.click(
-            claude_respond,
-            [claude_msg, claude_chatbot, claude_system_message, claude_max_tokens, claude_temperature, claude_top_p],
-            claude_chatbot
-        )
-        claude_clear_button.click(clear_conversation, outputs=claude_chatbot, queue=False)
-
-
-# Do not modify the original code's demo launch section either
 if __name__ == "__main__":
     demo.launch()
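Note: the deleted claude_respond also carried two latent bugs that this commit removes along with the anthropic import. `client.messages.create` belongs to Anthropic's Messages API, which takes `max_tokens` (`max_tokens_to_sample` is the legacy Completions parameter and would raise a TypeError), and it returns a Message object rather than a dict, so `response["completion"]` would also fail. Had the handler been kept, the corrected call would look roughly like this sketch:

```python
# Corrected Messages API usage (sketch; the commit deletes this handler instead)
response = client.messages.create(
    model="claude-3-haiku-20240307",
    system=system_message,        # system prompt is a top-level parameter
    messages=anthro_messages,     # user/assistant turns only
    max_tokens=max_tokens,        # Messages API name, not max_tokens_to_sample
    temperature=temperature,
    top_p=top_p,
)
assistant_message = response.content[0].text  # Message object, not a dict
```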