Kims12 committed on
Commit
5900591
·
verified ·
1 Parent(s): d875978

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +175 -8
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import openai
 
4
  import os
5
 
6
  # 제거할 모델들을 MODELS 사전에서 제외
@@ -11,13 +12,13 @@ MODELS = {
11
  "Microsoft": "microsoft/Phi-3-mini-4k-instruct",
12
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
13
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
14
- "Aya-23-35B": "CohereForAI/aya-23-35B"
 
15
  }
16
 
17
  # Cohere Command R+ 모델 ID 정의
18
  COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
19
 
20
-
21
  def get_client(model_name, hf_token):
22
  """
23
  모델 이름에 맞춰 InferenceClient 생성.
@@ -34,7 +35,6 @@ def get_client(model_name, hf_token):
34
  raise ValueError("유효하지 않은 모델 이름입니다.")
35
  return InferenceClient(model_id, token=hf_token)
36
 
37
-
38
  def respond(
39
  message,
40
  chat_history,
@@ -92,7 +92,6 @@ def respond(
92
  chat_history.append((message, error_message))
93
  yield chat_history
94
 
95
-
96
  def cohere_respond(
97
  message,
98
  chat_history,
@@ -134,7 +133,6 @@ def cohere_respond(
134
  chat_history.append((message, error_message))
135
  return chat_history
136
 
137
-
138
  def chatgpt_respond(
139
  message,
140
  chat_history,
@@ -151,7 +149,6 @@ def chatgpt_respond(
151
  chat_history.append((message, "OpenAI API 토큰이 필요합니다."))
152
  return chat_history
153
 
154
- # openai.api_key = os.getenv("OPENAI_API_KEY") # 기존 코드 주석
155
  openai.api_key = openai_token # UI에서 받은 토큰 사용
156
 
157
  messages = [{"role": "system", "content": system_message}]
@@ -176,11 +173,91 @@ def chatgpt_respond(
176
  chat_history.append((message, error_message))
177
  return chat_history
178
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
 
180
  def clear_conversation():
181
  return []
182
 
183
-
184
  with gr.Blocks() as demo:
185
  gr.Markdown("# Prompting AI Chatbot")
186
  gr.Markdown("언어모델별 프롬프트 테스트 챗봇입니다.")
@@ -197,6 +274,16 @@ with gr.Blocks() as demo:
197
  type="password",
198
  placeholder="OpenAI API 토큰을 입력하세요..."
199
  )
 
 
 
 
 
 
 
 
 
 
200
 
201
  with gr.Tab("일반 모델"):
202
  with gr.Row():
@@ -320,5 +407,85 @@ with gr.Blocks() as demo:
320
  chatgpt_submit_button.click(chatgpt_respond, inputs_for_chatgpt, chatgpt_chatbot)
321
  chatgpt_clear_button.click(clear_conversation, outputs=chatgpt_chatbot, queue=False)
322
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
323
  if __name__ == "__main__":
324
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import openai
4
+ import anthropic
5
  import os
6
 
7
  # 제거할 모델들을 MODELS 사전에서 제외
 
12
  "Microsoft": "microsoft/Phi-3-mini-4k-instruct",
13
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
14
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
15
+ "Aya-23-35B": "CohereForAI/aya-23-35B",
16
+ "DeepSeek-V3": "deepseek/deepseek-chat"
17
  }
18
 
19
  # Cohere Command R+ 모델 ID 정의
20
  COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
21
 
 
22
  def get_client(model_name, hf_token):
23
  """
24
  모델 이름에 맞춰 InferenceClient 생성.
 
35
  raise ValueError("유효하지 않은 모델 이름입니다.")
36
  return InferenceClient(model_id, token=hf_token)
37
 
 
38
  def respond(
39
  message,
40
  chat_history,
 
92
  chat_history.append((message, error_message))
93
  yield chat_history
94
 
 
95
  def cohere_respond(
96
  message,
97
  chat_history,
 
133
  chat_history.append((message, error_message))
134
  return chat_history
135
 
 
136
  def chatgpt_respond(
137
  message,
138
  chat_history,
 
149
  chat_history.append((message, "OpenAI API 토큰이 필요합니다."))
150
  return chat_history
151
 
 
152
  openai.api_key = openai_token # UI에서 받은 토큰 사용
153
 
154
  messages = [{"role": "system", "content": system_message}]
 
173
  chat_history.append((message, error_message))
174
  return chat_history
175
 
176
def claude_respond(
    message,
    chat_history,
    system_message,
    max_tokens,
    temperature,
    top_p,
    claude_token,  # Claude API token entered in the UI (password textbox)
):
    """
    Generate a Claude reply for *message* and append it to *chat_history*.

    Parameters
    ----------
    message : str
        The user's newest message.
    chat_history : list[tuple[str, str]]
        Prior (user, assistant) turns as rendered by the gr.Chatbot widget.
    system_message : str
        System prompt forwarded via Anthropic's top-level ``system`` field.
    max_tokens, temperature, top_p :
        Sampling parameters forwarded to ``client.messages.create``.
    claude_token : str
        Anthropic API key; if empty, an error row is appended instead.

    Returns
    -------
    list[tuple[str, str]]
        The updated chat history (errors are appended as assistant text,
        matching the sibling *_respond functions).

    Fixes vs. the original: the accumulated chat_history is now replayed to
    the API (previously only the latest message was sent, so Claude had no
    conversational context), and top_p is actually forwarded (it was
    accepted but silently ignored).
    """
    if not claude_token:
        chat_history.append((message, "Claude API 토큰이 필요합니다."))
        return chat_history

    try:
        client = anthropic.Anthropic(api_key=claude_token)

        # Replay the prior conversation as alternating user/assistant turns,
        # then append the new user message — same scheme as chatgpt_respond.
        messages = []
        for human, assistant in chat_history:
            messages.append({"role": "user", "content": human})
            messages.append({"role": "assistant", "content": assistant})
        messages.append({"role": "user", "content": message})

        response = client.messages.create(
            model="claude-3-haiku-20240307",
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,  # was dropped in the original
            system=system_message,
            messages=messages,
        )

        assistant_message = response.content[0].text
        chat_history.append((message, assistant_message))
        return chat_history
    except Exception as e:
        error_message = f"오류가 발생했습니다: {str(e)}"
        chat_history.append((message, error_message))
        return chat_history
216
+
217
def deepseek_respond(
    message,
    chat_history,
    system_message,
    max_tokens,
    temperature,
    top_p,
    deepseek_token,  # DeepSeek API token entered in the UI (password textbox)
):
    """
    Generate a DeepSeek reply for *message* and append it to *chat_history*.

    Parameters
    ----------
    message : str
        The user's newest message.
    chat_history : list[tuple[str, str]]
        Prior (user, assistant) turns as rendered by the gr.Chatbot widget.
    system_message : str
        System prompt sent as the first chat message.
    max_tokens, temperature, top_p :
        Sampling parameters forwarded to the chat-completions call.
    deepseek_token : str
        DeepSeek API key; if empty, an error row is appended instead.

    Returns
    -------
    list[tuple[str, str]]
        The updated chat history (errors are appended as assistant text).

    Fix vs. the original: the DeepSeek key and base URL are passed per-call
    instead of mutating the global ``openai.api_key`` / ``openai.api_base``,
    which previously clobbered the ChatGPT tab's configuration for every
    subsequent chatgpt_respond call.
    """
    if not deepseek_token:
        chat_history.append((message, "DeepSeek API 토큰이 필요합니다."))
        return chat_history

    # Rebuild the conversation in OpenAI chat format.
    messages = [{"role": "system", "content": system_message}]
    for human, assistant in chat_history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    try:
        # Legacy openai-python accepts per-request api_key/api_base kwargs,
        # so we never touch the module-level globals shared with ChatGPT.
        response = openai.ChatCompletion.create(
            model="deepseek-chat",
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            api_key=deepseek_token,
            api_base="https://api.deepseek.com/v1",
        )
        assistant_message = response.choices[0].message['content']
        chat_history.append((message, assistant_message))
        return chat_history
    except Exception as e:
        error_message = f"오류가 발생했습니다: {str(e)}"
        chat_history.append((message, error_message))
        return chat_history
257
 
258
  def clear_conversation():
259
  return []
260
 
 
261
  with gr.Blocks() as demo:
262
  gr.Markdown("# Prompting AI Chatbot")
263
  gr.Markdown("언어모델별 프롬프트 테스트 챗봇입니다.")
 
274
  type="password",
275
  placeholder="OpenAI API 토큰을 입력하세요..."
276
  )
277
+ claude_token_box = gr.Textbox(
278
+ label="Claude 토큰 (비공개)",
279
+ type="password",
280
+ placeholder="Claude API 토큰을 입력하세요..."
281
+ )
282
+ deepseek_token_box = gr.Textbox(
283
+ label="DeepSeek 토큰 (비공개)",
284
+ type="password",
285
+ placeholder="DeepSeek API 토큰을 입력하세요..."
286
+ )
287
 
288
  with gr.Tab("일반 모델"):
289
  with gr.Row():
 
407
  chatgpt_submit_button.click(chatgpt_respond, inputs_for_chatgpt, chatgpt_chatbot)
408
  chatgpt_clear_button.click(clear_conversation, outputs=chatgpt_chatbot, queue=False)
409
 
410
+ with gr.Tab("Claude"):
411
+ with gr.Row():
412
+ claude_system_message = gr.Textbox(
413
+ value="""반드시 한글로 답변할 것.
414
+ 너는 Anthropic에서 개발한 클로드이다.
415
+ 최대한 정확하고 친절하게 답변하라.
416
+ """,
417
+ label="System Message",
418
+ lines=3
419
+ )
420
+ claude_max_tokens = gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max Tokens")
421
+ claude_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
422
+ claude_top_p = gr.Slider(
423
+ minimum=0.1,
424
+ maximum=1.0,
425
+ value=0.95,
426
+ step=0.05,
427
+ label="Top-P",
428
+ )
429
+
430
+ claude_chatbot = gr.Chatbot(height=600)
431
+ claude_msg = gr.Textbox(label="메세지를 입력하세요")
432
+ with gr.Row():
433
+ claude_submit_button = gr.Button("전송")
434
+ claude_clear_button = gr.Button("대화 내역 지우기")
435
+
436
+ # claude_respond 함수에 claude_token 인자를 전달하도록 수정
437
+ inputs_for_claude = [
438
+ claude_msg,
439
+ claude_chatbot,
440
+ claude_system_message,
441
+ claude_max_tokens,
442
+ claude_temperature,
443
+ claude_top_p,
444
+ claude_token_box
445
+ ]
446
+ claude_msg.submit(claude_respond, inputs_for_claude, claude_chatbot)
447
+ claude_submit_button.click(claude_respond, inputs_for_claude, claude_chatbot)
448
+ claude_clear_button.click(clear_conversation, outputs=claude_chatbot, queue=False)
449
+
450
+ with gr.Tab("DeepSeek"):
451
+ with gr.Row():
452
+ deepseek_system_message = gr.Textbox(
453
+ value="""반드시 한글로 답변할 것.
454
+ 너는 DeepSeek-V3, 최고의 언어 모델이다.
455
+ 내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
456
+ """,
457
+ label="System Message",
458
+ lines=3
459
+ )
460
+ deepseek_max_tokens = gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max Tokens")
461
+ deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
462
+ deepseek_top_p = gr.Slider(
463
+ minimum=0.1,
464
+ maximum=1.0,
465
+ value=0.95,
466
+ step=0.05,
467
+ label="Top-P",
468
+ )
469
+
470
+ deepseek_chatbot = gr.Chatbot(height=600)
471
+ deepseek_msg = gr.Textbox(label="메세지를 입력하세요")
472
+ with gr.Row():
473
+ deepseek_submit_button = gr.Button("전송")
474
+ deepseek_clear_button = gr.Button("대화 내역 지우기")
475
+
476
+ # deepseek_respond 함수에 deepseek_token 인자를 전달하도록 수정
477
+ inputs_for_deepseek = [
478
+ deepseek_msg,
479
+ deepseek_chatbot,
480
+ deepseek_system_message,
481
+ deepseek_max_tokens,
482
+ deepseek_temperature,
483
+ deepseek_top_p,
484
+ deepseek_token_box
485
+ ]
486
+ deepseek_msg.submit(deepseek_respond, inputs_for_deepseek, deepseek_chatbot)
487
+ deepseek_submit_button.click(deepseek_respond, inputs_for_deepseek, deepseek_chatbot)
488
+ deepseek_clear_button.click(clear_conversation, outputs=deepseek_chatbot, queue=False)
489
+
490
  if __name__ == "__main__":
491
+ demo.launch()