Kims12 committed
Commit 083023a · verified · 1 Parent(s): 6fe8a58

Update app.py

Files changed (1):
  1. app.py +96 -50
app.py CHANGED
@@ -148,17 +148,13 @@ def respond_claude_qna(
     try:
         client = anthropic.Anthropic(api_key=claude_api_key)
 
-        # Use the selected model name (model_name)
         message = client.messages.create(
             model=model_name,
             max_tokens=max_tokens,
             temperature=temperature,
             system=system_message,
             messages=[
-                {
-                    "role": "user",
-                    "content": question
-                }
+                {"role": "user", "content": question}
             ]
         )
 
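For context, here is a minimal sketch of the full respond_claude_qna that this hunk edits, reconstructed under assumptions: the parameter list is inferred from the merge_and_call_claude call site later in the diff, and the CLAUDE_API_KEY environment variable and the message.content[0].text return value are guesses, not shown in the commit.

import os
import anthropic

def respond_claude_qna(question, system_message, max_tokens,
                       temperature, top_p, model_name):
    # How claude_api_key is obtained is an assumption; the diff only shows its use.
    claude_api_key = os.getenv("CLAUDE_API_KEY")
    if not claude_api_key:
        return "Claude API 키가 필요합니다."
    try:
        client = anthropic.Anthropic(api_key=claude_api_key)
        message = client.messages.create(
            model=model_name,
            max_tokens=max_tokens,
            temperature=temperature,
            system=system_message,
            messages=[{"role": "user", "content": question}]
        )
        # The Messages API returns a list of content blocks; take the first text block.
        # Note: the hunk above does not forward top_p to the API call.
        return message.content[0].text
    except Exception as e:
        return f"오류가 발생했습니다: {str(e)}"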
@@ -207,8 +203,39 @@ def respond_o1mini_qna(
     except Exception as e:
         return f"오류가 발생했습니다: {str(e)}"
 
+def respond_gemini_qna(
+    question: str,
+    system_message: str,
+    max_tokens: int,
+    temperature: float,
+    top_p: float,
+    model_id: str
+):
+    """
+    Answers a single question with a Gemini model
+    (e.g. gemini-2.0-flash, gemini-2.0-flash-lite-preview-02-05).
+    system_message and question are combined into a single prompt.
+    """
+    hf_token = os.getenv("HF_TOKEN")
+    if not hf_token:
+        return "HuggingFace API 토큰이 필요합니다."
+
+    client = InferenceClient(model_id, token=hf_token)
+    prompt = f"{system_message}\n\n{question}"
+
+    try:
+        response = client.text_generation(
+            prompt,
+            max_new_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p
+        )
+        return response  # text_generation returns the generated string directly
+    except Exception as e:
+        return f"오류가 발생했습니다: {str(e)}"
+
 #############################
-# [Base code] UI section - must not be modified/deleted (tab order changed, gpt-4o-mini removed)
+# [Base code] UI section - must not be modified/deleted (tab order: OpenAI, Gemini, Claude, DeepSeek, Cohere Command R+)
 #############################
 
 with gr.Blocks() as demo:
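One note on the new function: huggingface_hub's InferenceClient.text_generation returns the generated string directly unless details=True or stream=True is passed, which is why respond_gemini_qna can return the response unchanged. A hypothetical call, with made-up inputs:

# Hypothetical usage; the inputs, and whether these Gemini model ids are
# actually served by the HF Inference API, are assumptions.
answer = respond_gemini_qna(
    question="파이썬의 GIL이 무엇인지 설명해줘.",
    system_message="반드시 한글로 답변할 것.",
    max_tokens=1000,
    temperature=0.7,
    top_p=0.95,
    model_id="gemini-2.0-flash"
)
print(answer)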
@@ -218,14 +245,12 @@ with gr.Blocks() as demo:
     # OpenAI tab (gpt-4o-mini / o1-mini combined)
     #################
     with gr.Tab("OpenAI"):
-        # Model selection radio button (gpt-4o-mini and o1-mini)
         openai_model_radio = gr.Radio(
             choices=["gpt-4o-mini", "o1-mini"],
             label="모델 선택",
             value="gpt-4o-mini"
         )
 
-        # gpt-4o-mini-only UI group (initially visible)
         with gr.Column(visible=True) as chatgpt_ui:
             chatgpt_input1_o = gr.Textbox(label="입력1", lines=1)
             chatgpt_input2_o = gr.Textbox(label="입력2", lines=1)
@@ -268,7 +293,6 @@ with gr.Blocks() as demo:
             outputs=chatgpt_answer_output_o
         )
 
-        # o1-mini-only UI group (initially hidden)
         with gr.Column(visible=False) as o1mini_ui:
             o1mini_input1_o = gr.Textbox(label="입력1", lines=1)
             o1mini_input2_o = gr.Textbox(label="입력2", lines=1)
@@ -287,7 +311,6 @@ with gr.Blocks() as demo:
             )
             o1mini_max_tokens_o = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
             o1mini_temperature_o = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
-            # o1-mini does not support top_p, so that option is omitted from the UI
             o1mini_submit_button_o = gr.Button("전송")
 
             def merge_and_call_o1mini_o(i1, i2, i3, i4, i5, sys_msg, mt, temp):
@@ -309,7 +332,6 @@ with gr.Blocks() as demo:
             outputs=o1mini_answer_output_o
         )
 
-        # UI update: switch between the gpt-4o-mini and o1-mini UIs based on the radio selection
         def update_openai_ui(model_choice):
             if model_choice == "gpt-4o-mini":
                 return gr.update(visible=True), gr.update(visible=False)
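The else branch of update_openai_ui and its wiring to the radio button fall between hunks; based on the outputs=[chatgpt_ui, o1mini_ui] context lines below, they presumably read like this sketch (inferred, not shown verbatim in the commit):

# Sketch of the elided else branch and .change wiring, inferred from context.
def update_openai_ui(model_choice):
    if model_choice == "gpt-4o-mini":
        return gr.update(visible=True), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True)

openai_model_radio.change(
    fn=update_openai_ui,
    inputs=openai_model_radio,
    outputs=[chatgpt_ui, o1mini_ui]
)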
@@ -322,11 +344,62 @@ with gr.Blocks() as demo:
             outputs=[chatgpt_ui, o1mini_ui]
         )
 
+    #################
+    # Gemini tab
+    #################
+    with gr.Tab("Gemini"):
+        gemini_model_radio = gr.Radio(
+            choices=["gemini-2.0-flash", "gemini-2.0-flash-lite-preview-02-05"],
+            label="모델 선택",
+            value="gemini-2.0-flash"
+        )
+        gemini_input1 = gr.Textbox(label="입력1", lines=1)
+        gemini_input2 = gr.Textbox(label="입력2", lines=1)
+        gemini_input3 = gr.Textbox(label="입력3", lines=1)
+        gemini_input4 = gr.Textbox(label="입력4", lines=1)
+        gemini_input5 = gr.Textbox(label="입력5", lines=1)
+        gemini_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
+        with gr.Accordion("고급 설정 (Gemini)", open=False):
+            gemini_system_message = gr.Textbox(
+                value="""반드시 한글로 답변할 것.
+너는 Gemini 모델이다.
+내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
+""",
+                label="System Message",
+                lines=3
+            )
+            gemini_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
+            gemini_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
+            gemini_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
+        gemini_submit_button = gr.Button("전송")
+
+        def merge_and_call_gemini(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, model_radio):
+            question = " ".join([i1, i2, i3, i4, i5])
+            return respond_gemini_qna(
+                question=question,
+                system_message=sys_msg,
+                max_tokens=mt,
+                temperature=temp,
+                top_p=top_p_,
+                model_id=model_radio
+            )
+        gemini_submit_button.click(
+            fn=merge_and_call_gemini,
+            inputs=[
+                gemini_input1, gemini_input2, gemini_input3, gemini_input4, gemini_input5,
+                gemini_system_message,
+                gemini_max_tokens,
+                gemini_temperature,
+                gemini_top_p,
+                gemini_model_radio
+            ],
+            outputs=gemini_answer_output
+        )
+
     #################
     # Claude tab
     #################
     with gr.Tab("Claude"):
-        # Radio button added: model selection (three options)
         claude_model_radio = gr.Radio(
             choices=[
                 "claude-3-haiku-20240307",
@@ -334,7 +407,7 @@ with gr.Blocks() as demo:
                 "claude-3-5-sonnet-20241022"
             ],
             label="모델 선택",
-            value="claude-3-5-sonnet-20241022"  # set the default value
+            value="claude-3-5-sonnet-20241022"
         )
 
         claude_input1 = gr.Textbox(label="입력1", lines=1)
@@ -353,28 +426,9 @@ with gr.Blocks() as demo:
 최대한 정확하고 친절하게 답변하라.""",
                 lines=3
             )
-            claude_max_tokens = gr.Slider(
-                minimum=100,
-                maximum=4000,
-                value=2000,
-                step=100,
-                label="Max Tokens"
-            )
-            claude_temperature = gr.Slider(
-                minimum=0.1,
-                maximum=2.0,
-                value=0.7,
-                step=0.05,
-                label="Temperature"
-            )
-            claude_top_p = gr.Slider(
-                minimum=0.1,
-                maximum=1.0,
-                value=0.95,
-                step=0.05,
-                label="Top-p"
-            )
-
+            claude_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
+            claude_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
+            claude_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
         claude_submit_button = gr.Button("전송")
 
         def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, model_radio):
@@ -385,9 +439,8 @@ with gr.Blocks() as demo:
                 max_tokens=mt,
                 temperature=temp,
                 top_p=top_p_,
-                model_name=model_radio  # pass the model name selected via the radio button
+                model_name=model_radio
             )
-
         claude_submit_button.click(
             fn=merge_and_call_claude,
             inputs=[
@@ -396,7 +449,7 @@ with gr.Blocks() as demo:
                 claude_max_tokens,
                 claude_temperature,
                 claude_top_p,
-                claude_model_radio  # added radio button input
+                claude_model_radio
             ],
             outputs=claude_answer_output
         )
@@ -405,11 +458,10 @@ with gr.Blocks() as demo:
     # DeepSeek tab
     #################
     with gr.Tab("DeepSeek"):
-        # Radio button added
         deepseek_model_radio = gr.Radio(
-            choices=["V3 (deepseek-chat)", "R1 (deepseek-reasoner)"],  # choices
-            label="모델 선택",  # label
-            value="V3 (deepseek-chat)"  # default
+            choices=["V3 (deepseek-chat)", "R1 (deepseek-reasoner)"],
+            label="모델 선택",
+            value="V3 (deepseek-chat)"
         )
 
         deepseek_input1 = gr.Textbox(label="입력1", lines=1)
@@ -432,16 +484,13 @@ with gr.Blocks() as demo:
             deepseek_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
             deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
             deepseek_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
-
         deepseek_submit_button = gr.Button("전송")
 
         def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, model_radio):
-            # Extract the model name selected by the radio button
             if model_radio == "V3 (deepseek-chat)":
                 model_name = "deepseek-chat"
             else:
                 model_name = "deepseek-reasoner"
-
             question = " ".join([i1, i2, i3, i4, i5])
             return respond_deepseek_qna(
                 question=question,
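As a design note, this label-to-model-id branching could also be written table-driven, which scales better if more DeepSeek variants are added later; an equivalent alternative sketch (not what the commit does):

# Equivalent, table-driven mapping (alternative sketch, not in the commit).
DEEPSEEK_MODELS = {
    "V3 (deepseek-chat)": "deepseek-chat",
    "R1 (deepseek-reasoner)": "deepseek-reasoner",
}
model_name = DEEPSEEK_MODELS.get(model_radio, "deepseek-chat")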
@@ -449,9 +498,8 @@ with gr.Blocks() as demo:
                 max_tokens=mt,
                 temperature=temp,
                 top_p=top_p_,
-                model_name=model_name  # pass the selected model name
+                model_name=model_name
             )
-
         deepseek_submit_button.click(
             fn=merge_and_call_deepseek,
             inputs=[
@@ -460,7 +508,7 @@ with gr.Blocks() as demo:
                 deepseek_max_tokens,
                 deepseek_temperature,
                 deepseek_top_p,
-                deepseek_model_radio  # radio button input added
+                deepseek_model_radio
             ],
             outputs=deepseek_answer_output
         )
@@ -489,7 +537,6 @@ with gr.Blocks() as demo:
             cohere_max_tokens = gr.Slider(minimum=100, maximum=10000, value=4000, step=100, label="Max Tokens")
             cohere_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
             cohere_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
-
         cohere_submit_button = gr.Button("전송")
 
         def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
  def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
@@ -501,7 +548,6 @@ with gr.Blocks() as demo:
501
  temperature=temp,
502
  top_p=top_p_
503
  )
504
-
505
  cohere_submit_button.click(
506
  fn=merge_and_call_cohere,
507
  inputs=[
 
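The new respond_gemini_qna path depends on imports that this diff never shows; presumably something like the following already sits near the top of app.py (assumed, not part of the commit):

# Assumed imports for the code paths touched by this commit.
import os
import anthropic                              # Claude path
import gradio as gr                           # UI
from huggingface_hub import InferenceClient   # Gemini path via HF Inference API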