Kims12 committed
Commit 958e155 · verified · 1 Parent(s): a52a599

Update app.py

Files changed (1):
  1. app.py +95 -50
app.py CHANGED
@@ -16,7 +16,8 @@ MODELS = {
16
  "Microsoft": "microsoft/Phi-3-mini-4k-instruct",
17
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
18
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
19
- "Aya-23-35B": "CohereForAI/aya-23-35B"
 
20
  }
21
 
22
  # Cohere Command R+ 모델 ID 정의
@@ -146,6 +147,42 @@ def respond_chatgpt_qna(
146
  return f"오류가 발생했습니다: {str(e)}"
147
 
148
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  #############################
150
  # [기본코드] UI 부분 - 수정/삭제 불가
151
  #############################
@@ -171,6 +208,11 @@ with gr.Blocks() as demo:
171
  placeholder="Claude API 토큰을 입력하세요...",
172
  show_copy_button=False
173
  )
 
 
 
 
 
174
 
175
  #################
176
  # 일반 모델 탭
@@ -338,54 +380,6 @@ with gr.Blocks() as demo:
338
  outputs=chatgpt_answer_output
339
  )
340
 
341
- #################################################
342
- # [클로드 플레이그라운드] - 개선된 코드
343
- #################################################
344
-
345
- def validate_claude_token(token: str) -> bool:
346
- """Claude API 토큰 검증"""
347
- return bool(token and len(token.strip()) >= 10)
348
-
349
- def respond_claude_qna(
350
- question: str,
351
- system_message: str,
352
- max_tokens: int,
353
- temperature: float,
354
- top_p: float,
355
- claude_api_key: str
356
- ) -> str:
357
- """
358
- Claude API를 사용한 개선된 응답 생성 함수
359
- """
360
- if not validate_claude_token(claude_api_key):
361
- return "유효한 Claude API 토큰이 필요합니다."
362
-
363
- try:
364
- client = anthropic.Anthropic(api_key=claude_api_key)
365
-
366
- # 메시지 생성
367
- message = client.messages.create(
368
- model="claude-3-haiku-20240307",
369
- max_tokens=max_tokens,
370
- temperature=temperature,
371
- system=system_message,
372
- messages=[
373
- {
374
- "role": "user",
375
- "content": question
376
- }
377
- ]
378
- )
379
-
380
- return message.content[0].text
381
-
382
- except anthropic.APIError as ae:
383
- return f"Claude API 오류: {str(ae)}"
384
- except anthropic.RateLimitError:
385
- return "요청 한도를 초과했습니다. 잠시 후 다시 시도해주세요."
386
- except Exception as e:
387
- return f"예상치 못한 오류가 발생했습니다: {str(e)}"
388
-
389
  #################
390
  # Claude 탭
391
  #################
@@ -454,8 +448,59 @@ with gr.Blocks() as demo:
454
  outputs=claude_answer_output
455
  )
456
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
457
  #############################
458
  # 메인 실행부
459
  #############################
460
  if __name__ == "__main__":
461
- demo.launch()
 
16
  "Microsoft": "microsoft/Phi-3-mini-4k-instruct",
17
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
18
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
19
+ "Aya-23-35B": "CohereForAI/aya-23-35B",
20
+ "DeepSeek-V3": "deepseek/deepseek-chat"
21
  }
22
 
23
  # Cohere Command R+ 모델 ID 정의
 
147
  return f"오류가 발생했습니다: {str(e)}"
148
 
149
 
150
+ def respond_deepseek_qna(
151
+ question: str,
152
+ system_message: str,
153
+ max_tokens: int,
154
+ temperature: float,
155
+ top_p: float,
156
+ deepseek_token: str
157
+ ):
158
+ """
159
+ DeepSeek 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
160
+ """
161
+ if not deepseek_token:
162
+ return "DeepSeek API 토큰이 필요합니다."
163
+
164
+ openai.api_key = deepseek_token
165
+ openai.api_base = "https://api.deepseek.com/v1"
166
+
167
+ messages = [
168
+ {"role": "system", "content": system_message},
169
+ {"role": "user", "content": question}
170
+ ]
171
+
172
+ try:
173
+ response = openai.ChatCompletion.create(
174
+ model="deepseek-chat",
175
+ messages=messages,
176
+ max_tokens=max_tokens,
177
+ temperature=temperature,
178
+ top_p=top_p,
179
+ )
180
+ assistant_message = response.choices[0].message['content']
181
+ return assistant_message
182
+ except Exception as e:
183
+ return f"오류가 발생했습니다: {str(e)}"
184
+
185
+
186
  #############################
187
  # [기본코드] UI 부분 - 수정/삭제 불가
188
  #############################
 
208
  placeholder="Claude API 토큰을 입력하세요...",
209
  show_copy_button=False
210
  )
211
+ deepseek_token_box = gr.Textbox(
212
+ label="DeepSeek 토큰",
213
+ type="password",
214
+ placeholder="DeepSeek API 토큰을 입력하세요..."
215
+ )
216
 
217
  #################
218
  # 일반 모델 탭
 
380
  outputs=chatgpt_answer_output
381
  )
382
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
383
  #################
384
  # Claude 탭
385
  #################
 
448
  outputs=claude_answer_output
449
  )
450
 
451
+ #################
452
+ # DeepSeek 탭
453
+ #################
454
+ with gr.Tab("DeepSeek-V3"):
455
+ deepseek_input1 = gr.Textbox(label="입력1", lines=1)
456
+ deepseek_input2 = gr.Textbox(label="입력2", lines=1)
457
+ deepseek_input3 = gr.Textbox(label="입력3", lines=1)
458
+ deepseek_input4 = gr.Textbox(label="입력4", lines=1)
459
+ deepseek_input5 = gr.Textbox(label="입력5", lines=1)
460
+
461
+ deepseek_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
462
+
463
+ with gr.Accordion("고급 설정 (DeepSeek)", open=False):
464
+ deepseek_system_message = gr.Textbox(
465
+ value="""반드시 한글로 답변할 것.
466
+ 너는 DeepSeek-V3, 최고의 언어 모델이다.
467
+ 내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
468
+ """,
469
+ label="System Message",
470
+ lines=3
471
+ )
472
+ deepseek_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
473
+ deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
474
+ deepseek_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
475
+
476
+ deepseek_submit_button = gr.Button("전송")
477
+
478
+ def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, deepseek_token):
479
+ question = " ".join([i1, i2, i3, i4, i5])
480
+ return respond_deepseek_qna(
481
+ question=question,
482
+ system_message=sys_msg,
483
+ max_tokens=mt,
484
+ temperature=temp,
485
+ top_p=top_p_,
486
+ deepseek_token=deepseek_token
487
+ )
488
+
489
+ deepseek_submit_button.click(
490
+ fn=merge_and_call_deepseek,
491
+ inputs=[
492
+ deepseek_input1, deepseek_input2, deepseek_input3, deepseek_input4, deepseek_input5,
493
+ deepseek_system_message,
494
+ deepseek_max_tokens,
495
+ deepseek_temperature,
496
+ deepseek_top_p,
497
+ deepseek_token_box
498
+ ],
499
+ outputs=deepseek_answer_output
500
+ )
501
+
502
  #############################
503
  # 메인 실행부
504
  #############################
505
  if __name__ == "__main__":
506
+ demo.launch()
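
Note: the new respond_deepseek_qna targets the pre-1.0 openai-python interface (module-level openai.api_key / openai.api_base and openai.ChatCompletion.create), which no longer exists in openai >= 1.0. A minimal sketch of the same call for openai >= 1.0 follows; it is not part of the commit, the function name respond_deepseek_qna_v1 is hypothetical, and the base URL and model name are simply reused from the diff above.

```python
# Sketch only: equivalent DeepSeek call assuming openai-python >= 1.0 is installed.
from openai import OpenAI

def respond_deepseek_qna_v1(question, system_message, max_tokens,
                            temperature, top_p, deepseek_token):
    if not deepseek_token:
        return "DeepSeek API 토큰이 필요합니다."
    # Point the OpenAI-compatible client at the DeepSeek endpoint
    # (base URL and model name taken from the diff above).
    client = OpenAI(api_key=deepseek_token,
                    base_url="https://api.deepseek.com/v1")
    try:
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": question},
            ],
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        # In the v1 client the text is an attribute, not a dict key.
        return response.choices[0].message.content
    except Exception as e:
        return f"오류가 발생했습니다: {str(e)}"
```

Either variant returns the first choice's message text, so the Gradio wiring in the DeepSeek tab would not need to change.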
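
For a quick check of the helper outside the Gradio UI, it can also be called directly. The question text and the placeholder token below are illustrative only, not values from the commit.

```python
# Hypothetical smoke test of the helper added in this commit.
# Replace the placeholder with a real DeepSeek API key before running.
answer = respond_deepseek_qna(
    question="DeepSeek-V3를 한 문단으로 소개해줘",
    system_message="반드시 한글로 답변할 것.",
    max_tokens=500,
    temperature=0.7,
    top_p=0.95,
    deepseek_token="YOUR_DEEPSEEK_API_KEY",
)
print(answer)
```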