Kims12 committed
Commit f6afb3c · verified · 1 Parent(s): 083023a

Update app.py

Files changed (1)
  1. app.py +22 -51
app.py CHANGED
@@ -20,7 +20,6 @@ def get_client(model_name):
     hf_token = os.getenv("HF_TOKEN")
     if not hf_token:
         raise ValueError("HuggingFace API 토큰이 필요합니다.")
-
     if model_name == "Cohere Command R+":
         model_id = COHERE_MODEL
     else:
@@ -42,12 +41,10 @@ def respond_cohere_qna(
         client = get_client(model_name)
     except ValueError as e:
         return f"오류: {str(e)}"
-
     messages = [
         {"role": "system", "content": system_message},
         {"role": "user", "content": question}
     ]
-
     try:
         response_full = client.chat_completion(
             messages,
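Note: the hunk ends in the middle of the chat_completion call, so its remaining arguments are not visible in this diff. As a reference point only, here is a minimal sketch of how an InferenceClient chat completion is typically finished; the argument list below is an assumption, not the file's actual code.

    # Sketch only; assumes the keyword arguments mirror the function's parameters.
    response_full = client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    assistant_reply = response_full.choices[0].message.content  # ChatCompletionOutput shape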
@@ -73,14 +70,11 @@ def respond_chatgpt_qna(
     openai_token = os.getenv("OPENAI_TOKEN")
     if not openai_token:
         return "OpenAI API 토큰이 필요합니다."
-
     openai.api_key = openai_token
-
     messages = [
         {"role": "system", "content": system_message},
         {"role": "user", "content": question}
     ]
-
     try:
         response = openai.ChatCompletion.create(
             model="gpt-4o-mini",
@@ -108,15 +102,12 @@ def respond_deepseek_qna(
     deepseek_token = os.getenv("DEEPSEEK_TOKEN")
     if not deepseek_token:
         return "DeepSeek API 토큰이 필요합니다."
-
     openai.api_key = deepseek_token
     openai.api_base = "https://api.deepseek.com/v1"
-
     messages = [
         {"role": "system", "content": system_message},
         {"role": "user", "content": question}
     ]
-
     try:
         response = openai.ChatCompletion.create(
             model=model_name,  # 선택된 모델 사용
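Note: respond_deepseek_qna reuses the openai module against DeepSeek's OpenAI-compatible endpoint by reassigning openai.api_base, which only exists in the pre-1.0 openai SDK (openai.ChatCompletion and openai.api_base were removed in 1.x). For comparison, a sketch of the same call on openai>=1.0; this is not the file's code.

    # Sketch assuming openai>=1.0; the app itself uses the legacy module-level API.
    from openai import OpenAI

    client = OpenAI(
        api_key=os.getenv("DEEPSEEK_TOKEN"),
        base_url="https://api.deepseek.com/v1",  # same OpenAI-compatible endpoint
    )
    response = client.chat.completions.create(
        model="deepseek-chat",
        messages=[{"role": "system", "content": system_message},
                  {"role": "user", "content": question}],
    )
    answer = response.choices[0].message.content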
@@ -139,15 +130,13 @@ def respond_claude_qna(
     model_name: str  # 모델 이름 파라미터 추가
 ) -> str:
     """
-    Claude API를 사용한 개선된 응답 생성 함수
+    Claude API를 사용한 개선된 응답 생성 함수.
     """
     claude_api_key = os.getenv("CLAUDE_TOKEN")
     if not claude_api_key:
         return "Claude API 토큰이 필요합니다."
-
     try:
         client = anthropic.Anthropic(api_key=claude_api_key)
-
         message = client.messages.create(
             model=model_name,
             max_tokens=max_tokens,
@@ -157,9 +146,7 @@ def respond_claude_qna(
                 {"role": "user", "content": question}
             ]
         )
-
         return message.content[0].text
-
     except anthropic.APIError as ae:
         return f"Claude API 오류: {str(ae)}"
     except anthropic.RateLimitError:
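Note: the diff elides the middle of the messages.create call. A minimal sketch of the usual full shape of an Anthropic Messages API call with this function's parameters follows; the system and temperature arguments are assumptions about the elided lines.

    # Sketch of the presumed full call; Anthropic takes the system prompt as a
    # top-level `system` field rather than a message with role "system".
    message = client.messages.create(
        model=model_name,
        max_tokens=max_tokens,
        temperature=temperature,
        system=system_message,
        messages=[{"role": "user", "content": question}],
    )
    answer = message.content[0].text  # content is a list of blocks; [0] is the text block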
@@ -175,22 +162,15 @@ def respond_o1mini_qna(
 ):
     """
     o1-mini 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
-    o1-mini에서는 'system' 역할 메시지를 지원하지 않으므로,
-    system_message와 question을 하나의 'user' 메시지로 합쳐 전달합니다.
-    또한, o1-mini에서는 'max_tokens' 대신 'max_completion_tokens' 파라미터를 사용하며,
-    온도(temperature)는 고정값 1만 지원합니다.
+    o1-mini에서는 'system' 메시지를 지원하지 않으므로 system_message와 question을 하나의 'user' 메시지로 합쳐 전달합니다.
+    또한, o1-mini에서는 'max_tokens' 대신 'max_completion_tokens' 사용하며, temperature는 고정값 1만 지원합니다.
     """
     openai_token = os.getenv("OPENAI_TOKEN")
     if not openai_token:
         return "OpenAI API 토큰이 필요합니다."
-
     openai.api_key = openai_token
-
     combined_message = f"{system_message}\n\n{question}"
-    messages = [
-        {"role": "user", "content": combined_message}
-    ]
-
+    messages = [{"role": "user", "content": combined_message}]
     try:
         response = openai.ChatCompletion.create(
             model="o1-mini",
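Note: the hunk cuts off before the parameters the docstring describes. A sketch of the call shape the docstring implies, in the same legacy openai style as the file; whether the pre-1.0 SDK forwards max_completion_tokens untouched is an assumption.

    # Sketch only: o1-mini takes max_completion_tokens instead of max_tokens,
    # and temperature is fixed at 1, so it is simply omitted here.
    response = openai.ChatCompletion.create(
        model="o1-mini",
        messages=messages,  # the single merged "user" message built above
        max_completion_tokens=max_tokens,
    )
    answer = response.choices[0].message["content"]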
@@ -208,29 +188,31 @@ def respond_gemini_qna(
     system_message: str,
     max_tokens: int,
     temperature: float,
-    top_p: float,
+    top_p: float,  # top_p는 Google API에서 지원하지 않으므로 사용하지 않습니다.
     model_id: str
 ):
     """
     Gemini 모델(예: gemini-2.0-flash, gemini-2.0-flash-lite-preview-02-05)을 이용해
     한 번의 질문(question)에 대한 답변을 반환하는 함수.
-    system_message와 question을 하나의 프롬프트로 합쳐서 사용합니다.
+    Google의 genai 라이브러리를 사용합니다.
     """
-    hf_token = os.getenv("HF_TOKEN")
-    if not hf_token:
-        return "HuggingFace API 토큰이 필요합니다."
-
-    client = InferenceClient(model_id, token=hf_token)
+    from google import genai
+    from google.genai import types
+    gemini_api_key = os.getenv("GEMINI_API_KEY")
+    if not gemini_api_key:
+        return "Gemini API 토큰이 필요합니다."
+    client = genai.Client(api_key=gemini_api_key)
     prompt = f"{system_message}\n\n{question}"
-
     try:
-        response = client.text_generation(
-            prompt,
-            max_new_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p
+        response = client.models.generate_content(
+            model=model_id,
+            contents=[prompt],
+            config=types.GenerateContentConfig(
+                max_output_tokens=max_tokens,
+                temperature=temperature
+            )
         )
-        return response["generated_text"]
+        return response.text
     except Exception as e:
         return f"오류가 발생했습니다: {str(e)}"
@@ -250,7 +232,6 @@ with gr.Blocks() as demo:
         label="모델 선택",
         value="gpt-4o-mini"
     )
-
     with gr.Column(visible=True) as chatgpt_ui:
         chatgpt_input1_o = gr.Textbox(label="입력1", lines=1)
         chatgpt_input2_o = gr.Textbox(label="입력2", lines=1)
@@ -409,28 +390,25 @@ with gr.Blocks() as demo:
         label="모델 선택",
         value="claude-3-5-sonnet-20241022"
     )
-
     claude_input1 = gr.Textbox(label="입력1", lines=1)
     claude_input2 = gr.Textbox(label="입력2", lines=1)
     claude_input3 = gr.Textbox(label="입력3", lines=1)
     claude_input4 = gr.Textbox(label="입력4", lines=1)
     claude_input5 = gr.Textbox(label="입력5", lines=1)
-
     claude_answer_output = gr.Textbox(label="결과", interactive=False, lines=5)
-
     with gr.Accordion("고급 설정 (Claude)", open=False):
         claude_system_message = gr.Textbox(
             label="System Message",
             value="""반드시 한글로 답변할 것.
 너는 Anthropic에서 개발한 클로드이다.
-최대한 정확하고 친절하게 답변하라.""",
+최대한 정확하고 친절하게 답변하라.
+""",
             lines=3
         )
         claude_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
         claude_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
         claude_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
     claude_submit_button = gr.Button("전송")
-
     def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, model_radio):
         question = " ".join([i1, i2, i3, i4, i5])
         return respond_claude_qna(
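Note: the diff shows the merged-question handler but not the button binding. The wiring presumably looks like a standard Gradio click event; the radio component's variable name is not visible in this diff, so claude_model_radio below is a hypothetical placeholder.

    # Presumed wiring (sketch); claude_model_radio is a placeholder name.
    claude_submit_button.click(
        fn=merge_and_call_claude,
        inputs=[claude_input1, claude_input2, claude_input3, claude_input4, claude_input5,
                claude_system_message, claude_max_tokens, claude_temperature,
                claude_top_p, claude_model_radio],
        outputs=claude_answer_output,
    )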
@@ -463,15 +441,12 @@ with gr.Blocks() as demo:
         label="모델 선택",
         value="V3 (deepseek-chat)"
     )
-
     deepseek_input1 = gr.Textbox(label="입력1", lines=1)
     deepseek_input2 = gr.Textbox(label="입력2", lines=1)
     deepseek_input3 = gr.Textbox(label="입력3", lines=1)
     deepseek_input4 = gr.Textbox(label="입력4", lines=1)
     deepseek_input5 = gr.Textbox(label="입력5", lines=1)
-
     deepseek_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
-
     with gr.Accordion("고급 설정 (DeepSeek)", open=False):
         deepseek_system_message = gr.Textbox(
             value="""반드시 한글로 답변할 것.
@@ -485,7 +460,6 @@ with gr.Blocks() as demo:
         deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
         deepseek_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
         deepseek_submit_button = gr.Button("전송")
-
     def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, model_radio):
         if model_radio == "V3 (deepseek-chat)":
             model_name = "deepseek-chat"
@@ -522,9 +496,7 @@ with gr.Blocks() as demo:
     cohere_input3 = gr.Textbox(label="입력3", lines=1)
     cohere_input4 = gr.Textbox(label="입력4", lines=1)
     cohere_input5 = gr.Textbox(label="입력5", lines=1)
-
     cohere_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
-
     with gr.Accordion("고급 설정 (Cohere)", open=False):
         cohere_system_message = gr.Textbox(
             value="""반드시 한글로 답변할 것.
@@ -538,7 +510,6 @@ with gr.Blocks() as demo:
         cohere_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
         cohere_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
         cohere_submit_button = gr.Button("전송")
-
     def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
         question = " ".join([i1, i2, i3, i4, i5])
         return respond_cohere_qna(
 