Update app.py
app.py CHANGED
@@ -56,20 +56,18 @@ def respond_hf_qna(
     except ValueError as e:
         return f"오류: {str(e)}"
 
-    # Pass the system message + the user question only once
     messages = [
         {"role": "system", "content": system_message},
        {"role": "user", "content": question}
     ]
 
     try:
-        # Request the full answer (non-streaming) instead of streaming
         response = client.chat_completion(
             messages,
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
-            stream=False,
+            stream=False,
         )
         assistant_message = response.choices[0].message.content
         return assistant_message
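For context, the pattern this hunk settles on is huggingface_hub's InferenceClient.chat_completion called with stream=False, which returns one complete response object instead of an iterator of deltas. A minimal standalone sketch of that call, assuming the Zephyr model id; ask_once, its arguments, and the sampling defaults are illustrative, not taken from the commit:

# Minimal non-streaming Q&A sketch using huggingface_hub's InferenceClient.
# Model id, token handling, and the sampling defaults are assumptions.
from huggingface_hub import InferenceClient

def ask_once(question: str, system_message: str, hf_token: str) -> str:
    client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=hf_token)
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": question},
    ]
    # stream=False yields a single ChatCompletionOutput; with stream=True this
    # call would instead return an iterator of incremental deltas.
    response = client.chat_completion(
        messages,
        max_tokens=500,
        temperature=0.7,
        top_p=0.95,
        stream=False,
    )
    return response.choices[0].message.content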
@@ -175,20 +173,29 @@ with gr.Blocks() as demo:
         show_copy_button=False
     )
 
-
+    #################
+    # General model tab
+    #################
     with gr.Tab("일반 모델"):
-        # Model name
+        # Model selection
         model_name = gr.Radio(
             choices=list(MODELS.keys()),
             label="Language Model (HuggingFace)",
             value="Zephyr 7B Beta"
         )
 
-        #
-
+        # Inputs 1-5 (one line each)
+        with gr.Row():
+            input1 = gr.Textbox(label="입력1", lines=1)
+            input2 = gr.Textbox(label="입력2", lines=1)
+            input3 = gr.Textbox(label="입력3", lines=1)
+            input4 = gr.Textbox(label="입력4", lines=1)
+            input5 = gr.Textbox(label="입력5", lines=1)
+
+        # Answer
         answer_output = gr.Textbox(label="답변", lines=5, interactive=False)
 
-        #
+        # Advanced settings below the answer box
         with gr.Accordion("고급 설정 (일반 모델)", open=False):
             max_tokens = gr.Slider(minimum=0, maximum=2000, value=500, step=100, label="Max Tokens")
             temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
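The layout introduced above is worth isolating: gr.Row() renders its children side by side, so the five one-line textboxes form a single input strip, and gr.Accordion(open=False) keeps the tuning sliders collapsed beneath the answer box. A minimal runnable sketch of just that layout, reusing the commit's labels; no model call is wired up:

# Layout-only sketch: a row of five one-line inputs, an answer box, and a
# collapsed accordion of advanced settings. No handler is attached here.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():  # children of a Row render side by side
        inputs = [gr.Textbox(label=f"입력{i}", lines=1) for i in range(1, 6)]
    answer = gr.Textbox(label="답변", lines=5, interactive=False)
    with gr.Accordion("고급 설정", open=False):  # collapsed until clicked
        max_tokens = gr.Slider(minimum=0, maximum=2000, value=500, step=100, label="Max Tokens")

if __name__ == "__main__":
    demo.launch()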
@@ -204,10 +211,23 @@ with gr.Blocks() as demo:
 
         submit_button = gr.Button("전송")
 
+        # Merge the five input boxes into one question, then respond
+        def merge_and_call_hf(i1, i2, i3, i4, i5, m_name, mt, temp, top_p_, sys_msg, hf_token):
+            question = " ".join([i1, i2, i3, i4, i5])
+            return respond_hf_qna(
+                question=question,
+                model_name=m_name,
+                max_tokens=mt,
+                temperature=temp,
+                top_p=top_p_,
+                system_message=sys_msg,
+                hf_token=hf_token
+            )
+
         submit_button.click(
-            fn=
+            fn=merge_and_call_hf,
             inputs=[
-
+                input1, input2, input3, input4, input5,  # inputs 1-5
                 model_name,
                 max_tokens,
                 temperature,
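One contract the new wrapper relies on: Gradio hands the components listed in inputs to fn positionally, so merge_and_call_hf's parameters must appear in exactly the same order as the inputs list (five textbox values, then the radio and slider values). A minimal wiring sketch of that contract with a stub handler; as a small tweak over the commit's plain " ".join, the stub skips empty boxes so blanks do not leave double spaces:

# Wiring sketch: handler parameters line up positionally with `inputs`.
# The handler body is a stub; skipping empty boxes is an added tweak.
import gradio as gr

def handler(i1, i2, i3, i4, i5, model_name, max_tokens):
    question = " ".join(part for part in (i1, i2, i3, i4, i5) if part)
    return f"[{model_name}, max_tokens={max_tokens}] {question}"

with gr.Blocks() as demo:
    with gr.Row():
        boxes = [gr.Textbox(label=f"입력{i}", lines=1) for i in range(1, 6)]
    model = gr.Radio(choices=["Zephyr 7B Beta"], value="Zephyr 7B Beta", label="Model")
    tokens = gr.Slider(minimum=0, maximum=2000, value=500, step=100, label="Max Tokens")
    out = gr.Textbox(label="답변", lines=5, interactive=False)
    btn = gr.Button("전송")
    btn.click(fn=handler, inputs=[*boxes, model, tokens], outputs=out)

if __name__ == "__main__":
    demo.launch()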
@@ -218,10 +238,19 @@ with gr.Blocks() as demo:
             outputs=answer_output
         )
 
-
+    #################
+    # Cohere Command R+ tab
+    #################
     with gr.Tab("Cohere Command R+"):
-        #
-
+        # Inputs 1-5 (one line each)
+        with gr.Row():
+            cohere_input1 = gr.Textbox(label="입력1", lines=1)
+            cohere_input2 = gr.Textbox(label="입력2", lines=1)
+            cohere_input3 = gr.Textbox(label="입력3", lines=1)
+            cohere_input4 = gr.Textbox(label="입력4", lines=1)
+            cohere_input5 = gr.Textbox(label="입력5", lines=1)
+
+        # Answer
         cohere_answer_output = gr.Textbox(label="답변", lines=5, interactive=False)
 
         with gr.Accordion("고급 설정 (Cohere)", open=False):
@@ -239,10 +268,21 @@ with gr.Blocks() as demo:
 
         cohere_submit_button = gr.Button("전송")
 
+        def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, hf_token):
+            question = " ".join([i1, i2, i3, i4, i5])
+            return respond_cohere_qna(
+                question=question,
+                system_message=sys_msg,
+                max_tokens=mt,
+                temperature=temp,
+                top_p=top_p_,
+                hf_token=hf_token
+            )
+
         cohere_submit_button.click(
-            fn=
+            fn=merge_and_call_cohere,
             inputs=[
-
+                cohere_input1, cohere_input2, cohere_input3, cohere_input4, cohere_input5,
                 cohere_system_message,
                 cohere_max_tokens,
                 cohere_temperature,
@@ -252,10 +292,19 @@ with gr.Blocks() as demo:
             outputs=cohere_answer_output
         )
 
-
+    #################
+    # ChatGPT tab
+    #################
     with gr.Tab("gpt-4o-mini"):
-        #
-
+        # Inputs 1-5 (one line each)
+        with gr.Row():
+            chatgpt_input1 = gr.Textbox(label="입력1", lines=1)
+            chatgpt_input2 = gr.Textbox(label="입력2", lines=1)
+            chatgpt_input3 = gr.Textbox(label="입력3", lines=1)
+            chatgpt_input4 = gr.Textbox(label="입력4", lines=1)
+            chatgpt_input5 = gr.Textbox(label="입력5", lines=1)
+
+        # Answer
         chatgpt_answer_output = gr.Textbox(label="답변", lines=5, interactive=False)
 
         with gr.Accordion("고급 설정 (ChatGPT)", open=False):
@@ -273,10 +322,21 @@ with gr.Blocks() as demo:
 
         chatgpt_submit_button = gr.Button("전송")
 
+        def merge_and_call_chatgpt(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, openai_token):
+            question = " ".join([i1, i2, i3, i4, i5])
+            return respond_chatgpt_qna(
+                question=question,
+                system_message=sys_msg,
+                max_tokens=mt,
+                temperature=temp,
+                top_p=top_p_,
+                openai_token=openai_token
+            )
+
         chatgpt_submit_button.click(
-            fn=
+            fn=merge_and_call_chatgpt,
             inputs=[
-
+                chatgpt_input1, chatgpt_input2, chatgpt_input3, chatgpt_input4, chatgpt_input5,
                 chatgpt_system_message,
                 chatgpt_max_tokens,
                 chatgpt_temperature,
@@ -334,21 +394,22 @@ with gr.Blocks() as demo:
     except Exception as e:
         return f"예상치 못한 오류가 발생했습니다: {str(e)}"
 
-
+    #################
+    # Claude tab
+    #################
     with gr.Tab("claude-3-haiku"):
         gr.Markdown("claude-3-haiku모델")
 
-        #
-
-            label="
-            lines=
-
-
-
-
-
-
-        )
+        # Inputs 1-5 (one line each)
+        with gr.Row():
+            claude_input1 = gr.Textbox(label="입력1", lines=1)
+            claude_input2 = gr.Textbox(label="입력2", lines=1)
+            claude_input3 = gr.Textbox(label="입력3", lines=1)
+            claude_input4 = gr.Textbox(label="입력4", lines=1)
+            claude_input5 = gr.Textbox(label="입력5", lines=1)
+
+        # Answer
+        claude_answer_output = gr.Textbox(label="답변", interactive=False, lines=5)
 
         with gr.Accordion("고급 설정 (Claude)", open=False):
             claude_system_message = gr.Textbox(
@@ -382,10 +443,21 @@ with gr.Blocks() as demo:
 
         claude_submit_button = gr.Button("전송")
 
+        def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, claude_key):
+            question = " ".join([i1, i2, i3, i4, i5])
+            return respond_claude_qna(
+                question=question,
+                system_message=sys_msg,
+                max_tokens=mt,
+                temperature=temp,
+                top_p=top_p_,
+                claude_api_key=claude_key
+            )
+
         claude_submit_button.click(
-            fn=
+            fn=merge_and_call_claude,
             inputs=[
-
+                claude_input1, claude_input2, claude_input3, claude_input4, claude_input5,
                 claude_system_message,
                 claude_max_tokens,
                 claude_temperature,