Update app.py

app.py  CHANGED

@@ -184,18 +184,17 @@ with gr.Blocks() as demo:
             value="Zephyr 7B Beta"
         )
 
-        # Inputs 1-5 (
-
-
-
-
-
-
-
-
-
-
-        # Advanced settings below the answer
+        # Inputs 1-5 (stacked vertically, one per row)
+        input1 = gr.Textbox(label="입력1", lines=1)
+        input2 = gr.Textbox(label="입력2", lines=1)
+        input3 = gr.Textbox(label="입력3", lines=1)
+        input4 = gr.Textbox(label="입력4", lines=1)
+        input5 = gr.Textbox(label="입력5", lines=1)
+
+        # Result box (label renamed from '답변' to '결과')
+        answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
+
+        # Advanced settings
         with gr.Accordion("고급 설정 (일반 모델)", open=False):
             max_tokens = gr.Slider(minimum=0, maximum=2000, value=500, step=100, label="Max Tokens")
             temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
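
For reference, a minimal self-contained sketch of the layout this hunk introduces: five stacked single-line Textboxes feeding one read-only result box. The echo_merged handler and the standalone scaffolding are illustrative assumptions; the actual app wires these components to its model-calling functions (respond_hf_qna and friends) instead.

import gradio as gr

def echo_merged(a, b, c, d, e):
    # Placeholder handler: the real app calls an LLM instead of echoing.
    return " ".join([a, b, c, d, e])

with gr.Blocks() as demo:
    # Five stacked single-line inputs, as added in the hunk above.
    boxes = [gr.Textbox(label=f"입력{i}", lines=1) for i in range(1, 6)]
    # Read-only result box labelled '결과'.
    result = gr.Textbox(label="결과", lines=5, interactive=False)
    gr.Button("전송").click(fn=echo_merged, inputs=boxes, outputs=result)

if __name__ == "__main__":
    demo.launch()
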
@@ -211,8 +210,8 @@ with gr.Blocks() as demo:
 
         submit_button = gr.Button("전송")
 
-        # Merge the five input boxes into one question, then respond
         def merge_and_call_hf(i1, i2, i3, i4, i5, m_name, mt, temp, top_p_, sys_msg, hf_token):
+            # Join inputs 1-5 with spaces to build the question
             question = " ".join([i1, i2, i3, i4, i5])
             return respond_hf_qna(
                 question=question,
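
The added comment documents how merge_and_call_hf builds question: a plain space-join of the five boxes. A quick illustration of that behavior, plus a hypothetical filtered variant (not part of this commit) that skips blank boxes:

parts = ["서울", "", "날씨", "", ""]

# Space-join as in merge_and_call_hf: empty boxes still contribute a
# separator, so blank inputs leave double or trailing spaces.
print(" ".join(parts))                          # -> '서울  날씨  '

# Hypothetical variant (not in the commit): drop blank boxes first.
print(" ".join(p for p in parts if p.strip()))  # -> '서울 날씨'
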
@@ -227,7 +226,7 @@ with gr.Blocks() as demo:
         submit_button.click(
             fn=merge_and_call_hf,
             inputs=[
-                input1, input2, input3, input4, input5,
+                input1, input2, input3, input4, input5,
                 model_name,
                 max_tokens,
                 temperature,
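
One detail this inputs list relies on: Gradio passes component values to fn positionally, so the order here must match merge_and_call_hf's parameter order (i1..i5 first, then the model and sampling settings). A small self-contained illustration with made-up component names:

import gradio as gr

def handler(q1, q2, temperature):
    # Values arrive in the same order as the inputs list below.
    return f"question='{q1} {q2}', temperature={temperature}"

with gr.Blocks() as demo:
    a = gr.Textbox(label="입력1", lines=1)
    b = gr.Textbox(label="입력2", lines=1)
    t = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
    out = gr.Textbox(label="결과", interactive=False)
    gr.Button("전송").click(fn=handler, inputs=[a, b, t], outputs=out)

if __name__ == "__main__":
    demo.launch()
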
@@ -242,17 +241,17 @@ with gr.Blocks() as demo:
     # Cohere Command R+ tab
     #################
     with gr.Tab("Cohere Command R+"):
-        # Inputs 1-5 (
-
-
-
-
-
-        cohere_input5 = gr.Textbox(label="입력5", lines=1)
+        # Inputs 1-5 (stacked vertically)
+        cohere_input1 = gr.Textbox(label="입력1", lines=1)
+        cohere_input2 = gr.Textbox(label="입력2", lines=1)
+        cohere_input3 = gr.Textbox(label="입력3", lines=1)
+        cohere_input4 = gr.Textbox(label="입력4", lines=1)
+        cohere_input5 = gr.Textbox(label="입력5", lines=1)
 
-        #
-        cohere_answer_output = gr.Textbox(label="
+        # Result box
+        cohere_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
 
+        # Advanced settings
         with gr.Accordion("고급 설정 (Cohere)", open=False):
             cohere_system_message = gr.Textbox(
                 value="""반드시 한글로 답변할 것.
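
Each tab pairs the result box with a collapsed "고급 설정" accordion holding a system-message Textbox whose default value begins with the Korean instruction shown above ("반드시 한글로 답변할 것." roughly means "Always answer in Korean."). A minimal sketch of that accordion pattern; the label and line count are assumptions not shown in the hunk:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Accordion("고급 설정 (Cohere)", open=False):
        cohere_system_message = gr.Textbox(
            value="반드시 한글로 답변할 것.",  # first line of the longer default in app.py
            label="System Message",  # assumed label
            lines=3,  # assumed
        )

if __name__ == "__main__":
    demo.launch()
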
@@ -296,17 +295,17 @@ with gr.Blocks() as demo:
     # ChatGPT tab
     #################
     with gr.Tab("gpt-4o-mini"):
-        # Inputs 1-5
-
-
-
-
-
-        chatgpt_input5 = gr.Textbox(label="입력5", lines=1)
+        # Inputs 1-5
+        chatgpt_input1 = gr.Textbox(label="입력1", lines=1)
+        chatgpt_input2 = gr.Textbox(label="입력2", lines=1)
+        chatgpt_input3 = gr.Textbox(label="입력3", lines=1)
+        chatgpt_input4 = gr.Textbox(label="입력4", lines=1)
+        chatgpt_input5 = gr.Textbox(label="입력5", lines=1)
 
-        #
-        chatgpt_answer_output = gr.Textbox(label="
+        # Result box
+        chatgpt_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
 
+        # Advanced settings
         with gr.Accordion("고급 설정 (ChatGPT)", open=False):
             chatgpt_system_message = gr.Textbox(
                 value="""반드시 한글로 답변할 것.

@@ -400,16 +399,15 @@ with gr.Blocks() as demo:
     with gr.Tab("claude-3-haiku"):
         gr.Markdown("claude-3-haiku모델")
 
-        # Inputs 1-5
-
-
-
-
-
-        claude_input5 = gr.Textbox(label="입력5", lines=1)
+        # Inputs 1-5
+        claude_input1 = gr.Textbox(label="입력1", lines=1)
+        claude_input2 = gr.Textbox(label="입력2", lines=1)
+        claude_input3 = gr.Textbox(label="입력3", lines=1)
+        claude_input4 = gr.Textbox(label="입력4", lines=1)
+        claude_input5 = gr.Textbox(label="입력5", lines=1)
 
-        #
-        claude_answer_output = gr.Textbox(label="
+        # Result box
+        claude_answer_output = gr.Textbox(label="결과", interactive=False, lines=5)
 
         with gr.Accordion("고급 설정 (Claude)", open=False):
             claude_system_message = gr.Textbox(
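
The gpt-4o-mini and claude-3-haiku hunks repeat the same block as the Cohere tab: five inputs, a "결과" box, and an advanced-settings accordion, with only the variable prefix changing. A hypothetical refactor (not what this commit does) that builds the repeated block with a small helper:

import gradio as gr

def merge(*parts):
    # Placeholder: each real tab would call its own model here.
    return " ".join(parts)

def add_qna_block():
    # Five stacked inputs, a read-only result box, and a submit button.
    inputs = [gr.Textbox(label=f"입력{i}", lines=1) for i in range(1, 6)]
    result = gr.Textbox(label="결과", lines=5, interactive=False)
    gr.Button("전송").click(fn=merge, inputs=inputs, outputs=result)

with gr.Blocks() as demo:
    for tab_name in ["Cohere Command R+", "gpt-4o-mini", "claude-3-haiku"]:
        with gr.Tab(tab_name):
            add_qna_block()

if __name__ == "__main__":
    demo.launch()
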