Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -176,8 +176,7 @@ def respond_o3mini_qna(
|
|
176 |
system_message: str,
|
177 |
max_tokens: int,
|
178 |
temperature: float,
|
179 |
-
top_p: float,
|
180 |
-
reasoning_effort: str # 추가된 파라미터
|
181 |
):
|
182 |
"""
|
183 |
o3-mini 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
|
@@ -195,8 +194,7 @@ def respond_o3mini_qna(
|
|
195 |
|
196 |
try:
|
197 |
response = openai.ChatCompletion.create(
|
198 |
-
model="o3-mini",
|
199 |
-
reasoning_effort=reasoning_effort,
|
200 |
messages=messages,
|
201 |
max_tokens=max_tokens,
|
202 |
temperature=temperature,
|
@@ -315,7 +313,7 @@ with gr.Blocks() as demo:
|
|
315 |
#################
|
316 |
# o3-mini 탭
|
317 |
#################
|
318 |
-
with gr.Tab("o3-mini"):
|
319 |
o3mini_input1 = gr.Textbox(label="입력1", lines=1)
|
320 |
o3mini_input2 = gr.Textbox(label="입력2", lines=1)
|
321 |
o3mini_input3 = gr.Textbox(label="입력3", lines=1)
|
@@ -333,22 +331,20 @@ with gr.Blocks() as demo:
|
|
333 |
label="System Message",
|
334 |
lines=3
|
335 |
)
|
336 |
-
o3mini_max_tokens = gr.Slider(minimum=100, maximum=4000, value=1500, step=100, label="Max Tokens")
|
337 |
-
o3mini_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.8, step=0.05, label="Temperature")
|
338 |
-
o3mini_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P")
|
339 |
-
o3mini_reasoning_effort = gr.Radio(choices=["low", "medium", "high"], label="Reasoning Effort", value="medium")
|
340 |
|
341 |
o3mini_submit_button = gr.Button("전송")
|
342 |
|
343 |
-
def merge_and_call_o3mini(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, reasoning_effort):
|
344 |
question = " ".join([i1, i2, i3, i4, i5])
|
345 |
return respond_o3mini_qna(
|
346 |
question=question,
|
347 |
system_message=sys_msg,
|
348 |
max_tokens=mt,
|
349 |
temperature=temp,
|
350 |
-
top_p=top_p_,
|
351 |
-
reasoning_effort=reasoning_effort
|
352 |
)
|
353 |
|
354 |
o3mini_submit_button.click(
|
@@ -358,8 +354,7 @@ with gr.Blocks() as demo:
|
|
358 |
o3mini_system_message,
|
359 |
o3mini_max_tokens,
|
360 |
o3mini_temperature,
|
361 |
-
o3mini_top_p,
|
362 |
-
o3mini_reasoning_effort
|
363 |
],
|
364 |
outputs=o3mini_answer_output
|
365 |
)
|
|
|
176 |
system_message: str,
|
177 |
max_tokens: int,
|
178 |
temperature: float,
|
179 |
+
top_p: float
|
|
|
180 |
):
|
181 |
"""
|
182 |
o3-mini 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
|
|
|
194 |
|
195 |
try:
|
196 |
response = openai.ChatCompletion.create(
|
197 |
+
model="o3-mini", # o3-mini 모델 사용
|
|
|
198 |
messages=messages,
|
199 |
max_tokens=max_tokens,
|
200 |
temperature=temperature,
|
|
|
313 |
#################
|
314 |
# o3-mini 탭
|
315 |
#################
|
316 |
+
with gr.Tab("o3-mini"):
|
317 |
o3mini_input1 = gr.Textbox(label="입력1", lines=1)
|
318 |
o3mini_input2 = gr.Textbox(label="입력2", lines=1)
|
319 |
o3mini_input3 = gr.Textbox(label="입력3", lines=1)
|
|
|
331 |
label="System Message",
|
332 |
lines=3
|
333 |
)
|
334 |
+
o3mini_max_tokens = gr.Slider(minimum=100, maximum=4000, value=1500, step=100, label="Max Tokens")
|
335 |
+
o3mini_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.8, step=0.05, label="Temperature")
|
336 |
+
o3mini_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P")
|
|
|
337 |
|
338 |
o3mini_submit_button = gr.Button("전송")
|
339 |
|
340 |
+
def merge_and_call_o3mini(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
|
341 |
question = " ".join([i1, i2, i3, i4, i5])
|
342 |
return respond_o3mini_qna(
|
343 |
question=question,
|
344 |
system_message=sys_msg,
|
345 |
max_tokens=mt,
|
346 |
temperature=temp,
|
347 |
+
top_p=top_p_
|
|
|
348 |
)
|
349 |
|
350 |
o3mini_submit_button.click(
|
|
|
354 |
o3mini_system_message,
|
355 |
o3mini_max_tokens,
|
356 |
o3mini_temperature,
|
357 |
+
o3mini_top_p
|
|
|
358 |
],
|
359 |
outputs=o3mini_answer_output
|
360 |
)
|