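"""LLM playground built with Gradio.

Provides one tab per model (Cohere Command R+, gpt-4o-mini, claude-3-haiku,
DeepSeek-V3). Each tab merges five text inputs into a single question and
sends it to the corresponding API.

Required environment variables:
    HF_TOKEN       - Hugging Face Inference API token (Cohere Command R+)
    OPENAI_TOKEN   - OpenAI API key (gpt-4o-mini)
    DEEPSEEK_TOKEN - DeepSeek API key (deepseek-chat)
    CLAUDE_TOKEN   - Anthropic API key (claude-3-haiku)
"""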
import gradio as gr
from huggingface_hub import InferenceClient
import openai
import anthropic
import os
from typing import Optional

#############################
# [Base code] - do not modify or delete
#############################

# Cohere Command R+ model ID
COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"

def get_client(model_name):
    """
    모델 이름에 맞춰 InferenceClient 생성.
    토큰은 환경 변수에서 가져옴.
    """
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise ValueError("HuggingFace API 토큰이 필요합니다.")

    if model_name == "Cohere Command R+":
        model_id = COHERE_MODEL
    else:
        raise ValueError("유효하지 않은 모델 이름입니다.")
    return InferenceClient(model_id, token=hf_token)

def respond_cohere_qna(
    question: str,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float
):
    """
    Cohere Command R+ 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
    """
    model_name = "Cohere Command R+"
    try:
        client = get_client(model_name)
    except ValueError as e:
        return f"오류: {str(e)}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": question}
    ]

    try:
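        # InferenceClient.chat_completion accepts OpenAI-style message dicts and
        # returns an object whose choices[0].message.content holds the reply.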
        response_full = client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        assistant_message = response_full.choices[0].message.content
        return assistant_message
    except Exception as e:
        return f"오류가 발생했습니다: {str(e)}"

def respond_chatgpt_qna(
    question: str,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float
):
    """
    ChatGPT(OpenAI) 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
    """
    openai_token = os.getenv("OPENAI_TOKEN")
    if not openai_token:
        return "OpenAI API 토큰이 필요합니다."

    openai.api_key = openai_token

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": question}
    ]

    try:
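        # Uses the legacy openai<1.0 ChatCompletion interface; with openai>=1.0
        # this call would need to be ported to OpenAI().chat.completions.create.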
        response = openai.ChatCompletion.create(
            model="gpt-4o-mini",  # 필요한 경우 변경
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        assistant_message = response.choices[0].message['content']
        return assistant_message
    except Exception as e:
        return f"오류가 발생했습니다: {str(e)}"

def respond_deepseek_qna(
    question: str,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float
):
    """
    DeepSeek 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
    """
    deepseek_token = os.getenv("DEEPSEEK_TOKEN")
    if not deepseek_token:
        return "DeepSeek API 토큰이 필요합니다."

    # Avoid mutating the module-level openai globals here: overwriting
    # openai.api_base would also redirect later ChatGPT calls to the DeepSeek
    # endpoint. The key and base URL are passed per request below instead.

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": question}
    ]

    try:
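        # DeepSeek exposes an OpenAI-compatible endpoint, so the same legacy
        # ChatCompletion interface is reused with a different api_base and key.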
        response = openai.ChatCompletion.create(
            api_key=deepseek_token,
            api_base="https://api.deepseek.com/v1",
            model="deepseek-chat",
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        assistant_message = response.choices[0].message['content']
        return assistant_message
    except Exception as e:
        return f"오류가 발생했습니다: {str(e)}"

def respond_claude_qna(
    question: str,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float
) -> str:
    """
    Claude API를 사용한 개선된 응답 생성 함수
    """
    claude_api_key = os.getenv("CLAUDE_TOKEN")
    if not claude_api_key:
        return "Claude API 토큰이 필요합니다."

    try:
        client = anthropic.Anthropic(api_key=claude_api_key)
        
        # Build the request
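        # Note: the Anthropic Messages API takes the system prompt as a separate
        # `system` argument rather than as a chat message; the UI's top_p value is
        # not forwarded because Anthropic advises tuning temperature or top_p, not both.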
        message = client.messages.create(
            model="claude-3-haiku-20240307",
            max_tokens=max_tokens,
            temperature=temperature,
            system=system_message,
            messages=[
                {
                    "role": "user",
                    "content": question
                }
            ]
        )
        
        return message.content[0].text
            
    # Catch the more specific RateLimitError before the generic APIError,
    # otherwise the rate-limit branch is unreachable.
    except anthropic.RateLimitError:
        return "요청 한도를 초과했습니다. 잠시 후 다시 시도해주세요."
    except anthropic.APIError as ae:
        return f"Claude API 오류: {str(ae)}"
    except Exception as e:
        return f"예상치 못한 오류가 발생했습니다: {str(e)}"

#############################
# [Base code] UI section - do not modify or delete
#############################

with gr.Blocks() as demo:
    gr.Markdown("# LLM 플레이그라운드")
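    # Each tab exposes five single-line inputs that are concatenated into one
    # question string before being sent to the selected model.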

    #################
    # Cohere Command R+ tab
    #################
    with gr.Tab("Cohere Command R+"):
        cohere_input1 = gr.Textbox(label="입력1", lines=1)
        cohere_input2 = gr.Textbox(label="입력2", lines=1)
        cohere_input3 = gr.Textbox(label="입력3", lines=1)
        cohere_input4 = gr.Textbox(label="입력4", lines=1)
        cohere_input5 = gr.Textbox(label="입력5", lines=1)

        cohere_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)

        with gr.Accordion("고급 설정 (Cohere)", open=False):
            cohere_system_message = gr.Textbox(
                value="""반드시 한글로 답변할 것.
너는 최고의 비서이다.
내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
""",
                label="System Message",
                lines=3
            )
            cohere_max_tokens = gr.Slider(minimum=100, maximum=10000, value=4000, step=100, label="Max Tokens")
            cohere_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
            cohere_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")

        cohere_submit_button = gr.Button("전송")

        def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
            question = " ".join([i1, i2, i3, i4, i5])
            return respond_cohere_qna(
                question=question,
                system_message=sys_msg,
                max_tokens=mt,
                temperature=temp,
                top_p=top_p_
            )

        cohere_submit_button.click(
            fn=merge_and_call_cohere,
            inputs=[
                cohere_input1, cohere_input2, cohere_input3, cohere_input4, cohere_input5,
                cohere_system_message,
                cohere_max_tokens,
                cohere_temperature,
                cohere_top_p
            ],
            outputs=cohere_answer_output
        )

    #################
    # ChatGPT tab
    #################
    with gr.Tab("gpt-4o-mini"):
        chatgpt_input1 = gr.Textbox(label="입력1", lines=1)
        chatgpt_input2 = gr.Textbox(label="입력2", lines=1)
        chatgpt_input3 = gr.Textbox(label="입력3", lines=1)
        chatgpt_input4 = gr.Textbox(label="입력4", lines=1)
        chatgpt_input5 = gr.Textbox(label="입력5", lines=1)

        chatgpt_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)

        with gr.Accordion("고급 설정 (ChatGPT)", open=False):
            chatgpt_system_message = gr.Textbox(
                value="""반드시 한글로 답변할 것.
너는 ChatGPT, OpenAI에서 개발한 언어 모델이다.
내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
""",
                label="System Message",
                lines=3
            )
            chatgpt_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
            chatgpt_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
            chatgpt_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")

        chatgpt_submit_button = gr.Button("전송")

        def merge_and_call_chatgpt(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
            question = " ".join([i1, i2, i3, i4, i5])
            return respond_chatgpt_qna(
                question=question,
                system_message=sys_msg,
                max_tokens=mt,
                temperature=temp,
                top_p=top_p_
            )

        chatgpt_submit_button.click(
            fn=merge_and_call_chatgpt,
            inputs=[
                chatgpt_input1, chatgpt_input2, chatgpt_input3, chatgpt_input4, chatgpt_input5,
                chatgpt_system_message,
                chatgpt_max_tokens,
                chatgpt_temperature,
                chatgpt_top_p
            ],
            outputs=chatgpt_answer_output
        )

    #################
    # Claude tab
    #################
    with gr.Tab("claude-3-haiku"):
        claude_input1 = gr.Textbox(label="입력1", lines=1)
        claude_input2 = gr.Textbox(label="입력2", lines=1)
        claude_input3 = gr.Textbox(label="입력3", lines=1)
        claude_input4 = gr.Textbox(label="입력4", lines=1)
        claude_input5 = gr.Textbox(label="입력5", lines=1)

        claude_answer_output = gr.Textbox(label="결과", interactive=False, lines=5)

        with gr.Accordion("고급 설정 (Claude)", open=False):
            claude_system_message = gr.Textbox(
                label="System Message",
                value="""반드시 한글로 답변할 것.
너는 Anthropic에서 개발한 클로드이다.
최대한 정확하고 친절하게 답변하라.""",
                lines=3
            )
            claude_max_tokens = gr.Slider(
                minimum=100,
                maximum=4000,
                value=2000,
                step=100,
                label="Max Tokens"
            )
            claude_temperature = gr.Slider(
                minimum=0.1,
                maximum=2.0,
                value=0.7,
                step=0.05,
                label="Temperature"
            )
            claude_top_p = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,
                label="Top-p"
            )

        claude_submit_button = gr.Button("전송")

        def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
            question = " ".join([i1, i2, i3, i4, i5])
            return respond_claude_qna(
                question=question,
                system_message=sys_msg,
                max_tokens=mt,
                temperature=temp,
                top_p=top_p_
            )

        claude_submit_button.click(
            fn=merge_and_call_claude,
            inputs=[
                claude_input1, claude_input2, claude_input3, claude_input4, claude_input5,
                claude_system_message,
                claude_max_tokens,
                claude_temperature,
                claude_top_p
            ],
            outputs=claude_answer_output
        )

    #################
    # DeepSeek tab
    #################
    with gr.Tab("DeepSeek-V3"):
        deepseek_input1 = gr.Textbox(label="입력1", lines=1)
        deepseek_input2 = gr.Textbox(label="입력2", lines=1)
        deepseek_input3 = gr.Textbox(label="입력3", lines=1)
        deepseek_input4 = gr.Textbox(label="입력4", lines=1)
        deepseek_input5 = gr.Textbox(label="입력5", lines=1)

        deepseek_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)

        with gr.Accordion("고급 설정 (DeepSeek)", open=False):
            deepseek_system_message = gr.Textbox(
                value="""반드시 한글로 답변할 것.
너는 DeepSeek-V3, 최고의 언어 모델이다.
내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
""",
                label="System Message",
                lines=3
            )
            deepseek_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
            deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
            deepseek_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")

        deepseek_submit_button = gr.Button("전송")

        def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
            question = " ".join([i1, i2, i3, i4, i5])
            return respond_deepseek_qna(
                question=question,
                system_message=sys_msg,
                max_tokens=mt,
                temperature=temp,
                top_p=top_p_
            )

        deepseek_submit_button.click(
            fn=merge_and_call_deepseek,
            inputs=[
                deepseek_input1, deepseek_input2, deepseek_input3, deepseek_input4, deepseek_input5,
                deepseek_system_message,
                deepseek_max_tokens,
                deepseek_temperature,
                deepseek_top_p
            ],
            outputs=deepseek_answer_output
        )

#############################
# Main entry point
#############################
if __name__ == "__main__":
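    # Starts the local Gradio server; share=True could be passed for a public link.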
    demo.launch()