Update app.py
app.py
CHANGED
@@ -3,7 +3,6 @@ from huggingface_hub import InferenceClient
 import os
 from threading import Event
 
-# Read the Hugging Face API token from an environment variable
 hf_token = os.getenv("HF_TOKEN")
 stop_event = Event()
 
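Note: os.getenv("HF_TOKEN") returns None when the variable is unset, and InferenceClient then calls the Inference API without authentication, which fails for gated models. A minimal guard, sketched as an assumption rather than as part of this commit:

import os

hf_token = os.getenv("HF_TOKEN")
# Fail fast with a clear message instead of hitting 401 errors later.
if hf_token is None:
    raise RuntimeError("HF_TOKEN is not set; add it as a secret in the Space settings.")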
@@ -15,18 +14,18 @@ models = {
     "CohereForAI/c4ai-command-r-plus": "Cohere Command-R Plus"
 }
 
-# Inference
+# Return an InferenceClient for the selected model
 def get_client(model):
     return InferenceClient(model=model, token=hf_token)
 
-#
+# Response generation function
 def respond(message, system_message, max_tokens, temperature, top_p, selected_model):
     stop_event.clear()
     client = get_client(selected_model)
 
-    # Set up the prompt
+    # Set up the prompt - the system message is freely configurable
     messages = [
-        {"role": "system", "content": system_message
+        {"role": "system", "content": system_message},
         {"role": "user", "content": message}
     ]
 
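respond flattens these messages into a plain "role: content" string for text_generation, so the model never sees its own chat template. huggingface_hub also exposes chat_completion, which applies the model's chat template server-side; a minimal sketch of that alternative (not what this commit does):

from huggingface_hub import InferenceClient

client = InferenceClient(model="CohereForAI/c4ai-command-r-plus", token=hf_token)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# Streams OpenAI-style chunks; delta.content can be None on the final chunk.
for chunk in client.chat_completion(messages=messages, max_tokens=128, stream=True):
    print(chunk.choices[0].delta.content or "", end="")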
@@ -34,7 +33,7 @@ def respond(message, system_message, max_tokens, temperature, top_p, selected_model):
     response = ""
     total_tokens_used = 0  # Track the number of tokens used
 
-    # Response from the model
+    # Stream the response from the model
     for chunk in client.text_generation(
         prompt="\n".join([f"{m['role']}: {m['content']}" for m in messages]),
         max_new_tokens=max_tokens,
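The hunk cuts the text_generation call off after max_new_tokens; the remainder is not shown in the source. A plausible completion of the streaming loop, assuming stream=True (in which case text_generation yields the generated text chunk by chunk) and the stop_event / total_tokens_used bookkeeping set up above:

    for chunk in client.text_generation(
        prompt="\n".join([f"{m['role']}: {m['content']}" for m in messages]),
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        if stop_event.is_set():  # lets the UI abort a long generation
            break
        response += chunk
        total_tokens_used += 1  # rough count: one streamed chunk per token
        yield response, f"Tokens used: {total_tokens_used}"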
@@ -58,65 +57,31 @@ def get_last_response(chatbot):
         return chatbot[-1][1]
     return None
 
-#
-class PromptHistory:
-    def __init__(self):
-        self.history = []
-
-    def add_entry(self, prompt, response, model, settings):
-        self.history.append({
-            "prompt": prompt,
-            "response": response,
-            "model": model,
-            "settings": settings
-        })
-
-    def get_history(self):
-        return self.history
-
-# Create the history object
-prompt_history = PromptHistory()
-
-# Define the Gradio interface function
+# Build the Gradio UI
 def gradio_interface(message, system_message, max_tokens, temperature, top_p, selected_model):
     result = None
     for output in respond(message, system_message, max_tokens, temperature, top_p, selected_model):
         result = output
-
-    # Add the prompt and result to the history
-    prompt_history.add_entry(
-        message,
-        result[0][1],  # model response
-        selected_model,
-        {"max_tokens": max_tokens, "temperature": temperature, "top_p": top_p}
-    )
-
     return result
 
-# Function for viewing the history
-def view_history():
-    return prompt_history.get_history()
-
-# Build the Gradio UI
 with gr.Blocks() as demo:
-    … (10 removed lines of the old UI layout; their content is not shown in the source)
+    with gr.Row():
+        with gr.Column():
+            selected_model = gr.Dropdown(choices=list(models.keys()), value="deepseek-ai/DeepSeek-Coder-V2-Instruct", label="Model selection")
+            system_message = gr.Textbox(label="System message", value="The conversation flow is set based on this message.")
+            message = gr.Textbox(label="User message")
+
+            max_tokens = gr.Slider(minimum=10, maximum=512, value=128, label="Max tokens")
+            temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, label="Temperature")
+            top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.9, label="Top-p")
+
+            submit_button = gr.Button("Generate response")
+        with gr.Column():
+            chatbot = gr.Chatbot()
+            token_usage = gr.Textbox(label="Token usage", interactive=False)
 
     # Wire the button to the response function
-    submit_button =
-    submit_button.click(gradio_interface, inputs=[message, system_message, max_tokens, temperature, top_p, selected_model], outputs=[response_output, token_usage])
-
-    # Wire up the history view
-    history_output = gr.Textbox(label="History", interactive=False)
-    history_button.click(view_history, outputs=history_output)
+    submit_button.click(gradio_interface, inputs=[message, system_message, max_tokens, temperature, top_p, selected_model], outputs=[chatbot, token_usage])
 
 # Run the UI
 demo.launch()
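One nuance in the new layout: models maps model ids to display names, but choices=list(models.keys()) surfaces the raw ids in the dropdown. Recent Gradio versions accept (label, value) tuples for choices, so the friendly names could be shown instead; a sketch, assuming a Gradio version with tuple-choice support:

selected_model = gr.Dropdown(
    choices=[(display_name, model_id) for model_id, display_name in models.items()],
    value="deepseek-ai/DeepSeek-Coder-V2-Instruct",  # the handler still receives the id
    label="Model selection",
)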
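Since submit_button.click routes gradio_interface's return value to outputs=[chatbot, token_usage], the function has to return a pair whose first element is a chat history in the list-of-[user, assistant] form that gr.Chatbot expects. One way the yields from respond could line up with that, sketched under the assumption that respond yields (partial_text, token_info) pairs (the diff does not show its final yield format):

def gradio_interface(message, system_message, max_tokens, temperature, top_p, selected_model):
    history, token_info = [[message, ""]], ""
    for partial_text, token_info in respond(
        message, system_message, max_tokens, temperature, top_p, selected_model
    ):
        history[-1][1] = partial_text  # fill in the assistant half of the pair
    return history, token_info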