EmoCube committed on
Commit
55dc5cf
·
verified ·
1 Parent(s): d272b25

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +171 -0
app.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Подключение клиентов
2
+ # - - - - - - - - - - - - - -
3
+ from huggingface_hub import InferenceClient
4
+ from together import Together
5
+
6
+ # Подключение библиотек
7
+ # - - - - - - - - - - - - - -
8
+ import gradio as gr
9
+ import json
10
+
11
+
12
+ #============================
13
+ #============================
14
+
15
+
16
# Available chat models, grouped by the backend provider that serves them.
# Keys ("together" / "huggingface") select which run_* function is used.
models = {
    "together": [
        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
    ],
    "huggingface": [
        "google/gemma-3-27b-it",
        "Qwen/QwQ-32B",
        "Qwen/QwQ-32B-Preview",
        "mistralai/Mistral-Small-24B-Instruct-2501",
        "deepseek-ai/deepseek-llm-67b-chat",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    ],
}
33
+
34
+
35
+ #============================
36
+ #============================
37
+
38
+
39
+ # Функции для работы с сообщениями
40
+ # - - - - - - - - - - - - - -
41
def add_message(role, content, messages):
    """Append one chat message and report the updated conversation.

    Parameters:
        role: message role, one of "system" / "user" / "assistant".
        content: the message text.
        messages: current conversation history (list of role/content dicts).

    Returns:
        (new_messages, message_count, display_string) — matches the Gradio
        outputs [messages_state, count_output, messages_output].
    """
    # Build a new list rather than mutating the input in place: the input is
    # a Gradio State value, and in-place mutation aliases the stored state
    # (and, worst case, the shared default []), which can defeat Gradio's
    # change detection and leak messages across sessions.
    updated = messages + [{"role": role, "content": content}]
    return updated, len(updated), str(updated)
44
+
45
def clear_messages(messages):
    """Reset the conversation: empty history, zero count, empty display text."""
    # The incoming history is intentionally ignored — we always start fresh.
    fresh_history = []
    return fresh_history, len(fresh_history), "[]"
47
+
48
def show_messages(messages):
    """Render the history as its Python repr string for the Messages box."""
    return "{}".format(messages)
50
+
51
def get_messages_api(messages):
    """Serialize the history as pretty-printed JSON (4-space indent)."""
    rendered = json.dumps(messages, indent=4)
    return rendered
53
+
54
def run_huggingface_model(model, messages, max_tokens, temperature, top_p):
    """Send the chat history to a Hugging Face Inference API model.

    Returns the text of the first (and only) completion choice.
    """
    hf_client = InferenceClient(model)
    # stream=False: wait for the whole reply so a plain string is returned.
    completion = hf_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=False,
        temperature=temperature,
        top_p=top_p,
    )
    first_choice = completion.choices[0]
    return first_choice.message.content
64
+
65
def run_together_model(model, messages, max_tokens, temperature, top_p):
    """Send the chat history to a Together-hosted model.

    Returns the text of the first (and only) completion choice.
    """
    # The Together client reads its API key from the environment.
    together_client = Together()
    completion = together_client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    first_choice = completion.choices[0]
    return first_choice.message.content
75
+
76
+
77
+ #============================
78
+ #============================
79
+
80
+
81
# Build the tabbed Gradio interface.
# NOTE: component creation order matters inside gr.Blocks — it defines the
# on-screen layout — so the statements below are kept in their exact order.
demo = gr.Blocks()

with demo:
    gr.Markdown("# Chat Interface")

    # Provider tabs: each tab offers its model picker and its own Run button.
    with gr.Tabs():
        with gr.Tab("Together"):
            together_model_input = gr.Radio(
                label="Select a Together model",
                choices=models["together"],
                value=models["together"][0],
            )
            together_run_button = gr.Button("Run Together")

        with gr.Tab("HuggingFace"):
            huggingface_model_input = gr.Radio(
                label="Select a HuggingFace model",
                choices=models["huggingface"],
                value=models["huggingface"][0],
            )
            huggingface_run_button = gr.Button("Run HuggingFace")

    # Shared interface elements (used by both provider tabs).
    role_input = gr.Dropdown(
        label="Role",
        choices=["system", "user", "assistant"],  # allowed message roles
        value="user"  # default role
    )
    content_input = gr.Textbox(label="Content")
    # messages_state holds the conversation as a list of role/content dicts.
    messages_state = gr.State(value=[])
    messages_output = gr.Textbox(label="Messages", value="[]")
    count_output = gr.Number(label="Count", value=0)
    response_output = gr.Textbox(label="Response")
    messages_api_output = gr.Textbox(label="Messages API")

    add_button = gr.Button("Add")
    clear_button = gr.Button("Clear")
    show_button = gr.Button("Show messages")
    get_api_button = gr.Button("Get messages API")

    # Sampling controls forwarded to both backends.
    max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
    temperature_slider = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
    top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")

    # Event handlers for the message-management buttons.
    add_button.click(
        add_message,
        inputs=[role_input, content_input, messages_state],
        outputs=[messages_state, count_output, messages_output],
    )

    clear_button.click(
        clear_messages,
        inputs=[messages_state],
        outputs=[messages_state, count_output, messages_output],
    )

    show_button.click(
        show_messages,
        inputs=[messages_state],
        outputs=[messages_output],
    )

    get_api_button.click(
        get_messages_api,
        inputs=[messages_state],
        outputs=[messages_api_output],
    )

    # Event handlers for the per-provider "Run" buttons; both write the
    # model's reply into the shared Response textbox.
    together_run_button.click(
        run_together_model,
        inputs=[together_model_input, messages_state, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=[response_output],
    )

    huggingface_run_button.click(
        run_huggingface_model,
        inputs=[huggingface_model_input, messages_state, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=[response_output],
    )
164
+
165
+
166
+ #============================
167
+ #============================
168
+
169
+
170
# Launch the Gradio server only when the file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()