Spaces:
Running
Running
Liu Yiwen
committed on
Commit
·
15549a1
1
Parent(s):
1c081e2
增加了提交分数的功能
Browse files- __pycache__/comm_utils.cpython-311.pyc +0 -0
- __pycache__/utils.cpython-311.pyc +0 -0
- app.py +14 -5
- comm_utils.py +22 -1
- score.json +27 -0
- utils.py +5 -4
__pycache__/comm_utils.cpython-311.pyc
CHANGED
Binary files a/__pycache__/comm_utils.cpython-311.pyc and b/__pycache__/comm_utils.cpython-311.pyc differ
|
|
__pycache__/utils.cpython-311.pyc
CHANGED
Binary files a/__pycache__/utils.cpython-311.pyc and b/__pycache__/utils.cpython-311.pyc differ
|
|
app.py
CHANGED
@@ -16,7 +16,7 @@ from datasets import Features, Image, Audio, Sequence
|
|
16 |
from typing import List, Tuple, Callable
|
17 |
|
18 |
from utils import ndarray_to_base64, clean_up_df, create_statistic, create_plot
|
19 |
-
from comm_utils import save_to_file, send_msg_to_server
|
20 |
|
21 |
class AppError(RuntimeError):
|
22 |
pass
|
@@ -250,6 +250,7 @@ with gr.Blocks() as demo:
|
|
250 |
cp_error = gr.Markdown("", visible=False)
|
251 |
cp_info = gr.Markdown("", visible=False)
|
252 |
cp_result = gr.Markdown("", visible=False)
|
|
|
253 |
tot_samples = 0
|
254 |
# 初始化Salesforce/lotsa_data数据集展示使用的组件
|
255 |
# componets = []
|
@@ -269,12 +270,16 @@ with gr.Blocks() as demo:
|
|
269 |
with gr.Row():
|
270 |
user_input_box = gr.Textbox(label="question", interactive=False)
|
271 |
user_output_box = gr.Textbox(label="answer", interactive=False)
|
272 |
-
# user_io_buttom = gr.Button("发送", interactive=True)
|
273 |
# componets.append({"select_sample_box": select_sample_box,
|
274 |
# "statistics_textbox": statistics_textbox,
|
275 |
# "user_input_box": user_input_box,
|
276 |
# "plot": plot})
|
277 |
-
|
|
|
|
|
|
|
|
|
|
|
278 |
with gr.Row():
|
279 |
cp_page = gr.Textbox("1", label="Page", placeholder="1", visible=False)
|
280 |
cp_goto_page = gr.Button("Go to page", visible=False)
|
@@ -299,6 +304,7 @@ with gr.Blocks() as demo:
|
|
299 |
ret[plot] = gr.update(value=create_plot(df_list, id_list))
|
300 |
elif dataset == 'YY26/TS_DATASETS':
|
301 |
df, max_page, info = get_page(dataset, config, split, page)
|
|
|
302 |
# TODO: 修改lotsa_config的读取逻辑
|
303 |
lotsa_config, lotsa_split, lotsa_page = 'traffic_hourly', 'train', eval(df['ts_id'][0])
|
304 |
start_index, end_index = df['start_index'][0], df['end_index'][0]
|
@@ -308,6 +314,7 @@ with gr.Blocks() as demo:
|
|
308 |
ret[plot] = gr.update(value=create_plot(df_list, id_list, interval=[start_index, end_index]))
|
309 |
ret[user_input_box] = gr.update(value=df['question'][0])
|
310 |
ret[user_output_box] = gr.update(value=df['answer'][0])
|
|
|
311 |
else:
|
312 |
markdown_result, max_page, info = get_page(dataset, config, split, page)
|
313 |
ret[cp_result] = gr.update(visible=True, value=markdown_result)
|
@@ -376,13 +383,15 @@ with gr.Blocks() as demo:
|
|
376 |
# select_sample_box, select_subtarget_box,
|
377 |
# select_buttom,
|
378 |
statistics_textbox, plot,
|
379 |
-
|
|
|
|
|
380 |
cp_go.click(show_dataset, inputs=[cp_dataset], outputs=all_outputs)
|
381 |
cp_config.change(show_dataset_at_config, inputs=[cp_dataset, cp_config], outputs=all_outputs)
|
382 |
cp_split.change(show_dataset_at_config_and_split, inputs=[cp_dataset, cp_config, cp_split], outputs=all_outputs)
|
383 |
cp_goto_page.click(show_dataset_at_config_and_split_and_page, inputs=[cp_dataset, cp_config, cp_split, cp_page], outputs=all_outputs)
|
384 |
cp_goto_next_page.click(show_dataset_at_config_and_split_and_next_page, inputs=[cp_dataset, cp_config, cp_split, cp_page], outputs=all_outputs)
|
385 |
-
|
386 |
# select_buttom.click(show_dataset_at_config_and_split_and_page, inputs=[cp_dataset, cp_config, cp_split, select_sample_box, select_subtarget_box], outputs=all_outputs)
|
387 |
|
388 |
|
|
|
16 |
from typing import List, Tuple, Callable
|
17 |
|
18 |
from utils import ndarray_to_base64, clean_up_df, create_statistic, create_plot
|
19 |
+
from comm_utils import save_to_file, send_msg_to_server, save_score
|
20 |
|
21 |
class AppError(RuntimeError):
|
22 |
pass
|
|
|
250 |
cp_error = gr.Markdown("", visible=False)
|
251 |
cp_info = gr.Markdown("", visible=False)
|
252 |
cp_result = gr.Markdown("", visible=False)
|
253 |
+
qusetion_id_box = gr.Textbox(visible=False)
|
254 |
tot_samples = 0
|
255 |
# 初始化Salesforce/lotsa_data数据集展示使用的组件
|
256 |
# componets = []
|
|
|
270 |
with gr.Row():
|
271 |
user_input_box = gr.Textbox(label="question", interactive=False)
|
272 |
user_output_box = gr.Textbox(label="answer", interactive=False)
|
|
|
273 |
# componets.append({"select_sample_box": select_sample_box,
|
274 |
# "statistics_textbox": statistics_textbox,
|
275 |
# "user_input_box": user_input_box,
|
276 |
# "plot": plot})
|
277 |
+
score_slider = gr.Slider(1, 5, 1, label="Score for answer", interactive=True)
|
278 |
+
with gr.Row():
|
279 |
+
with gr.Column(scale=2):
|
280 |
+
user_submit_button = gr.Button("submit", interactive=True)
|
281 |
+
with gr.Column(scale=1):
|
282 |
+
submit_info_box = gr.Textbox(label="submit_info", interactive=False)
|
283 |
with gr.Row():
|
284 |
cp_page = gr.Textbox("1", label="Page", placeholder="1", visible=False)
|
285 |
cp_goto_page = gr.Button("Go to page", visible=False)
|
|
|
304 |
ret[plot] = gr.update(value=create_plot(df_list, id_list))
|
305 |
elif dataset == 'YY26/TS_DATASETS':
|
306 |
df, max_page, info = get_page(dataset, config, split, page)
|
307 |
+
ret[qusetion_id_box] = gr.update(value = df['num'][0])
|
308 |
# TODO: 修改lotsa_config的读取逻辑
|
309 |
lotsa_config, lotsa_split, lotsa_page = 'traffic_hourly', 'train', eval(df['ts_id'][0])
|
310 |
start_index, end_index = df['start_index'][0], df['end_index'][0]
|
|
|
314 |
ret[plot] = gr.update(value=create_plot(df_list, id_list, interval=[start_index, end_index]))
|
315 |
ret[user_input_box] = gr.update(value=df['question'][0])
|
316 |
ret[user_output_box] = gr.update(value=df['answer'][0])
|
317 |
+
ret[submit_info_box] = gr.update(value="")
|
318 |
else:
|
319 |
markdown_result, max_page, info = get_page(dataset, config, split, page)
|
320 |
ret[cp_result] = gr.update(visible=True, value=markdown_result)
|
|
|
383 |
# select_sample_box, select_subtarget_box,
|
384 |
# select_buttom,
|
385 |
statistics_textbox, plot,
|
386 |
+
qusetion_id_box,
|
387 |
+
user_input_box, user_output_box,
|
388 |
+
submit_info_box]
|
389 |
cp_go.click(show_dataset, inputs=[cp_dataset], outputs=all_outputs)
|
390 |
cp_config.change(show_dataset_at_config, inputs=[cp_dataset, cp_config], outputs=all_outputs)
|
391 |
cp_split.change(show_dataset_at_config_and_split, inputs=[cp_dataset, cp_config, cp_split], outputs=all_outputs)
|
392 |
cp_goto_page.click(show_dataset_at_config_and_split_and_page, inputs=[cp_dataset, cp_config, cp_split, cp_page], outputs=all_outputs)
|
393 |
cp_goto_next_page.click(show_dataset_at_config_and_split_and_next_page, inputs=[cp_dataset, cp_config, cp_split, cp_page], outputs=all_outputs)
|
394 |
+
user_submit_button.click(save_score, inputs=["none", qusetion_id_box, score_slider], outputs=[submit_info_box])
|
395 |
# select_buttom.click(show_dataset_at_config_and_split_and_page, inputs=[cp_dataset, cp_config, cp_split, select_sample_box, select_subtarget_box], outputs=all_outputs)
|
396 |
|
397 |
|
comm_utils.py
CHANGED
@@ -1,5 +1,7 @@
|
|
|
|
|
|
1 |
import requests
|
2 |
-
|
3 |
|
4 |
API_URL = "http://127.0.0.1:5000/api/process"
|
5 |
|
@@ -7,6 +9,25 @@ def save_to_file(user_input):
|
|
7 |
with open("user_input.txt", "w") as file:
|
8 |
file.write(user_input)
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
def send_msg_to_server(input_text):
|
12 |
try:
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
import requests
|
4 |
+
import time
|
5 |
|
6 |
API_URL = "http://127.0.0.1:5000/api/process"
|
7 |
|
|
|
9 |
with open("user_input.txt", "w") as file:
|
10 |
file.write(user_input)
|
11 |
|
12 |
def save_score(user_id, question_id, score):
    """Append one score record to the local ``score.json`` file.

    Parameters
    ----------
    user_id : str
        Identifier of the scoring user (the caller currently passes a
        placeholder; verify against app.py).
    question_id : str
        Identifier of the question being scored.
    score : int | float
        The score value chosen by the user.

    Returns
    -------
    str
        A human-readable confirmation message with a timestamp.
    """
    score_data = {
        "user_id": user_id,
        "question_id": question_id,
        "score": score,
    }

    # Load existing records; fall back to an empty list when the file is
    # missing or has been corrupted (e.g. a partially written JSON file),
    # so a bad file never blocks new submissions.
    data = []
    if os.path.exists("score.json"):
        try:
            with open("score.json", "r", encoding="utf-8") as file:
                data = json.load(file)
        except (json.JSONDecodeError, OSError):
            data = []

    data.append(score_data)

    with open("score.json", "w", encoding="utf-8") as file:
        json.dump(data, file, indent=4, ensure_ascii=False)

    # NOTE(review): time.localtime() uses the *server's* local timezone; the
    # original label "UCT-8" looked like a typo for UTC+8 — confirm the
    # deployment timezone before relying on this label.
    return f'Time {time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} UTC+8, score submitted successfully.'
|
31 |
|
32 |
def send_msg_to_server(input_text):
|
33 |
try:
|
score.json
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"user_id": 4.12,
|
4 |
+
"question_id": "1",
|
5 |
+
"score": 4.12
|
6 |
+
},
|
7 |
+
{
|
8 |
+
"user_id": 4.12,
|
9 |
+
"question_id": "1",
|
10 |
+
"score": 4.12
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"user_id": 4.12,
|
14 |
+
"question_id": "1",
|
15 |
+
"score": 4.12
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"user_id": 2.86,
|
19 |
+
"question_id": "1",
|
20 |
+
"score": 2.86
|
21 |
+
},
|
22 |
+
{
|
23 |
+
"user_id": 2.86,
|
24 |
+
"question_id": "2",
|
25 |
+
"score": 2.86
|
26 |
+
}
|
27 |
+
]
|
utils.py
CHANGED
@@ -96,10 +96,10 @@ def create_statistic(dfs: list[pd.DataFrame], ids: list[str], interval:list[int,
|
|
96 |
df = df.iloc[interval[0]:interval[1]]
|
97 |
df_values = df.iloc[:, 1:]
|
98 |
# 计算统计值
|
99 |
-
mean_values = df_values.mean()
|
100 |
-
std_values = df_values.std()
|
101 |
-
max_values = df_values.max()
|
102 |
-
min_values = df_values.min()
|
103 |
|
104 |
# 将这些统计信息合并成一个新的DataFrame
|
105 |
stats_df = pd.DataFrame({
|
@@ -114,6 +114,7 @@ def create_statistic(dfs: list[pd.DataFrame], ids: list[str], interval:list[int,
|
|
114 |
|
115 |
# 合并所有统计信息DataFrame
|
116 |
combined_stats_df = pd.concat(stats_list, ignore_index=True)
|
|
|
117 |
return combined_stats_df
|
118 |
|
119 |
def clean_up_df(df: pd.DataFrame, rows_to_include: list[int]) -> pd.DataFrame:
|
|
|
96 |
df = df.iloc[interval[0]:interval[1]]
|
97 |
df_values = df.iloc[:, 1:]
|
98 |
# 计算统计值
|
99 |
+
mean_values = df_values.mean()
|
100 |
+
std_values = df_values.std()
|
101 |
+
max_values = df_values.max()
|
102 |
+
min_values = df_values.min()
|
103 |
|
104 |
# 将这些统计信息合并成一个新的DataFrame
|
105 |
stats_df = pd.DataFrame({
|
|
|
114 |
|
115 |
# 合并所有统计信息DataFrame
|
116 |
combined_stats_df = pd.concat(stats_list, ignore_index=True)
|
117 |
+
combined_stats_df = combined_stats_df.applymap(lambda x: round(x, 2) if isinstance(x, (int, float)) else x)
|
118 |
return combined_stats_df
|
119 |
|
120 |
def clean_up_df(df: pd.DataFrame, rows_to_include: list[int]) -> pd.DataFrame:
|