fzkuji committed · commit d7fc8c8 (verified) · 1 parent: 00d1692

Upload 2 files
cmbexam_dataloading.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
cmbexam_preprocessing.ipynb ADDED
@@ -0,0 +1,238 @@
+ {
+  "cells": [
+   {
+    "metadata": {},
+    "cell_type": "markdown",
+    "source": "Preprocessing step 1: merge the test-set questions and answers",
+    "id": "be5125e8e3ab71fd"
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-09-09T07:57:36.566966Z",
+      "start_time": "2024-09-09T07:57:34.545954Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "import json\n",
+     "\n",
+     "# File paths\n",
+     "question_path = './CMB/CMB-Exam/CMB-test/CMB-test-choice-question-merge.json'\n",
+     "answer_path = './CMB/CMB-Exam/CMB-test/CMB-test-choice-answer.json'\n",
+     "\n",
+     "# Read both files\n",
+     "with open(question_path, 'r', encoding='utf-8') as question_file:\n",
+     "    question_data = json.load(question_file)\n",
+     "\n",
+     "with open(answer_path, 'r', encoding='utf-8') as answer_file:\n",
+     "    answer_data = json.load(answer_file)\n",
+     "\n",
+     "# List that will hold the merged records\n",
+     "merged_data = []\n",
+     "\n",
+     "# Iterate over the answers and match each one to its question by id\n",
+     "for answer_item in answer_data:\n",
+     "    for question_item in question_data:\n",
+     "        if answer_item[\"id\"] == question_item[\"id\"]:\n",
+     "            # Merge the two dictionaries\n",
+     "            merged_item = {**question_item, **answer_item}\n",
+     "            merged_data.append(merged_item)\n",
+     "            break\n",
+     "\n",
+     "# Save the merged data to a new file\n",
+     "with open('./CMB/CMB-Exam/CMB-test/CMB-test-merged-with-id.json', 'w', encoding='utf-8') as merged_file:\n",
+     "    json.dump(merged_data, merged_file, ensure_ascii=False, indent=4)\n",
+     "\n",
+     "print(\"Merge complete; data saved to CMB-test-merged-with-id.json.\")\n"
+    ],
+    "id": "2ead8ebe22d1bed2",
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Merge complete; data saved to CMB-test-merged-with-id.json.\n"
+      ]
+     }
+    ],
+    "execution_count": 4
+   },
+   {
+    "metadata": {},
+    "cell_type": "markdown",
+    "source": "Preprocessing step 2: unify the fields across train, valid and test, mainly by removing the test set's id field and the validation set's explanation field",
+    "id": "2cdc6572b1bfea28"
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-09-09T07:57:37.704989Z",
+      "start_time": "2024-09-09T07:57:37.599535Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "# File path\n",
+     "merge_path = './CMB/CMB-Exam/CMB-test/CMB-test-merged-with-id.json'\n",
+     "\n",
+     "# Load the test set\n",
+     "with open(merge_path, 'r', encoding='utf-8') as merge_file:\n",
+     "    merge_data = json.load(merge_file)\n",
+     "\n",
+     "# Remove the id field from the test set\n",
+     "for item in merge_data:\n",
+     "    del item[\"id\"]\n",
+     "\n",
+     "# Save to a new file\n",
+     "with open('./CMB/CMB-Exam/CMB-test/CMB-test-merge.json', 'w', encoding='utf-8') as merge_file:\n",
+     "    json.dump(merge_data, merge_file, ensure_ascii=False, indent=4)\n",
+     "\n",
+     "print(\"Fields unified; data saved to CMB-test-merge.json.\")"
+    ],
+    "id": "c0542800b55838d1",
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Fields unified; data saved to CMB-test-merge.json.\n"
+      ]
+     }
+    ],
+    "execution_count": 5
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-09-09T08:43:40.171865Z",
+      "start_time": "2024-09-09T08:43:40.161872Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "# File path\n",
+     "val_path = './CMB/CMB-Exam/CMB-val/CMB-val-merge.json'\n",
+     "\n",
+     "# Load the validation set\n",
+     "with open(val_path, 'r', encoding='utf-8') as val_file:\n",
+     "    val_data = json.load(val_file)\n",
+     "\n",
+     "# Remove the explanation field from the validation set\n",
+     "for item in val_data:\n",
+     "    del item[\"explanation\"]\n",
+     "\n",
+     "# Save back to the file\n",
+     "with open('./CMB/CMB-Exam/CMB-val/CMB-val-merge.json', 'w', encoding='utf-8') as val_file:\n",
+     "    json.dump(val_data, val_file, ensure_ascii=False, indent=4)\n",
+     "\n",
+     "print(\"Fields unified; data saved to CMB-val-merge.json.\")"
+    ],
+    "id": "566f2d1e6fc551e9",
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Fields unified; data saved to CMB-val-merge.json.\n"
+      ]
+     }
+    ],
+    "execution_count": 8
+   },
+   {
+    "metadata": {},
+    "cell_type": "markdown",
+    "source": "Preprocessing step 3: convert the dataset's option field from a dict to a list",
+    "id": "84a1b5c5e12a8dbd"
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-09-09T09:18:26.355571Z",
+      "start_time": "2024-09-09T09:18:21.625915Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "# Example\n",
+     "# From { \"A\": \"视网膜血管炎\", \"B\": \"黄斑水肿\", \"C\": \"脉络膜炎\", \"D\": \"下方玻璃体的雪球样混浊\", \"E\": \"肉芽肿性前葡萄膜炎\", \"F\": null }\n",
+     "# To [ { \"key\": \"A\", \"value\": \"视网膜血管炎\" }, { \"key\": \"B\", \"value\": \"黄斑水肿\" }, { \"key\": \"C\", \"value\": \"脉络膜炎\" }, { \"key\": \"D\", \"value\": \"下方玻璃体的雪球样混浊\" }, { \"key\": \"E\", \"value\": \"肉芽肿性前葡萄膜炎\" } ]\n",
+     "\n",
+     "import json\n",
+     "\n",
+     "# File paths\n",
+     "train_path = './CMB/CMB-Exam/CMB-train/CMB-train-merge.json'\n",
+     "val_path = './CMB/CMB-Exam/CMB-val/CMB-val-merge.json'\n",
+     "test_path = './CMB/CMB-Exam/CMB-test/CMB-test-merge.json'\n",
+     "\n",
+     "# Load the training set\n",
+     "with open(train_path, 'r', encoding='utf-8') as train_file:\n",
+     "    train_data = json.load(train_file)\n",
+     "\n",
+     "# Load the validation set\n",
+     "with open(val_path, 'r', encoding='utf-8') as val_file:\n",
+     "    val_data = json.load(val_file)\n",
+     "\n",
+     "# Load the test set\n",
+     "with open(test_path, 'r', encoding='utf-8') as test_file:\n",
+     "    test_data = json.load(test_file)\n",
+     "\n",
+     "# Convert the option field from a dict to a list, skipping null options\n",
+     "def convert_option(data):\n",
+     "    for item in data:\n",
+     "        option = []\n",
+     "        for key, value in item[\"option\"].items():\n",
+     "            if value:\n",
+     "                option.append({\"key\": key, \"value\": value})\n",
+     "        item[\"option\"] = option\n",
+     "\n",
+     "convert_option(train_data)\n",
+     "convert_option(val_data)\n",
+     "convert_option(test_data)\n",
+     "\n",
+     "# Save back to the files\n",
+     "with open(train_path, 'w', encoding='utf-8') as train_file:\n",
+     "    json.dump(train_data, train_file, ensure_ascii=False, indent=4)\n",
+     "\n",
+     "with open(val_path, 'w', encoding='utf-8') as val_file:\n",
+     "    json.dump(val_data, val_file, ensure_ascii=False, indent=4)\n",
+     "\n",
+     "with open(test_path, 'w', encoding='utf-8') as test_file:\n",
+     "    json.dump(test_data, test_file, ensure_ascii=False, indent=4)"
+    ],
+    "id": "35dfd0303032468a",
+    "outputs": [],
+    "execution_count": 3
+   },
+   {
+    "metadata": {},
+    "cell_type": "code",
+    "outputs": [],
+    "execution_count": null,
+    "source": "",
+    "id": "981c1a2d0dff8354"
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 2
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython2",
+    "version": "2.7.6"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
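
Note: the companion cmbexam_dataloading.ipynb diff is too large to render above, so the sketch below is an illustration only, not part of the uploaded notebooks. It shows one way the converted files could be consumed; the file path and the list-form option field come from the preprocessing cells above, while the variable names and the prompt-style formatting are assumptions.

# Illustrative sketch only (not part of this commit): load a converted split and
# format one item's options. Assumes the paths written by the cells above exist.
import json

with open('./CMB/CMB-Exam/CMB-val/CMB-val-merge.json', 'r', encoding='utf-8') as f:
    val_data = json.load(f)

item = val_data[0]
# After step 3, "option" is a list of {"key", "value"} dicts, so it can be joined
# directly into a prompt-style string.
options_text = '\n'.join(f"{opt['key']}. {opt['value']}" for opt in item['option'])
print(options_text)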