Wendy committed on
Commit 9828f35 · verified · 1 parent: cb3fe1e

Upload train_data_collect.ipynb with huggingface_hub
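For reference, a commit with this message is typically produced programmatically; a minimal sketch with the huggingface_hub client is shown below (the repo id, repo type, and local path are placeholders, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="train_data_collect.ipynb",   # local notebook to upload
    path_in_repo="train_data_collect.ipynb",      # destination path inside the repo
    repo_id="Wendy/your-repo",                    # placeholder repo id
    repo_type="model",                            # placeholder repo type
    commit_message="Upload train_data_collect.ipynb with huggingface_hub",
)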

Files changed (1)
  1. train_data_collect.ipynb +471 -0
train_data_collect.ipynb ADDED
@@ -0,0 +1,471 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "def read_json(file_path):\n",
+ "    with open(file_path, 'r', encoding='utf-8') as file:\n",
+ "        data = json.load(file)\n",
+ "    return data\n",
+ "\n",
+ "def write_json(file_path, data):\n",
+ "    with open(file_path, 'w', encoding='utf-8') as file:\n",
+ "        json.dump(data, file, ensure_ascii=False, indent=4)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "# Read a jsonl file\n",
+ "def read_jsonl(file_path):\n",
+ "    data = []\n",
+ "    with open(file_path, 'r', encoding='utf-8') as file:\n",
+ "        for line in file:\n",
+ "            data.append(json.loads(line.strip()))\n",
+ "    return data\n",
+ "\n",
+ "# Write to a jsonl file\n",
+ "def write_jsonl(data, file_path):\n",
+ "    with open(file_path, 'w', encoding='utf-8') as file:\n",
+ "        for entry in data:\n",
+ "            json_str = json.dumps(entry, ensure_ascii=False)\n",
+ "            file.write(json_str + '\\n')\n",
+ "\n",
+ "# Example usage\n",
+ "input_file = 'input.jsonl' # input jsonl file path\n",
+ "output_file = 'output.jsonl' # output jsonl file path"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# data1 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_cherry_loss_infer_result_NImg__2025_01_08_.jsonl')\n",
+ "# data2 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_cherry_loss_infer_result_NImg__2025_01_09_.jsonl')\n",
+ "\n",
+ "\n",
+ "# data1 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_Logits_Img__2025_01_10_.jsonl')\n",
+ "# data2 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_Logits_Img__2025_01_11_.jsonl')\n",
+ "# data3 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_Logits_NImg__2025_01_10_.jsonl')\n",
+ "\n",
+ "\n",
+ "\n",
+ "data1 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_Logits_Img__2025_01_12_.jsonl')\n",
+ "data2 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_Logits_Img__2025_01_13_.jsonl')\n",
+ "data4 = read_jsonl('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/NIPS_2025/LLaVA_Fliter/inference_demo/New_llava_Logits_NImg__2025_01_12_.jsonl')\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "624640"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(data4)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_all = data1+data2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "624640"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(data_all)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "({'id': 397,\n",
+ " 'image': 'llava_image_tune/coco/train2017/000000266244.jpg',\n",
+ " 'conversations': [{'from': 'human',\n",
+ " 'value': \"############\\n<image>\\nIs there a person riding a bicycle in the image? Yes, there is a woman riding a bicycle in the image. What unique feature does the bicycle have in the image? The unique feature of the bicycle in the image is that it has an umbrella attached to it, providing cover to the person riding the bike. How many people are visible in the image? In the image, only one person is visible - the woman riding the bicycle. Is attaching an umbrella to a bicycle practical or beneficial? Attaching an umbrella to a bicycle can be considered a practical idea in certain situations. Having an umbrella attached to a bicycle can provide protection from rain, keeping the rider dry during wet weather conditions. It can also shield the rider from direct sunlight on hot and sunny days, providing some shade and comfort.\\n\\nHowever, the practicality of using an umbrella attached to a bicycle could be affected by several factors:\\n\\n1. Wind: Strong winds may make it challenging to control the umbrella and the bicycle, decreasing stability and increasing the risk of accidents.\\n\\n2. Size and Weight: The size and weight of the umbrella may affect the bicycle's balance and handling.\\n\\n3. Obstructions: An umbrella may restrict the rider's field of vision, causing potential safety concerns by obstructing the view of road signs, other road users, or hazards.\\n\\n4. Local Regulations: Some jurisdictions may have rules or regulations restricting the use of umbrellas on bicycles for safety reasons.\\n\\nIn conclusion, while attaching an umbrella to a bicycle can be practical and offer benefits in some situations, it would be essential to consider various factors, such as wind conditions, size and weight, obstructions, and local regulations, to ensure safe and effective use. ############\\n \\nDoes the previous paragraph demarcated within ### and ###\\ncontain informative signal for visual instruction tuning a vision-language model?\\nAn informative datapoint should be well-formatted, contain some\\nusable knowledge of the world, and strictly NOT have any harmful,\\nracist, sexist, etc. content.\\nOPTIONS:\\n- yes\\n- no\\n\"},\n",
+ " {'from': 'gpt', 'value': 'response: yes'}],\n",
+ " 'ori_conversations': [{'from': 'human',\n",
+ " 'value': '<image>\\nIs there a person riding a bicycle in the image?'},\n",
+ " {'from': 'gpt',\n",
+ " 'value': 'Yes, there is a woman riding a bicycle in the image.'},\n",
+ " {'from': 'human',\n",
+ " 'value': 'What unique feature does the bicycle have in the image?'},\n",
+ " {'from': 'gpt',\n",
+ " 'value': 'The unique feature of the bicycle in the image is that it has an umbrella attached to it, providing cover to the person riding the bike.'},\n",
+ " {'from': 'human', 'value': 'How many people are visible in the image?'},\n",
+ " {'from': 'gpt',\n",
+ " 'value': 'In the image, only one person is visible - the woman riding the bicycle.'},\n",
+ " {'from': 'human',\n",
+ " 'value': 'Is attaching an umbrella to a bicycle practical or beneficial?'},\n",
+ " {'from': 'gpt',\n",
+ " 'value': \"Attaching an umbrella to a bicycle can be considered a practical idea in certain situations. Having an umbrella attached to a bicycle can provide protection from rain, keeping the rider dry during wet weather conditions. It can also shield the rider from direct sunlight on hot and sunny days, providing some shade and comfort.\\n\\nHowever, the practicality of using an umbrella attached to a bicycle could be affected by several factors:\\n\\n1. Wind: Strong winds may make it challenging to control the umbrella and the bicycle, decreasing stability and increasing the risk of accidents.\\n\\n2. Size and Weight: The size and weight of the umbrella may affect the bicycle's balance and handling.\\n\\n3. Obstructions: An umbrella may restrict the rider's field of vision, causing potential safety concerns by obstructing the view of road signs, other road users, or hazards.\\n\\n4. Local Regulations: Some jurisdictions may have rules or regulations restricting the use of umbrellas on bicycles for safety reasons.\\n\\nIn conclusion, while attaching an umbrella to a bicycle can be practical and offer benefits in some situations, it would be essential to consider various factors, such as wind conditions, size and weight, obstructions, and local regulations, to ensure safe and effective use.\"}],\n",
+ " 'yes_target_logprob_7B_Img': -14.75,\n",
+ " 'logits_shape': [1, 32000]},\n",
+ " {'id': 244066,\n",
+ " 'conversations': [{'from': 'human',\n",
+ " 'value': '############\\n\\nWhere is this taken?\\nAnswer the question using a single word or phrase. Circus ############\\n \\nDoes the previous paragraph demarcated within ### and ###\\ncontain informative signal for visual instruction tuning a vision-language model?\\nAn informative datapoint should be well-formatted, contain some\\nusable knowledge of the world, and strictly NOT have any harmful,\\nracist, sexist, etc. content.\\nOPTIONS:\\n- yes\\n- no\\n'},\n",
+ " {'from': 'gpt', 'value': 'response: yes'}],\n",
+ " 'ori_conversations': [{'from': 'human',\n",
+ " 'value': '<image>\\nWhere is this taken?\\nAnswer the question using a single word or phrase.'},\n",
+ " {'from': 'gpt', 'value': 'Circus'}],\n",
+ " 'Old_Path': 'llava_image_tune/coco/train2017/000000355857.jpg',\n",
+ " 'yes_target_logprob_7B_NImg': -15.875,\n",
+ " 'logits_shape': [1, 32000]})"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "data_all[0],data4[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_list = []\n",
+ "for i in data_all:\n",
+ "    data_list.append(i['yes_target_logprob_7B_Img'])\n",
+ " \n",
+ "\n",
+ "# data_list = []\n",
+ "# for i in data4:\n",
+ "# data_list.append(i['yes_target_logprob_7B_NImg'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[-14.75,\n",
+ " -15.4375,\n",
+ " -14.6875,\n",
+ " -14.3125,\n",
+ " -14.375,\n",
+ " -14.3125,\n",
+ " -14.4375,\n",
+ " -14.0625,\n",
+ " -15.4375,\n",
+ " -13.9375,\n",
+ " -12.625,\n",
+ " -16.25,\n",
+ " -12.0,\n",
+ " -15.6875,\n",
+ " -13.375,\n",
+ " -13.5,\n",
+ " -13.6875,\n",
+ " -11.5,\n",
+ " -12.0625,\n",
+ " -11.8125,\n",
+ " -12.3125,\n",
+ " -11.5625,\n",
+ " -11.5,\n",
+ " -12.1875,\n",
+ " -12.9375,\n",
+ " -11.75,\n",
+ " -11.6875,\n",
+ " -12.75,\n",
+ " -10.625,\n",
+ " -10.1875,\n",
+ " -12.375,\n",
+ " -10.75,\n",
+ " -15.3125,\n",
+ " -15.375,\n",
+ " -13.4375,\n",
+ " -14.75,\n",
+ " -13.6875,\n",
+ " -15.5625,\n",
+ " -14.1875,\n",
+ " -14.25,\n",
+ " -13.8125,\n",
+ " -14.0625,\n",
+ " -14.125,\n",
+ " -14.0,\n",
+ " -13.6875,\n",
+ " -13.0,\n",
+ " -13.3125,\n",
+ " -13.5,\n",
+ " -11.3125,\n",
+ " -10.9375,\n",
+ " -11.6875,\n",
+ " -13.125,\n",
+ " -10.0625,\n",
+ " -9.8125,\n",
+ " -11.0,\n",
+ " -11.0,\n",
+ " -10.1875,\n",
+ " -11.625,\n",
+ " -10.6875,\n",
+ " -10.9375,\n",
+ " -10.1875,\n",
+ " -10.5,\n",
+ " -12.0,\n",
+ " -11.3125,\n",
+ " -15.75,\n",
+ " -14.875,\n",
+ " -14.5,\n",
+ " -14.5,\n",
+ " -14.25,\n",
+ " -14.1875,\n",
+ " -14.375,\n",
+ " -15.0625,\n",
+ " -15.0625,\n",
+ " -14.0,\n",
+ " -12.625,\n",
+ " -13.125,\n",
+ " -13.375,\n",
+ " -13.5625,\n",
+ " -13.0,\n",
+ " -13.4375,\n",
+ " -12.8125,\n",
+ " -10.375,\n",
+ " -11.6875,\n",
+ " -11.125,\n",
+ " -11.625,\n",
+ " -11.1875,\n",
+ " -12.3125,\n",
+ " -11.3125,\n",
+ " -12.25,\n",
+ " -14.0,\n",
+ " -11.3125,\n",
+ " -11.25,\n",
+ " -12.8125,\n",
+ " -12.3125,\n",
+ " -12.0,\n",
+ " -11.125,\n",
+ " -13.8125,\n",
+ " -15.3125,\n",
+ " -15.4375,\n",
+ " -13.125]"
+ ]
+ },
+ "execution_count": 37,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "data_list[:100]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "\n",
+ "\n",
+ "import numpy as np\n",
+ "\n",
+ "data = np.array(data_list)\n",
+ "data[np.isnan(data)] = 1\n",
+ "temop1 = np.percentile(data, 80)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "New_data_list = []\n",
+ "for i in data_all:\n",
+ "    if i['yes_target_logprob_7B_Img'] >= temop1:\n",
+ "        New_data_list.append(i)\n",
+ " \n",
+ " \n",
+ "# for i in data4:\n",
+ "# if i['yes_target_logprob_7B_NImg'] >= temop1:\n",
+ "# New_data_list.append(i )\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "128201"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(New_data_list)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_loss_7B_Img_20P.json',New_data_list)\n",
+ "# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_Logits_7B_Img_20P.json',New_data_list)\n",
+ "# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_Logits_7B_NImg_20P.json',New_data_list)\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_rand = read_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_rand_20P.json')\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "new_40P = New_data_list + data_rand"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "253123"
+ ]
+ },
+ "execution_count": 43,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(new_40P)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_loss_rand_7B_Img_40P.json',new_40P)\n",
+ "# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_Logits_Rand_7B_Img_40P.json',new_40P)\n",
+ "# write_json('/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/DataSet/LLaVA-Select/llava_image_tune_Logits_Rand_7B_NImg_40P.json',new_40P)\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "250207"
+ ]
+ },
+ "execution_count": 36,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
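Taken together, the notebook implements a simple log-probability threshold selection: it collects the per-sample yes_target_logprob_7B_Img scores, computes their 80th percentile, keeps the samples at or above that threshold (roughly the top 20%), and merges them with a previously sampled random 20% subset to form a 40% training split. A condensed, self-contained sketch of that logic is shown below (file paths are placeholders; the field name, NaN handling, and percentile follow the cells above):

import json
import numpy as np

def read_jsonl(path):
    # One JSON object per line.
    with open(path, 'r', encoding='utf-8') as f:
        return [json.loads(line) for line in f]

# Per-sample inference results (placeholder paths; the notebook reads two dated shards).
data_all = read_jsonl('logits_img_part1.jsonl') + read_jsonl('logits_img_part2.jsonl')

# Target log-probabilities; NaN is mapped to 1 before the percentile, as in the notebook.
scores = np.array([d['yes_target_logprob_7B_Img'] for d in data_all], dtype=float)
scores[np.isnan(scores)] = 1
threshold = np.percentile(scores, 80)

# Keep roughly the top 20% of samples by log-probability.
selected = [d for d in data_all if d['yes_target_logprob_7B_Img'] >= threshold]

# Merge with the random 20% subset to obtain the 40% split, then write it out.
with open('llava_image_tune_rand_20P.json', 'r', encoding='utf-8') as f:
    data_rand = json.load(f)
new_40P = selected + data_rand

with open('llava_image_tune_Logits_Rand_7B_Img_40P.json', 'w', encoding='utf-8') as f:
    json.dump(new_40P, f, ensure_ascii=False, indent=4)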