Modalities: Text · Formats: parquet · Languages: English · Libraries: Datasets, pandas
SaylorTwift (HF Staff) committed on
Commit 66efae1 · verified · Parent: 2dc8f0b

Add 'clinical_knowledge' config data files
README.md CHANGED
@@ -181,6 +181,8 @@ dataset_info:
   features:
   - name: question
     dtype: string
+  - name: subject
+    dtype: string
   - name: choices
     sequence: string
   - name: answer
@@ -193,19 +195,19 @@ dataset_info:
       '3': D
   splits:
   - name: auxiliary_train
-    num_bytes: 160601377
+    num_bytes: 161000625
     num_examples: 99842
   - name: test
-    num_bytes: 62754
+    num_bytes: 68572
     num_examples: 265
   - name: validation
-    num_bytes: 6664
+    num_bytes: 7290
     num_examples: 29
   - name: dev
-    num_bytes: 1210
+    num_bytes: 1308
     num_examples: 5
-  download_size: 166184960
-  dataset_size: 160672005
+  download_size: 47213955
+  dataset_size: 161077795
 - config_name: college_biology
   features:
   - name: question
@@ -1765,6 +1767,16 @@ configs:
     path: business_ethics/validation-*
   - split: dev
     path: business_ethics/dev-*
+- config_name: clinical_knowledge
+  data_files:
+  - split: auxiliary_train
+    path: clinical_knowledge/auxiliary_train-*
+  - split: test
+    path: clinical_knowledge/test-*
+  - split: validation
+    path: clinical_knowledge/validation-*
+  - split: dev
+    path: clinical_knowledge/dev-*
 ---
 
 # Dataset Card for MMLU
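With the `clinical_knowledge` entry wired into `configs:`, the Hub can serve this subject directly from the Parquet files. A minimal loading sketch, assuming the canonical `cais/mmlu` repo id (substitute whichever repo this commit actually lives in):

```python
from datasets import load_dataset

# Load the newly added config; split names match the data_files entries above.
ds = load_dataset("cais/mmlu", "clinical_knowledge", split="test")

print(len(ds))  # 265 examples, per the split metadata in the README
print(ds[0])    # columns: question, subject, choices, answer
```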
clinical_knowledge/auxiliary_train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2782fc860f57d9345a9233ab04f494b0af5ae85b893a27853f7014b14a3bd07
+size 47163955

clinical_knowledge/dev-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ef415ee4ee8eec3b77f97726f7e264f7cf97ac880293ac14955bd4abf898925
+size 3189

clinical_knowledge/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81a74fd1121699ebbdbe8c6f44a4efe4577aa8792bca091c7094be09896a2e36
+size 39869

clinical_knowledge/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66a168f080bf08d4d18b9ec0db2a0416328b228e321dca24ea116ae931f8e79f
+size 6942
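Each ADDED file is a Git LFS pointer: the repository stores only the blob's SHA-256 (`oid`) and byte count (`size`), while the Parquet payload lives in LFS storage. A quick integrity check against a downloaded copy, using a hypothetical local path (point it at wherever your checkout or cache places the file):

```python
import hashlib
from pathlib import Path

# Hypothetical local path; adjust to your checkout or cache layout.
path = Path("clinical_knowledge/test-00000-of-00001.parquet")

blob = path.read_bytes()
# Expected values copied verbatim from the LFS pointer above.
assert len(blob) == 39869, "size mismatch vs. LFS pointer"
expected = "81a74fd1121699ebbdbe8c6f44a4efe4577aa8792bca091c7094be09896a2e36"
assert hashlib.sha256(blob).hexdigest() == expected, "oid mismatch vs. LFS pointer"
print("blob matches its LFS pointer")
```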
dataset_infos.json CHANGED
@@ -287,39 +287,34 @@
     "features": {
       "question": {
         "dtype": "string",
-        "id": null,
+        "_type": "Value"
+      },
+      "subject": {
+        "dtype": "string",
         "_type": "Value"
       },
       "choices": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "answer": {
-        "num_classes": 4,
         "names": [
           "A",
           "B",
           "C",
           "D"
         ],
-        "id": null,
         "_type": "ClassLabel"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
-    "builder_name": "mmlu",
+    "builder_name": "parquet",
+    "dataset_name": "mmlu",
     "config_name": "clinical_knowledge",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -327,39 +322,32 @@
     "splits": {
       "auxiliary_train": {
         "name": "auxiliary_train",
-        "num_bytes": 160601257,
+        "num_bytes": 161000625,
         "num_examples": 99842,
-        "dataset_name": "mmlu"
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes": 62742,
+        "num_bytes": 68572,
         "num_examples": 265,
-        "dataset_name": "mmlu"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 6652,
+        "num_bytes": 7290,
         "num_examples": 29,
-        "dataset_name": "mmlu"
+        "dataset_name": null
       },
       "dev": {
         "name": "dev",
-        "num_bytes": 1198,
+        "num_bytes": 1308,
         "num_examples": 5,
-        "dataset_name": "mmlu"
-      }
-    },
-    "download_checksums": {
-      "data.tar": {
-        "num_bytes": 166184960,
-        "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b"
+        "dataset_name": null
       }
     },
-    "download_size": 166184960,
-    "post_processing_size": null,
-    "dataset_size": 160671849,
-    "size_in_bytes": 326856809
+    "download_size": 47213955,
+    "dataset_size": 161077795,
+    "size_in_bytes": 208291750
   },
   "college_biology": {
     "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n",