"""OAB Exams dataset"""

import datasets
from collections import defaultdict

_CITATION = """@misc{delfino2017passing,
      title={Passing the Brazilian OAB Exam: data preparation and some experiments}, 
      author={Pedro Delfino and Bruno Cuconato and Edward Hermann Haeusler and Alexandre Rademaker},
      year={2017},
      eprint={1712.05128},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """
This dataset contains the Brazilian bar exams administered by the Ordem dos Advogados do Brasil (OAB) from 2010 to 2018.
In Brazil, all legal professionals must demonstrate their knowledge of the law and its application by passing the OAB exams, the national bar exams. The OAB exams therefore provide an excellent benchmark for the performance of legal information systems, since passing the exam would arguably signal that a system has acquired a capacity for legal reasoning comparable to that of a human lawyer.
"""

_HOMEPAGE = "https://github.com/legal-nlp/oab-exams"

# Hugging Face Hub dataset with the raw exam questions, loaded in _split_generators
_URL = "eduagarcia/oab_exams"

# exams held out for the validation split; all remaining exams form the test split
_EXAM_IDS_DEV = ["2010-01"]

class OABExamsNoTrain(datasets.GeneratorBasedBuilder):
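    """OAB Exams benchmark without a training split.

    Exam 2010-01 is held out as the validation split, with its questions
    interleaved by question type; every other exam goes to the test split.
    Nullified questions are dropped from both splits.
    """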

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question_number": datasets.Value("int32"),
                    "exam_id": datasets.Value("string"),
                    "exam_year": datasets.Value("string"),
                    "question_type": datasets.Value("string"),
                    "nullified": datasets.Value("bool"),
                    "question": datasets.Value("string"),
                    "choices": datasets.Sequence(feature={
                        "text": datasets.Value("string"),
                        "label": datasets.Value("string")
                    }),
                    "answerKey": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
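
    # Illustrative record shape (field values here are hypothetical; note that
    # a Sequence of dict features is materialized as a dict of lists):
    # {"id": "2010-01_1", "question_number": 1, "exam_id": "2010-01",
    #  "exam_year": "2010", "question_type": "ETHICS", "nullified": False,
    #  "question": "...",
    #  "choices": {"label": ["A", "B", "C", "D"], "text": [...]},
    #  "answerKey": "A"}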

    def _split_generators(self, dl_manager):
        dataset = datasets.load_dataset(_URL, split="train")
        # remove nullified (annulled) questions
        dataset = dataset.filter(lambda example: not example['nullified'])

        dataset_dev = dataset.filter(lambda example: example['exam_id'] in _EXAM_IDS_DEV)
        dataset_test = dataset.filter(lambda example: example['exam_id'] not in _EXAM_IDS_DEV)

        # group the validation indices by question type (area of law)
        dataset_ids_by_ex_type = defaultdict(list)
        for i, ex_type in enumerate(dataset_dev['question_type']):
            dataset_ids_by_ex_type[ex_type].append(i)

        # interleave the indices round-robin across question types, so that
        # consecutive validation examples cycle through the areas of law.
        # E.g. types [A, A, B, C] at indices [0, 1, 2, 3] yield the order
        # [0, 2, 3, 1], i.e. A, B, C, A.
        new_grouped_index = []
        ex_types = list(dataset_ids_by_ex_type.keys())
        while len(new_grouped_index) != len(dataset_dev):
            for ex_type in ex_types:
                if len(dataset_ids_by_ex_type[ex_type]) > 0:
                    new_grouped_index.append(dataset_ids_by_ex_type[ex_type].pop(0))

        dataset_dev_reorder = dataset_dev.select(new_grouped_index)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "dataset": dataset_dev_reorder,
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "dataset": dataset_test,
                }
            )
        ]

    def _generate_examples(self, dataset):
        for i, example in enumerate(dataset):
            yield i, example
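
# Usage sketch (assumptions: this script is saved locally under the
# hypothetical name "oab_exams_no_train.py", and your `datasets` version
# requires trust_remote_code=True to execute loading scripts):
#
#   from datasets import load_dataset
#
#   dev = load_dataset("oab_exams_no_train.py", split="validation",
#                      trust_remote_code=True)
#   test = load_dataset("oab_exams_no_train.py", split="test",
#                       trust_remote_code=True)
#   print(dev[0]["question"], dev[0]["answerKey"])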