# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current
# dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTICE:
# this script is derivative work of
# https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py
"""lenu - Legal Entity Name Understanding"""
from io import BytesIO
import os
import datasets
# datasets.builder re-exports datasets.utils.logging; its tqdm wrapper
# respects the library's progress-bar settings
from datasets.builder import logging
import fsspec
import pandas
from sklearn.model_selection import train_test_split


_DESCRIPTION = """\
This dataset contains legal entity names from the Global LEI System, in
which each entity is assigned a unique Legal Entity Identifier (LEI) code
(ISO 17442), along with the corresponding Entity Legal Form (ELF) code
(ISO 20275), which specifies the legal form of each entity.
"""
_HOMEPAGE = "https://www.gleif.org"
_LICENSE = "cc0-1.0"
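
# A minimal usage sketch (hypothetical repo id; this assumes the script is
# published on the Hugging Face Hub, e.g. as "Sociovestix/lenu"):
#
#   from datasets import load_dataset
#   ds = load_dataset("Sociovestix/lenu", "US-DE")
#   ds["train"][0]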

# Change this URL (and the classnames below) when updating to a newer
# version of the GLEIF dataset
URL = (
"https://goldencopy.gleif.org/api/v2/golden-copies/publishes/lei2/20240604-0000.csv"
)
classnames = {
"AT": [
"AXSB",
"EQOV",
"9999",
"ONF1",
"8888",
"JTAV",
"DX6Z",
"5WWO",
"ECWU",
"JJYT",
"E9OX",
"UI81",
"GVPD",
"NIJH",
"8XDW",
"1NOX",
"CAQ1",
"JQOI",
"O65B",
"G3R6",
"69H1",
],
"AU": [
"TXVC",
"8888",
"ADXG",
"R4KK",
"7TPC",
"LZFR",
"BC38",
"J4JC",
"Q82Q",
"6W6X",
"XHCV",
"PQHL",
],
"CH": [
"3EKS",
"9999",
"7MNN",
"MVII",
"FJG4",
"8888",
"FLNB",
"2JZ4",
"54WI",
"XJOT",
"H781",
"QSI2",
"W6A7",
"L5DU",
"5BEZ",
"E0NE",
"AZA0",
"2B81",
"HX77",
"CQMY",
"MRSY",
"GP8M",
"FFTN",
"M848",
"TL87",
"2XJA",
"DP2E",
"BF9N",
],
"CN": [
"ECAK",
"8888",
"LURL",
"B5UZ",
"1IWK",
"I39S",
"9999",
"SH05",
"RV48",
"OH9O",
"YXJ5",
"2M6Y",
"V816",
"BDTI",
"OMUD",
],
"CZ": [
"9HLU",
"6CQN",
"9RVC",
"6D9L",
"ZQO8",
"8888",
"QIEL",
"95G8",
"3G3D",
"SNWJ",
"J8PB",
"JCAD",
"CATU",
"CIO8",
"QS6A",
"CD28",
"IQ9O",
"UFDA",
"7OZQ",
"6FAI",
"NI3I",
"QQ49",
"Q25I",
"5KU5",
"BL4B",
"G2I3",
"QJ0F",
"4UB2",
"FY1B",
"VIE3",
"917C",
"LJL0",
"R2XE",
"OVKW",
"MAVU",
"3RMA",
"PFE5",
"MBUU",
"NQHQ",
"ET6Z",
"HQPK",
"XG70",
"NPH3",
"NJ87",
"D1VK",
],
"DE": [
"2HBR",
"6QQB",
"XLWA",
"V2YH",
"QZ3L",
"63KS",
"SQKS",
"8Z6G",
"YJ4C",
"8888",
"T0YJ",
"SCE1",
"FR3V",
"AZFE",
"40DB",
"2YZO",
"SGST",
"OL20",
"US8E",
"13AV",
"FEBD",
"9999",
"9JGX",
"79H0",
"D40E",
"8CM0",
"JNDX",
"7J3S",
"AMKW",
"JMVF",
"SUA1",
"YA01",
],
"DK": [
"H8VP",
"PIOI",
"599X",
"ZRPO",
"D4PU",
"40R4",
"FUKI",
"PMJW",
"NUL8",
"8888",
"9KSX",
"7WRN",
"GFXN",
"9999",
"PZ6Y",
"F7JY",
"37UT",
"1MWR",
"WU7R",
"GULL",
"FW7S",
"5QS7",
],
"EE": [
"9LJA",
"JC0Y",
"PRTB",
"8888",
"LVEQ",
"1NKP",
"VSEV",
"BE56",
"I1UP",
"752Q",
"J34T",
"LA47",
"8ZQE",
"3UPJ",
],
"ES": [
"5RDO",
"DP3Q",
"S0Z5",
"8888",
"R6UT",
"MDOL",
"1QU8",
"FH4R",
"8EHB",
"S6MS",
"JB2M",
"CUIH",
"K0RI",
"GJL1",
"956I",
"1G29",
"AXS5",
"JTV5",
"9FPZ",
"A0J6",
"TUHS",
"4SJR",
"TLCJ",
"S6X7",
"I2WU",
"A97B",
"UJ35",
"AJ9U",
"IAS6",
"SS0L",
"ARDP",
"1ZHJ",
"1SL4",
"TDD5",
"R2L8",
"4S57",
"B0V5",
"DDES",
"IT6N",
"XYGP",
],
"FI": [
"DKUW",
"5WI2",
"K6VE",
"8888",
"8WJ7",
"HEOB",
"V0TJ",
"UXEW",
"NV7C",
"2RK5",
"XJH3",
"VOTI",
"9999",
"YK5G",
"PPMX",
"BKVI",
"IYF9",
"BKQO",
"EE90",
"8HGS",
"4H61",
"DAFV",
"MRW9",
"ZMTL",
"SJL9",
"R39F",
"SDPE",
"N3LC",
"97PB",
"EDZP",
"6PEQ",
"K09E",
"DMT8",
"SKGX",
"KHI5",
"37GR",
"T3K4",
"UMF0",
"HTT9",
"SQS1",
"OXLO",
"R6UB",
"9AUC",
"Z38E",
"DL9Z",
"760X",
"V42B",
"1AFG",
"1YIR",
"SUHV",
],
"GB": [
"8888",
"H0PO",
"B6ES",
"G12F",
"Z0EY",
"57V7",
"AVYY",
"ID30",
"WBQU",
"VV0W",
"7T8N",
"XLZV",
"STX7",
"JTCO",
"Q0M5",
"9B78",
"9999",
"4GJI",
"NBTW",
"E12O",
"BX6Y",
"IYXU",
"17R0",
"468Q",
"60IF",
"ZQ6S",
"TT2H",
],
"HU": [
"P9F2",
"BKUX",
"8VH3",
"S3DA",
"EO9F",
"M1DW",
"8UEG",
"BJ8Q",
"BMYJ",
"TSVO",
"2A44",
"8888",
"DPY1",
"DN6F",
"QYV5",
"876R",
"4QRE",
"9999",
"ESTU",
"ZQAQ",
"2LB5",
"4C5L",
"OII5",
"BSK1",
"LNY0",
"V3LT",
"4WV7",
"J6MO",
"Y64R",
"995K",
"XW5U",
"HTJD",
],
"IE": [
"LGWG",
"8888",
"9999",
"MNQ7",
"VYAX",
"JXDX",
"KMFX",
"2GV9",
"LZIC",
"C58S",
"DWS3",
"HNJK",
"5AX8",
"54SK",
"URQH",
"9BPE",
"FF1D",
"ZJS8",
"363J",
],
"JP": [
"T417",
"8888",
"DYQK",
"7QQ0",
"N3JU",
"R4LR",
"9999",
"IUVI",
"MXMH",
"2NRQ",
"VQLD",
"5MVV",
],
"KY": [
"OSBR",
"8888",
"6XB7",
"MPUG",
"XAQA",
"T5UM",
"MP7S",
"4XP8",
"K575",
"JDX6",
"9999",
"8HR7",
"SNUK",
],
"LI": [
"TV8Y",
"TMU1",
"BSZ8",
"7RRP",
"1DGT",
"8888",
"53QF",
"WAK8",
"Y8LH",
"IF49",
"9999",
"32HC",
"ANSR",
"EV7F",
"1SOY",
],
"LU": [
"UDY2",
"9999",
"DVXS",
"8888",
"5GGB",
"AIR5",
"U8KA",
"81G5",
"63P9",
"SQ1A",
"V19Y",
"WCEP",
"2JEI",
"HHR4",
"EUT4",
"BEAN",
"STBC",
"V5OS",
"2S2U",
"ZFFA",
"ATQY",
"V7QY",
"9C91",
"7SIZ",
"BKAB",
"68J6",
"2IGL",
],
"NL": [
"54M6",
"V44D",
"B5PM",
"8888",
"EZQW",
"JHK5",
"CODH",
"NFFH",
"L7HX",
"A0W7",
"8VFX",
"4QXM",
"BBEB",
"62Y3",
"9999",
"33MN",
"Y3ZB",
"9AAK",
"DEO1",
"GNXT",
"M1IZ",
"UNJ2",
"5WU6",
],
"NO": [
"YI42",
"O0EU",
"326Y",
"8888",
"EXD7",
"FSBD",
"3C7U",
"LJJW",
"V06W",
"IQGE",
"K5P8",
"8S9H",
"KX7D",
"3L58",
"R71C",
"CF5L",
"BJ65",
"ZQ0Q",
"9999",
"PB3V",
"M9IQ",
"9DI1",
"AEV1",
"50TD",
"YTMC",
"Q0Q1",
"5ZTZ",
],
"PL": [
"O7XB",
"RBHP",
"FJ0E",
"BSJT",
"8888",
"ZVVM",
"13ZV",
"OMX0",
"96XK",
"629I",
"H7OD",
"8TOF",
"WUJ2",
"T7PB",
"B21W",
"SP4S",
"ZZKE",
"AL9T",
"LT9U",
"KM66",
"JCKO",
"SVA3",
"60BG",
"J3A3",
"3BJG",
"WNX1",
"QUX1",
"FQ5Y",
"5F76",
"WOK7",
"QYL4",
"GZE5",
"SMIS",
"CY1M",
"YLZL",
],
"PT": [
"6L6P",
"USOG",
"8888",
"DFE5",
"ALPT",
"Z0NE",
"P5S3",
"VF4C",
"A8CT",
"1HGD",
"QFXD",
"KUUV",
"ZSWE",
"OXUC",
"MFHR",
"N66B",
"ZILA",
"YMLD",
"W9W3",
"9999",
"6IK8",
"PIDC",
"XD16",
"VALH",
],
"SE": [
"XJHM",
"CX05",
"8888",
"BEAY",
"E9BI",
"9999",
"BYQJ",
"OJ9I",
"1TN0",
"C61P",
"AZTO",
"27AW",
"2UAX",
"O1QI",
"54P7",
"G04R",
"M0Y0",
"SSOM",
"381R",
"PDQ0",
"UKOL",
"44CQ",
],
"US-CA": [
"8888",
"K7YU",
"H1UM",
"EI4J",
"5HQ4",
"PZR6",
"7CDL",
"SQ7B",
"KQXA",
"CVXK",
"G1P6",
"LYRX",
"9999",
"O6AU",
"N295",
"BADE",
],
"US-DE": [
"HZEH",
"8888",
"XTIQ",
"T91T",
"4FSX",
"QF4W",
"1HXP",
"TGMR",
"JU79",
"9999",
"9ASJ",
],
"US-MA": [
"8888",
"Z73Z",
"BYFU",
"6I75",
"ZJTK",
"GOGQ",
"QX9N",
"G76T",
"7G8G",
"22EO",
"R7QO",
"CAGH",
"YP04",
"9999",
],
"US-NY": [
"8888",
"51RC",
"PJ10",
"SDX0",
"BO6L",
"XIZI",
"M0ER",
"4VH5",
"EPCY",
"9999",
"D6JI",
],
"VG": [
"6EH6",
"8888",
"9999",
"YOP9",
"KORB",
"1GR6",
"Q62B",
"ZHED",
"N28C",
"BST2",
"GLCI",
"JS65",
],
"ZA": [
"GQVQ",
"8888",
"XE4Z",
"3QSR",
"4YUU",
"R155",
"MZT6",
"J7L0",
"9999",
"U89P",
],
}
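
# Note: "8888" and "9999" are reserved codes in the ISO 20275 ELF code list
# (used when no regular legal form code applies to an entity); they appear
# in most jurisdictions above.
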
relevant_cols = [
"LEI",
"Entity.LegalName",
"Entity.LegalForm.EntityLegalFormCode",
"Entity.LegalJurisdiction",
"Entity.EntityCategory",
"Entity.EntityStatus",
"Registration.RegistrationStatus",
]
COL_LEI, COL_NAME, COL_ELF, COL_JUR, COL_CAT, COL_ESTATUS, COL_RSTATUS = relevant_cols


def load_data(f, jurisdiction, compression=None):
    """Read the GLEIF golden copy CSV in chunks (to limit peak memory) and
    keep only the rows belonging to the given jurisdiction."""
    chunks = []
with pandas.read_csv(
f,
compression=compression,
low_memory=True,
dtype=str,
# the following will prevent pandas from converting words like
# 'NA' to NaN. We want to work with the LEI data as is.
na_values=[""],
keep_default_na=False,
usecols=relevant_cols,
chunksize=100000,
) as lei_data_reader:
        for chunk in logging.tqdm(lei_data_reader, desc="Loading and preparing data..."):
# filter by jurisdiction
chunk = chunk[chunk[COL_JUR] == jurisdiction]
chunks.append(chunk)
lei_data = pandas.concat(chunks)
del chunks
return lei_data
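
# A local usage sketch for load_data (the CSV path is illustrative; it follows
# the inner-file naming used in _split_generators below):
#
#   lei_data = load_data("20240604-0000-gleif-goldencopy-lei2-golden-copy.csv", "DE")
#   lei_data[COL_NAME].head()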


def split_data(data, split_size=(0.7, 0.1, 0.2)):
    """Stratified train/validation/test split with overall fractions given
    by split_size."""
    # we apply two subsequent splits to perform a train/validation/test split
X_train_, X_test, y_train_, _ = train_test_split(
data,
data[COL_ELF],
test_size=split_size[2],
stratify=data[COL_ELF],
random_state=42,
)
X_train, X_val, _, _ = train_test_split(
X_train_,
y_train_,
test_size=split_size[1] / (split_size[0] + split_size[1]),
stratify=y_train_,
random_state=42,
)
return X_train, X_val, X_test
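
# With the default split_size=(0.7, 0.1, 0.2), the first split holds out 20%
# for test; the second split then uses test_size = 0.1 / (0.7 + 0.1) = 0.125,
# i.e. 12.5% of the remaining 80% = 10% of all data, giving 70/10/20 overall.
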
VERSION = datasets.Version("0.1.0")


class LENU(datasets.GeneratorBasedBuilder):
VERSION = VERSION
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name=jur,
version=VERSION,
description=f"LEI data (LegalName and Entity Legal Form Code) for legal entities in Jurisdiction {jur}",
)
for jur in classnames.keys()
]
    DEFAULT_CONFIG_NAME = "US-DE"

    def _info(self):
features = datasets.Features(
{
"LEI": datasets.Value("string"),
"Entity.LegalName": datasets.Value("string"),
"Entity.LegalForm.EntityLegalFormCode": datasets.features.ClassLabel(
names=classnames.get(self.config.name)
),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
# TODO check if the supervised_keys attribute makes sense here:
# supervised_keys=("sentence", "label"),
homepage=_HOMEPAGE,
license=_LICENSE,
)

    def _split_generators(self, dl_manager):
checkpoint = os.path.basename(URL).replace(".csv", "")
inner_file = f"{checkpoint}-gleif-goldencopy-lei2-golden-copy.csv"
if dl_manager.is_streaming: # this means we are on the hub
# this is somewhat of a hack
with fsspec.open(URL, "rb").open() as fp:
# for some reason, handing over fp to pandas.read_csv directly
# without wrapping it into a BytesIO raises BadZipFile
fp = BytesIO(fp.read())
data_jur = load_data(fp, self.config.name, compression="zip")
        else:  # local build (download and extract the golden copy first)
data_dir = dl_manager.download_and_extract(URL)
file_path = (
os.path.join(data_dir, inner_file)
if not data_dir.endswith(inner_file)
else data_dir
)
data_jur = load_data(file_path, self.config.name)
data_jur = data_jur[
(data_jur[COL_JUR] == self.config.name)
& (data_jur[COL_ESTATUS] == "ACTIVE")
& (data_jur[COL_RSTATUS] == "ISSUED")
]
# data_jur[COL_ELF] = data_jur[COL_ELF].astype(str)
        # keep only ELF codes that appear at least 3 times, so every class
        # has enough examples for stratified splitting
        elf_counts = data_jur[COL_ELF].value_counts()
        codes_to_keep = elf_counts[elf_counts >= 3].index
        data_jur_filtered = data_jur[data_jur[COL_ELF].isin(codes_to_keep)]
train, val, test = split_data(data_jur_filtered)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data": train,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data": val,
"split": "validation",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data": test,
"split": "test",
},
),
]

    def _generate_examples(self, data, split):
        # `split` is unused here; it is provided through gen_kwargs by convention
for i, row in data.iterrows():
yield i, {
k: row[k]
for k in [
"LEI",
"Entity.LegalName",
"Entity.LegalForm.EntityLegalFormCode",
]
}
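

# A minimal local smoke test (a sketch: assumes a `datasets` version that
# still supports script-based datasets; newer releases may also require
# trust_remote_code=True in load_dataset):
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="US-DE")
    print(ds)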