init

- .gitattributes +1 -0
- README.md +5 -13
- check_predicate.py +0 -0
- data/{t_rex.filter.test.jsonl → t_rex.filter.jsonl} +2 -2
- data/t_rex.filter.validation.jsonl +0 -3
- data/{t_rex.filter.train.jsonl → t_rex.raw.jsonl} +2 -2
- data/t_rex.raw.test.jsonl +0 -3
- data/t_rex.raw.train.jsonl +0 -3
- data/t_rex.raw.validation.jsonl +0 -3
- filtering.py → filtering_denoise.py +7 -8
- stats.py → filtering_purify.py +9 -10
- process.py +5 -9
- t_rex.py +11 -6

.gitattributes
CHANGED

@@ -60,3 +60,4 @@ data/t_rex.raw.test.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.filter.validation.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.filter.test.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.filter.train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter.jsonl filter=lfs diff=lfs merge=lfs -text
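
The added line tells Git LFS to track the merged JSONL file, so the repository stores a small pointer (the `version` / `oid sha256:` / `size` stanza visible in the data file diffs below) instead of the multi-hundred-MB payload; such an entry is typically appended by running `git lfs track "data/t_rex.filter.jsonl"`.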

README.md
CHANGED

@@ -22,19 +22,11 @@ We split the raw T-REX dataset into train/validation/test split by the ratio of
 
 We apply filtering to keep triples with alpha-numeric subject and object, as well as triples with at least either of subject or object is a named-entity.
 
-| filter | 463,521 | 99,550 | 99,408 | 662,479 |
-
-Number of unique predicate.
-
-| Dataset | `train` | `validation` | `test` | `all` |
-|-------:|-------:|------------:|-------:|-----:|
-| raw | 894 | 717 | 700 | 2,311 |
-| filter | 780 | 614 | 616 | 2,010 |
+| Dataset | Raw | Filter |
+|--------:|----:|-------:|
+| Triples | 941,663 | 662,482 |
+| Predicate| 931 | 818 |
+| Entity | 270,801 | 197,302 |
 
 ### Filtering to Purify the Dataset
 We reduce the size of the dataset by applying filtering based on the number of predicates and entities in the triples.
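
The Raw/Filter columns of the new table (triples, unique predicates, unique entities) can be recomputed directly from the two JSONL files. A minimal sketch, assuming one JSON object per line with `subject`, `object`, and a `predicate` field (the first two appear in process.py below; the predicate key name is an assumption):

    import json

    def count_stats(path):
        # One JSON object per line; skip blank lines, mirroring how the scripts below read the data.
        with open(path) as f:
            data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
        predicates = {i['predicate'] for i in data}  # 'predicate' field name is an assumption
        entities = {i['subject'] for i in data} | {i['object'] for i in data}
        return len(data), len(predicates), len(entities)

    for name in ['raw', 'filter']:
        print(name, count_stats(f"data/t_rex.{name}.jsonl"))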

check_predicate.py
ADDED

Empty file added; no content to show.

data/{t_rex.filter.test.jsonl → t_rex.filter.jsonl}
RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1b80224241efafb2b053bc630288a3adfbaf69422454b57c620c6f810da20aee
+size 603995857

data/t_rex.filter.validation.jsonl
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b690964fd198d372882d889cc6169184be5373335315fc31e5e34975fb4d4c89
-size 90752371

data/{t_rex.filter.train.jsonl → t_rex.raw.jsonl}
RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f50215186e30015c38d374bdd737d989497852aacfb9ae3aeb467ec47537ad6a
+size 841348762

data/t_rex.raw.test.jsonl
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7255c9f4d2d5155e76075dcd5cee901e16903425e041ce76f7fc389265e89518
-size 125893572

data/t_rex.raw.train.jsonl
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9c664392be27e68c9a8172f80d76aa17eedac052b45623274afd8c2b060f2284
-size 589023853

data/t_rex.raw.validation.jsonl
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:44f9999f051074e8684cccc9c492718be8f3b36b55963eeede4d76323fa6c496
-size 126431335

filtering.py → filtering_denoise.py
RENAMED

@@ -38,11 +38,10 @@ def filtering(entry):
     return True
 
 
-    f.write('\n'.join([json.dumps(i) for i in data]))
+with open(f"data/t_rex.raw.jsonl") as f:
+    data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+print(f"[before]: {len(data)}")
+data = [i for i in data if filtering(i)]
+print(f"[after] : {len(data)}")
+with open(f"data/t_rex.filter.jsonl", 'w') as f:
+    f.write('\n'.join([json.dumps(i) for i in data]))
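
Only the driver code at the bottom of filtering_denoise.py appears in this hunk; the `filtering(entry)` predicate itself lies outside the diff. Going by the README description (alpha-numeric subject/object, and at least one argument being a named entity), a hypothetical sketch of such a predicate could look like the following; the type fields and the exact "alpha-numeric" rule are invented for illustration and are not the repository's actual schema:

    import re

    def filtering(entry):
        # Hypothetical sketch, not the repository's implementation:
        # require the subject and object to consist of alpha-numeric text
        # (spaces allowed), and at least one argument to be a named entity.
        alnum = re.compile(r"^[A-Za-z0-9 ]+$")
        if not alnum.match(entry['subject']) or not alnum.match(entry['object']):
            return False
        # 'subject_type' / 'object_type' are invented field names for illustration.
        if entry.get('subject_type') != 'named_entity' and entry.get('object_type') != 'named_entity':
            return False
        return True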

stats.py → filtering_purify.py
RENAMED

@@ -15,13 +15,10 @@ from datasets import Dataset
 sns.set_theme(style="whitegrid")
 
 # load filtered data
-        _tmp = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
-        tmp += _tmp
-        splits += [s] * len(_tmp)
+with open(f"data/t_rex.filter.jsonl") as f:
+    _tmp = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+    tmp += _tmp
+    splits += [s] * len(_tmp)
 data = Dataset.from_list(tmp)
 df_main = data.to_pandas()
 df_main['split'] = splits

@@ -92,7 +89,7 @@ if __name__ == '__main__':
     e_dist_full = []
     data_size_full = []
     config = []
-    candidates = list(product([
+    candidates = list(product([4, 8, 12, 16], [100, 50, 25, 10]))
 
     # run filtering with different configs
     for min_e_freq, max_p_freq in candidates:

@@ -102,9 +99,11 @@ if __name__ == '__main__':
         data_size_full.append(data_size)
         config.append([min_e_freq, max_p_freq])
         # save data
+        out = {}
         for s in ['train', 'validation', 'test']:
+            out[s] = [i for i in new_data if i['split'] == s]
+        for s, v in out.items():
+            for i in v:
                 i.pop('split')
             with open(f"data/t_rex.clean.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.{s}.jsonl", 'w') as f:
                 f.write('\n'.join([json.dumps(i) for i in new_data_s]))
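
The purify script sweeps the (min_e_freq, max_p_freq) grid shown above and writes one file per configuration, with the pair encoded in the filename. The actual thresholding lives in the parts of filtering_purify.py not included in this hunk, so the rule below is only one plausible, clearly assumed reading (drop triples whose entities occur fewer than min_e_freq times or whose predicate occurs more than max_p_freq times):

    from collections import Counter
    from itertools import product

    def purify(data, min_e_freq, max_p_freq):
        # Frequency of each entity (subject or object position) and each predicate.
        e_freq = Counter([i['subject'] for i in data] + [i['object'] for i in data])
        p_freq = Counter(i['predicate'] for i in data)  # 'predicate' key is an assumption
        # Assumed rule, not the repository's confirmed implementation.
        return [i for i in data
                if e_freq[i['subject']] >= min_e_freq
                and e_freq[i['object']] >= min_e_freq
                and p_freq[i['predicate']] <= max_p_freq]

    candidates = list(product([4, 8, 12, 16], [100, 50, 25, 10]))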

process.py
CHANGED

@@ -7,7 +7,7 @@ import json
 import os
 from glob import glob
 from tqdm import tqdm
-from random import shuffle, seed
+# from random import shuffle, seed
 
 os.makedirs('data', exist_ok=True)
 f_writer = open('data/t_rex.raw.jsonl', 'w')

@@ -25,13 +25,9 @@ for i in tqdm(glob("*.json")):
     f_writer.write(json.dumps(out) + "\n")
 f_writer.close()
 
+
 with open('data/t_rex.raw.jsonl') as f:
     data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
-test = data[int(len(data) * 0.85):]
-for i, j in zip([train, val, test], ['train', 'validation', 'test']):
-    with open(f'data/t_rex.raw.{j}.jsonl', 'w') as f:
-        f.write('\n'.join([json.dumps(l) for l in i]))
+s = {i['subject'] for i in data}
+o = {i['object'] for i in data}
+s.update(o)
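
The new tail of process.py collects every subject and object into the set `s`, which presumably feeds the entity statistics (cf. the "Entity" row of the README table). The hunk ends at `s.update(o)`, so anything after it is not shown; a report such as the following is only an illustrative assumption:

    # Unique entities over subject and object positions (cf. the README "Entity" row).
    print(f"unique entities: {len(s)}")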

t_rex.py
CHANGED

@@ -5,7 +5,7 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """T-Rex dataset."""
 _NAME = "t_rex"
-_VERSION = "0.0.
+_VERSION = "0.0.3"
 _CITATION = """
 @inproceedings{elsahar2018t,
     title={T-rex: A large scale alignment of natural language with knowledge base triples},

@@ -18,11 +18,11 @@ _CITATION = """
 _HOME_PAGE = "https://github.com/asahi417/relbert"
 _URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
 _TYPES = ["raw", "filter"]
-_URLS = {i: {
+_URLS = {i: {str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.jsonl']} if i in ['raw', 'filter'] else {
     str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.train.jsonl'],
     str(datasets.Split.VALIDATION): [f'{_URL}/t_rex.{i}.validation.jsonl'],
-    str(datasets.Split.TEST): [f'{_URL}/t_rex.{i}.test.jsonl']
+    str(datasets.Split.TEST): [f'{_URL}/t_rex.{i}.test.jsonl']}
+    for i in _TYPES}
 
 
 class TREXConfig(datasets.BuilderConfig):

@@ -46,8 +46,13 @@ class TREX(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
+        if self.config.name in ['raw', 'filter']:
+            return [datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepaths": downloaded_file[str(datasets.Split.TRAIN)]})]
+        else:
+            return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
+                    for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
 
     def _generate_examples(self, filepaths):
         _key = 0
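
With this change the `raw` and `filter` configurations expose a single `train` split built from the merged JSONL files, while any other configuration would still map to per-split files. A short usage sketch via the `datasets` library (the repository id `relbert/t_rex` follows `_URL` above):

    from datasets import load_dataset

    # "raw" and "filter" are the configuration names defined by _TYPES.
    data = load_dataset("relbert/t_rex", "filter")
    print(data["train"][0])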