init
- .gitattributes +3 -0
- data/t_rex.filter.test.jsonl +3 -0
- data/t_rex.filter.train.jsonl +3 -0
- data/t_rex.filter.validation.jsonl +3 -0
- filtering.py +23 -0
- t_rex.py +2 -2
.gitattributes
CHANGED
@@ -57,3 +57,6 @@ data/t_rex.raw.train.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.raw.validation.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.raw.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.raw.test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter.test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter.train.jsonl filter=lfs diff=lfs merge=lfs -text
data/t_rex.filter.test.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a04e08eaf8b045a1bdfa357b876fd2792e122f68d4b2f31bcb3c901ad017f229
+size 101090726
data/t_rex.filter.train.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:374202ec40472f65e081ac96afee2fe946cc87b7fe50b61dc121089f6c23f8ae
+size 471939364
data/t_rex.filter.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0e363242c1ae0ee05ff76868740b2e8859cc3c45866b98cc4493b869e765236
+size 101165971
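The three data/t_rex.filter.*.jsonl files above are Git LFS pointers, not the data itself: each records the SHA-256 and byte size of the blob that LFS serves. A minimal sketch of checking a downloaded copy against one of these pointers, assuming a local path for the resolved file (the parse_pointer/verify helpers are illustrative, not part of this commit):

import hashlib

def parse_pointer(pointer_path):
    # A Git LFS pointer is a few "key value" lines (version, oid, size).
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    return fields

def verify(pointer_path, data_path):
    fields = parse_pointer(pointer_path)
    expected_oid = fields['oid'].split(':', 1)[1]  # drop the "sha256:" prefix
    expected_size = int(fields['size'])
    digest, size = hashlib.sha256(), 0
    with open(data_path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# e.g. verify("data/t_rex.filter.validation.jsonl", "/tmp/t_rex.filter.validation.jsonl")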
filtering.py
ADDED
@@ -0,0 +1,23 @@
+import json
+
+stopwords = ["he", "she", "they", "it"]
+
+
+def filtering(entry):
+    if entry['object'].lower() in stopwords or entry['subject'].lower() in stopwords:
+        return False
+    if entry['object'].islower() and entry['subject'].islower():
+        return False
+    return True
+
+
+full_data = {}
+for s in ['train', 'validation', 'test']:
+    with open(f"data/t_rex.raw.{s}.jsonl") as f:
+        data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+    data = [i for i in data if filtering(i)]
+    full_data[s] = data
+    with open(f"data/t_rex.filter.{s}.jsonl", 'w') as f:
+        f.write('\n'.join([json.dumps(i) for i in data]))
+
+    # full_data[s] = [i for i in data if filtering(i)]
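filtering() drops a triple when the subject or object is one of the pronoun stopwords, or when both are written entirely in lowercase (a rough proxy for not being named entities); everything else is kept and written to data/t_rex.filter.{split}.jsonl. A small standalone sketch of that behavior on hand-made entries, using only the two fields the predicate inspects (the example values are illustrative, not taken from the dataset):

stopwords = ["he", "she", "they", "it"]

def filtering(entry):
    # Same predicate as in the script above, repeated so the example runs standalone.
    if entry['object'].lower() in stopwords or entry['subject'].lower() in stopwords:
        return False
    if entry['object'].islower() and entry['subject'].islower():
        return False
    return True

# Hand-made entries carrying only the two fields the predicate inspects.
assert filtering({"subject": "Barack Obama", "object": "Hawaii"})   # kept: capitalised entity pair
assert not filtering({"subject": "He", "object": "Hawaii"})         # dropped: pronoun stopword
assert not filtering({"subject": "water", "object": "liquid"})      # dropped: both all-lowercase

Note that filtering.py runs the whole conversion at import time (the loop sits at module level), so reusing the predicate elsewhere means copying it or putting the loop behind an if __name__ == '__main__' guard.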
t_rex.py
CHANGED
@@ -5,7 +5,7 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """T-Rex dataset."""
 _NAME = "t_rex"
-_VERSION = "0.0.
+_VERSION = "0.0.2"
 _CITATION = """
 @inproceedings{elsahar2018t,
   title={T-rex: A large scale alignment of natural language with knowledge base triples},
@@ -17,7 +17,7 @@ _CITATION = """
 
 _HOME_PAGE = "https://github.com/asahi417/relbert"
 _URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
-_TYPES = ["raw"]
+_TYPES = ["raw", "filter"]
 _URLS = {i: {
     str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.train.jsonl'],
     str(datasets.Split.VALIDATION): [f'{_URL}/t_rex.{i}.validation.jsonl'],
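With "filter" added to _TYPES and the version bumped to 0.0.2, the filtered splits should be downloadable next to the raw ones. A minimal usage sketch, assuming each entry of _TYPES is exposed as a dataset config of the same name (the usual pattern for this kind of loader; the config wiring itself is not shown in these hunks):

from datasets import load_dataset

# "raw" was already available; "filter" is the config added by this commit.
raw = load_dataset("relbert/t_rex", "raw")
filtered = load_dataset("relbert/t_rex", "filter")

print(raw["train"].num_rows, filtered["train"].num_rows)  # the filtered split should be smaller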