parquet-converter committed on
Commit
5fb27ca
·
1 Parent(s): bee4f71

Update parquet files

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitattributes +0 -38
  2. README.md +0 -165
  3. cameras/train_xlarge.json.gz +0 -3
  4. cameras/valid_large.json.gz +0 -3
  5. cameras/valid_xlarge.json.gz +0 -3
  6. cameras/valid_medium.json.gz → cameras_large/products-2017-test.parquet +2 -2
  7. cameras_large/products-2017-train.parquet +3 -0
  8. cameras/train_medium.json.gz → cameras_large/products-2017-validation.parquet +2 -2
  9. cameras/train_small.json.gz → cameras_medium/products-2017-test.parquet +2 -2
  10. cameras/train_large.json.gz → cameras_medium/products-2017-train.parquet +2 -2
  11. cameras/test.json.gz → cameras_medium/products-2017-validation.parquet +2 -2
  12. cameras/valid_small.json.gz → cameras_small/products-2017-test.parquet +2 -2
  13. cameras_small/products-2017-train.parquet +3 -0
  14. cameras_small/products-2017-validation.parquet +3 -0
  15. cameras_xlarge/products-2017-test.parquet +3 -0
  16. cameras_xlarge/products-2017-train.parquet +3 -0
  17. cameras_xlarge/products-2017-validation.parquet +3 -0
  18. computers/test.json.gz +0 -3
  19. computers/train_large.json.gz +0 -3
  20. computers/train_medium.json.gz +0 -3
  21. computers/train_small.json.gz +0 -3
  22. computers/train_xlarge.json.gz +0 -3
  23. computers/valid_large.json.gz +0 -3
  24. computers/valid_medium.json.gz +0 -3
  25. computers/valid_small.json.gz +0 -3
  26. computers/valid_xlarge.json.gz +0 -3
  27. computers_large/products-2017-test.parquet +3 -0
  28. computers_large/products-2017-train.parquet +3 -0
  29. computers_large/products-2017-validation.parquet +3 -0
  30. computers_medium/products-2017-test.parquet +3 -0
  31. computers_medium/products-2017-train.parquet +3 -0
  32. computers_medium/products-2017-validation.parquet +3 -0
  33. computers_small/products-2017-test.parquet +3 -0
  34. computers_small/products-2017-train.parquet +3 -0
  35. computers_small/products-2017-validation.parquet +3 -0
  36. computers_xlarge/products-2017-test.parquet +3 -0
  37. computers_xlarge/products-2017-train.parquet +3 -0
  38. computers_xlarge/products-2017-validation.parquet +3 -0
  39. products-2017.py +0 -265
  40. shoes/test.json.gz +0 -3
  41. shoes/train_large.json.gz +0 -3
  42. shoes/train_medium.json.gz +0 -3
  43. shoes/train_small.json.gz +0 -3
  44. shoes/train_xlarge.json.gz +0 -3
  45. shoes/valid_large.json.gz +0 -3
  46. shoes/valid_medium.json.gz +0 -3
  47. shoes/valid_small.json.gz +0 -3
  48. shoes/valid_xlarge.json.gz +0 -3
  49. shoes_large/products-2017-test.parquet +3 -0
  50. shoes_large/products-2017-train.parquet +3 -0
.gitattributes DELETED
@@ -1,38 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.model filter=lfs diff=lfs merge=lfs -text
11
- *.msgpack filter=lfs diff=lfs merge=lfs -text
12
- *.onnx filter=lfs diff=lfs merge=lfs -text
13
- *.ot filter=lfs diff=lfs merge=lfs -text
14
- *.parquet filter=lfs diff=lfs merge=lfs -text
15
- *.pb filter=lfs diff=lfs merge=lfs -text
16
- *.pt filter=lfs diff=lfs merge=lfs -text
17
- *.pth filter=lfs diff=lfs merge=lfs -text
18
- *.rar filter=lfs diff=lfs merge=lfs -text
19
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
- *.tar.* filter=lfs diff=lfs merge=lfs -text
21
- *.tflite filter=lfs diff=lfs merge=lfs -text
22
- *.tgz filter=lfs diff=lfs merge=lfs -text
23
- *.wasm filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- # Audio files - uncompressed
29
- *.pcm filter=lfs diff=lfs merge=lfs -text
30
- *.sam filter=lfs diff=lfs merge=lfs -text
31
- *.raw filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - compressed
33
- *.aac filter=lfs diff=lfs merge=lfs -text
34
- *.flac filter=lfs diff=lfs merge=lfs -text
35
- *.mp3 filter=lfs diff=lfs merge=lfs -text
36
- *.ogg filter=lfs diff=lfs merge=lfs -text
37
- *.wav filter=lfs diff=lfs merge=lfs -text
38
- *.json.gz filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,165 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - weak supervision
4
- - expert-generated
5
- language:
6
- - en
7
- language_bcp47:
8
- - en-US
9
- license:
10
- - unknown
11
- multilinguality:
12
- - monolingual
13
- pretty_name: products-2017
14
- size_categories:
15
- - 1K<n<10K
16
- - 10K<n<100K
17
- source_datasets:
18
- - original
19
- task_categories:
20
- - text-classification
21
- - data-integration
22
- task_ids:
23
- - entity-matching
24
- - identity-resolution
25
- - product-matching
26
- paperswithcode_id: wdc-products
27
- ---
28
-
29
- # Dataset Card for [products-2017]
30
-
31
- ## Table of Contents
32
- - [Table of Contents](#table-of-contents)
33
- - [Dataset Description](#dataset-description)
34
- - [Dataset Summary](#dataset-summary)
35
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
36
- - [Languages](#languages)
37
- - [Dataset Structure](#dataset-structure)
38
- - [Data Instances](#data-instances)
39
- - [Data Fields](#data-fields)
40
- - [Data Splits](#data-splits)
41
- - [Dataset Creation](#dataset-creation)
42
- - [Annotations](#annotations)
43
- - [Additional Information](#additional-information)
44
- - [Citation Information](#citation-information)
45
-
46
- ## Dataset Description
47
-
48
- - **Homepage:** [LSPCv2 Homepage](http://webdatacommons.org/largescaleproductcorpus/v2/index.html)
49
- - **Point of Contact:** [Ralph Peeters](mailto:[email protected])
50
-
51
- ### Dataset Summary
52
-
53
- Many e-shops have started to mark-up product data within their HTML pages using the schema.org vocabulary. The Web Data Commons project regularly extracts such data from the Common Crawl, a large public web crawl. The Web Data Commons Training and Test Sets for Large-Scale Product Matching contain product offers from different e-shops in the form of binary product pairs (with corresponding label "match" or "no match")
54
-
55
- In order to support the evaluation of machine learning-based matching methods, the data is split into training, validation and test set. We provide training and validation sets in four different sizes for four product categories. The labels of the test sets were manually checked while those of the training sets were derived using shared product identifiers from the Web via weak supervision.
56
-
57
- The data stems from the WDC Product Data Corpus for Large-Scale Product Matching - Version 2.0 which consists of 26 million product offers originating from 79 thousand websites.
58
-
59
-
60
- ### Supported Tasks and Leaderboards
61
-
62
- Entity Matching, Product Matching
63
-
64
- ### Languages
65
-
66
- English
67
-
68
- ## Dataset Structure
69
-
70
- ### Data Instances
71
-
72
- The data is structured as pairs of product offers with the corresponding match/non-match label. This is an example instance from the computers category:
73
-
74
- ```
75
- {"pair_id":"581109#16637861","label":0,"id_left":581109,"category_left":"Computers_and_Accessories","cluster_id_left":1324529,"brand_left":"\"Gigabyte\"@en","title_left":" \"Gigabyte Radeon RX 480 G1 Gaming 4096MB GDDR5 PCI-Express Graphics Card\"@en \"Gigabyte Gr| OcUK\"@en","description_left":"\"GV-RX480G1 GAMING-4GD, Core Clock: 1202MHz, Boost Clock: 1290MHz, Memory: 4096MB 7000MHz GDDR5, Stream Processors: 2304, Crossfire Ready, VR Ready, FreeSync Ready, 3 Years Warranty\"@en ","price_left":null,"specTableContent_left":null,"id_right":16637861,"category_right":"Computers_and_Accessories","cluster_id_right":107415,"brand_right":"\"Gigabyte\"@en","title_right":" \"Gigabyte Radeon RX 550 Gaming OC 2048MB GDDR5 PCI-Express Graphics Card\"@en \"Gigabyte Gr| OcUK\"@en","description_right":"\"GV-RX550GAMING OC-2GD, Boost: 1219MHz, Memory: 2048MB 7000MHz GDDR5, Stream Processors: 512, DirectX 12 Support, 3 Years Warranty\"@en ","price_right":null,"specTableContent_right":null}
76
- ```
77
-
78
- ### Data Fields
79
-
80
- - pair_id: unique identifier of a pair (string)
81
- - label: binary label, match or non-match (int)
82
-
83
- The following attributes are contained twice, once for the first and once for the second product offer
84
-
85
- - id: unique id of the product offer (int)
86
- - category: product category (string)
87
- - cluster_id: id of the product cluster from the original corpus this offer belongs to (int)
88
- - brand: brand of the product (string)
89
- - title: product title (string)
90
- - description: longer product description (string)
91
- - price: price of the product offer (string)
92
- - specTableContent: additional data found in specification tables on the webpage that contains the product offer (string)
93
-
94
- ### Data Splits
95
- - Computers
96
- - Test set - 1100 pairs
97
- - Small Train set - 2267 pairs
98
- - Small Validation set - 567 pairs
99
- - Medium Train set - 6475 pairs
100
- - Medium Validation set - 1619 pairs
101
- - Large Train set - 26687 pairs
102
- - Large Validation set - 6672 pairs
103
- - XLarge Train set - 54768 pairs
104
- - Xlarge Validation set - 13693 pairs
105
-
106
- - Cameras
107
- - Test set - 1100 pairs
108
- - Small Train set - 1508 pairs
109
- - Small Validation set - 378 pairs
110
- - Medium Train set - 4204 pairs
111
- - Medium Validation set - 1051 pairs
112
- - Large Train set - 16028 pairs
113
- - Large Validation set - 4008 pairs
114
- - XLarge Train set - 33821 pairs
115
- - Xlarge Validation set - 8456 pairs
116
-
117
- - Watches
118
- - Test set - 1100 pairs
119
- - Small Train set - 1804 pairs
120
- - Small Validation set - 451 pairs
121
- - Medium Train set - 5130 pairs
122
- - Medium Validation set - 1283 pairs
123
- - Large Train set - 21621 pairs
124
- - Large Validation set - 5406 pairs
125
- - XLarge Train set - 49255 pairs
126
- - Xlarge Validation set - 12314 pairs
127
-
128
- - Shoes
129
- - Test set - 1100 pairs
130
- - Small Train set - 1650 pairs
131
- - Small Validation set - 413 pairs
132
- - Medium Train set - 4644 pairs
133
- - Medium Validation set - 1161 pairs
134
- - Large Train set - 18391 pairs
135
- - Large Validation set - 4598 pairs
136
- - XLarge Train set - 33943 pairs
137
- - Xlarge Validation set - 8486 pairs
138
-
139
-
140
- ## Dataset Creation
141
-
142
- ### Annotations
143
-
144
- #### Annotation process
145
-
146
- - Training and Validation sets: distant supervision via shared schema.org product IDs
147
- - Test sets: Single expert annotator
148
-
149
- #### Who are the annotators?
150
-
151
- [Ralph Peeters](https://www.uni-mannheim.de/dws/people/researchers/phd-students/ralph-peeters/)
152
-
153
- ## Additional Information
154
-
155
- ### Citation Information
156
-
157
- ```
158
- @inproceedings{primpeli2019wdc,
159
- title={The WDC training dataset and gold standard for large-scale product matching},
160
- author={Primpeli, Anna and Peeters, Ralph and Bizer, Christian},
161
- booktitle={Companion Proceedings of The 2019 World Wide Web Conference},
162
- pages={381--386},
163
- year={2019}
164
- }
165
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cameras/train_xlarge.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c23f4bea284e030bcc6bf67b143a0252b9a88baa1868ae4b91647d1f51684072
3
- size 21291906
 
 
 
 
cameras/valid_large.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:84236bb649899400ee816fed8c1dda06c4bea78f02f8a1c08032fdef51613ba4
3
- size 2364699
 
 
 
 
cameras/valid_xlarge.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:b9c0527d4275b87e8ca2d13d93059407dfb78117b5941d0c9b349394dde33dfd
3
- size 5338288
 
 
 
 
cameras/valid_medium.json.gz → cameras_large/products-2017-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0bc9ffdbad348e1966bd624e3725bab54f1309452f075a7f65d973f35bed186f
3
- size 614504
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddf71b26730e8d2c1323f48eb8512fac49268c29356dac0e01bb578543073617
3
+ size 642862
cameras_large/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:723e46d97ccff227441b7565c4405537d9238ed6775e6152002861b08f3f3f85
3
+ size 11592844
cameras/train_medium.json.gz → cameras_large/products-2017-validation.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d0d597eb300adb2de5b860e9773bd81e23878e80ab612cf3ca9257b11ae76ce9
3
- size 2508508
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ab587b72041d751181602de75b90a9f73cd18657af307c034c64beff55e886e
3
+ size 2884679
cameras/train_small.json.gz → cameras_medium/products-2017-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:90eafd129c495d4120b3bdc4e0004d7e0b075669b299f434f89addf25468926f
3
- size 896323
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddf71b26730e8d2c1323f48eb8512fac49268c29356dac0e01bb578543073617
3
+ size 642862
cameras/train_large.json.gz → cameras_medium/products-2017-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4a39dbf9f3c5e62a4ab330e5cabe2e3ffa8268765a9b56ba6e566bb5021e1077
3
- size 9702430
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9393acea54394822f85b50c1b1c2431e562e3f44e87089b2e30eecfc41803c24
3
+ size 3021807
cameras/test.json.gz → cameras_medium/products-2017-validation.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bce167650dbf6c993e848baa9e2759ec91f4e0147c7032cec938eb0f6c777f2c
3
- size 662236
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57b65624d931df181d2ebebd6df84edfdc82dd38c3b32955881eb3ae52cc872a
3
+ size 783533
cameras/valid_small.json.gz → cameras_small/products-2017-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a8ad16d21e69067b593a48a8fe55bc77f5e914b98520d531474d10c7877a9dec
3
- size 240242
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddf71b26730e8d2c1323f48eb8512fac49268c29356dac0e01bb578543073617
3
+ size 642862
cameras_small/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd599443d32aabe2be58a9cad4ac5e8992c623ba9a09684995ccf185c143a61b
3
+ size 1123946
cameras_small/products-2017-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c502d0cebef2c1b9b9abd4ed99bac8f6dfe222e033b436c551aa72697cd52579
3
+ size 341470
cameras_xlarge/products-2017-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddf71b26730e8d2c1323f48eb8512fac49268c29356dac0e01bb578543073617
3
+ size 642862
cameras_xlarge/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e838c03e46f5b8de2583e6777872fa78dba57fc0c7bf185dfb95a32cf137f037
3
+ size 25446199
cameras_xlarge/products-2017-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b6545e66b54a4f9d423f6d3dba64edcf246bddba322b3dfd95996cb10513859
3
+ size 6381864
computers/test.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:daeee50fc827d32838365da9963239917f621e367f8944ec9c90196ee529c46b
3
- size 440473
 
 
 
 
computers/train_large.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:27f32be2bc7ad18c9da715c81d899391f7eaf3d5b431ab261b00acc0a608a31a
3
- size 10749627
 
 
 
 
computers/train_medium.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4b22ca10d19b3e0061bd53e22a4bed67595c981bfff38d27f231be36ed8649d8
3
- size 2562779
 
 
 
 
computers/train_small.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:013bd9c97b0b817d299e0a8ca3583692c8c0d12de0876590b16303bed12db0f2
3
- size 913653
 
 
 
 
computers/train_xlarge.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d19055be29b211d7c7efd08413e1180c91fa4f7180c4d01572af9479ac31609a
3
- size 21979464
 
 
 
 
computers/valid_large.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:524ad560d4865226149ad6c783bf9654ee1d8660c2f5aac827940b198d57d625
3
- size 2619716
 
 
 
 
computers/valid_medium.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8b11017fb8560165d7cc2c4884808ac95174847e8cd27b52cf59e711dfb1bfa1
3
- size 660571
 
 
 
 
computers/valid_small.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:01448826a98a9b4ccf63186caf021ab650fe870dcbfdc13f3de0e1df67c81367
3
- size 211906
 
 
 
 
computers/valid_xlarge.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ce4789d79d228fd3cbe6c4a8de4027119c7c7c9332ceb73d014cd6b79c61ed96
3
- size 5481832
 
 
 
 
computers_large/products-2017-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0a90ad5ed298fbf92643f1be4b645fe43f39ef95f566eaf80e9af034037fcbc
3
+ size 488789
computers_large/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13b357381cd384eb6984460f8043058af845761aed67c3b3ad9838544742a073
3
+ size 13155061
computers_large/products-2017-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3baa7000134861105ad9fe574665716dae4c9d25066e4aad1d08de14c120e188
3
+ size 3204605
computers_medium/products-2017-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0a90ad5ed298fbf92643f1be4b645fe43f39ef95f566eaf80e9af034037fcbc
3
+ size 488789
computers_medium/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:209491ee01540503df0c8b8942331a6427c68337fb85cd567c3223e7fa8c0141
3
+ size 3156703
computers_medium/products-2017-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4b830b5f3832be5da2788b81fe4f6bb4a7b27c7f264e08df790a83cfe50c90f
3
+ size 832716
computers_small/products-2017-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0a90ad5ed298fbf92643f1be4b645fe43f39ef95f566eaf80e9af034037fcbc
3
+ size 488789
computers_small/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a90948f593e00a6eb5fcf17ce25c9a63f86548c70c5fd7cfb649043bafab41e9
3
+ size 1172195
computers_small/products-2017-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5d89c5bcba28d36b5a3159d38fe9719f3a104ddaae458246e17e772eaa5f48b
3
+ size 297206
computers_xlarge/products-2017-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0a90ad5ed298fbf92643f1be4b645fe43f39ef95f566eaf80e9af034037fcbc
3
+ size 488789
computers_xlarge/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:859dbb86031678be7b93cfa2fa1e668340036adb749b1a3ee57ef5042356731c
3
+ size 26587868
computers_xlarge/products-2017-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a57795a24cbf69b6f600db4dab79a1734558c438b825789ee5de9db9e13cb94
3
+ size 6661874
products-2017.py DELETED
@@ -1,265 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """The WDC Product Data Corpus and Gold Standard for Large-Scale Product Matching - Version 2.0."""
15
-
16
- import json
17
-
18
- import datasets
19
-
20
- _CITATION = """\
21
- @inproceedings{primpeli2019wdc,
22
- title={The WDC training dataset and gold standard for large-scale product matching},
23
- author={Primpeli, Anna and Peeters, Ralph and Bizer, Christian},
24
- booktitle={Companion Proceedings of The 2019 World Wide Web Conference},
25
- pages={381--386},
26
- year={2019}
27
- }
28
- """
29
-
30
- _DESCRIPTION = """\
31
- Many e-shops have started to mark-up product data within their HTML pages using the schema.org vocabulary. The Web Data Commons project regularly extracts such data from the Common Crawl, a large public web crawl. The Web Data Commons Training and Test Sets for Large-Scale Product Matching contain product offers from different e-shops in the form of binary product pairs (with corresponding label "match" or "no match")
32
-
33
- In order to support the evaluation of machine learning-based matching methods, the data is split into training, validation and test set. We provide training and validation sets in four different sizes for four product categories. The labels of the test sets were manually checked while those of the training sets were derived using shared product identifiers from the Web via weak supervision.
34
-
35
- The data stems from the WDC Product Data Corpus for Large-Scale Product Matching - Version 2.0 which consists of 26 million product offers originating from 79 thousand websites.
36
- """
37
-
38
- _HOMEPAGE = "http://webdatacommons.org/largescaleproductcorpus/v2/index.html"
39
-
40
- _LICENSE = ""
41
-
42
- _BASE_DATA_PAT_FORMAT_STR = "{category}/"
43
-
44
- class Products2017Config(datasets.BuilderConfig):
45
- """The WDC Product Data Corpus and Gold Standard for Large-Scale Product Matching - Version 2.0."""
46
-
47
- def __init__(self, name, category: str, **kwargs):
48
- """BuilderConfig for WDC Products 2017.
49
- Args:
50
- category (str): The product category and training set size.
51
- """
52
-
53
- size = name.split('_')[1]
54
- # Initialize the base class.
55
-
56
- super(Products2017Config, self).__init__(
57
- name=name, **kwargs
58
- )
59
-
60
- # Additional attributes
61
- self.category = category
62
- self.size = size
63
- self.base_data_path = _BASE_DATA_PAT_FORMAT_STR.format(
64
- category=category
65
- )
66
-
67
- class Products2017(datasets.GeneratorBasedBuilder):
68
- """The WDC Product Data Corpus and Gold Standard for Large-Scale Product Matching - Version 2.0."""
69
-
70
- VERSION = datasets.Version("2.1.0")
71
-
72
- BUILDER_CONFIG_CLASS = Products2017Config
73
-
74
- BUILDER_CONFIGS = [
75
- Products2017Config(
76
- name='computers_xlarge',
77
- category='computers',
78
- version=VERSION,
79
- description="The computers xlarge dataset part of Products-2017"),
80
- Products2017Config(
81
- name='computers_large',
82
- category='computers',
83
- version=VERSION,
84
- description="The computers large dataset part of Products-2017"),
85
- Products2017Config(
86
- name='computers_medium',
87
- category='computers',
88
- version=VERSION,
89
- description="The computers medium dataset part of Products-2017"),
90
- Products2017Config(
91
- name='computers_small',
92
- category='computers',
93
- version=VERSION,
94
- description="The computers small dataset part of Products-2017"),
95
- Products2017Config(
96
- name='cameras_xlarge',
97
- category='cameras',
98
- version=VERSION,
99
- description="The cameras xlarge dataset part of Products-2017"),
100
- Products2017Config(
101
- name='cameras_large',
102
- category='cameras',
103
- version=VERSION,
104
- description="The cameras large dataset part of Products-2017"),
105
- Products2017Config(
106
- name='cameras_medium',
107
- category='cameras',
108
- version=VERSION,
109
- description="The cameras medium dataset part of Products-2017"),
110
- Products2017Config(
111
- name='cameras_small',
112
- category='cameras',
113
- version=VERSION,
114
- description="The cameras small dataset part of Products-2017"),
115
- Products2017Config(
116
- name='watches_xlarge',
117
- category='watches',
118
- version=VERSION,
119
- description="The watches xlarge dataset part of Products-2017"),
120
- Products2017Config(
121
- name='watches_large',
122
- category='watches',
123
- version=VERSION,
124
- description="The watches large dataset part of Products-2017"),
125
- Products2017Config(
126
- name='watches_medium',
127
- category='watches',
128
- version=VERSION,
129
- description="The watches medium dataset part of Products-2017"),
130
- Products2017Config(
131
- name='watches_small',
132
- category='watches',
133
- version=VERSION,
134
- description="The watches small dataset part of Products-2017"),
135
- Products2017Config(
136
- name='shoes_xlarge',
137
- category='shoes',
138
- version=VERSION,
139
- description="The shoes xlarge dataset part of Products-2017"),
140
- Products2017Config(
141
- name='shoes_large',
142
- category='shoes',
143
- version=VERSION,
144
- description="The shoes large dataset part of Products-2017"),
145
- Products2017Config(
146
- name='shoes_medium',
147
- category='shoes',
148
- version=VERSION,
149
- description="The shoes medium dataset part of Products-2017"),
150
- Products2017Config(
151
- name='shoes_small',
152
- category='shoes',
153
- version=VERSION,
154
- description="The shoes small dataset part of Products-2017"),
155
-
156
- ]
157
-
158
- DEFAULT_CONFIG_NAME = "computers_medium" # It's not mandatory to have a default configuration. Just use one if it make sense.
159
-
160
- def _info(self):
161
-
162
- features = datasets.Features(
163
- {
164
- "pair_id": datasets.Value("string"),
165
- "label": datasets.Value("int32"),
166
- "id_left": datasets.Value("int32"),
167
- "category_left": datasets.Value("string"),
168
- "cluster_id_left": datasets.Value("int32"),
169
- "brand_left": datasets.Value("string"),
170
- "title_left": datasets.Value("string"),
171
- "description_left": datasets.Value("string"),
172
- "price_left": datasets.Value("string"),
173
- "specTableContent_left": datasets.Value("string"),
174
- "id_right": datasets.Value("int32"),
175
- "category_right": datasets.Value("string"),
176
- "cluster_id_right": datasets.Value("int32"),
177
- "brand_right": datasets.Value("string"),
178
- "title_right": datasets.Value("string"),
179
- "description_right": datasets.Value("string"),
180
- "price_right": datasets.Value("string"),
181
- "specTableContent_right": datasets.Value("string"),
182
- }
183
- )
184
-
185
- return datasets.DatasetInfo(
186
- # This is the description that will appear on the datasets page.
187
- description=_DESCRIPTION,
188
- # This defines the different columns of the dataset and their types
189
- features=features, # Here we define them above because they are different between the two configurations
190
- # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
191
- # specify them. They'll be used if as_supervised=True in builder.as_dataset.
192
- supervised_keys=None,
193
- # Homepage of the dataset for documentation
194
- homepage=_HOMEPAGE,
195
- # License for the dataset if available
196
- license=_LICENSE,
197
- # Citation for the dataset
198
- citation=_CITATION,
199
- )
200
-
201
- def _split_generators(self, dl_manager):
202
- # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
203
-
204
- # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
205
- # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
206
- # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
207
- main_path = self.config.base_data_path
208
- size = self.config.size
209
- relevant_files = [f'{main_path}train_{size}.json.gz', f'{main_path}valid_{size}.json.gz', f'{main_path}test.json.gz']
210
-
211
- data_dir = dl_manager.download_and_extract(relevant_files)
212
-
213
- return [
214
- datasets.SplitGenerator(
215
- name=datasets.Split.TRAIN,
216
- # These kwargs will be passed to _generate_examples
217
- gen_kwargs={
218
- "filepath": data_dir[0],
219
- "split": "train",
220
- },
221
- ),
222
- datasets.SplitGenerator(
223
- name=datasets.Split.TEST,
224
- # These kwargs will be passed to _generate_examples
225
- gen_kwargs={
226
- "filepath": data_dir[2],
227
- "split": "test"
228
- },
229
- ),
230
- datasets.SplitGenerator(
231
- name=datasets.Split.VALIDATION,
232
- # These kwargs will be passed to _generate_examples
233
- gen_kwargs={
234
- "filepath": data_dir[1],
235
- "split": "validation",
236
- },
237
- ),
238
- ]
239
-
240
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
241
- def _generate_examples(self, filepath, split):
242
- # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
243
- with open(filepath, encoding="utf-8") as f:
244
- for key, row in enumerate(f):
245
- data = json.loads(row)
246
- yield key, {
247
- "pair_id": data["pair_id"],
248
- "label": data["label"],
249
- "id_left": data["id_left"],
250
- "category_left": data["category_left"],
251
- "cluster_id_left": data["cluster_id_left"],
252
- "brand_left": data["brand_left"],
253
- "title_left": data["title_left"],
254
- "description_left": data["description_left"],
255
- "price_left": data["price_left"],
256
- "specTableContent_left": data["specTableContent_left"],
257
- "id_right": data["id_right"],
258
- "category_right": data["category_right"],
259
- "cluster_id_right": data["cluster_id_right"],
260
- "brand_right": data["brand_right"],
261
- "title_right": data["title_right"],
262
- "description_right": data["description_right"],
263
- "price_right": data["price_right"],
264
- "specTableContent_right": data["specTableContent_right"]
265
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
shoes/test.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:cb5fb369bea226e8abc6d0fefcbbb74f4d4fb262774c6a9ecc3686d42c640683
3
- size 470891
 
 
 
 
shoes/train_large.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:fdf8c3b0f9516e7d7a24c41f6ed8fe8926b04f42f36969d148d68912acac5472
3
- size 8745243
 
 
 
 
shoes/train_medium.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:17d36cca4b6bbe6e25494245b14ddbe9323b9d4858e615ae1fa5670e02fb1c39
3
- size 2123481
 
 
 
 
shoes/train_small.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:cd26d5e1bae5efbac8134eb784a0b2c7d2b6691195d5b7b7eeb304683e6c557d
3
- size 757540
 
 
 
 
shoes/train_xlarge.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1718653a3b4a8dcd47b5c1f55d7fae4e3fcf3c04af13e4446cf49a4b48f7f504
3
- size 16435876
 
 
 
 
shoes/valid_large.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:03ce9ab66ccc8569917c1ba56f627cca43780746d26ed5fd2afba9f24f155ced
3
- size 2160668
 
 
 
 
shoes/valid_medium.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3b1f33af9314ea3826265af0cc4e26290da6ab8f194fd8f81d8764ae84fd9bc3
3
- size 545330
 
 
 
 
shoes/valid_small.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8d76d3c97888e892abbce436cc14334c862a84adc8929fd073089229c6f214b0
3
- size 194196
 
 
 
 
shoes/valid_xlarge.json.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:461e37861da72d786ce11da3b78033fe0785969bf41df0b49ef27d6e7e5f0f27
3
- size 4181524
 
 
 
 
shoes_large/products-2017-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3e12ff4e0c8343897c540d9a1d9dd40d6446d7550a083e46bf64f5d91c1c505
3
+ size 458618
shoes_large/products-2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92751d53b6f90e46a4603b7d3e9a40379620d39ebcb00c3aa63c00b30ba374c6
3
+ size 9942025