| column | dtype | range |
|---|---|---|
| url | stringlengths | 58 to 61 |
| repository_url | stringclasses | 1 value |
| labels_url | stringlengths | 72 to 75 |
| comments_url | stringlengths | 67 to 70 |
| events_url | stringlengths | 65 to 68 |
| html_url | stringlengths | 46 to 51 |
| id | int64 | 599M to 2.12B |
| node_id | stringlengths | 18 to 32 |
| number | int64 | 1 to 6.65k |
| title | stringlengths | 1 to 290 |
| user | dict | |
| labels | listlengths | 0 to 4 |
| state | stringclasses | 2 values |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | listlengths | 0 to 4 |
| milestone | dict | |
| comments | int64 | 0 to 70 |
| created_at | unknown | |
| updated_at | unknown | |
| closed_at | unknown | |
| author_association | stringclasses | 3 values |
| active_lock_reason | float64 | |
| draft | float64 | 0 to 1 |
| pull_request | dict | |
| body | stringlengths | 0 to 228k |
| reactions | dict | |
| timeline_url | stringlengths | 67 to 70 |
| performed_via_github_app | float64 | |
| state_reason | stringclasses | 3 values |
| is_pull_request | bool | 2 classes |
https://api.github.com/repos/huggingface/datasets/issues/3063
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3063/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3063/comments
https://api.github.com/repos/huggingface/datasets/issues/3063/events
https://github.com/huggingface/datasets/issues/3063
1,023,588,297
I_kwDODunzps49ArfJ
3,063
Windows CI is unable to test streaming properly because of SSL issues
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "color": "fef2c0", "default": false, "description": "", "id": 3287858981, "name": "streaming", "node_id": "MDU6TGFiZWwzMjg3ODU4OTgx", "url": "https://api.github.com/repos/huggingface/datasets/labels/streaming" } ]
closed
false
null
[]
null
2
"2021-10-12T09:33:40Z"
"2022-08-24T14:59:29Z"
"2022-08-24T14:59:29Z"
MEMBER
null
null
null
In https://github.com/huggingface/datasets/pull/3041 the Windows tests were skipped because of SSL issues with moon-staging.huggingface.co:443.

The issue appears only on Windows with asyncio: on Linux it works, with `requests` it works as well, and against the production environment huggingface.co it also works.

To reproduce on Windows:
```python
import fsspec

# use any URL to a file in a dataset repo
url = "https://moon-staging.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/my-dataset-16242824690709/resolve/main/.gitattributes"
fsspec.open(url).open()
```
raises
```python
FileNotFoundError: https://moon-staging.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/my-dataset-16242824690709/resolve/main/.gitattributes
```
because of
```python
aiohttp.client_exceptions.ClientConnectorCertificateError: Cannot connect to host moon-staging.huggingface.co:443 ssl:True [SSLCertVerificationError: (1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: certificate has expired (_ssl.c:1131)')]
```
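To narrow this down, a minimal aiohttp-only check (a sketch, not from the original report; it reuses the staging URL above) confirms whether the failure happens at the SSL layer rather than in fsspec, since fsspec's HTTP filesystem uses aiohttp under the hood:

```python
import asyncio
import aiohttp

URL = "https://moon-staging.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/my-dataset-16242824690709/resolve/main/.gitattributes"

async def check(url: str) -> None:
    # If this raises ClientConnectorCertificateError, the problem is the
    # certificate chain itself, independent of fsspec.
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                print("status:", resp.status)
    except aiohttp.ClientConnectorCertificateError as exc:
        print("certificate verification failed:", exc)

asyncio.run(check(URL))
```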
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3063/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3063/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3062
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3062/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3062/comments
https://api.github.com/repos/huggingface/datasets/issues/3062/events
https://github.com/huggingface/datasets/pull/3062
1,023,209,592
PR_kwDODunzps4tCxfK
3,062
Update summary on PyPi beyond NLP
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
0
"2021-10-11T23:27:46Z"
"2021-10-13T08:55:54Z"
"2021-10-13T08:55:54Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3062.diff", "html_url": "https://github.com/huggingface/datasets/pull/3062", "merged_at": "2021-10-13T08:55:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/3062.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3062" }
More than just NLP now
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3062/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3062/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3061
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3061/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3061/comments
https://api.github.com/repos/huggingface/datasets/issues/3061/events
https://github.com/huggingface/datasets/issues/3061
1,023,103,119
I_kwDODunzps48-1CP
3,061
Feature request: add leave=True to dataset.map to enable nested tqdm bars (and whilst we're at it, couldn't we get a way to directly access the tqdm underneath?)
{ "avatar_url": "https://avatars.githubusercontent.com/u/69694610?v=4", "events_url": "https://api.github.com/users/BenoitDalFerro/events{/privacy}", "followers_url": "https://api.github.com/users/BenoitDalFerro/followers", "following_url": "https://api.github.com/users/BenoitDalFerro/following{/other_user}", "gists_url": "https://api.github.com/users/BenoitDalFerro/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/BenoitDalFerro", "id": 69694610, "login": "BenoitDalFerro", "node_id": "MDQ6VXNlcjY5Njk0NjEw", "organizations_url": "https://api.github.com/users/BenoitDalFerro/orgs", "received_events_url": "https://api.github.com/users/BenoitDalFerro/received_events", "repos_url": "https://api.github.com/users/BenoitDalFerro/repos", "site_admin": false, "starred_url": "https://api.github.com/users/BenoitDalFerro/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BenoitDalFerro/subscriptions", "type": "User", "url": "https://api.github.com/users/BenoitDalFerro" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
2
"2021-10-11T20:49:49Z"
"2021-10-22T09:34:10Z"
null
NONE
null
null
null
**A clear and concise description of what you want to happen.**
It would be so nice to be able to nest HuggingFace `Datasets.map()` progress bars in the grander scheme of things, and whilst we're at it, why not other functions too.

**Describe alternatives you've considered**
By the way, is there not a way to interact directly with the underlying tqdm module, `**kwargs`-ish?

**Additional context**
Furthering tqdm integration #2374 and huggingface/transformers#11797, solved by huggingface/transformers#12226, which provided the tqdm description as `desc=`.

@sgugger @bhavitvyamalik
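For illustration, the requested behavior maps onto plain tqdm's `leave` flag; a minimal sketch of nested bars outside of `datasets` (loop bounds and descriptions are placeholders):

```python
from tqdm.auto import tqdm

# The outer bar persists; each inner bar is erased when its loop finishes.
# This is exactly what leave=True/False controls and what nested
# Dataset.map() bars would need.
for epoch in tqdm(range(3), desc="epochs"):
    for step in tqdm(range(100), desc=f"epoch {epoch}", leave=False):
        pass
```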
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3061/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3061/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3060
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3060/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3060/comments
https://api.github.com/repos/huggingface/datasets/issues/3060/events
https://github.com/huggingface/datasets/issues/3060
1,022,936,396
I_kwDODunzps48-MVM
3,060
load_dataset('openwebtext') yields "Compressed file ended before the end-of-stream marker was reached"
{ "avatar_url": "https://avatars.githubusercontent.com/u/8942987?v=4", "events_url": "https://api.github.com/users/RylanSchaeffer/events{/privacy}", "followers_url": "https://api.github.com/users/RylanSchaeffer/followers", "following_url": "https://api.github.com/users/RylanSchaeffer/following{/other_user}", "gists_url": "https://api.github.com/users/RylanSchaeffer/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RylanSchaeffer", "id": 8942987, "login": "RylanSchaeffer", "node_id": "MDQ6VXNlcjg5NDI5ODc=", "organizations_url": "https://api.github.com/users/RylanSchaeffer/orgs", "received_events_url": "https://api.github.com/users/RylanSchaeffer/received_events", "repos_url": "https://api.github.com/users/RylanSchaeffer/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RylanSchaeffer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RylanSchaeffer/subscriptions", "type": "User", "url": "https://api.github.com/users/RylanSchaeffer" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
2
"2021-10-11T17:05:27Z"
"2021-10-28T05:52:21Z"
"2021-10-28T05:52:21Z"
NONE
null
null
null
## Describe the bug
When I try `load_dataset('openwebtext')`, I receive an "EOFError: Compressed file ended before the end-of-stream marker was reached" error.

## Steps to reproduce the bug
```
from datasets import load_dataset
dataset = load_dataset('openwebtext')
```

## Expected results
I expect the `dataset` variable to be properly constructed.

## Actual results
```
  File "/home/rschaef/CoCoSci-Language-Distillation/distillation_v2/ratchet_learning/tasks/base.py", line 37, in create_dataset
    dataset_str,
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/load.py", line 1117, in load_dataset
    use_auth_token=use_auth_token,
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/builder.py", line 637, in download_and_prepare
    dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/builder.py", line 704, in _download_and_prepare
    split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
  File "/home/rschaef/.cache/huggingface/modules/datasets_modules/datasets/openwebtext/85b3ae7051d2d72e7c5fdf6dfb462603aaa26e9ed506202bf3a24d261c6c40a1/openwebtext.py", line 61, in _split_generators
    dl_dir = dl_manager.download_and_extract(_URL)
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/utils/download_manager.py", line 284, in download_and_extract
    return self.extract(self.download(url_or_urls))
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/utils/download_manager.py", line 261, in extract
    partial(cached_path, download_config=download_config), path_or_paths, num_proc=num_proc, disable_tqdm=False
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/utils/py_utils.py", line 197, in map_nested
    return function(data_struct)
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 316, in cached_path
    output_path, force_extract=download_config.force_extract
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/utils/extract.py", line 40, in extract
    self.extractor.extract(input_path, output_path, extractor=extractor)
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/utils/extract.py", line 179, in extract
    return extractor.extract(input_path, output_path)
  File "/home/rschaef/CoCoSci-Language-Distillation/cocosci/lib/python3.6/site-packages/datasets/utils/extract.py", line 53, in extract
    tar_file.extractall(output_path)
  File "/usr/lib/python3.6/tarfile.py", line 2010, in extractall
    numeric_owner=numeric_owner)
  File "/usr/lib/python3.6/tarfile.py", line 2052, in extract
    numeric_owner=numeric_owner)
  File "/usr/lib/python3.6/tarfile.py", line 2122, in _extract_member
    self.makefile(tarinfo, targetpath)
  File "/usr/lib/python3.6/tarfile.py", line 2171, in makefile
    copyfileobj(source, target, tarinfo.size, ReadError, bufsize)
  File "/usr/lib/python3.6/tarfile.py", line 249, in copyfileobj
    buf = src.read(bufsize)
  File "/usr/lib/python3.6/lzma.py", line 200, in read
    return self._buffer.read(size)
  File "/usr/lib/python3.6/_compression.py", line 68, in readinto
    data = self.read(len(byte_view))
  File "/usr/lib/python3.6/_compression.py", line 99, in read
    raise EOFError("Compressed file ended before the "
EOFError: Compressed file ended before the end-of-stream marker was reached
```

## Environment info
- `datasets` version: 1.12.1
- Platform: Linux-4.4.0-173-generic-x86_64-with-Ubuntu-16.04-xenial
- Python version: 3.6.10
- PyArrow version: 5.0.0
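This error usually points to a truncated archive in the local cache. A hedged workaround (an assumption, not confirmed in this thread) is to force a fresh download so the partial file is replaced:

```python
from datasets import load_dataset

# Re-fetch the archive instead of reusing the possibly truncated cached copy.
dataset = load_dataset("openwebtext", download_mode="force_redownload")
```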
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3060/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3060/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3059
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3059/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3059/comments
https://api.github.com/repos/huggingface/datasets/issues/3059/events
https://github.com/huggingface/datasets/pull/3059
1,022,620,057
PR_kwDODunzps4tA54w
3,059
Fix task reloading from cache
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
0
"2021-10-11T12:03:04Z"
"2021-10-11T12:23:39Z"
"2021-10-11T12:23:39Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3059.diff", "html_url": "https://github.com/huggingface/datasets/pull/3059", "merged_at": "2021-10-11T12:23:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/3059.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3059" }
When reloading a dataset from the cache while doing `map`, the task templates were kept instead of being updated to reflect the output of the `map` function. This is an issue because we drop the task templates that are no longer compatible after `map`, for example if a column of the template was removed.

This PR fixes this and, for convenience, introduces a decorator `@transmit_tasks` that takes care of doing this verification, similar to the `@transmit_format` decorator.

This should fix issue https://github.com/huggingface/datasets/issues/3047

cc @sgugger
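A generic sketch of the decorator pattern described above, using hypothetical attribute names (`task_templates`, `column_mapping`); the real `@transmit_tasks` lives in the `datasets` internals and may differ:

```python
import functools

def transmit_tasks(func):
    # Hypothetical sketch: after an operation such as map() returns a new
    # dataset, drop task templates whose columns no longer exist.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        out = func(self, *args, **kwargs)
        templates = getattr(out.info, "task_templates", None)
        if templates:
            out.info.task_templates = [
                t for t in templates
                if all(col in out.column_names for col in t.column_mapping)
            ]
        return out
    return wrapper
```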
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 2, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/3059/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3059/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3058
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3058/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3058/comments
https://api.github.com/repos/huggingface/datasets/issues/3058/events
https://github.com/huggingface/datasets/issues/3058
1,022,612,664
I_kwDODunzps4889S4
3,058
Datasets wikipedia and bookcorpusopen cannot be fetched from the dataloader.
{ "avatar_url": "https://avatars.githubusercontent.com/u/35392624?v=4", "events_url": "https://api.github.com/users/hobbitlzy/events{/privacy}", "followers_url": "https://api.github.com/users/hobbitlzy/followers", "following_url": "https://api.github.com/users/hobbitlzy/following{/other_user}", "gists_url": "https://api.github.com/users/hobbitlzy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hobbitlzy", "id": 35392624, "login": "hobbitlzy", "node_id": "MDQ6VXNlcjM1MzkyNjI0", "organizations_url": "https://api.github.com/users/hobbitlzy/orgs", "received_events_url": "https://api.github.com/users/hobbitlzy/received_events", "repos_url": "https://api.github.com/users/hobbitlzy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hobbitlzy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hobbitlzy/subscriptions", "type": "User", "url": "https://api.github.com/users/hobbitlzy" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
2
"2021-10-11T11:54:59Z"
"2022-01-19T14:03:49Z"
"2022-01-19T14:03:49Z"
NONE
null
null
null
## Describe the bug
I was using previous versions of `transformers` and `datasets`, and the `wikipedia` dataset could be used successfully. Recently I upgraded them to the newest versions and found they raise errors. I also tried other datasets: `wikitext` works, while `bookcorpusopen` raises the same errors as `wikipedia`.

## Steps to reproduce the bug
Run `run_mlm_no_trainer.py` with the script given at this [link](https://github.com/huggingface/transformers/tree/master/examples/pytorch/language-modeling), changing the dataset from wikitext to wikipedia or bookcorpusopen. BTW, `transformers` is at version 4.11.3.

## Expected results
Data batches are fetched from the data loader and training proceeds.

## Actual results
The first attempt to fetch a data batch raises an error:
```
Traceback (most recent call last):
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 705, in convert_to_tensors
    tensor = as_tensor(value)
ValueError: too many dimensions 'str'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "src/original_run_mlm_no_trainer.py", line 528, in <module>
    main()
  File "src/original_run_mlm_no_trainer.py", line 488, in main
    for step, batch in enumerate(train_dataloader):
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/accelerate/data_loader.py", line 303, in __iter__
    for batch in super().__iter__():
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 517, in __next__
    data = self._next_data()
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 557, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 47, in fetch
    return self.collate_fn(data)
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/transformers/data/data_collator.py", line 41, in __call__
    return self.torch_call(features)
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/transformers/data/data_collator.py", line 671, in torch_call
    batch = self.tokenizer.pad(examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of)
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 2774, in pad
    return BatchEncoding(batch_outputs, tensor_type=return_tensors)
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 210, in __init__
    self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)
  File "/home/zyli/anaconda3/envs/LatestStacking/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 722, in convert_to_tensors
    "Unable to create tensor, you should probably activate truncation and/or padding "
ValueError: Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length.
```

## Environment info
- `datasets` version: 1.12.1
- Platform: Linux-5.8.0-59-generic-x86_64-with-debian-bullseye-sid
- Python version: 3.7.6
- PyArrow version: 5.0.0
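The `too many dimensions 'str'` failure typically means raw text columns reached the data collator. A sketch of the usual remedy (an assumption about the cause, with a placeholder model name), dropping the string columns during tokenization:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # placeholder model
raw = load_dataset("wikipedia", "20200501.en")

def tokenize(examples):
    return tokenizer(examples["text"], truncation=True, max_length=512)

# remove_columns drops the string fields ("text", "title", ...) so the
# collator only ever sees tensorizable integer features.
tokenized = raw.map(tokenize, batched=True, remove_columns=raw["train"].column_names)
```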
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3058/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3058/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3057
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3057/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3057/comments
https://api.github.com/repos/huggingface/datasets/issues/3057/events
https://github.com/huggingface/datasets/issues/3057
1,022,508,315
I_kwDODunzps488j0b
3,057
Error in per class precision computation
{ "avatar_url": "https://avatars.githubusercontent.com/u/38906722?v=4", "events_url": "https://api.github.com/users/tidhamecha2/events{/privacy}", "followers_url": "https://api.github.com/users/tidhamecha2/followers", "following_url": "https://api.github.com/users/tidhamecha2/following{/other_user}", "gists_url": "https://api.github.com/users/tidhamecha2/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tidhamecha2", "id": 38906722, "login": "tidhamecha2", "node_id": "MDQ6VXNlcjM4OTA2NzIy", "organizations_url": "https://api.github.com/users/tidhamecha2/orgs", "received_events_url": "https://api.github.com/users/tidhamecha2/received_events", "repos_url": "https://api.github.com/users/tidhamecha2/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tidhamecha2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tidhamecha2/subscriptions", "type": "User", "url": "https://api.github.com/users/tidhamecha2" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
1
"2021-10-11T10:05:19Z"
"2021-10-11T10:17:44Z"
"2021-10-11T10:16:16Z"
NONE
null
null
null
## Describe the bug
When trying to get the per-class precision values by providing `average=None`, the following error is thrown:
`ValueError: can only convert an array of size 1 to a Python scalar`

## Steps to reproduce the bug
```python
from datasets import load_dataset, load_metric
precision_metric = load_metric("precision")
predictions = [0, 2, 1, 0, 0, 1]
references = [0, 1, 2, 0, 1, 2]
results = precision_metric.compute(predictions=predictions, references=references, average=None)
```

## Expected results
`{'precision': array([0.66666667, 0., 0.])}`
as per https://github.com/huggingface/datasets/blob/master/metrics/precision/precision.py

## Actual results
```
  output = self._compute(predictions=predictions, references=references, **kwargs)
  File "~/.cache/huggingface/modules/datasets_modules/metrics/precision/94709a71c6fe37171ef49d3466fec24dee9a79846c9f176dff66a649e9811690/precision.py", line 110, in _compute
    sample_weight=sample_weight,
ValueError: can only convert an array of size 1 to a Python scalar
```

## Environment info
- `datasets` version: 1.12.1
- Platform: linux
- Python version: 3.6.9
- PyArrow version: 5.0.0
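Since the metric wraps scikit-learn, the expected output can be sanity-checked against `sklearn` directly (this check is an addition, not part of the original report):

```python
from sklearn.metrics import precision_score

predictions = [0, 2, 1, 0, 0, 1]
references = [0, 1, 2, 0, 1, 2]

# average=None returns one precision value per class instead of a scalar,
# which is what the metric should pass through.
print(precision_score(references, predictions, average=None))
# -> array([0.66666667, 0.        , 0.        ])
```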
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3057/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3057/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3056
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3056/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3056/comments
https://api.github.com/repos/huggingface/datasets/issues/3056/events
https://github.com/huggingface/datasets/pull/3056
1,022,345,564
PR_kwDODunzps4tAB9h
3,056
Fix meteor metric for version >= 3.6.4
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-11T07:11:44Z"
"2021-10-11T07:29:20Z"
"2021-10-11T07:29:19Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3056.diff", "html_url": "https://github.com/huggingface/datasets/pull/3056", "merged_at": "2021-10-11T07:29:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/3056.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3056" }
After `nltk` update, the meteor metric expects pre-tokenized inputs (breaking change). This PR fixes this issue, while maintaining compatibility with older versions.
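For reference, a sketch of what the breaking change looks like when calling `nltk` directly (assuming nltk >= 3.6.4; the sentences are just sample data):

```python
from nltk.translate.meteor_score import meteor_score  # needs nltk.download("wordnet")

reference = "It is a guide to action which ensures that the military always obeys the commands of the party"
hypothesis = "It is a guide to action that ensures that the military will forever heed Party commands"

# Newer nltk expects pre-tokenized inputs (Iterable[str]); passing raw
# strings raises the TypeError seen in the CI failure below.
score = meteor_score([reference.split()], hypothesis.split())
print(round(score, 4))
```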
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3056/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3056/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3055
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3055/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3055/comments
https://api.github.com/repos/huggingface/datasets/issues/3055/events
https://github.com/huggingface/datasets/issues/3055
1,022,319,238
I_kwDODunzps4871qG
3,055
CI test suite fails after meteor metric update
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
0
"2021-10-11T06:37:12Z"
"2021-10-11T07:30:31Z"
"2021-10-11T07:30:31Z"
MEMBER
null
null
null
## Describe the bug
CI test suite fails: https://app.circleci.com/pipelines/github/huggingface/datasets/8110/workflows/f059ba43-9154-4632-bebb-82318447ddc9/jobs/50010

Stack trace:
```
___________________ LocalMetricTest.test_load_metric_meteor ____________________
[gw1] linux -- Python 3.6.15 /home/circleci/.pyenv/versions/3.6.15/bin/python3.6

self = <tests.test_metric_common.LocalMetricTest testMethod=test_load_metric_meteor>
metric_name = 'meteor'

    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(datasets.load.prepare_module(os.path.join("metrics", metric_name))[0])
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue("predictions" in parameters)
        self.assertTrue("references" in parameters)
        self.assertTrue(all([p.kind != p.VAR_KEYWORD for p in parameters.values()]))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
>               results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)

tests/test_metric_common.py:75:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../.pyenv/versions/3.6.15/lib/python3.6/doctest.py:1951: in testmod
    runner.run(test)
../.pyenv/versions/3.6.15/lib/python3.6/doctest.py:1839: in run
    r = DocTestRunner.run(self, test, compileflags, out, False)
../.pyenv/versions/3.6.15/lib/python3.6/doctest.py:1476: in run
    return self.__run(test, compileflags, out)
../.pyenv/versions/3.6.15/lib/python3.6/doctest.py:1382: in __run
    exception)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <doctest.DebugRunner object at 0x7f4c26bd3da0>
out = <built-in method write of _io.TextIOWrapper object at 0x7f51a21852d0>
test = <DocTest datasets_modules.datasets.meteor.6201bb45d2c144ea7963680949d20f523d74a741fa0f8a806f836e6caa5245d7.meteor.Mete...ets_modules/datasets/meteor/6201bb45d2c144ea7963680949d20f523d74a741fa0f8a806f836e6caa5245d7/meteor.py:87 (5 examples)>
example = <doctest.Example object at 0x7f4c26bd3eb8>
exc_info = (<class 'TypeError'>, TypeError('"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): It is a guide to action which ensures that the military always obeys the commands of the party',), <traceback object at 0x7f4cd01afec8>)

    def report_unexpected_exception(self, out, test, example, exc_info):
>       raise UnexpectedException(test, example, exc_info)
E       doctest.UnexpectedException: <DocTest datasets_modules.datasets.meteor.6201bb45d2c144ea7963680949d20f523d74a741fa0f8a806f836e6caa5245d7.meteor.Meteor from /tmp/pytest-of-circleci/pytest-0/popen-gw1/cache/modules/datasets_modules/datasets/meteor/6201bb45d2c144ea7963680949d20f523d74a741fa0f8a806f836e6caa5245d7/meteor.py:87 (5 examples)>

../.pyenv/versions/3.6.15/lib/python3.6/doctest.py:1845: UnexpectedException
```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3055/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3055/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3054
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3054/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3054/comments
https://api.github.com/repos/huggingface/datasets/issues/3054/events
https://github.com/huggingface/datasets/pull/3054
1,022,108,186
PR_kwDODunzps4s_TmE
3,054
Update Biosses
{ "avatar_url": "https://avatars.githubusercontent.com/u/6764450?v=4", "events_url": "https://api.github.com/users/bwang482/events{/privacy}", "followers_url": "https://api.github.com/users/bwang482/followers", "following_url": "https://api.github.com/users/bwang482/following{/other_user}", "gists_url": "https://api.github.com/users/bwang482/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bwang482", "id": 6764450, "login": "bwang482", "node_id": "MDQ6VXNlcjY3NjQ0NTA=", "organizations_url": "https://api.github.com/users/bwang482/orgs", "received_events_url": "https://api.github.com/users/bwang482/received_events", "repos_url": "https://api.github.com/users/bwang482/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bwang482/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bwang482/subscriptions", "type": "User", "url": "https://api.github.com/users/bwang482" }
[]
closed
false
null
[]
null
0
"2021-10-10T22:25:12Z"
"2021-10-13T09:04:27Z"
"2021-10-13T09:04:27Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3054.diff", "html_url": "https://github.com/huggingface/datasets/pull/3054", "merged_at": "2021-10-13T09:04:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/3054.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3054" }
Fix variable naming
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3054/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3054/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3053
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3053/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3053/comments
https://api.github.com/repos/huggingface/datasets/issues/3053/events
https://github.com/huggingface/datasets/issues/3053
1,022,076,905
I_kwDODunzps4866fp
3,053
load_dataset('the_pile_openwebtext2') produces ArrowInvalid, value too large to fit in C integer type
{ "avatar_url": "https://avatars.githubusercontent.com/u/3458792?v=4", "events_url": "https://api.github.com/users/davidbau/events{/privacy}", "followers_url": "https://api.github.com/users/davidbau/followers", "following_url": "https://api.github.com/users/davidbau/following{/other_user}", "gists_url": "https://api.github.com/users/davidbau/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/davidbau", "id": 3458792, "login": "davidbau", "node_id": "MDQ6VXNlcjM0NTg3OTI=", "organizations_url": "https://api.github.com/users/davidbau/orgs", "received_events_url": "https://api.github.com/users/davidbau/received_events", "repos_url": "https://api.github.com/users/davidbau/repos", "site_admin": false, "starred_url": "https://api.github.com/users/davidbau/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/davidbau/subscriptions", "type": "User", "url": "https://api.github.com/users/davidbau" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
5
"2021-10-10T19:55:21Z"
"2023-02-24T14:02:20Z"
"2023-02-24T14:02:20Z"
NONE
null
null
null
## Describe the bug
When loading `the_pile_openwebtext2`, we get the error `pyarrow.lib.ArrowInvalid: Value 2111 too large to fit in C integer type`.

## Steps to reproduce the bug
```python
import datasets
ds = datasets.load_dataset('the_pile_openwebtext2')
```

## Expected results
Should download the dataset, convert it to an arrow file, and return a working Dataset object.

## Actual results
The download works, but conversion to the arrow file fails as follows:
```
>>> ds = datasets.load_dataset('the_pile_openwebtext2')
Downloading and preparing dataset openwebtext2/plain_text (download: 27.33 GiB, generated: 63.86 GiB, post-processed: Unknown size, total: 91.19 GiB) to /home/davidbau/.cache/huggingface/datasets/openwebtext2/plain_text/1.0.0/c48ec73ba3483bac673463f48f67e9a4fd8cb49a9d6ec4fb957f0b424b97cf25...
Traceback (most recent call last):
  File "/home/davidbau/.conda/envs/tenv/lib/python3.9/site-packages/datasets/builder.py", line 1133, in _prepare_split
    writer.write(example, key)
  File "/home/davidbau/.conda/envs/tenv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 366, in write
    self.write_examples_on_file()
  File "/home/davidbau/.conda/envs/tenv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 311, in write_examples_on_file
    pa_array = pa.array(typed_sequence)
  File "pyarrow/array.pxi", line 222, in pyarrow.lib.array
  File "pyarrow/array.pxi", line 110, in pyarrow.lib._handle_arrow_array_protocol
  File "/home/davidbau/.conda/envs/tenv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 115, in __arrow_array__
    out = pa.array(cast_to_python_objects(self.data, only_1d_for_numpy=True), type=type)
  File "pyarrow/array.pxi", line 305, in pyarrow.lib.array
  File "pyarrow/array.pxi", line 39, in pyarrow.lib._sequence_to_array
  File "pyarrow/error.pxi", line 122, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 84, in pyarrow.lib.check_status
pyarrow.lib.ArrowInvalid: Value 2111 too large to fit in C integer type
```

## Environment info
- `datasets` version:
- Platform: Ubuntu 20.04
- Python version: python 3.9
- PyArrow version: 3.0.0
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3053/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3053/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3052
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3052/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3052/comments
https://api.github.com/repos/huggingface/datasets/issues/3052/events
https://github.com/huggingface/datasets/issues/3052
1,021,944,435
I_kwDODunzps486aJz
3,052
load_dataset cannot download the data and hangs on forever if cache dir specified
{ "avatar_url": "https://avatars.githubusercontent.com/u/69694610?v=4", "events_url": "https://api.github.com/users/BenoitDalFerro/events{/privacy}", "followers_url": "https://api.github.com/users/BenoitDalFerro/followers", "following_url": "https://api.github.com/users/BenoitDalFerro/following{/other_user}", "gists_url": "https://api.github.com/users/BenoitDalFerro/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/BenoitDalFerro", "id": 69694610, "login": "BenoitDalFerro", "node_id": "MDQ6VXNlcjY5Njk0NjEw", "organizations_url": "https://api.github.com/users/BenoitDalFerro/orgs", "received_events_url": "https://api.github.com/users/BenoitDalFerro/received_events", "repos_url": "https://api.github.com/users/BenoitDalFerro/repos", "site_admin": false, "starred_url": "https://api.github.com/users/BenoitDalFerro/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BenoitDalFerro/subscriptions", "type": "User", "url": "https://api.github.com/users/BenoitDalFerro" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
1
"2021-10-10T10:31:36Z"
"2021-10-11T10:57:09Z"
"2021-10-11T10:56:36Z"
NONE
null
null
null
## Describe the bug
After updating `datasets`, code that had run just fine for ages began to fail. Specifying `load_dataset`'s optional `cache_dir` argument on a Windows 10 machine results in the data download hanging forever. The same call without `cache_dir` works just fine. Surprisingly, the exact same code runs perfectly fine on a Linux docker instance running in the cloud. Unfortunately I updated Windows at the same time, and I can't remember which version of `datasets` was running in my conda environment prior to the update, otherwise I would have tried both to check this out. :(

## Steps to reproduce the bug
```python
cache_dir = 'c:/data/datasets'
dataset = load_dataset('wikipedia', '20200501.en', split='train', cache_dir=cache_dir)
```
Note that the exact same code without the `cache_dir` argument works perfectly fine:
```python
cache_dir = 'c:/data/datasets'
dataset = load_dataset('wikipedia', '20200501.en', split='train')
```

## Expected results
Downloads the dataset and handles the cache in the `cache_dir` directory.

## Actual results
The data download keeps hanging forever, with **NO TRACEBACK**!

## Environment info
- `datasets` version: 1.12.1
- Platform: Windows-10-10.0.19042-SP0
- Python version: 3.8.11
- PyArrow version: 3.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3052/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3052/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3051
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3051/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3051/comments
https://api.github.com/repos/huggingface/datasets/issues/3051/events
https://github.com/huggingface/datasets/issues/3051
1,021,852,234
I_kwDODunzps486DpK
3,051
Non-Matching Checksum Error with crd3 dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8942987?v=4", "events_url": "https://api.github.com/users/RylanSchaeffer/events{/privacy}", "followers_url": "https://api.github.com/users/RylanSchaeffer/followers", "following_url": "https://api.github.com/users/RylanSchaeffer/following{/other_user}", "gists_url": "https://api.github.com/users/RylanSchaeffer/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RylanSchaeffer", "id": 8942987, "login": "RylanSchaeffer", "node_id": "MDQ6VXNlcjg5NDI5ODc=", "organizations_url": "https://api.github.com/users/RylanSchaeffer/orgs", "received_events_url": "https://api.github.com/users/RylanSchaeffer/received_events", "repos_url": "https://api.github.com/users/RylanSchaeffer/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RylanSchaeffer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RylanSchaeffer/subscriptions", "type": "User", "url": "https://api.github.com/users/RylanSchaeffer" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
2
"2021-10-10T01:32:43Z"
"2022-03-15T15:54:26Z"
"2022-03-15T15:54:26Z"
NONE
null
null
null
## Describe the bug
When I try loading the crd3 dataset (https://huggingface.co/datasets/crd3), an error is thrown.

## Steps to reproduce the bug
```python
dataset = load_dataset('crd3', split='train')
```

## Expected results
I expect no error to be thrown.

## Actual results
A non-matching checksum error is thrown:
```
datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files:
['https://github.com/RevanthRameshkumar/CRD3/archive/master.zip']
```

## Environment info
- `datasets` version: 1.12.1
- Platform: Linux-4.4.0-173-generic-x86_64-with-Ubuntu-16.04-xenial
- Python version: 3.6.10
- PyArrow version: 5.0.0
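A common stopgap while the recorded checksums were stale (hedged: this skips the integrity check rather than fixing it) was to bypass verification:

```python
from datasets import load_dataset

# ignore_verifications skips checksum/size checks; only reasonable when the
# upstream archive changed legitimately and the recorded checksums are stale.
dataset = load_dataset("crd3", split="train", ignore_verifications=True)
```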
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3051/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3051/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3050
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3050/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3050/comments
https://api.github.com/repos/huggingface/datasets/issues/3050/events
https://github.com/huggingface/datasets/pull/3050
1,021,772,622
PR_kwDODunzps4s-anK
3,050
Fix streaming: catch Timeout error
{ "avatar_url": "https://avatars.githubusercontent.com/u/715491?v=4", "events_url": "https://api.github.com/users/borisdayma/events{/privacy}", "followers_url": "https://api.github.com/users/borisdayma/followers", "following_url": "https://api.github.com/users/borisdayma/following{/other_user}", "gists_url": "https://api.github.com/users/borisdayma/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/borisdayma", "id": 715491, "login": "borisdayma", "node_id": "MDQ6VXNlcjcxNTQ5MQ==", "organizations_url": "https://api.github.com/users/borisdayma/orgs", "received_events_url": "https://api.github.com/users/borisdayma/received_events", "repos_url": "https://api.github.com/users/borisdayma/repos", "site_admin": false, "starred_url": "https://api.github.com/users/borisdayma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borisdayma/subscriptions", "type": "User", "url": "https://api.github.com/users/borisdayma" }
[]
closed
false
null
[]
null
5
"2021-10-09T18:19:20Z"
"2021-10-12T15:28:18Z"
"2021-10-11T09:35:38Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3050.diff", "html_url": "https://github.com/huggingface/datasets/pull/3050", "merged_at": "2021-10-11T09:35:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/3050.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3050" }
Catches `Timeout` errors during streaming. Fixes #3049.
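A minimal sketch of the retry pattern this PR describes, with an assumed helper name and backoff policy (the actual implementation lives in `streaming_download_manager.py`):

```python
import asyncio
import time

import aiohttp

def read_with_retries(read, *args, max_retries=3, **kwargs):
    # Retry on TimeoutError as well as ClientError; before this fix only
    # ClientError was caught, so long-running streams died on timeouts.
    for attempt in range(1, max_retries + 1):
        try:
            return read(*args, **kwargs)
        except (aiohttp.ClientError, asyncio.TimeoutError):
            if attempt == max_retries:
                raise
            time.sleep(2 ** attempt)  # simple exponential backoff
```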
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3050/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3050/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3049
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3049/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3049/comments
https://api.github.com/repos/huggingface/datasets/issues/3049/events
https://github.com/huggingface/datasets/issues/3049
1,021,770,008
I_kwDODunzps485vkY
3,049
TimeoutError during streaming
{ "avatar_url": "https://avatars.githubusercontent.com/u/715491?v=4", "events_url": "https://api.github.com/users/borisdayma/events{/privacy}", "followers_url": "https://api.github.com/users/borisdayma/followers", "following_url": "https://api.github.com/users/borisdayma/following{/other_user}", "gists_url": "https://api.github.com/users/borisdayma/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/borisdayma", "id": 715491, "login": "borisdayma", "node_id": "MDQ6VXNlcjcxNTQ5MQ==", "organizations_url": "https://api.github.com/users/borisdayma/orgs", "received_events_url": "https://api.github.com/users/borisdayma/received_events", "repos_url": "https://api.github.com/users/borisdayma/repos", "site_admin": false, "starred_url": "https://api.github.com/users/borisdayma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borisdayma/subscriptions", "type": "User", "url": "https://api.github.com/users/borisdayma" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
0
"2021-10-09T18:06:51Z"
"2021-10-11T09:35:38Z"
"2021-10-11T09:35:38Z"
CONTRIBUTOR
null
null
null
## Describe the bug
I got a `TimeoutError` after streaming for about 10h.

## Steps to reproduce the bug
Very long code, but we could do a test of streaming data indefinitely, though the error may take a while to appear.

## Expected results
This error was not expected in the code, which considers only `ClientError` but not `TimeoutError`. See [this line](https://github.com/huggingface/datasets/blob/2814fbd0e18150be409f10804670e98d9ecb87d4/src/datasets/utils/streaming_download_manager.py#L129). Based on the traceback, it looks like the `TimeoutError` was not captured.

## Actual results
```
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/fsspec/asyn.py", line 25, in _runner
    result[0] = await coro
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/fsspec/implementations/http.py", line 614, in async_fetch_range
    out = await r.read()
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/aiohttp/client_reqrep.py", line 1032, in read
    self._body = await self.content.read()
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/aiohttp/streams.py", line 370, in read
    block = await self.readany()
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/aiohttp/streams.py", line 392, in readany
    await self._wait("readany")
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/aiohttp/streams.py", line 306, in _wait
    await waiter
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/aiohttp/helpers.py", line 656, in __exit__
    raise asyncio.TimeoutError from None
asyncio.exceptions.TimeoutError

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/koush/dalle-mini/dev/seq2seq/run_seq2seq_flax.py", line 1027, in <module>
    main()
  File "/home/koush/dalle-mini/dev/seq2seq/run_seq2seq_flax.py", line 991, in main
    for batch in tqdm(
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/tqdm/std.py", line 1180, in __iter__
    for obj in iterable:
  File "/home/koush/dalle-mini/dev/seq2seq/run_seq2seq_flax.py", line 376, in data_loader_streaming
    for item in dataset:
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 341, in __iter__
    for key, example in self._iter():
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 338, in _iter
    yield from ex_iterable
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 179, in __iter__
    key_examples_list = [(key, example)] + [
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 179, in <listcomp>
    key_examples_list = [(key, example)] + [
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 176, in __iter__
    for key, example in iterator:
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 225, in __iter__
    for x in self.ex_iterable:
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 99, in __iter__
    for key, example in self.generate_examples_fn(**kwargs_with_shuffled_shards):
  File "/home/koush/datasets/src/datasets/iterable_dataset.py", line 287, in wrapper
    for key, table in generate_tables_fn(**kwargs):
  File "/home/koush/datasets/src/datasets/packaged_modules/json/json.py", line 107, in _generate_tables
    batch = f.read(self.config.chunksize)
  File "/home/koush/datasets/src/datasets/utils/streaming_download_manager.py", line 126, in read_with_retries
    out = read(*args, **kwargs)
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/fsspec/implementations/http.py", line 572, in read
    return super().read(length)
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/fsspec/spec.py", line 1533, in read
    out = self.cache._fetch(self.loc, self.loc + length)
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/fsspec/caching.py", line 390, in _fetch
    self.cache = self.fetcher(start, bend)
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/fsspec/asyn.py", line 91, in wrapper
    return sync(self.loop, func, *args, **kwargs)
  File "/home/koush/.pyenv/versions/dev/lib/python3.9/site-packages/fsspec/asyn.py", line 69, in sync
    raise FSTimeoutError from return_result
fsspec.exceptions.FSTimeoutError
```

## Environment info
- `datasets` version: 1.12.2.dev0
- Platform: Linux-5.4.0-1043-gcp-x86_64-with-glibc2.31
- Python version: 3.9.7
- PyArrow version: 5.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3049/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3049/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3048
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3048/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3048/comments
https://api.github.com/repos/huggingface/datasets/issues/3048/events
https://github.com/huggingface/datasets/issues/3048
1,021,765,661
I_kwDODunzps485ugd
3,048
Identify which shard data belongs to
{ "avatar_url": "https://avatars.githubusercontent.com/u/715491?v=4", "events_url": "https://api.github.com/users/borisdayma/events{/privacy}", "followers_url": "https://api.github.com/users/borisdayma/followers", "following_url": "https://api.github.com/users/borisdayma/following{/other_user}", "gists_url": "https://api.github.com/users/borisdayma/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/borisdayma", "id": 715491, "login": "borisdayma", "node_id": "MDQ6VXNlcjcxNTQ5MQ==", "organizations_url": "https://api.github.com/users/borisdayma/orgs", "received_events_url": "https://api.github.com/users/borisdayma/received_events", "repos_url": "https://api.github.com/users/borisdayma/repos", "site_admin": false, "starred_url": "https://api.github.com/users/borisdayma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borisdayma/subscriptions", "type": "User", "url": "https://api.github.com/users/borisdayma" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
1
"2021-10-09T17:46:35Z"
"2021-10-09T20:24:17Z"
null
CONTRIBUTOR
null
null
null
**Is your feature request related to a problem? Please describe.**
I'm training on a large dataset made of multiple sub-datasets. During training I can observe some jumps in loss which may correspond to different shards.

![image](https://user-images.githubusercontent.com/715491/136668758-521263aa-a9b2-4ad2-8d22-060b6bf86a1c.png)

My suspicion is that either:
* some of the sub-datasets are harder for the model than others
* some of the sub-datasets are not formatted properly

I'd like to identify which shards correspond to those jumps.

**Describe the solution you'd like**
It would be nice to have a key associated with each data sample or data batch containing details on where the data comes from (shard idx + item idx within the shard). This should be supported both in local and streaming mode.

**Describe alternatives you've considered**
A fix would be for me to add the details (shard id, sample id) myself as part of each data sample. The inconvenience is that it requires users to process/re-upload every dataset when they need this feature. A sketch of this alternative is shown below.
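As a sketch of that alternative for the non-streaming case: `with_indices=True` is a real `map()` argument, but the file names and the `shard`/`item_idx` column names below are purely illustrative:

```python
from datasets import load_dataset, concatenate_datasets

# Hypothetical example: tag every sample with the sub-dataset it came from
# and its index within that sub-dataset, then concatenate.
subsets = {
    "subset_a": load_dataset("json", data_files="a.jsonl", split="train"),  # placeholder files
    "subset_b": load_dataset("json", data_files="b.jsonl", split="train"),
}
tagged = [
    ds.map(
        lambda ex, idx, name=name: {"shard": name, "item_idx": idx},
        with_indices=True,
    )
    for name, ds in subsets.items()
]
full = concatenate_datasets(tagged)
# A loss spike on a given sample can now be traced back via full[i]["shard"]
```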
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3048/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3048/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3047
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3047/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3047/comments
https://api.github.com/repos/huggingface/datasets/issues/3047/events
https://github.com/huggingface/datasets/issues/3047
1,021,360,616
I_kwDODunzps484Lno
3,047
Loading from cache a dataset for LM built from a text classification dataset sometimes errors
{ "avatar_url": "https://avatars.githubusercontent.com/u/35901082?v=4", "events_url": "https://api.github.com/users/sgugger/events{/privacy}", "followers_url": "https://api.github.com/users/sgugger/followers", "following_url": "https://api.github.com/users/sgugger/following{/other_user}", "gists_url": "https://api.github.com/users/sgugger/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sgugger", "id": 35901082, "login": "sgugger", "node_id": "MDQ6VXNlcjM1OTAxMDgy", "organizations_url": "https://api.github.com/users/sgugger/orgs", "received_events_url": "https://api.github.com/users/sgugger/received_events", "repos_url": "https://api.github.com/users/sgugger/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sgugger/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sgugger/subscriptions", "type": "User", "url": "https://api.github.com/users/sgugger" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
1
"2021-10-08T18:23:11Z"
"2021-11-03T17:13:08Z"
"2021-11-03T17:13:08Z"
CONTRIBUTOR
null
null
null
## Describe the bug
Yes, I know, that description sucks. The problem arises in the course when we build a masked language modeling dataset using the IMDB dataset. To reproduce (or at least try, since it's a bit fickle), create a dataset for masked language modeling from the IMDB dataset:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
imdb_dataset = load_dataset("imdb", split="train")

def tokenize_function(examples):
    return tokenizer(examples["text"])

tokenized_dataset = imdb_dataset.map(
    tokenize_function, batched=True, remove_columns=["text", "label"]
)

chunk_size = 128

def group_texts(examples):
    # Concatenate all texts.
    concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
    # Compute length of concatenated texts
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    # We drop the last chunk if it's smaller than chunk_size
    total_length = (total_length // chunk_size) * chunk_size
    # Split by chunks of max_len.
    result = {
        k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)]
        for k, t in concatenated_examples.items()
    }
    # Create a new labels column
    result["labels"] = result["input_ids"].copy()
    return result

lm_dataset = tokenized_dataset.map(group_texts, batched=True)
```

Until now, all is well. The problem comes when you re-execute that code, more specifically:

```python
tokenized_dataset = imdb_dataset.map(
    tokenize_function, batched=True, remove_columns=["text", "label"]
)
lm_dataset = tokenized_dataset.map(group_texts, batched=True)
```

Try several times if the bug doesn't appear instantly, or run each line at a time, ideally in a notebook/Colab, and you should get at some point:

```python
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-40-357a56ee3d53> in <module>
----> 1 lm_dataset = tokenized_dataset.map(group_texts, batched=True)

~/git/datasets/src/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc)
   1947                 new_fingerprint=new_fingerprint,
   1948                 disable_tqdm=disable_tqdm,
-> 1949                 desc=desc,
   1950             )
   1951         else:

~/git/datasets/src/datasets/arrow_dataset.py in wrapper(*args, **kwargs)
    424             }
    425             # apply actual function
--> 426             out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
    427             datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
    428             # re-apply format to the output

~/git/datasets/src/datasets/fingerprint.py in wrapper(*args, **kwargs)
    404             # Call actual function
    405
--> 406             out = func(self, *args, **kwargs)
    407
    408             # Update fingerprint of in-place transforms + update in-place history of transforms

~/git/datasets/src/datasets/arrow_dataset.py in _map_single(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, disable_tqdm, desc, cache_only)
   2138             if os.path.exists(cache_file_name) and load_from_cache_file:
   2139                 logger.warning("Loading cached processed dataset at %s", cache_file_name)
-> 2140                 info = self.info.copy()
   2141                 info.features = features
   2142                 return Dataset.from_file(cache_file_name, info=info, split=self.split)

~/git/datasets/src/datasets/info.py in copy(self)
    278
    279     def copy(self) -> "DatasetInfo":
--> 280         return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
    281
    282

~/git/datasets/src/datasets/info.py in __init__(self, description, citation, homepage, license, features, post_processed, supervised_keys, task_templates, builder_name, config_name, version, splits, download_checksums, download_size, post_processing_size, dataset_size, size_in_bytes)

~/git/datasets/src/datasets/info.py in __post_init__(self)
    177             for idx, template in enumerate(self.task_templates):
    178                 if isinstance(template, TextClassification):
--> 179                     labels = self.features[template.label_column].names
    180                     self.task_templates[idx] = TextClassification(
    181                         text_column=template.text_column, label_column=template.label_column, labels=labels

KeyError: 'label'
```

It seems that when loading from the cache, the dataset tries to access some kind of text classification template (which I imagine comes from the original dataset) and to look at a key that has since been removed.
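Until this is fixed, one workaround that seems consistent with the traceback is to drop the stale text-classification template before the second `map` call, so `DatasetInfo.copy()` no longer trips over the removed `label` column. This mutates `DatasetInfo` directly and is only a sketch:

```python
# The TextClassification template still references the removed "label"
# column, which makes DatasetInfo.copy() fail when the cached result is
# reloaded. Clearing the templates sidesteps the __post_init__ lookup.
tokenized_dataset.info.task_templates = []
lm_dataset = tokenized_dataset.map(group_texts, batched=True)
```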
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3047/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3047/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3046
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3046/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3046/comments
https://api.github.com/repos/huggingface/datasets/issues/3046/events
https://github.com/huggingface/datasets/pull/3046
1,021,021,368
PR_kwDODunzps4s8MjS
3,046
Fix MedDialog metadata JSON
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-08T12:04:40Z"
"2021-10-11T07:46:43Z"
"2021-10-11T07:46:42Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3046.diff", "html_url": "https://github.com/huggingface/datasets/pull/3046", "merged_at": "2021-10-11T07:46:42Z", "patch_url": "https://github.com/huggingface/datasets/pull/3046.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3046" }
Fix #2969.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3046/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3046/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3045
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3045/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3045/comments
https://api.github.com/repos/huggingface/datasets/issues/3045/events
https://github.com/huggingface/datasets/pull/3045
1,020,968,704
PR_kwDODunzps4s8B2b
3,045
Fix inconsistent caching behaviour in Dataset.map() with multiprocessing #3044
{ "avatar_url": "https://avatars.githubusercontent.com/u/9859840?v=4", "events_url": "https://api.github.com/users/vlievin/events{/privacy}", "followers_url": "https://api.github.com/users/vlievin/followers", "following_url": "https://api.github.com/users/vlievin/following{/other_user}", "gists_url": "https://api.github.com/users/vlievin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/vlievin", "id": 9859840, "login": "vlievin", "node_id": "MDQ6VXNlcjk4NTk4NDA=", "organizations_url": "https://api.github.com/users/vlievin/orgs", "received_events_url": "https://api.github.com/users/vlievin/received_events", "repos_url": "https://api.github.com/users/vlievin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/vlievin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vlievin/subscriptions", "type": "User", "url": "https://api.github.com/users/vlievin" }
[]
closed
false
null
[]
null
8
"2021-10-08T10:59:21Z"
"2021-10-21T16:58:32Z"
"2021-10-21T14:22:44Z"
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3045.diff", "html_url": "https://github.com/huggingface/datasets/pull/3045", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/3045.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3045" }
Fix #3044

1. A rough unit test that fails without the fix. It probably doesn't comply with your code standards, but it's just there to draft the idea.
2. A one-liner fix
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3045/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3045/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3044
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3044/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3044/comments
https://api.github.com/repos/huggingface/datasets/issues/3044/events
https://github.com/huggingface/datasets/issues/3044
1,020,869,778
I_kwDODunzps482TyS
3,044
Inconsistent caching behaviour when using `Dataset.map()` with a `new_fingerprint` and `num_proc>1`
{ "avatar_url": "https://avatars.githubusercontent.com/u/9859840?v=4", "events_url": "https://api.github.com/users/vlievin/events{/privacy}", "followers_url": "https://api.github.com/users/vlievin/followers", "following_url": "https://api.github.com/users/vlievin/following{/other_user}", "gists_url": "https://api.github.com/users/vlievin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/vlievin", "id": 9859840, "login": "vlievin", "node_id": "MDQ6VXNlcjk4NTk4NDA=", "organizations_url": "https://api.github.com/users/vlievin/orgs", "received_events_url": "https://api.github.com/users/vlievin/received_events", "repos_url": "https://api.github.com/users/vlievin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/vlievin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vlievin/subscriptions", "type": "User", "url": "https://api.github.com/users/vlievin" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
3
"2021-10-08T09:07:10Z"
"2022-09-07T21:01:36Z"
null
NONE
null
null
null
## Describe the bug
Caching does not work when using `Dataset.map()` with:
1. a function that cannot be deterministically fingerprinted
2. `num_proc>1`
3. a custom fingerprint set with the argument `new_fingerprint`.

This means that the dataset will be mapped with the function for each and every call, which does not happen if `num_proc==1`. In that case (`num_proc==1`), subsequent calls will load the transformed dataset from the cache, which is the expected behaviour. The example can easily be translated into a unit test. I have a fix and will submit a pull request asap.

## Steps to reproduce the bug
```python
import hashlib
import json
import os
from typing import Dict, Any

import numpy as np
from datasets import load_dataset, Dataset

Batch = Dict[str, Any]
filename = 'example.json'


class Transformation():
    """A transformation with a random state that cannot be fingerprinted"""

    def __init__(self):
        self.state = np.random.random()

    def __call__(self, batch: Batch) -> Batch:
        batch['x'] = [np.random.random() for _ in batch['x']]
        return batch


def generate_dataset():
    """generate a simple dataset"""
    rgn = np.random.RandomState(24)
    data = {
        'data': [{'x': float(y), 'y': -float(y)} for y in rgn.random(size=(1000,))]}
    if not os.path.exists(filename):
        with open(filename, 'w') as f:
            f.write(json.dumps(data))
    return filename


def process_dataset_with_cache(num_proc=1, remove_cache=False,
                               cache_expected_to_exist=False):
    # load the generated dataset
    dset: Dataset = next(
        iter(load_dataset('json', data_files=filename, field='data').values()))
    new_fingerprint = hashlib.md5("static-id".encode("utf8")).hexdigest()

    # get the expected cached path
    cache_path = dset._get_cache_file_path(new_fingerprint)
    if remove_cache and os.path.exists(cache_path):
        os.remove(cache_path)

    # check that the cache exists, and print a statement
    # if it was actually expected to exist
    cache_exist = os.path.exists(cache_path)
    print(f"> cache file exists={cache_exist}")
    if cache_expected_to_exist and not cache_exist:
        print("=== Cache does not exist! ====")

    # apply the transformation with the new fingerprint
    dset = dset.map(
        Transformation(),
        batched=True,
        num_proc=num_proc,
        new_fingerprint=new_fingerprint,
        desc="mapping dataset with transformation")


generate_dataset()

for num_proc in [1, 2]:
    print(f"# num_proc={num_proc}, first pass")
    # first pass to generate the cache (always create a new cache here)
    process_dataset_with_cache(remove_cache=True,
                               num_proc=num_proc,
                               cache_expected_to_exist=False)

    print(f"# num_proc={num_proc}, second pass")
    # second pass, expects the cache to exist
    process_dataset_with_cache(remove_cache=False,
                               num_proc=num_proc,
                               cache_expected_to_exist=True)

os.remove(filename)
```

## Expected results
In the above python example, with `num_proc=2`, the **cache file should exist in the second call** of `process_dataset_with_cache` ("=== Cache does not exist! ====" should not be printed). When the cache is successfully created, `map()` is called only one time.

## Actual results
In the above python example, with `num_proc=2`, the **cache does not exist in the second call** of `process_dataset_with_cache` (this results in printing "=== Cache does not exist! ===="). Because the cache doesn't exist, the `map()` method is executed a second time and the dataset is not loaded from the cache.

## Environment info
- `datasets` version: 1.12.1
- Platform: macOS-10.16-x86_64-i386-64bit
- Python version: 3.8.8
- PyArrow version: 5.0.0
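For context, `map()` with `num_proc>1` derives one cache file per worker via a `suffix_template` argument, and the symptom suggests the user-supplied `new_fingerprint` is not combined with that suffix the same way on a second run. A rough sketch of the expected behaviour — the helper below is hypothetical and not the library's actual code, and the default template string is an assumption:

```python
def expected_cache_file(cache_file_name: str, rank: int, num_proc: int,
                        suffix_template: str = "_{rank:05d}_of_{num_proc:05d}") -> str:
    # With a fixed new_fingerprint, each worker's cache file name should be a
    # pure function of (fingerprint, rank, num_proc), so a second run with
    # num_proc=2 finds the same files instead of re-mapping from scratch.
    base, ext = cache_file_name.rsplit(".", 1)
    return base + suffix_template.format(rank=rank, num_proc=num_proc) + "." + ext
```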
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3044/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3044/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3043
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3043/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3043/comments
https://api.github.com/repos/huggingface/datasets/issues/3043/events
https://github.com/huggingface/datasets/issues/3043
1,020,252,114
I_kwDODunzps48z8_S
3,043
Add PASS dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/7246357?v=4", "events_url": "https://api.github.com/users/osanseviero/events{/privacy}", "followers_url": "https://api.github.com/users/osanseviero/followers", "following_url": "https://api.github.com/users/osanseviero/following{/other_user}", "gists_url": "https://api.github.com/users/osanseviero/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/osanseviero", "id": 7246357, "login": "osanseviero", "node_id": "MDQ6VXNlcjcyNDYzNTc=", "organizations_url": "https://api.github.com/users/osanseviero/orgs", "received_events_url": "https://api.github.com/users/osanseviero/received_events", "repos_url": "https://api.github.com/users/osanseviero/repos", "site_admin": false, "starred_url": "https://api.github.com/users/osanseviero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/osanseviero/subscriptions", "type": "User", "url": "https://api.github.com/users/osanseviero" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" }, { "color": "bfdadc", "default": false, "description": "Vision datasets", "id": 3608941089, "name": "vision", "node_id": "LA_kwDODunzps7XHBIh", "url": "https://api.github.com/repos/huggingface/datasets/labels/vision" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" } ]
null
0
"2021-10-07T16:43:43Z"
"2022-01-20T16:50:47Z"
"2022-01-20T16:50:47Z"
MEMBER
null
null
null
## Adding a Dataset
- **Name:** PASS
- **Description:** An ImageNet replacement for self-supervised pretraining without humans
- **Data:** https://www.robots.ox.ac.uk/~vgg/research/pass/ https://github.com/yukimasano/PASS

Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3043/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3043/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3042
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3042/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3042/comments
https://api.github.com/repos/huggingface/datasets/issues/3042/events
https://github.com/huggingface/datasets/pull/3042
1,020,047,289
PR_kwDODunzps4s5Lxo
3,042
Improving elasticsearch integration
{ "avatar_url": "https://avatars.githubusercontent.com/u/5583410?v=4", "events_url": "https://api.github.com/users/ggdupont/events{/privacy}", "followers_url": "https://api.github.com/users/ggdupont/followers", "following_url": "https://api.github.com/users/ggdupont/following{/other_user}", "gists_url": "https://api.github.com/users/ggdupont/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ggdupont", "id": 5583410, "login": "ggdupont", "node_id": "MDQ6VXNlcjU1ODM0MTA=", "organizations_url": "https://api.github.com/users/ggdupont/orgs", "received_events_url": "https://api.github.com/users/ggdupont/received_events", "repos_url": "https://api.github.com/users/ggdupont/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ggdupont/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ggdupont/subscriptions", "type": "User", "url": "https://api.github.com/users/ggdupont" }
[]
open
false
null
[]
null
1
"2021-10-07T13:28:35Z"
"2022-07-06T15:19:48Z"
null
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3042.diff", "html_url": "https://github.com/huggingface/datasets/pull/3042", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/3042.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3042" }
- adding a murmurhash signature to each sample in the index
- adding optional credentials for a remote elasticsearch server
- enabling sample updates in the index
- upgrading to the elasticsearch 7.10.1 python client
- adding an `ElasticsearchBuilder` to instantiate a dataset from an index and a filtering query
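As a usage sketch of what the credentials part could look like — `add_elasticsearch_index` and `get_nearest_examples` are existing `Dataset` methods, while the host and auth values are placeholders:

```python
from datasets import load_dataset
from elasticsearch import Elasticsearch

# Connect to a remote, authenticated Elasticsearch 7.x cluster
es_client = Elasticsearch(
    hosts=["https://my-es-host:9200"],     # placeholder host
    http_auth=("elastic", "my-password"),  # placeholder credentials
)

squad = load_dataset("squad", split="validation")
# Index the "context" column remotely, then query it
squad.add_elasticsearch_index("context", es_client=es_client,
                              es_index_name="squad_context")
scores, retrieved = squad.get_nearest_examples("context", "machine learning", k=5)
```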
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3042/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3042/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3041
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3041/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3041/comments
https://api.github.com/repos/huggingface/datasets/issues/3041/events
https://github.com/huggingface/datasets/pull/3041
1,018,911,385
PR_kwDODunzps4s1ZAc
3,041
Load private data files + use glob on ZIP archives for json/csv/etc. module inference
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
4
"2021-10-06T18:16:36Z"
"2021-10-12T15:25:48Z"
"2021-10-12T15:25:46Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3041.diff", "html_url": "https://github.com/huggingface/datasets/pull/3041", "merged_at": "2021-10-12T15:25:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/3041.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3041" }
As mentioned in https://github.com/huggingface/datasets/issues/3032, loading data files from a private repository isn't working correctly because of how the data files are resolved.

#2986 did a refactor of the data files resolver. I added authentication to it. I also improved it to glob inside ZIP archives to look for json/csv/etc. files and infer which dataset builder (json/csv/etc.) to use.

Fix https://github.com/huggingface/datasets/issues/3032

Note that #2986 needs to get merged first.
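For illustration, the kind of ZIP globbing this relies on can be done with fsspec's `ZipFileSystem`; the archive URL below is a placeholder:

```python
import fsspec

# Open a (possibly remote) ZIP archive as a filesystem and look for
# json/csv members inside it to infer which dataset builder to use.
fs = fsspec.filesystem("zip", fo="https://example.com/data.zip")  # placeholder URL
json_members = fs.glob("*.json") + fs.glob("**/*.json")
csv_members = fs.glob("*.csv") + fs.glob("**/*.csv")
print(json_members, csv_members)
```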
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/3041/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3041/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3040
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3040/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3040/comments
https://api.github.com/repos/huggingface/datasets/issues/3040/events
https://github.com/huggingface/datasets/issues/3040
1,018,782,475
I_kwDODunzps48uWML
3,040
[save_to_disk] Using `select()` followed by `save_to_disk` saves complete dataset making it hard to create dummy dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }, { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
5
"2021-10-06T17:08:47Z"
"2021-11-02T15:41:08Z"
"2021-11-02T15:41:08Z"
MEMBER
null
null
null
## Describe the bug
When keeping only a dummy-sized slice of a dataset (say, the first 100 samples) and then saving it to disk in order to upload it to the hub for easy demo/use, not just the small slice is saved but the whole dataset along with an indices file. The problem with this is that the saved dataset is still very big.

## Steps to reproduce the bug
E.g. run the following:

```python
from datasets import load_dataset

nlp = load_dataset("glue", "mnli", split="train")
nlp.save_to_disk("full")

nlp = nlp.select(range(100))
nlp.save_to_disk("dummy")
```

Now one can see that both `"dummy"` and `"full"` have the same size. This shouldn't be the case IMO.

## Expected results
IMO `"dummy"` should be much smaller so that one can easily play around with the dataset on the hub.

## Environment info
- `datasets` version: 1.12.2.dev0
- Platform: Linux-5.11.0-34-generic-x86_64-with-glibc2.10
- Python version: 3.8.5
- PyArrow version: 5.0.0
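A workaround, assuming the goal is just a small standalone copy: call `flatten_indices()` (an existing `Dataset` method) before saving, which materializes only the selected rows into a new Arrow table instead of keeping the full table plus an indices mapping:

```python
from datasets import load_dataset

nlp = load_dataset("glue", "mnli", split="train")
# Drop the indices mapping so only the 100 selected rows get written
dummy = nlp.select(range(100)).flatten_indices()
dummy.save_to_disk("dummy")
```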
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3040/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3040/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3039
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3039/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3039/comments
https://api.github.com/repos/huggingface/datasets/issues/3039/events
https://github.com/huggingface/datasets/pull/3039
1,018,219,800
PR_kwDODunzps4sy_J-
3,039
Add sberquad dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/13781234?v=4", "events_url": "https://api.github.com/users/Alenush/events{/privacy}", "followers_url": "https://api.github.com/users/Alenush/followers", "following_url": "https://api.github.com/users/Alenush/following{/other_user}", "gists_url": "https://api.github.com/users/Alenush/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Alenush", "id": 13781234, "login": "Alenush", "node_id": "MDQ6VXNlcjEzNzgxMjM0", "organizations_url": "https://api.github.com/users/Alenush/orgs", "received_events_url": "https://api.github.com/users/Alenush/received_events", "repos_url": "https://api.github.com/users/Alenush/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Alenush/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Alenush/subscriptions", "type": "User", "url": "https://api.github.com/users/Alenush" }
[]
closed
false
null
[]
null
0
"2021-10-06T12:32:02Z"
"2021-10-13T10:19:11Z"
"2021-10-13T10:16:04Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3039.diff", "html_url": "https://github.com/huggingface/datasets/pull/3039", "merged_at": "2021-10-13T10:16:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/3039.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3039" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3039/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3039/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3038
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3038/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3038/comments
https://api.github.com/repos/huggingface/datasets/issues/3038/events
https://github.com/huggingface/datasets/pull/3038
1,018,113,499
PR_kwDODunzps4syno_
3,038
add sberquad dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/13781234?v=4", "events_url": "https://api.github.com/users/Alenush/events{/privacy}", "followers_url": "https://api.github.com/users/Alenush/followers", "following_url": "https://api.github.com/users/Alenush/following{/other_user}", "gists_url": "https://api.github.com/users/Alenush/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Alenush", "id": 13781234, "login": "Alenush", "node_id": "MDQ6VXNlcjEzNzgxMjM0", "organizations_url": "https://api.github.com/users/Alenush/orgs", "received_events_url": "https://api.github.com/users/Alenush/received_events", "repos_url": "https://api.github.com/users/Alenush/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Alenush/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Alenush/subscriptions", "type": "User", "url": "https://api.github.com/users/Alenush" }
[]
closed
false
null
[]
null
0
"2021-10-06T11:33:39Z"
"2021-10-06T11:58:01Z"
"2021-10-06T11:58:01Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3038.diff", "html_url": "https://github.com/huggingface/datasets/pull/3038", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/3038.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3038" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3038/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3038/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3037
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3037/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3037/comments
https://api.github.com/repos/huggingface/datasets/issues/3037/events
https://github.com/huggingface/datasets/pull/3037
1,018,091,919
PR_kwDODunzps4syi15
3,037
SberQuad
{ "avatar_url": "https://avatars.githubusercontent.com/u/13781234?v=4", "events_url": "https://api.github.com/users/Alenush/events{/privacy}", "followers_url": "https://api.github.com/users/Alenush/followers", "following_url": "https://api.github.com/users/Alenush/following{/other_user}", "gists_url": "https://api.github.com/users/Alenush/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Alenush", "id": 13781234, "login": "Alenush", "node_id": "MDQ6VXNlcjEzNzgxMjM0", "organizations_url": "https://api.github.com/users/Alenush/orgs", "received_events_url": "https://api.github.com/users/Alenush/received_events", "repos_url": "https://api.github.com/users/Alenush/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Alenush/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Alenush/subscriptions", "type": "User", "url": "https://api.github.com/users/Alenush" }
[]
closed
false
null
[]
null
0
"2021-10-06T11:21:08Z"
"2021-10-06T11:33:08Z"
"2021-10-06T11:33:08Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3037.diff", "html_url": "https://github.com/huggingface/datasets/pull/3037", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/3037.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3037" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3037/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3037/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3036
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3036/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3036/comments
https://api.github.com/repos/huggingface/datasets/issues/3036/events
https://github.com/huggingface/datasets/issues/3036
1,017,687,944
I_kwDODunzps48qK-I
3,036
Protect master branch to force contributions via Pull Requests
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
3
"2021-10-06T07:34:17Z"
"2021-10-07T06:51:47Z"
"2021-10-07T06:49:52Z"
MEMBER
null
null
null
In order to have a clearer Git history in the master branch, I propose to protect it so that all contributions must be done through a Pull Request and no direct commits to master are allowed.

- The Pull Request allows to give context, discuss any potential issues and improve the quality of the contribution
- The Pull Request will eventually be squashed and merged into master with a single commit that links to the Pull Request page (with all the context/discussions)

Note that we already implemented a protection in the master branch to avoid *merge* commits and ensure a linear history. This proposal goes one step further by avoiding all kinds of direct commits and forcing contributions **only** through Pull Requests.

Please note that we can temporarily deactivate this protection if we need to make a direct commit, e.g. at each new version release.

The only way GitHub allows this kind of protection is by requiring a minimal number (at least one) of approvals of the Pull Request. The inconvenience is that the PR creator cannot approve their own PR: another person must approve it before it can be merged into master. To circumvent this, we could eventually disable this protection in the master branch when an urgent commit is needed (e.g. for a hotfix) and there is no other person available at that time to approve the PR.
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3036/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3036/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3035
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3035/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3035/comments
https://api.github.com/repos/huggingface/datasets/issues/3035/events
https://github.com/huggingface/datasets/issues/3035
1,016,770,071
I_kwDODunzps48mq4X
3,035
`load_dataset` does not work with uploaded arrow file
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }, { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
2
"2021-10-05T20:15:10Z"
"2021-10-06T17:01:37Z"
null
MEMBER
null
null
null
## Describe the bug
I've preprocessed and uploaded a dataset here: https://huggingface.co/datasets/ami-wav2vec2/ami_headset_single_preprocessed. The dataset is in `.arrow` format. The dataset can correctly be loaded when doing:

```bash
git lfs install
git clone https://huggingface.co/datasets/ami-wav2vec2/ami_headset_single_preprocessed
```

followed by

```python
from datasets import load_from_disk
ds = load_from_disk("./ami_headset_single_preprocessed")
```

However when I try to directly download the dataset as follows:

```python
from datasets import load_dataset
ds = load_dataset("ami-wav2vec2/ami_headset_single_preprocessed")
```

the following error occurs:

```
/usr/local/lib/python3.7/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, streaming, **config_kwargs)
   1115         ignore_verifications=ignore_verifications,
   1116         try_from_hf_gcs=try_from_hf_gcs,
-> 1117         use_auth_token=use_auth_token,
   1118     )
   1119

/usr/local/lib/python3.7/dist-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs)
    635             if not downloaded_from_gcs:
    636                 self._download_and_prepare(
--> 637                     dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
    638                 )
    639         # Sync info

/usr/local/lib/python3.7/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
    724             try:
    725                 # Prepare split will record examples associated to the split
--> 726                 self._prepare_split(split_generator, **prepare_split_kwargs)
    727             except OSError as e:
    728                 raise OSError(

/usr/local/lib/python3.7/dist-packages/datasets/builder.py in _prepare_split(self, split_generator)
   1186             generator, unit=" tables", leave=False, disable=bool(logging.get_verbosity() == logging.NOTSET)
   1187         ):
-> 1188             writer.write_table(table)
   1189         num_examples, num_bytes = writer.finalize()
   1190

/usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in write_table(self, pa_table, writer_batch_size)
    424         # reorder the arrays if necessary + cast to self._schema
    425         # we can't simply use .cast here because we may need to change the order of the columns
--> 426         pa_table = pa.Table.from_arrays([pa_table[name] for name in self._schema.names], schema=self._schema)
    427         batches: List[pa.RecordBatch] = pa_table.to_batches(max_chunksize=writer_batch_size)
    428         self._num_bytes += sum(batch.nbytes for batch in batches)

/usr/local/lib/python3.7/dist-packages/pyarrow/table.pxi in pyarrow.lib.Table.from_arrays()
/usr/local/lib/python3.7/dist-packages/pyarrow/table.pxi in pyarrow.lib._sanitize_arrays()
/usr/local/lib/python3.7/dist-packages/pyarrow/array.pxi in pyarrow.lib.asarray()
/usr/local/lib/python3.7/dist-packages/pyarrow/table.pxi in pyarrow.lib.ChunkedArray.cast()

/usr/local/lib/python3.7/dist-packages/pyarrow/compute.py in cast(arr, target_type, safe)
    279     else:
    280         options = CastOptions.unsafe(target_type)
--> 281     return call_function("cast", [arr], options)
    282
    283

/usr/local/lib/python3.7/dist-packages/pyarrow/_compute.pyx in pyarrow._compute.call_function()
/usr/local/lib/python3.7/dist-packages/pyarrow/_compute.pyx in pyarrow._compute.Function.call()
/usr/local/lib/python3.7/dist-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status()
/usr/local/lib/python3.7/dist-packages/pyarrow/error.pxi in pyarrow.lib.check_status()

ArrowNotImplementedError: Unsupported cast from struct<train: struct<name: string, num_bytes: int64, num_examples: int64, dataset_name: string>, validation: struct<name: string, num_bytes: int64, num_examples: int64, dataset_name: string>, test: struct<name: string, num_bytes: int64, num_examples: int64, dataset_name: string>> to list using function cast_list
```

## Expected results
The dataset should be correctly loaded with `load_dataset` IMO.

## Environment info
- `datasets` version: 1.12.2.dev0
- Platform: Linux-5.11.0-34-generic-x86_64-with-glibc2.10
- Python version: 3.8.5
- PyArrow version: 5.0.0
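Until `load_dataset` understands repos written with `save_to_disk`, one workaround sketch is to fetch the whole repo and use `load_from_disk` on the local copy — `snapshot_download` is a real `huggingface_hub` helper, though its exact kwargs may differ across versions:

```python
from huggingface_hub import snapshot_download
from datasets import load_from_disk

# Download the dataset repo (including the .arrow files) to the local cache
local_path = snapshot_download(
    repo_id="ami-wav2vec2/ami_headset_single_preprocessed",
    repo_type="dataset",
)
ds = load_from_disk(local_path)
```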
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3035/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3035/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3034
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3034/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3034/comments
https://api.github.com/repos/huggingface/datasets/issues/3034/events
https://github.com/huggingface/datasets/issues/3034
1,016,759,202
I_kwDODunzps48moOi
3,034
Errors loading dataset using a gcsfs.GCSFileSystem
{ "avatar_url": "https://avatars.githubusercontent.com/u/74556552?v=4", "events_url": "https://api.github.com/users/dconatha/events{/privacy}", "followers_url": "https://api.github.com/users/dconatha/followers", "following_url": "https://api.github.com/users/dconatha/following{/other_user}", "gists_url": "https://api.github.com/users/dconatha/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dconatha", "id": 74556552, "login": "dconatha", "node_id": "MDQ6VXNlcjc0NTU2NTUy", "organizations_url": "https://api.github.com/users/dconatha/orgs", "received_events_url": "https://api.github.com/users/dconatha/received_events", "repos_url": "https://api.github.com/users/dconatha/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dconatha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dconatha/subscriptions", "type": "User", "url": "https://api.github.com/users/dconatha" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
0
"2021-10-05T20:07:08Z"
"2021-10-05T20:26:39Z"
null
NONE
null
null
null
## Describe the bug Cannot load a dataset using a `gcsfs.GCSFileSystem`. I'm not sure if this should be a bug in `gcsfs` or here... Basically what seems to be happening is that since datasets saves datasets as folders, and folders aren't "real objects" in gcs, gcsfs raises a 404 error. There are workarounds if you use gcsfs directly to download the file, but as is, I can't get `load_from_disk` to work. ## Steps to reproduce the bug ```python from datasets import load_dataset # load some dataset dataset = load_dataset("squad", split="train") # save it to gcs import gcsfs fs = gcsfs.GCSFileSystem(project="my-gs-project") dataset.save_to_disk("gs://my-bucket/squad", fs=fs) # try to load it from gcs from datasets import load_from_disk dataset2 = load_from_disk("my-bucket/squad", fs=fs) ``` ## Expected results `dataset2` would be a copy of `dataset` but loaded from my bucket. ## Actual results Long traceback, but essentially it's a 404 error from gcsfs saying the object `my-bucket/squad` doesn't exist when this is called: https://github.com/huggingface/datasets/blob/9c81b7d2e6d9feae69a084a3abda265a4ca07fb5/src/datasets/arrow_dataset.py#L977 This is because there is no actual object called `my-bucket/squad`; there are objects called `my-bucket/squad/dataset.arrow`, etc. Note that *this* works fine, since it's explicitly saying "download all the objects with this prefix": ```python fs.download(src_dataset_path + "/*", dataset_path.as_posix(), recursive=True) ``` For example, I can do a workaround this way: ```python import tempfile with tempfile.TemporaryDirectory() as temppath: fs.download("gs://my-bucket/squad/*", temppath) dataset2 = load_from_disk(temppath) ``` It's unclear to me if it's `gcsfs`'s responsibility to say "hey, that's a folder, not a file; I should try to get the objects inside of it, not the object itself", or if that's `datasets`'s responsibility... I'm leaning towards the latter, since you're never loading a dataset from one file using this function/method, only a dataset folder. Another minor thing that maybe should be rolled into this bug... https://github.com/huggingface/datasets/blob/9c81b7d2e6d9feae69a084a3abda265a4ca07fb5/src/datasets/arrow_dataset.py#L968 These fail if you pass in a `gs://` path, e.g. ```python dataset2 = load_from_disk("gs://my-bucket/squad", fs=fs) ``` Because at this point, `dataset_info_path` is `gs:/my-bucket/squad/dataset_info.json`, gcsfs throws an ``` Invalid bucket name: 'gs:' ``` error. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.12.1 - Platform: macOS Big Sur 11.6 - Python version: 3.7.12 - PyArrow version: 5.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3034/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3034/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3033
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3033/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3033/comments
https://api.github.com/repos/huggingface/datasets/issues/3033/events
https://github.com/huggingface/datasets/pull/3033
1,016,619,572
PR_kwDODunzps4std7u
3,033
Actual "proper" install of ruamel.yaml in the windows CI
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
0
"2021-10-05T17:52:07Z"
"2021-10-05T17:54:57Z"
"2021-10-05T17:54:57Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3033.diff", "html_url": "https://github.com/huggingface/datasets/pull/3033", "merged_at": "2021-10-05T17:54:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/3033.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3033" }
It was impossible to update the package directly with `pip`. Indeed, it was installed with `distutils`, which prevents `pip` or `conda` from uninstalling it. I had to `rm` a directory from the `site-packages` Python directory and then run `pip install ruamel.yaml`. It's not that "proper", but I couldn't find a better solution
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3033/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3033/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3032
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3032/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3032/comments
https://api.github.com/repos/huggingface/datasets/issues/3032/events
https://github.com/huggingface/datasets/issues/3032
1,016,488,475
I_kwDODunzps48lmIb
3,032
Error when loading private dataset with "data_files" arg
{ "avatar_url": "https://avatars.githubusercontent.com/u/715491?v=4", "events_url": "https://api.github.com/users/borisdayma/events{/privacy}", "followers_url": "https://api.github.com/users/borisdayma/followers", "following_url": "https://api.github.com/users/borisdayma/following{/other_user}", "gists_url": "https://api.github.com/users/borisdayma/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/borisdayma", "id": 715491, "login": "borisdayma", "node_id": "MDQ6VXNlcjcxNTQ5MQ==", "organizations_url": "https://api.github.com/users/borisdayma/orgs", "received_events_url": "https://api.github.com/users/borisdayma/received_events", "repos_url": "https://api.github.com/users/borisdayma/repos", "site_admin": false, "starred_url": "https://api.github.com/users/borisdayma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borisdayma/subscriptions", "type": "User", "url": "https://api.github.com/users/borisdayma" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
1
"2021-10-05T15:46:27Z"
"2021-10-12T15:26:22Z"
"2021-10-12T15:25:46Z"
CONTRIBUTOR
null
null
null
## Describe the bug Private datasets with no loading script can't be loaded using the `data_files` parameter. ## Steps to reproduce the bug ```python from datasets import load_dataset data_files = {"train": "**/train/*/*.jsonl", "valid": "**/valid/*/*.jsonl"} dataset = load_dataset('dalle-mini/encoded', data_files=data_files, use_auth_token=True, streaming=True) ``` The same error happens in non-streaming mode. ## Expected results Files should be loaded (whether in streaming or not). ## Actual results Error: ``` --------------------------------------------------------------------------- FileNotFoundError Traceback (most recent call last) /usr/local/lib/python3.7/dist-packages/datasets/load.py in prepare_module(path, script_version, download_config, download_mode, dataset, force_local_path, dynamic_modules_path, return_resolved_file_path, return_associated_base_path, data_files, **download_kwargs) 539 try: --> 540 local_path = cached_path(file_path, download_config=download_config) 541 except FileNotFoundError: 8 frames FileNotFoundError: Couldn't find file at https://huggingface.co/datasets/dalle-mini/encoded/resolve/main/encoded.py During handling of the above exception, another exception occurred: HTTPError Traceback (most recent call last) HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/api/datasets/dalle-mini/encoded?full=true During handling of the above exception, another exception occurred: FileNotFoundError Traceback (most recent call last) /usr/local/lib/python3.7/dist-packages/datasets/load.py in prepare_module(path, script_version, download_config, download_mode, dataset, force_local_path, dynamic_modules_path, return_resolved_file_path, return_associated_base_path, data_files, **download_kwargs) 547 except Exception: 548 raise FileNotFoundError( --> 549 f"Couldn't find a directory or a {resource_type} named '{path}'. " 550 f"It doesn't exist locally at {expected_dir_for_combined_path_abs} or remotely on {hf_api.endpoint}/datasets" 551 ) FileNotFoundError: Couldn't find a directory or a dataset named 'dalle-mini/encoded'. It doesn't exist locally at /content/dalle-mini/encoded or remotely on https://huggingface.co/datasets ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.12.1 - Platform: Linux-5.4.104+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.12 - PyArrow version: 3.0.0 @lhoestq
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3032/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3032/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3031
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3031/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3031/comments
https://api.github.com/repos/huggingface/datasets/issues/3031/events
https://github.com/huggingface/datasets/pull/3031
1,016,458,496
PR_kwDODunzps4ss9jn
3,031
Align tqdm control with cache control
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
1
"2021-10-05T15:18:49Z"
"2021-10-18T15:00:21Z"
"2021-10-18T14:59:30Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3031.diff", "html_url": "https://github.com/huggingface/datasets/pull/3031", "merged_at": "2021-10-18T14:59:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/3031.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3031" }
Currently, once disabled with `disable_progress_bar`, progress bars cannot be re-enabled again. To overcome this limitation, this PR introduces the `set_progress_bar_enabled` function that accepts a boolean indicating whether to display progress bars. The goal is to provide a similar API to the existing cache control API. Following the Zen of Python (😄), there should be one and preferably only one obvious way to do it, so I'm also deprecating the aforementioned `disable_progress_bar` function. Additionally, I justify the deprecation with the fact that this function has never been in the docs. Moreover, similar API changes have recently been introduced to [`tfds`](https://github.com/tensorflow/datasets/blob/a1e8b98f45b0214082b546cc967c67c43fffda55/tensorflow_datasets/core/utils/tqdm_utils.py#L98-L112). Considering the popularity of the [comment](https://github.com/huggingface/datasets/issues/1627#issuecomment-751383559) I made a while ago, this API (`set_progress_bar_enabled` and `is_progress_bar_enabled`) should be mentioned in the docs, but I'm not sure where to put it exactly. Maybe we can replace the `logging_methods` page under `package_reference` with `utility_methods` and then introduce two subsections on that page: `Logging methods` and `tqdm control`. Additionally, this PR: * adds the `disable_tqdm` keyword arg of `Dataset._map_single` to the `ignore_kwargs` list to ignore it when computing the fingerprint (forgot to add it in #2696) * deletes the unused components in `tqdm_utils.py`, which seem to be inherited from `tfds` * disables the tqdm output in the test suite. As I see it, this output doesn't seem informative, but let me know if this is not a good idea
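For context, a minimal sketch of how the proposed API could be used (the import path is an assumption and may differ from where the helpers are finally exposed):

```python
# Sketch only: the exact module exposing these helpers is an assumption.
from datasets.utils import set_progress_bar_enabled, is_progress_bar_enabled

set_progress_bar_enabled(False)       # hide tqdm bars, e.g. in scripts or CI
assert not is_progress_bar_enabled()

set_progress_bar_enabled(True)        # unlike disable_progress_bar, this is reversible
```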
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3031/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3031/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3030
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3030/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3030/comments
https://api.github.com/repos/huggingface/datasets/issues/3030/events
https://github.com/huggingface/datasets/pull/3030
1,016,435,324
PR_kwDODunzps4ss41W
3,030
Add `remove_columns` to `IterableDataset`
{ "avatar_url": "https://avatars.githubusercontent.com/u/31893406?v=4", "events_url": "https://api.github.com/users/cccntu/events{/privacy}", "followers_url": "https://api.github.com/users/cccntu/followers", "following_url": "https://api.github.com/users/cccntu/following{/other_user}", "gists_url": "https://api.github.com/users/cccntu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cccntu", "id": 31893406, "login": "cccntu", "node_id": "MDQ6VXNlcjMxODkzNDA2", "organizations_url": "https://api.github.com/users/cccntu/orgs", "received_events_url": "https://api.github.com/users/cccntu/received_events", "repos_url": "https://api.github.com/users/cccntu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cccntu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cccntu/subscriptions", "type": "User", "url": "https://api.github.com/users/cccntu" }
[]
closed
false
null
[]
null
4
"2021-10-05T14:58:33Z"
"2021-10-08T15:33:15Z"
"2021-10-08T15:31:53Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3030.diff", "html_url": "https://github.com/huggingface/datasets/pull/3030", "merged_at": "2021-10-08T15:31:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/3030.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3030" }
Fixes #2944 WIP * Not tested yet. * We might want to allow batched remove for efficiency. @lhoestq Do you think it should have `batched=` and `batch_size=`?
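For illustration, a hedged sketch of the intended usage once this PR lands (the dataset and column names are just examples):

```python
from datasets import load_dataset

# Stream a dataset and drop columns on the fly, mirroring Dataset.remove_columns.
streamed = load_dataset("squad", split="train", streaming=True)
streamed = streamed.remove_columns(["id", "title"])  # method proposed in this PR

print(next(iter(streamed)).keys())  # only the remaining columns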
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3030/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3030/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3029
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3029/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3029/comments
https://api.github.com/repos/huggingface/datasets/issues/3029/events
https://github.com/huggingface/datasets/pull/3029
1,016,389,901
PR_kwDODunzps4ssvkr
3,029
Use standard open-domain validation split in nq_open
{ "avatar_url": "https://avatars.githubusercontent.com/u/417568?v=4", "events_url": "https://api.github.com/users/craffel/events{/privacy}", "followers_url": "https://api.github.com/users/craffel/followers", "following_url": "https://api.github.com/users/craffel/following{/other_user}", "gists_url": "https://api.github.com/users/craffel/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/craffel", "id": 417568, "login": "craffel", "node_id": "MDQ6VXNlcjQxNzU2OA==", "organizations_url": "https://api.github.com/users/craffel/orgs", "received_events_url": "https://api.github.com/users/craffel/received_events", "repos_url": "https://api.github.com/users/craffel/repos", "site_admin": false, "starred_url": "https://api.github.com/users/craffel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/craffel/subscriptions", "type": "User", "url": "https://api.github.com/users/craffel" }
[]
closed
false
null
[]
null
2
"2021-10-05T14:19:27Z"
"2021-10-05T14:56:46Z"
"2021-10-05T14:56:45Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3029.diff", "html_url": "https://github.com/huggingface/datasets/pull/3029", "merged_at": "2021-10-05T14:56:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/3029.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3029" }
The nq_open dataset originally drew the validation set from this file: https://github.com/google-research-datasets/natural-questions/blob/master/nq_open/NQ-open.efficientqa.dev.1.1.sample.jsonl However, that's the dev set used specifically and only for the efficientqa competition, and it's not the same dev set as is used in every open-domain question answering paper (including the Lee et al paper that introduced the open-domain variant of NQ, cited at the top of the dataset file). This PR changes nq_open to use the standard validation split and bumps the version to 2.0.0 since this is a breaking change.
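Since this is a breaking change, a hedged sketch of how a user could handle it (`script_version` matches the `load_dataset` signature shown elsewhere in this document; the exact revision string is a placeholder):

```python
from datasets import load_dataset

# The default now uses the standard open-domain validation split (v2.0.0).
nq = load_dataset("nq_open")

# Hypothetical: pin an older script revision if the efficientqa dev sample is needed.
# nq_old = load_dataset("nq_open", script_version="<old revision>")
```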
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3029/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3029/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3028
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3028/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3028/comments
https://api.github.com/repos/huggingface/datasets/issues/3028/events
https://github.com/huggingface/datasets/pull/3028
1,016,230,272
PR_kwDODunzps4ssO4s
3,028
Properly install ruamel-yaml for Windows CI
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
3
"2021-10-05T11:51:15Z"
"2021-10-05T14:02:12Z"
"2021-10-05T11:51:22Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3028.diff", "html_url": "https://github.com/huggingface/datasets/pull/3028", "merged_at": "2021-10-05T11:51:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/3028.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3028" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3028/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3028/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3027
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3027/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3027/comments
https://api.github.com/repos/huggingface/datasets/issues/3027/events
https://github.com/huggingface/datasets/issues/3027
1,016,150,117
I_kwDODunzps48kThl
3,027
Resolve data_files by split name
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
3
"2021-10-05T10:24:36Z"
"2021-11-05T17:49:58Z"
"2021-11-05T17:49:57Z"
MEMBER
null
null
null
This issue is about discussing the default behavior when someone loads a dataset that consists of data files. For example: ```python load_dataset("lhoestq/demo1") ``` should return two splits "train" and "test" since the dataset repository is like ``` data/ ├── train.csv └── test.csv ``` Currently it returns only one split "train", which contains the data of both files. I started playing with this idea on this branch btw: `resolve-data_files-by-split-name` Basically the idea is that if you named your data files after split names then the default pattern is ```python { "train": ["*train*"], "test": ["*test*"], "validation": ["*dev*", "*valid*"], } ``` otherwise it's ```python { "train": ["*"] } ``` Let me know what you think! cc @albertvillanova @LysandreJik @vblagoje
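To make the proposed resolution concrete, here is a minimal sketch of how file names could be matched against the default patterns (an illustration only, not the actual code on the branch):

```python
import fnmatch

DEFAULT_PATTERNS = {
    "train": ["*train*"],
    "test": ["*test*"],
    "validation": ["*dev*", "*valid*"],
}

def resolve_splits(filenames):
    """Group data files by split name based on filename patterns."""
    splits = {
        split: [f for f in filenames if any(fnmatch.fnmatch(f, p) for p in patterns)]
        for split, patterns in DEFAULT_PATTERNS.items()
    }
    splits = {split: files for split, files in splits.items() if files}
    # Fall back to a single "train" split if nothing matched a split name.
    return splits or {"train": list(filenames)}

print(resolve_splits(["data/train.csv", "data/test.csv"]))
# {'train': ['data/train.csv'], 'test': ['data/test.csv']}
```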
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/3027/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3027/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3026
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3026/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3026/comments
https://api.github.com/repos/huggingface/datasets/issues/3026/events
https://github.com/huggingface/datasets/pull/3026
1,016,067,794
PR_kwDODunzps4srtyc
3,026
Added arxiv paper in swiss_judgment_prediction dataset card
{ "avatar_url": "https://avatars.githubusercontent.com/u/3775944?v=4", "events_url": "https://api.github.com/users/JoelNiklaus/events{/privacy}", "followers_url": "https://api.github.com/users/JoelNiklaus/followers", "following_url": "https://api.github.com/users/JoelNiklaus/following{/other_user}", "gists_url": "https://api.github.com/users/JoelNiklaus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JoelNiklaus", "id": 3775944, "login": "JoelNiklaus", "node_id": "MDQ6VXNlcjM3NzU5NDQ=", "organizations_url": "https://api.github.com/users/JoelNiklaus/orgs", "received_events_url": "https://api.github.com/users/JoelNiklaus/received_events", "repos_url": "https://api.github.com/users/JoelNiklaus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JoelNiklaus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JoelNiklaus/subscriptions", "type": "User", "url": "https://api.github.com/users/JoelNiklaus" }
[]
closed
false
null
[]
null
0
"2021-10-05T09:02:01Z"
"2021-10-08T16:01:44Z"
"2021-10-08T16:01:24Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3026.diff", "html_url": "https://github.com/huggingface/datasets/pull/3026", "merged_at": "2021-10-08T16:01:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/3026.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3026" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3026/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3026/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3025
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3025/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3025/comments
https://api.github.com/repos/huggingface/datasets/issues/3025/events
https://github.com/huggingface/datasets/pull/3025
1,016,061,222
PR_kwDODunzps4srsgG
3,025
Fix Windows test suite
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-05T08:55:22Z"
"2021-10-05T09:58:28Z"
"2021-10-05T09:58:27Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3025.diff", "html_url": "https://github.com/huggingface/datasets/pull/3025", "merged_at": "2021-10-05T09:58:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/3025.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3025" }
Try a hotfix to restore the Windows test suite. Fixes #3024.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3025/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3025/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3024
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3024/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3024/comments
https://api.github.com/repos/huggingface/datasets/issues/3024/events
https://github.com/huggingface/datasets/issues/3024
1,016,052,911
I_kwDODunzps48j7yv
3,024
Windows test suite fails
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
0
"2021-10-05T08:46:46Z"
"2021-10-05T09:58:27Z"
"2021-10-05T09:58:27Z"
MEMBER
null
null
null
## Describe the bug There is an error during the installation of test dependencies for Windows: https://app.circleci.com/pipelines/github/huggingface/datasets/7981/workflows/9b6a0114-2b8e-4069-94e5-e844dbbdba4e/jobs/49206 ``` ERROR: Cannot uninstall 'ruamel-yaml'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall. ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3024/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3024/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3023
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3023/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3023/comments
https://api.github.com/repos/huggingface/datasets/issues/3023/events
https://github.com/huggingface/datasets/pull/3023
1,015,923,031
PR_kwDODunzps4srQ4i
3,023
Fix typo
{ "avatar_url": "https://avatars.githubusercontent.com/u/24835382?v=4", "events_url": "https://api.github.com/users/qqaatw/events{/privacy}", "followers_url": "https://api.github.com/users/qqaatw/followers", "following_url": "https://api.github.com/users/qqaatw/following{/other_user}", "gists_url": "https://api.github.com/users/qqaatw/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/qqaatw", "id": 24835382, "login": "qqaatw", "node_id": "MDQ6VXNlcjI0ODM1Mzgy", "organizations_url": "https://api.github.com/users/qqaatw/orgs", "received_events_url": "https://api.github.com/users/qqaatw/received_events", "repos_url": "https://api.github.com/users/qqaatw/repos", "site_admin": false, "starred_url": "https://api.github.com/users/qqaatw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/qqaatw/subscriptions", "type": "User", "url": "https://api.github.com/users/qqaatw" }
[]
closed
false
null
[]
null
0
"2021-10-05T06:06:11Z"
"2021-10-05T11:56:55Z"
"2021-10-05T11:56:55Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3023.diff", "html_url": "https://github.com/huggingface/datasets/pull/3023", "merged_at": "2021-10-05T11:56:55Z", "patch_url": "https://github.com/huggingface/datasets/pull/3023.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3023" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3023/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3023/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3022
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3022/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3022/comments
https://api.github.com/repos/huggingface/datasets/issues/3022/events
https://github.com/huggingface/datasets/pull/3022
1,015,750,221
PR_kwDODunzps4sqve6
3,022
MeDAL dataset: Add further description and update download URL
{ "avatar_url": "https://avatars.githubusercontent.com/u/21180505?v=4", "events_url": "https://api.github.com/users/xhluca/events{/privacy}", "followers_url": "https://api.github.com/users/xhluca/followers", "following_url": "https://api.github.com/users/xhluca/following{/other_user}", "gists_url": "https://api.github.com/users/xhluca/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/xhluca", "id": 21180505, "login": "xhluca", "node_id": "MDQ6VXNlcjIxMTgwNTA1", "organizations_url": "https://api.github.com/users/xhluca/orgs", "received_events_url": "https://api.github.com/users/xhluca/received_events", "repos_url": "https://api.github.com/users/xhluca/repos", "site_admin": false, "starred_url": "https://api.github.com/users/xhluca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xhluca/subscriptions", "type": "User", "url": "https://api.github.com/users/xhluca" }
[]
closed
false
null
[]
null
4
"2021-10-05T00:13:28Z"
"2021-10-13T09:03:09Z"
"2021-10-13T09:03:09Z"
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3022.diff", "html_url": "https://github.com/huggingface/datasets/pull/3022", "merged_at": "2021-10-13T09:03:09Z", "patch_url": "https://github.com/huggingface/datasets/pull/3022.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3022" }
Added more details in the following sections: * Dataset Structure * Data Instances * Data Splits * Source Data * Annotations * Discussions of Biases * Licensing Information
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3022/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3022/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3021
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3021/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3021/comments
https://api.github.com/repos/huggingface/datasets/issues/3021/events
https://github.com/huggingface/datasets/pull/3021
1,015,444,094
PR_kwDODunzps4spzJU
3,021
Support loading dataset from multiple zipped CSV data files
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T17:33:57Z"
"2021-10-06T08:36:46Z"
"2021-10-06T08:36:45Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3021.diff", "html_url": "https://github.com/huggingface/datasets/pull/3021", "merged_at": "2021-10-06T08:36:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/3021.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3021" }
Partially fixes #3018. CC: @lewtun
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3021/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3021/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3020
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3020/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3020/comments
https://api.github.com/repos/huggingface/datasets/issues/3020/events
https://github.com/huggingface/datasets/pull/3020
1,015,406,105
PR_kwDODunzps4sprfa
3,020
Add a metric for the MATH dataset (competition_math).
{ "avatar_url": "https://avatars.githubusercontent.com/u/91226467?v=4", "events_url": "https://api.github.com/users/hacobe/events{/privacy}", "followers_url": "https://api.github.com/users/hacobe/followers", "following_url": "https://api.github.com/users/hacobe/following{/other_user}", "gists_url": "https://api.github.com/users/hacobe/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hacobe", "id": 91226467, "login": "hacobe", "node_id": "MDQ6VXNlcjkxMjI2NDY3", "organizations_url": "https://api.github.com/users/hacobe/orgs", "received_events_url": "https://api.github.com/users/hacobe/received_events", "repos_url": "https://api.github.com/users/hacobe/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hacobe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hacobe/subscriptions", "type": "User", "url": "https://api.github.com/users/hacobe" }
[]
closed
false
null
[]
null
4
"2021-10-04T16:52:16Z"
"2021-10-22T10:29:31Z"
"2021-10-22T10:29:31Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3020.diff", "html_url": "https://github.com/huggingface/datasets/pull/3020", "merged_at": "2021-10-22T10:29:31Z", "patch_url": "https://github.com/huggingface/datasets/pull/3020.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3020" }
This metric computes accuracy for the MATH dataset (https://arxiv.org/abs/2103.03874) after canonicalizing the prediction and the reference (e.g., converting "1/2" to "\\\\frac{1}{2}").
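As a rough illustration of the idea (the canonicalizer in the PR handles far more LaTeX normalizations; this toy version only covers the fraction example above):

```python
import re

def canonicalize(expr: str) -> str:
    """Toy normalization: rewrite a simple a/b fraction as \\frac{a}{b}."""
    return re.sub(r"^(\w+)/(\w+)$", r"\\frac{\1}{\2}", expr.strip())

def accuracy(predictions, references):
    """Exact-match accuracy after canonicalizing both sides."""
    matches = [canonicalize(p) == canonicalize(r) for p, r in zip(predictions, references)]
    return sum(matches) / len(matches)

print(accuracy(["1/2"], ["\\frac{1}{2}"]))  # 1.0
```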
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3020/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3020/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3019
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3019/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3019/comments
https://api.github.com/repos/huggingface/datasets/issues/3019/events
https://github.com/huggingface/datasets/pull/3019
1,015,339,983
PR_kwDODunzps4speOB
3,019
Fix filter leaking
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
0
"2021-10-04T15:42:58Z"
"2022-06-03T08:28:14Z"
"2021-10-05T08:33:07Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3019.diff", "html_url": "https://github.com/huggingface/datasets/pull/3019", "merged_at": "2021-10-05T08:33:07Z", "patch_url": "https://github.com/huggingface/datasets/pull/3019.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3019" }
If `filter` is called after a first transform such as `shuffle`, `select`, `shard`, `train_test_split`, or `filter`, then it could fail to work as expected and return examples from before the first transform. This is because the indices mapping was not taken into account when saving the indices to keep during filtering. Affected versions: 1.12.0 and 1.12.1. This should fix #3010
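A minimal sketch of the kind of sequence that triggered the bug (illustrative only; the exact reproduction is in #3010):

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(10))})
ds = ds.select(range(5, 10))                       # first transform: creates an indices mapping
ds = ds.filter(lambda example: example["x"] >= 5)  # should keep all 5 remaining rows

# Expected [5, 6, 7, 8, 9]; in 1.12.0/1.12.1 the ignored indices mapping
# could yield rows from before the select instead.
print(ds["x"])
```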
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3019/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3019/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3018
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3018/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3018/comments
https://api.github.com/repos/huggingface/datasets/issues/3018/events
https://github.com/huggingface/datasets/issues/3018
1,015,311,877
I_kwDODunzps48hG4F
3,018
Support multiple zipped CSV data files
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
3
"2021-10-04T15:16:59Z"
"2021-10-05T14:32:57Z"
null
MEMBER
null
null
null
As requested by @lewtun, support loading multiple zipped CSV data files. ```python from datasets import load_dataset url = "https://domain.org/filename.zip" data_files = {"train": "train_filename.csv", "test": "test_filename.csv"} dataset = load_dataset("csv", data_dir=url, data_files=data_files) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3018/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3018/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3017
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3017/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3017/comments
https://api.github.com/repos/huggingface/datasets/issues/3017/events
https://github.com/huggingface/datasets/pull/3017
1,015,215,528
PR_kwDODunzps4spE9m
3,017
Remove unused parameter in xdirname
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T13:55:53Z"
"2021-10-05T11:37:01Z"
"2021-10-05T11:37:00Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3017.diff", "html_url": "https://github.com/huggingface/datasets/pull/3017", "merged_at": "2021-10-05T11:37:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/3017.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3017" }
Minor fix to remove the unused argument `*p` in `xdirname`.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3017/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3017/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3016
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3016/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3016/comments
https://api.github.com/repos/huggingface/datasets/issues/3016/events
https://github.com/huggingface/datasets/pull/3016
1,015,208,654
PR_kwDODunzps4spDlX
3,016
Fix Windows paths in LJ Speech dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T13:49:37Z"
"2021-10-04T15:23:05Z"
"2021-10-04T15:23:04Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3016.diff", "html_url": "https://github.com/huggingface/datasets/pull/3016", "merged_at": "2021-10-04T15:23:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/3016.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3016" }
Minor fix in the LJ Speech dataset for the Windows pathname component separator. Related to #1878.
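For context, the usual pattern behind this kind of fix (a generic sketch, not the exact diff; the file layout shown is only an example):

```python
import os

data_dir, file_id = "LJSpeech-1.1", "LJ001-0001"

fragile = data_dir + "/wavs/" + file_id + ".wav"             # hardcoded "/" separator
portable = os.path.join(data_dir, "wavs", file_id + ".wav")  # OS-appropriate separator
print(portable)
```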
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3016/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3016/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3015
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3015/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3015/comments
https://api.github.com/repos/huggingface/datasets/issues/3015/events
https://github.com/huggingface/datasets/pull/3015
1,015,130,845
PR_kwDODunzps4so0GX
3,015
Extend support for streaming datasets that use glob.glob
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T12:42:37Z"
"2021-10-05T13:46:39Z"
"2021-10-05T13:46:38Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3015.diff", "html_url": "https://github.com/huggingface/datasets/pull/3015", "merged_at": "2021-10-05T13:46:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/3015.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3015" }
This PR extends the support in streaming mode for datasets that use `glob`, by patching the function `glob.glob`. Related to #2880, #2876, #2874.
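For illustration, a minimal sketch of the kind of fsspec-backed replacement such a patch can substitute for `glob.glob` in streaming mode; `xglob` is a hypothetical name here, not necessarily the function used in the PR.
```python
import fsspec

def xglob(urlpath):
    # Resolve a (possibly remote) glob pattern through fsspec, so the
    # pattern works for https, s3, zip, ... URLs as well as local paths.
    fs, _, paths = fsspec.get_fs_token_paths(urlpath)
    return paths

print(xglob("*.py"))  # behaves like glob.glob for local files
```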
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3015/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3015/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3014
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3014/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3014/comments
https://api.github.com/repos/huggingface/datasets/issues/3014/events
https://github.com/huggingface/datasets/pull/3014
1,015,070,751
PR_kwDODunzps4son8A
3,014
Fix Windows path in MATH dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T11:41:07Z"
"2021-10-04T12:46:44Z"
"2021-10-04T12:46:44Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3014.diff", "html_url": "https://github.com/huggingface/datasets/pull/3014", "merged_at": "2021-10-04T12:46:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/3014.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3014" }
Minor fix in MATH dataset for Windows pathname component separator. Related to #2982.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3014/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3014/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3013
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3013/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3013/comments
https://api.github.com/repos/huggingface/datasets/issues/3013/events
https://github.com/huggingface/datasets/issues/3013
1,014,960,419
I_kwDODunzps48fxEj
3,013
Improve `get_dataset_infos`?
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo" }
[ { "color": "d876e3", "default": true, "description": "Further information is requested", "id": 1935892912, "name": "question", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question" }, { "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co", "id": 3470211881, "name": "dataset-viewer", "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer" } ]
closed
false
null
[]
null
1
"2021-10-04T09:47:04Z"
"2022-02-21T15:57:10Z"
"2022-02-21T15:57:10Z"
CONTRIBUTOR
null
null
null
Using the dedicated function `get_dataset_infos` on a dataset that has no dataset-info.json file returns an empty info: ``` >>> from datasets import get_dataset_infos >>> get_dataset_infos('wit') {} ``` While it's totally possible to get it (regenerate it) with: ``` >>> from datasets import load_dataset_builder >>> builder = load_dataset_builder('wit') >>> builder.info DatasetInfo(description='Wikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset. WIT is composed of a curated set\n of 37.6 million entity rich image-text examples with 11.5 million unique images across 108 Wikipedia languages. Its\n size enables WIT to be used as a pretraining dataset for multimodal machine learning models.\n', citation='@article{srinivasan2021wit,\n title={WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning},\n author={Srinivasan, Krishna and Raman, Karthik and Chen, Jiecao and Bendersky, Michael and Najork, Marc},\n journal={arXiv preprint arXiv:2103.01913},\n year={2021}\n}\n', homepage='https://github.com/google-research-datasets/wit', license='', features={'b64_bytes': Value(dtype='string', id=None), 'embedding': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None), 'image_url': Value(dtype='string', id=None), 'metadata_url': Value(dtype='string', id=None), 'original_height': Value(dtype='int32', id=None), 'original_width': Value(dtype='int32', id=None), 'mime_type': Value(dtype='string', id=None), 'caption_attribution_description': Value(dtype='string', id=None), 'wit_features': Sequence(feature={'language': Value(dtype='string', id=None), 'page_url': Value(dtype='string', id=None), 'attribution_passes_lang_id': Value(dtype='string', id=None), 'caption_alt_text_description': Value(dtype='string', id=None), 'caption_reference_description': Value(dtype='string', id=None), 'caption_title_and_reference_description': Value(dtype='string', id=None), 'context_page_description': Value(dtype='string', id=None), 'context_section_description': Value(dtype='string', id=None), 'hierarchical_section_title': Value(dtype='string', id=None), 'is_main_image': Value(dtype='string', id=None), 'page_changed_recently': Value(dtype='string', id=None), 'page_title': Value(dtype='string', id=None), 'section_title': Value(dtype='string', id=None)}, length=-1, id=None)}, post_processed=None, supervised_keys=None, task_templates=None, builder_name='wit', config_name='default', version=0.0.0, splits=None, download_checksums=None, download_size=None, post_processing_size=None, dataset_size=None, size_in_bytes=None) ``` Should we test if info is empty, and in that case regenerate it? Or always generate it?
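A minimal sketch of the first option, regenerating only when the stored info is empty; `get_or_build_infos` is a hypothetical helper, not part of the library.
```python
from datasets import get_dataset_infos, load_dataset_builder

def get_or_build_infos(path):
    # Fall back to the dataset builder when no dataset info file is
    # shipped with the dataset, so callers always get a non-empty mapping.
    infos = get_dataset_infos(path)
    if not infos:
        builder = load_dataset_builder(path)
        infos = {builder.info.config_name: builder.info}
    return infos
```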
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3013/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3013/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3012
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3012/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3012/comments
https://api.github.com/repos/huggingface/datasets/issues/3012/events
https://github.com/huggingface/datasets/pull/3012
1,014,958,931
PR_kwDODunzps4soRTu
3,012
Replace item with float in metrics
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T09:45:28Z"
"2021-10-04T11:30:34Z"
"2021-10-04T11:30:33Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3012.diff", "html_url": "https://github.com/huggingface/datasets/pull/3012", "merged_at": "2021-10-04T11:30:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/3012.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3012" }
As pointed out by @mariosasko in #3001, calling `float()` instead of `.item()` is faster. Moreover, it might avoid potential issues if any of the third-party functions eventually returns a `float` instead of an `np.float64`. Related to #3001.
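For illustration, `float()` accepts both return types, which is what makes the cast safe:
```python
import numpy as np

# float() handles a plain Python float and a NumPy scalar alike, so it
# stays correct whichever type a third-party metric function returns.
assert float(0.5) == float(np.float64(0.5)) == 0.5
```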
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3012/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3012/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3011
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3011/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3011/comments
https://api.github.com/repos/huggingface/datasets/issues/3011/events
https://github.com/huggingface/datasets/issues/3011
1,014,935,713
I_kwDODunzps48frCh
3,011
load_dataset_builder should error if "name" does not exist?
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co", "id": 3470211881, "name": "dataset-viewer", "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer" } ]
open
false
null
[]
null
1
"2021-10-04T09:20:46Z"
"2022-09-20T13:05:07Z"
null
CONTRIBUTOR
null
null
null
``` import datasets as ds builder = ds.load_dataset_builder('sent_comp', name="doesnotexist") builder.info.config_name ``` returns ``` 'doesnotexist' ``` Shouldn't it raise an error instead? For this dataset, the only valid values for `name` should be `"default"` or `None` (i.e., argument not passed).
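A hedged sketch of the requested behavior as a user-side wrapper; `load_builder_strict` is hypothetical, and the real fix would live inside the library.
```python
import datasets

def load_builder_strict(path, name=None):
    # Reject config names the dataset does not declare, instead of
    # silently accepting them as load_dataset_builder currently does.
    builder = datasets.load_dataset_builder(path, name=name)
    valid = [cfg.name for cfg in builder.BUILDER_CONFIGS] or ["default"]
    if name is not None and name not in valid:
        raise ValueError(f"Config name {name!r} not found; expected one of {valid}")
    return builder
```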
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3011/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3011/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3010
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3010/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3010/comments
https://api.github.com/repos/huggingface/datasets/issues/3010/events
https://github.com/huggingface/datasets/issues/3010
1,014,918,470
I_kwDODunzps48fm1G
3,010
Chain filtering is leaking
{ "avatar_url": "https://avatars.githubusercontent.com/u/22641583?v=4", "events_url": "https://api.github.com/users/DrMatters/events{/privacy}", "followers_url": "https://api.github.com/users/DrMatters/followers", "following_url": "https://api.github.com/users/DrMatters/following{/other_user}", "gists_url": "https://api.github.com/users/DrMatters/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DrMatters", "id": 22641583, "login": "DrMatters", "node_id": "MDQ6VXNlcjIyNjQxNTgz", "organizations_url": "https://api.github.com/users/DrMatters/orgs", "received_events_url": "https://api.github.com/users/DrMatters/received_events", "repos_url": "https://api.github.com/users/DrMatters/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DrMatters/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DrMatters/subscriptions", "type": "User", "url": "https://api.github.com/users/DrMatters" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
4
"2021-10-04T09:04:55Z"
"2022-06-01T17:36:44Z"
"2022-06-01T17:36:44Z"
NONE
null
null
null
## Describe the bug As there's no support for lists within dataset fields, I convert my lists to JSON-string format. However, the bug occurs even when the data format is plain 'string'. The samples below show that filtering behavior diverges from what's expected when filters are chained. In sample 2, the second filter lets data "leak" into the results that should already have been removed by the first filter. ## Steps to reproduce the bug Sample 1: ```python import datasets import json items = [[1, 2], [3], [4]] jsoned_items = map(json.dumps, [[1, 2], [3], [4]]) ds = datasets.Dataset.from_dict({'a': jsoned_items}) print(list(ds)) # > Prints: [{'a': '[1, 2]'}, {'a': '[3]'}, {'a': '[4]'}] as expected filtered = ds # get all lists that are shorter than 2 filtered = filtered.filter(lambda x: len(json.loads(x['a'])) < 2, load_from_cache_file=False) print(list(filtered)) # > Prints: [{'a': '[3]'}, {'a': '[4]'}] as expected # get all lists, which have a value bigger than 3 on its zero index filtered = filtered.filter(lambda x: json.loads(x['a'])[0] > 3, load_from_cache_file=False) print(list(filtered)) # > Should be: [{'a': [4]}] # > Prints: [{'a': [3]}] ``` Sample 2: ```python import datasets import json items = [[1, 2], [3], [4]] jsoned_items = map(json.dumps, [[1, 2], [3], [4]]) ds = datasets.Dataset.from_dict({'a': jsoned_items}) print(list(ds)) # > Prints: [{'a': '[1, 2]'}, {'a': '[3]'}, {'a': '[4]'}] filtered = ds # get all lists, which have a value bigger than 3 on its zero index filtered = filtered.filter(lambda x: json.loads(x['a'])[0] > 3, load_from_cache_file=False) print(list(filtered)) # > Prints: [{'a': '[4]'}] as expected # get all lists that are shorter than 2 filtered = filtered.filter(lambda x: len(json.loads(x['a'])) < 2, load_from_cache_file=False) print(list(filtered)) # > Prints: [{'a': '[1, 2]'}] # > Should be: [{'a': '[4]'}] (remain intact) ``` ## Expected results Expected and actual results are attached to the code snippets. ## Actual results Expected and actual results are attached to the code snippets. ## Environment info - `datasets` version: 1.12.1 - Platform: Windows-10-10.0.19042-SP0 - Python version: 3.9.7 - PyArrow version: 5.0.0
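Until the underlying indices handling is fixed, a possible workaround (assuming `flatten_indices` materializes the current selection into a new table, as its name suggests) is to flatten between the two filters:
```python
import json
import datasets

ds = datasets.Dataset.from_dict({"a": list(map(json.dumps, [[1, 2], [3], [4]]))})

# Materialize the first filter's indices mapping before chaining, so the
# second filter operates on the filtered rows rather than the original table.
step1 = ds.filter(lambda x: len(json.loads(x["a"])) < 2, load_from_cache_file=False).flatten_indices()
step2 = step1.filter(lambda x: json.loads(x["a"])[0] > 3, load_from_cache_file=False)
print(list(step2))  # expected: [{'a': '[4]'}]
```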
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3010/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3010/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3009
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3009/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3009/comments
https://api.github.com/repos/huggingface/datasets/issues/3009/events
https://github.com/huggingface/datasets/pull/3009
1,014,868,235
PR_kwDODunzps4sn_YG
3,009
Fix Windows paths in SUPERB benchmark datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T08:13:49Z"
"2021-10-04T13:43:25Z"
"2021-10-04T13:43:25Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3009.diff", "html_url": "https://github.com/huggingface/datasets/pull/3009", "merged_at": "2021-10-04T13:43:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/3009.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3009" }
Minor fix in SUPERB benchmark datasets for Windows pathname component separator. Related to #2884, #2783 and #2619.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3009/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3009/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3008
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3008/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3008/comments
https://api.github.com/repos/huggingface/datasets/issues/3008/events
https://github.com/huggingface/datasets/pull/3008
1,014,849,163
PR_kwDODunzps4sn7iU
3,008
Fix precision/recall metrics with None average
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T07:54:15Z"
"2021-10-04T09:29:37Z"
"2021-10-04T09:29:36Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3008.diff", "html_url": "https://github.com/huggingface/datasets/pull/3008", "merged_at": "2021-10-04T09:29:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/3008.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3008" }
Related to issue #2979 and PR #2992.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3008/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3008/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3007
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3007/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3007/comments
https://api.github.com/repos/huggingface/datasets/issues/3007/events
https://github.com/huggingface/datasets/pull/3007
1,014,775,450
PR_kwDODunzps4sns-n
3,007
Correct a typo
{ "avatar_url": "https://avatars.githubusercontent.com/u/35955430?v=4", "events_url": "https://api.github.com/users/Yann21/events{/privacy}", "followers_url": "https://api.github.com/users/Yann21/followers", "following_url": "https://api.github.com/users/Yann21/following{/other_user}", "gists_url": "https://api.github.com/users/Yann21/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Yann21", "id": 35955430, "login": "Yann21", "node_id": "MDQ6VXNlcjM1OTU1NDMw", "organizations_url": "https://api.github.com/users/Yann21/orgs", "received_events_url": "https://api.github.com/users/Yann21/received_events", "repos_url": "https://api.github.com/users/Yann21/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Yann21/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Yann21/subscriptions", "type": "User", "url": "https://api.github.com/users/Yann21" }
[]
closed
false
null
[]
null
0
"2021-10-04T06:15:47Z"
"2021-10-04T09:27:57Z"
"2021-10-04T09:27:57Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3007.diff", "html_url": "https://github.com/huggingface/datasets/pull/3007", "merged_at": "2021-10-04T09:27:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/3007.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3007" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3007/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3007/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3006
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3006/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3006/comments
https://api.github.com/repos/huggingface/datasets/issues/3006/events
https://github.com/huggingface/datasets/pull/3006
1,014,770,821
PR_kwDODunzps4snsBm
3,006
Fix Windows paths in CommonLanguage dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-04T06:08:58Z"
"2021-10-04T09:07:58Z"
"2021-10-04T09:07:58Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3006.diff", "html_url": "https://github.com/huggingface/datasets/pull/3006", "merged_at": "2021-10-04T09:07:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/3006.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3006" }
Minor fix in CommonLanguage dataset for Windows pathname component separator. Related to #2989.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3006/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3006/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3005
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3005/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3005/comments
https://api.github.com/repos/huggingface/datasets/issues/3005/events
https://github.com/huggingface/datasets/issues/3005
1,014,615,420
I_kwDODunzps48ec18
3,005
DatasetDict.filter and Dataset.filter crashes with any "fn_kwargs" argument
{ "avatar_url": "https://avatars.githubusercontent.com/u/22641583?v=4", "events_url": "https://api.github.com/users/DrMatters/events{/privacy}", "followers_url": "https://api.github.com/users/DrMatters/followers", "following_url": "https://api.github.com/users/DrMatters/following{/other_user}", "gists_url": "https://api.github.com/users/DrMatters/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DrMatters", "id": 22641583, "login": "DrMatters", "node_id": "MDQ6VXNlcjIyNjQxNTgz", "organizations_url": "https://api.github.com/users/DrMatters/orgs", "received_events_url": "https://api.github.com/users/DrMatters/received_events", "repos_url": "https://api.github.com/users/DrMatters/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DrMatters/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DrMatters/subscriptions", "type": "User", "url": "https://api.github.com/users/DrMatters" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
2
"2021-10-04T00:49:29Z"
"2021-10-11T10:18:01Z"
"2021-10-04T08:46:13Z"
NONE
null
null
null
## Describe the bug The ".filter" method of DatasetDict or Dataset objects fails when passing any "fn_kwargs" argument. ## Steps to reproduce the bug ```python import datasets example_dataset = datasets.Dataset.from_dict({"a": [1, 2, 3, 4]}) def filter_value(example, value): return example['a'] == value filtered = example_dataset.filter(filter_value, fn_kwargs={'value': 3}) ``` ## Expected results `filtered` is a dataset containing the single row {'a': 3} ## Actual results > Traceback (most recent call last): > File "C:\Users\qsemi\Documents\git\nlp_experiments\gpt_celebrity\src\test_faulty_filter.py", line 8, in <module> > filtered = example_dataset.filter(filter_value, fn_kwargs={'value': 3}) > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\arrow_dataset.py", line 185, in wrapper > out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\fingerprint.py", line 398, in wrapper > out = func(self, *args, **kwargs) > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\arrow_dataset.py", line 2169, in filter > indices = self.map( > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\arrow_dataset.py", line 1686, in map > return self._map_single( > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\arrow_dataset.py", line 185, in wrapper > out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\fingerprint.py", line 398, in wrapper > out = func(self, *args, **kwargs) > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\arrow_dataset.py", line 2048, in _map_single > batch = apply_function_on_filtered_inputs( > File "C:\Users\qsemi\miniconda3\envs\main\lib\site-packages\datasets\arrow_dataset.py", line 1939, in apply_function_on_filtered_inputs > function(*fn_args, effective_indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs) > TypeError: get_indices_from_mask_function() got an unexpected keyword argument 'value' ## Environment info - `datasets` version: 1.12.1 - Platform: Windows-10-10.0.19042-SP0 - Python version: 3.9.7 - PyArrow version: 5.0.0
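Until `fn_kwargs` is forwarded correctly, one workaround sketch is to bind the extra argument yourself, e.g. with `functools.partial`:
```python
import functools
import datasets

example_dataset = datasets.Dataset.from_dict({"a": [1, 2, 3, 4]})

def filter_value(example, value):
    return example["a"] == value

# Bind `value` into the predicate instead of passing fn_kwargs, which
# sidesteps the broken keyword forwarding inside filter().
filtered = example_dataset.filter(functools.partial(filter_value, value=3))
print(list(filtered))  # [{'a': 3}]
```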
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3005/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3005/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3004
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3004/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3004/comments
https://api.github.com/repos/huggingface/datasets/issues/3004/events
https://github.com/huggingface/datasets/pull/3004
1,014,336,617
PR_kwDODunzps4smfPF
3,004
LexGLUE: A Benchmark Dataset for Legal Language Understanding in English.
{ "avatar_url": "https://avatars.githubusercontent.com/u/1626984?v=4", "events_url": "https://api.github.com/users/iliaschalkidis/events{/privacy}", "followers_url": "https://api.github.com/users/iliaschalkidis/followers", "following_url": "https://api.github.com/users/iliaschalkidis/following{/other_user}", "gists_url": "https://api.github.com/users/iliaschalkidis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/iliaschalkidis", "id": 1626984, "login": "iliaschalkidis", "node_id": "MDQ6VXNlcjE2MjY5ODQ=", "organizations_url": "https://api.github.com/users/iliaschalkidis/orgs", "received_events_url": "https://api.github.com/users/iliaschalkidis/received_events", "repos_url": "https://api.github.com/users/iliaschalkidis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/iliaschalkidis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iliaschalkidis/subscriptions", "type": "User", "url": "https://api.github.com/users/iliaschalkidis" }
[]
closed
false
null
[]
null
4
"2021-10-03T10:03:25Z"
"2021-10-13T13:37:02Z"
"2021-10-13T13:37:01Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3004.diff", "html_url": "https://github.com/huggingface/datasets/pull/3004", "merged_at": "2021-10-13T13:37:01Z", "patch_url": "https://github.com/huggingface/datasets/pull/3004.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3004" }
Inspired by the recent widespread use of the GLUE multi-task benchmark NLP dataset (Wang et al., 2018), the subsequent more difficult SuperGLUE (Wang et al., 2019), other previous multi-task NLP benchmarks (Conneau and Kiela, 2018; McCann et al., 2018), and similar initiatives in other domains (Peng et al., 2019), we introduce the Legal General Language Understanding Evaluation (LexGLUE) benchmark, a dataset to evaluate the performance of NLP methods in legal tasks. LexGLUE is based on seven existing legal NLP datasets, selected using criteria largely drawn from SuperGLUE. As in GLUE and SuperGLUE (Wang et al., 2019b,a), one of our goals is to push towards generic (or ‘foundation’) models that can cope with multiple NLP tasks, in our case legal NLP tasks, possibly with limited task-specific fine-tuning. Another goal is to provide a convenient and informative entry point for NLP researchers and practitioners wishing to explore or develop methods for legal NLP. Having these goals in mind, the datasets we include in LexGLUE and the tasks they address have been simplified in several ways to make it easier for newcomers and generic models to address all tasks. The LexGLUE benchmark is accompanied by experimental infrastructure that relies on the Hugging Face Transformers library and resides at: https://github.com/coastalcph/lex-glue.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 1, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3004/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3004/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3003
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3003/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3003/comments
https://api.github.com/repos/huggingface/datasets/issues/3003/events
https://github.com/huggingface/datasets/pull/3003
1,014,137,933
PR_kwDODunzps4smExP
3,003
common_language: Fix license in README.md
{ "avatar_url": "https://avatars.githubusercontent.com/u/227350?v=4", "events_url": "https://api.github.com/users/jimregan/events{/privacy}", "followers_url": "https://api.github.com/users/jimregan/followers", "following_url": "https://api.github.com/users/jimregan/following{/other_user}", "gists_url": "https://api.github.com/users/jimregan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jimregan", "id": 227350, "login": "jimregan", "node_id": "MDQ6VXNlcjIyNzM1MA==", "organizations_url": "https://api.github.com/users/jimregan/orgs", "received_events_url": "https://api.github.com/users/jimregan/received_events", "repos_url": "https://api.github.com/users/jimregan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jimregan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jimregan/subscriptions", "type": "User", "url": "https://api.github.com/users/jimregan" }
[]
closed
false
null
[]
null
0
"2021-10-02T18:47:37Z"
"2021-10-04T09:27:01Z"
"2021-10-04T09:27:01Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3003.diff", "html_url": "https://github.com/huggingface/datasets/pull/3003", "merged_at": "2021-10-04T09:27:01Z", "patch_url": "https://github.com/huggingface/datasets/pull/3003.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3003" }
...it's correct elsewhere
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3003/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3003/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3002
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3002/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3002/comments
https://api.github.com/repos/huggingface/datasets/issues/3002/events
https://github.com/huggingface/datasets/pull/3002
1,014,120,524
PR_kwDODunzps4smCNO
3,002
Remove a reference to the open Arrow file when deleting a TF dataset created with to_tf_dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
2
"2021-10-02T17:44:09Z"
"2021-10-13T11:48:00Z"
"2021-10-13T09:03:23Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3002.diff", "html_url": "https://github.com/huggingface/datasets/pull/3002", "merged_at": "2021-10-13T09:03:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/3002.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3002" }
This [comment](https://github.com/huggingface/datasets/issues/2934#issuecomment-922970919) explains the issue. This PR fixes that with a `weakref` callback, and additionally: * renames `TensorflowDatasetMixIn` to `TensorflowDatasetMixin` for consistency * correctly indents `TensorflowDatasetMixin`'s docstring * replaces `tf.data.AUTOTUNE` with `tf.data.experimental.AUTOTUNE` (we support TF>=2.2 according to the [setup.py](https://github.com/huggingface/datasets/blob/fc46bba66ba4f432cc10501c16a677112e13984c/setup.py#L188) and `AUTOTUNE` has been moved to the experimental part of `tf.data` in 1.X if I'm not mistaken) Fixes #2934
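To illustrate the `weakref` callback idea in isolation (a standalone sketch, not the PR's actual code):
```python
import weakref

class ArrowReader:
    """Stand-in for the object that keeps the Arrow file open."""
    def close(self):
        print("Arrow file handle released")

def attach_cleanup(dataset_obj, reader):
    # weakref.finalize keeps itself alive until dataset_obj is garbage
    # collected, then runs the cleanup exactly once -- so the TF dataset
    # no longer pins the Arrow file for the life of the process.
    weakref.finalize(dataset_obj, reader.close)

class FakeTfDataset:  # any weakref-able object works for the demo
    pass

ds, reader = FakeTfDataset(), ArrowReader()
attach_cleanup(ds, reader)
del ds  # prints "Arrow file handle released"
```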
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3002/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3002/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3001
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3001/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3001/comments
https://api.github.com/repos/huggingface/datasets/issues/3001/events
https://github.com/huggingface/datasets/pull/3001
1,014,024,982
PR_kwDODunzps4sl0BY
3,001
Fix cast to Python scalar in Matthews Correlation metric
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
0
"2021-10-02T11:44:59Z"
"2021-10-04T09:54:04Z"
"2021-10-04T09:26:12Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3001.diff", "html_url": "https://github.com/huggingface/datasets/pull/3001", "merged_at": "2021-10-04T09:26:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/3001.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3001" }
This PR is motivated by issue #2964. The Matthews Correlation metric relies on sklearn's `matthews_corrcoef` function to compute the result. This function returns either `float` or `np.float64` (see the [source](https://github.com/scikit-learn/scikit-learn/blob/844b4be24d20fc42cc13b957374c718956a0db39/sklearn/metrics/_classification.py#L906-L909)). Obviously, calling `.item()` on the float value will fail, so I'm fixing this with the built-in `float()` function, which covers both cases. Surprisingly, on my machine, casting `np.float64` to a Python scalar with `float()` is even faster than with the `.item()` method.
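A micro-benchmark sketch of the speed claim; absolute numbers depend on the machine, only the relative order matters.
```python
import timeit
import numpy as np

x = np.float64(0.5)
print("float():", timeit.timeit(lambda: float(x)))  # cast via built-in
print(".item():", timeit.timeit(lambda: x.item()))  # cast via method call
```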
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3001/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3001/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3000
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3000/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3000/comments
https://api.github.com/repos/huggingface/datasets/issues/3000/events
https://github.com/huggingface/datasets/pull/3000
1,013,613,219
PR_kwDODunzps4skusL
3,000
Fix json loader when conversion not implemented
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2021-10-01T17:47:22Z"
"2021-10-01T18:05:00Z"
"2021-10-01T17:54:23Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3000.diff", "html_url": "https://github.com/huggingface/datasets/pull/3000", "merged_at": "2021-10-01T17:54:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/3000.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3000" }
Sometimes the Arrow JSON parser fails if the `block_size` is too small and returns an `ArrowNotImplementedError: JSON conversion to struct...` error. Increasing the block size makes it work again. Hopefully this helps with https://github.com/huggingface/datasets/issues/2799. I tried with the file mentioned in the issue and it worked for me. cc @lewtun can you try again from this branch?
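A hedged sketch of the retry idea; the function name, defaults, and growth policy are assumptions for illustration, not the PR's actual values.
```python
import pyarrow as pa
import pyarrow.json as paj

def read_json_growing(path, block_size=1 << 20, max_block_size=1 << 30):
    # Double the Arrow JSON reader's block size until the parser stops
    # raising the "JSON conversion to struct" error described above.
    while block_size <= max_block_size:
        try:
            return paj.read_json(path, read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowNotImplementedError:
            block_size *= 2
    raise ValueError("JSON parsing failed even at the maximum block size")
```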
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/3000/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3000/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2999
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2999/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2999/comments
https://api.github.com/repos/huggingface/datasets/issues/2999/events
https://github.com/huggingface/datasets/pull/2999
1,013,536,933
PR_kwDODunzps4skgCm
2,999
Set trivia_qa writer batch size
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
0
"2021-10-01T16:23:26Z"
"2021-10-01T16:34:55Z"
"2021-10-01T16:34:55Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2999.diff", "html_url": "https://github.com/huggingface/datasets/pull/2999", "merged_at": "2021-10-01T16:34:55Z", "patch_url": "https://github.com/huggingface/datasets/pull/2999.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2999" }
Save some RAM when generating trivia_qa
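For context, generator-based builders control this through a class attribute; below is a sketch with an illustrative (not the PR's actual) value.
```python
import datasets

class TriviaQaSketch(datasets.GeneratorBasedBuilder):
    # A smaller writer batch size makes the ArrowWriter flush to disk
    # more often, so fewer examples are buffered in RAM at once.
    DEFAULT_WRITER_BATCH_SIZE = 3000  # illustrative value

    def _info(self):
        return datasets.DatasetInfo(features=datasets.Features({"text": datasets.Value("string")}))

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _generate_examples(self):
        yield 0, {"text": "example"}
```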
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2999/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2999/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2998
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2998/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2998/comments
https://api.github.com/repos/huggingface/datasets/issues/2998/events
https://github.com/huggingface/datasets/issues/2998
1,013,372,871
I_kwDODunzps48ZtfH
2,998
cannot shuffle dataset loaded from disk
{ "avatar_url": "https://avatars.githubusercontent.com/u/54274249?v=4", "events_url": "https://api.github.com/users/pya25/events{/privacy}", "followers_url": "https://api.github.com/users/pya25/followers", "following_url": "https://api.github.com/users/pya25/following{/other_user}", "gists_url": "https://api.github.com/users/pya25/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pya25", "id": 54274249, "login": "pya25", "node_id": "MDQ6VXNlcjU0Mjc0MjQ5", "organizations_url": "https://api.github.com/users/pya25/orgs", "received_events_url": "https://api.github.com/users/pya25/received_events", "repos_url": "https://api.github.com/users/pya25/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pya25/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pya25/subscriptions", "type": "User", "url": "https://api.github.com/users/pya25" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
0
"2021-10-01T13:49:52Z"
"2021-10-01T13:49:52Z"
null
NONE
null
null
null
## Describe the bug A dataset loaded from disk cannot be shuffled. ## Steps to reproduce the bug ``` my_dataset = load_from_disk('s3://my_file/validate', fs=s3) sample = my_dataset.select(range(100)).shuffle(seed=1234) ``` ## Actual results ``` sample = my_dataset .select(range(100)).shuffle(seed=1234) File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 185, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/site-packages/datasets/fingerprint.py", line 398, in wrapper out = func(self, *args, **kwargs) File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2494, in shuffle new_fingerprint=new_fingerprint, File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 185, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/site-packages/datasets/fingerprint.py", line 398, in wrapper out = func(self, *args, **kwargs) File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2303, in select tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False) File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/tempfile.py", line 547, in NamedTemporaryFile (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type) File "/home/ubuntu/anaconda3/envs/pytorch_p37/lib/python3.7/tempfile.py", line 258, in _mkstemp_inner fd = _os.open(file, flags, 0o600) FileNotFoundError: [Errno 2] No such file or directory: '/tmp/tmpnnu5uhnx/my_file/validate/tmpy76d70g4' ``` ## Environment info - `datasets` version: 1.12.1 - Python version: 3.7 - PyArrow version: 5.0.0
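Possible workarounds while this stands, reusing the snippet's `my_dataset` and assuming the standard `shuffle` signature: keep the shuffle indices in memory, or point the indices cache at a local path, so `datasets` does not try to create a temp file inside the S3 URI.
```python
# Keep the new indices in memory instead of writing them next to the remote files:
sample = my_dataset.select(range(100)).shuffle(seed=1234, keep_in_memory=True)

# Or direct the indices cache to an explicit local file:
sample = my_dataset.select(range(100)).shuffle(
    seed=1234, indices_cache_file_name="/tmp/validate_shuffle_indices.arrow"
)
```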
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2998/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2998/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2997
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2997/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2997/comments
https://api.github.com/repos/huggingface/datasets/issues/2997/events
https://github.com/huggingface/datasets/issues/2997
1,013,270,069
I_kwDODunzps48ZUY1
2,997
Dataset has incorrect labels
{ "avatar_url": "https://avatars.githubusercontent.com/u/63367770?v=4", "events_url": "https://api.github.com/users/marshmellow77/events{/privacy}", "followers_url": "https://api.github.com/users/marshmellow77/followers", "following_url": "https://api.github.com/users/marshmellow77/following{/other_user}", "gists_url": "https://api.github.com/users/marshmellow77/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/marshmellow77", "id": 63367770, "login": "marshmellow77", "node_id": "MDQ6VXNlcjYzMzY3Nzcw", "organizations_url": "https://api.github.com/users/marshmellow77/orgs", "received_events_url": "https://api.github.com/users/marshmellow77/received_events", "repos_url": "https://api.github.com/users/marshmellow77/repos", "site_admin": false, "starred_url": "https://api.github.com/users/marshmellow77/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/marshmellow77/subscriptions", "type": "User", "url": "https://api.github.com/users/marshmellow77" }
[]
closed
false
null
[]
null
3
"2021-10-01T12:09:06Z"
"2021-10-01T15:32:00Z"
"2021-10-01T13:54:34Z"
NONE
null
null
null
The dataset https://huggingface.co/datasets/turkish_product_reviews has incorrect labels - all reviews are labelled with "1" (positive sentiment). None of the reviews is labelled with "0". See screenshot attached: ![Capture](https://user-images.githubusercontent.com/63367770/135617428-14ce0b27-5208-4e66-a3ee-71542e3257b4.PNG)
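For reference, the report can be reproduced by counting the labels directly; a minimal sketch, assuming the split and column names (`train`, `sentiment`) used by this dataset:

```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("turkish_product_reviews", split="train")
# A healthy binary sentiment dataset should show both 0 and 1 here.
print(Counter(ds["sentiment"]))
```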
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2997/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2997/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2996
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2996/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2996/comments
https://api.github.com/repos/huggingface/datasets/issues/2996/events
https://github.com/huggingface/datasets/pull/2996
1,013,266,373
PR_kwDODunzps4sjrP6
2,996
Remove all query parameters when extracting protocol
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
4
"2021-10-01T12:05:34Z"
"2021-10-04T08:48:13Z"
"2021-10-04T08:48:13Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2996.diff", "html_url": "https://github.com/huggingface/datasets/pull/2996", "merged_at": "2021-10-04T08:48:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/2996.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2996" }
Fix `_get_extraction_protocol` to remove all query parameters, like `?raw=true`, `?dl=1`,...
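For illustration, the stripping step could look like the sketch below; this is only the idea, not the actual `_get_extraction_protocol` implementation:

```python
from urllib.parse import urlparse, urlunparse

def strip_query(url: str) -> str:
    # Rebuild the URL with empty params/query/fragment so that a path like
    # "data.csv.gz?dl=1" ends in ".csv.gz" again and the compression protocol
    # can be inferred from the file extension.
    parsed = urlparse(url)
    return urlunparse(parsed._replace(params="", query="", fragment=""))

assert strip_query("https://host/data.csv.gz?dl=1").endswith(".csv.gz")
assert strip_query("https://host/archive.zip?raw=true").endswith(".zip")
```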
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2996/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2996/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2995
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2995/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2995/comments
https://api.github.com/repos/huggingface/datasets/issues/2995/events
https://github.com/huggingface/datasets/pull/2995
1,013,143,868
PR_kwDODunzps4sjThd
2,995
Fix trivia_qa unfiltered
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
1
"2021-10-01T09:53:43Z"
"2021-10-01T10:04:11Z"
"2021-10-01T10:04:10Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2995.diff", "html_url": "https://github.com/huggingface/datasets/pull/2995", "merged_at": "2021-10-01T10:04:10Z", "patch_url": "https://github.com/huggingface/datasets/pull/2995.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2995" }
Fix https://github.com/huggingface/datasets/issues/2993
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2995/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2995/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2994
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2994/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2994/comments
https://api.github.com/repos/huggingface/datasets/issues/2994/events
https://github.com/huggingface/datasets/pull/2994
1,013,000,475
PR_kwDODunzps4si4I2
2,994
Fix loading compressed CSV without streaming
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-10-01T07:28:59Z"
"2021-10-01T15:53:16Z"
"2021-10-01T15:53:16Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2994.diff", "html_url": "https://github.com/huggingface/datasets/pull/2994", "merged_at": "2021-10-01T15:53:15Z", "patch_url": "https://github.com/huggingface/datasets/pull/2994.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2994" }
When support for streaming CSV files was implemented (https://github.com/huggingface/datasets/commit/ad489d4597381fc2d12c77841642cbeaecf7a2e0#diff-6f60f8d0552b75be8b3bfd09994480fd60dcd4e7eb08d02f721218c3acdd2782), a regression was introduced that prevented loading compressed CSV files in non-streaming mode. This PR fixes it, allowing compressed/uncompressed CSV files to be loaded in both streaming and non-streaming mode. Fix #2977.
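After this fix, both modes should work on the same compressed file; a usage sketch, assuming a hypothetical local gzip-compressed CSV:

```python
from datasets import load_dataset

data_files = {"train": "data/train.csv.gz"}  # hypothetical path

# Non-streaming: the file is decompressed and cached as an Arrow table.
ds = load_dataset("csv", data_files=data_files, split="train")

# Streaming: rows are decompressed and read on the fly.
ds_streamed = load_dataset("csv", data_files=data_files, split="train", streaming=True)
```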
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2994/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2994/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2993
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2993/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2993/comments
https://api.github.com/repos/huggingface/datasets/issues/2993/events
https://github.com/huggingface/datasets/issues/2993
1,012,702,665
I_kwDODunzps48XJ3J
2,993
Can't download `trivia_qa/unfiltered`
{ "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VictorSanh", "id": 16107619, "login": "VictorSanh", "node_id": "MDQ6VXNlcjE2MTA3NjE5", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "repos_url": "https://api.github.com/users/VictorSanh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "type": "User", "url": "https://api.github.com/users/VictorSanh" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
3
"2021-09-30T23:00:18Z"
"2021-10-01T19:07:23Z"
"2021-10-01T19:07:22Z"
MEMBER
null
null
null
## Describe the bug For some reason, I can't download `trivia_qa/unfiltered`. A file seems to be missing... I am able to see it fine through the viewer though... ## Steps to reproduce the bug ```python >>> from datasets import load_dataset >>> load_dataset("trivia_qa", "unfiltered") Downloading and preparing dataset trivia_qa/unfiltered (download: 3.07 GiB, generated: 27.23 GiB, post-processed: Unknown size, total: 30.30 GiB) to /gpfsscratch/rech/six/commun/datasets/trivia_qa/unfiltered/1.1.0/9977a5d6f72acfd92f587de052403e8138b43bb0d1ce595016c3baf7e14deba6... Traceback (most recent call last): File "/gpfswork/rech/six/commun/modules/datasets_modules/datasets/trivia_qa/9977a5d6f72acfd92f587de052403e8138b43bb0d1ce595016c3baf7e14deba6/trivia_qa.py", line 251, in _add_context with open(os.path.join(file_dir, fname), encoding="utf-8") as f: FileNotFoundError: [Errno 2] No such file or directory: '/gpfsscratch/rech/six/commun/datasets/downloads/extracted/9fcb7eddc6afd46fd074af3c5128931dfe4b548f933c925a23847faf4c1995ad/evidence/wikipedia/Peanuts.txt' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/gpfswork/rech/six/commun/conda/victor/lib/python3.7/site-packages/datasets/load.py", line 852, in load_dataset use_auth_token=use_auth_token, File "/gpfswork/rech/six/commun/conda/victor/lib/python3.7/site-packages/datasets/builder.py", line 616, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/gpfswork/rech/six/commun/conda/victor/lib/python3.7/site-packages/datasets/builder.py", line 693, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/gpfswork/rech/six/commun/conda/victor/lib/python3.7/site-packages/datasets/builder.py", line 1107, in _prepare_split disable=bool(logging.get_verbosity() == logging.NOTSET), File "/gpfswork/rech/six/commun/conda/victor/lib/python3.7/site-packages/tqdm/std.py", line 1133, in __iter__ for obj in iterable: File "/gpfswork/rech/six/commun/modules/datasets_modules/datasets/trivia_qa/9977a5d6f72acfd92f587de052403e8138b43bb0d1ce595016c3baf7e14deba6/trivia_qa.py", line 303, in _generate_examples example = parse_example(article) File "/gpfswork/rech/six/commun/modules/datasets_modules/datasets/trivia_qa/9977a5d6f72acfd92f587de052403e8138b43bb0d1ce595016c3baf7e14deba6/trivia_qa.py", line 274, in parse_example _add_context(article.get("EntityPages", []), "WikiContext", wiki_dir), File "/gpfswork/rech/six/commun/modules/datasets_modules/datasets/trivia_qa/9977a5d6f72acfd92f587de052403e8138b43bb0d1ce595016c3baf7e14deba6/trivia_qa.py", line 253, in _add_context except (IOError, datasets.Value("errors").NotFoundError): File "<string>", line 5, in __init__ File "/gpfswork/rech/six/commun/conda/victor/lib/python3.7/site-packages/datasets/features.py", line 265, in __post_init__ self.pa_type = string_to_arrow(self.dtype) File "/gpfswork/rech/six/commun/conda/victor/lib/python3.7/site-packages/datasets/features.py", line 134, in string_to_arrow f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. " ValueError: Neither errors nor errors_ seems to be a pyarrow data type. Please make sure to use a correct data type, see: https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions ``` ## Expected results I am able to load another subset (`rc`), but unable to load this one. I am not sure why the try/except doesn't catch it... 
https://github.com/huggingface/datasets/blob/9675a5a1e7b99a86f9c250f6ea5fa5d1e6d5cc7d/datasets/trivia_qa/trivia_qa.py#L253 ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.11.0 - Platform: Linux-4.18.0-147.51.2.el8_1.x86_64-x86_64-with-redhat-8.1-Ootpa - Python version: 3.7.10 - PyArrow version: 3.0.0
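For what it's worth, `datasets.Value("errors")` is not a valid feature dtype, which is why the except clause itself raises a `ValueError` before the `FileNotFoundError` can be handled. A sketch of what a corrected handler might look like (simplified, with a hypothetical signature; the actual fix is in #2995):

```python
import os

def _add_context(file_dir: str, fname: str) -> str:
    try:
        with open(os.path.join(file_dir, fname), encoding="utf-8") as f:
            return f.read()
    except IOError:
        # FileNotFoundError is a subclass of IOError/OSError: some evidence
        # files are missing, so fall back to an empty context instead of
        # crashing the generation loop.
        return ""
```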
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2993/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2993/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2992
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2992/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2992/comments
https://api.github.com/repos/huggingface/datasets/issues/2992/events
https://github.com/huggingface/datasets/pull/2992
1,012,325,594
PR_kwDODunzps4sg4ZP
2,992
Fix f1 metric with None average
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-09-30T15:31:57Z"
"2021-10-01T14:17:39Z"
"2021-10-01T14:17:38Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2992.diff", "html_url": "https://github.com/huggingface/datasets/pull/2992", "merged_at": "2021-10-01T14:17:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/2992.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2992" }
Fix #2979.
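For context, a usage sketch of the case being fixed; with `average=None`, scikit-learn returns one F1 score per class instead of a single aggregated value:

```python
from datasets import load_metric

f1 = load_metric("f1")
# average=None is forwarded to sklearn's f1_score and yields a per-class array.
scores = f1.compute(predictions=[0, 1, 2, 0], references=[0, 2, 1, 0], average=None)
print(scores)
```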
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2992/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2992/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2991
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2991/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2991/comments
https://api.github.com/repos/huggingface/datasets/issues/2991/events
https://github.com/huggingface/datasets/issues/2991
1,012,174,823
I_kwDODunzps48VI_n
2,991
add documentation for the `Unix style pattern` matching feature that can be leveraged for `data_files` in `load_dataset`
{ "avatar_url": "https://avatars.githubusercontent.com/u/55560583?v=4", "events_url": "https://api.github.com/users/SaulLu/events{/privacy}", "followers_url": "https://api.github.com/users/SaulLu/followers", "following_url": "https://api.github.com/users/SaulLu/following{/other_user}", "gists_url": "https://api.github.com/users/SaulLu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SaulLu", "id": 55560583, "login": "SaulLu", "node_id": "MDQ6VXNlcjU1NTYwNTgz", "organizations_url": "https://api.github.com/users/SaulLu/orgs", "received_events_url": "https://api.github.com/users/SaulLu/received_events", "repos_url": "https://api.github.com/users/SaulLu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SaulLu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SaulLu/subscriptions", "type": "User", "url": "https://api.github.com/users/SaulLu" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
0
"2021-09-30T13:22:01Z"
"2021-09-30T13:22:01Z"
null
CONTRIBUTOR
null
null
null
Unless I'm mistaken, it seems that in the new documentation it is no longer mentioned that you can use Unix style pattern matching in the `data_files` argument of the `load_dataset` method. This feature was mentioned [here](https://huggingface.co/docs/datasets/loading_datasets.html#from-a-community-dataset-on-the-hugging-face-hub) in the previous documentation. I'd love to hear your opinion @lhoestq, @albertvillanova and @stevhliu
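For reference, the feature in question; a minimal sketch with hypothetical shard names:

```python
from datasets import load_dataset

# Unix-style wildcards are resolved against the local directory or the
# files of a dataset repository on the Hub.
dataset = load_dataset(
    "json",
    data_files={"train": "data/train-*.jsonl", "test": "data/test-*.jsonl"},
)
```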
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2991/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2991/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2990
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2990/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2990/comments
https://api.github.com/repos/huggingface/datasets/issues/2990/events
https://github.com/huggingface/datasets/pull/2990
1,012,097,418
PR_kwDODunzps4sgLt5
2,990
Make Dataset.map accept list of np.array
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-09-30T12:08:54Z"
"2021-10-01T13:57:46Z"
"2021-10-01T13:57:46Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2990.diff", "html_url": "https://github.com/huggingface/datasets/pull/2990", "merged_at": "2021-10-01T13:57:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/2990.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2990" }
Fix #2987.
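For context, a sketch of the pattern this enables: a batched `map` function whose output column is a list of `np.array` values:

```python
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3]})

# Each row of the new column is a NumPy array; previously the writer raised
# ArrowInvalid ("Can only convert 1-dimensional array values") on such lists.
ds = ds.map(lambda batch: {"vec": [np.ones(4) * v for v in batch["x"]]}, batched=True)
```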
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2990/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2990/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2989
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2989/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2989/comments
https://api.github.com/repos/huggingface/datasets/issues/2989/events
https://github.com/huggingface/datasets/pull/2989
1,011,220,375
PR_kwDODunzps4sdlt1
2,989
Add CommonLanguage
{ "avatar_url": "https://avatars.githubusercontent.com/u/26864830?v=4", "events_url": "https://api.github.com/users/anton-l/events{/privacy}", "followers_url": "https://api.github.com/users/anton-l/followers", "following_url": "https://api.github.com/users/anton-l/following{/other_user}", "gists_url": "https://api.github.com/users/anton-l/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/anton-l", "id": 26864830, "login": "anton-l", "node_id": "MDQ6VXNlcjI2ODY0ODMw", "organizations_url": "https://api.github.com/users/anton-l/orgs", "received_events_url": "https://api.github.com/users/anton-l/received_events", "repos_url": "https://api.github.com/users/anton-l/repos", "site_admin": false, "starred_url": "https://api.github.com/users/anton-l/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/anton-l/subscriptions", "type": "User", "url": "https://api.github.com/users/anton-l" }
[]
closed
false
null
[]
null
0
"2021-09-29T17:21:30Z"
"2021-10-01T17:36:39Z"
"2021-10-01T17:00:03Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2989.diff", "html_url": "https://github.com/huggingface/datasets/pull/2989", "merged_at": "2021-10-01T17:00:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/2989.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2989" }
This PR adds the Common Language dataset (https://zenodo.org/record/5036977). The dataset is intended for language-identification speech classifiers and is already used by models on the Hub: * https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa * https://huggingface.co/anton-l/wav2vec2-base-langid cc @patrickvonplaten
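Once merged, loading should be a one-liner; a usage sketch, assuming the dataset id ends up as `common_language`:

```python
from datasets import load_dataset

# Dataset id and split name are assumptions based on this PR.
ds = load_dataset("common_language", split="train")
print(ds.features)
```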
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 1, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2989/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2989/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2988
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2988/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2988/comments
https://api.github.com/repos/huggingface/datasets/issues/2988/events
https://github.com/huggingface/datasets/issues/2988
1,011,148,017
I_kwDODunzps48ROTx
2,988
IndexError: Invalid key: 14 is out of bounds for size 0
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
13
"2021-09-29T16:04:24Z"
"2022-04-10T14:49:49Z"
"2022-04-10T14:49:49Z"
NONE
null
null
null
## Describe the bug Hi. I am trying to implement a stochastic weight averaging (SWA) optimizer with the transformers library, as described here: https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/. For this I am using the run_clm.py script, which works fine before adding the SWA optimizer. The moment I wrap the model with `swa_model = AveragedModel(model)` in this script, I get the error below. Since I am NOT touching the dataloader part, I am confused why this occurs. I would very much appreciate your opinion on this @lhoestq ## Steps to reproduce the bug ``` Traceback (most recent call last): File "run_clm.py", line 723, in <module> main() File "run_clm.py", line 669, in main train_result = trainer.train(resume_from_checkpoint=checkpoint) File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/transformers/trainer.py", line 1258, in train for step, inputs in enumerate(epoch_iterator): File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 435, in __next__ data = self._next_data() File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 475, in _next_data data = self._dataset_fetcher.fetch(index) # may raise StopIteration File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch data = [self.dataset[idx] for idx in possibly_batched_index] File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp> data = [self.dataset[idx] for idx in possibly_batched_index] File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 1530, in __getitem__ format_kwargs=self._format_kwargs, File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 1517, in _getitem pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None) File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets/formatting/formatting.py", line 368, in query_table _check_valid_index_key(key, size) File "/user/dara/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets/formatting/formatting.py", line 311, in _check_valid_index_key raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") IndexError: Invalid key: 14 is out of bounds for size 0 ``` ## Expected results Not getting the index error. ## Actual results Please see above. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: datasets 1.12.1 - Platform: linux - Python version: 3.7.11 - PyArrow version: 5.0.0
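One plausible explanation (an assumption, not confirmed in this thread): `Trainer` prunes dataset columns based on the model's `forward()` signature, and `AveragedModel.forward` only exposes `*args, **kwargs`, so every column gets dropped and the table ends up with size 0. If that is the cause, disabling the pruning should sidestep the `IndexError`; a sketch:

```python
from transformers import TrainingArguments

# remove_unused_columns defaults to True; with a wrapped model whose forward()
# has no named arguments, it would remove every dataset column (hypothesis).
training_args = TrainingArguments(output_dir="out", remove_unused_columns=False)
```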
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2988/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2988/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2987
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2987/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2987/comments
https://api.github.com/repos/huggingface/datasets/issues/2987/events
https://github.com/huggingface/datasets/issues/2987
1,011,026,141
I_kwDODunzps48Qwjd
2,987
ArrowInvalid: Can only convert 1-dimensional array values
{ "avatar_url": "https://avatars.githubusercontent.com/u/48327001?v=4", "events_url": "https://api.github.com/users/NielsRogge/events{/privacy}", "followers_url": "https://api.github.com/users/NielsRogge/followers", "following_url": "https://api.github.com/users/NielsRogge/following{/other_user}", "gists_url": "https://api.github.com/users/NielsRogge/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/NielsRogge", "id": 48327001, "login": "NielsRogge", "node_id": "MDQ6VXNlcjQ4MzI3MDAx", "organizations_url": "https://api.github.com/users/NielsRogge/orgs", "received_events_url": "https://api.github.com/users/NielsRogge/received_events", "repos_url": "https://api.github.com/users/NielsRogge/repos", "site_admin": false, "starred_url": "https://api.github.com/users/NielsRogge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NielsRogge/subscriptions", "type": "User", "url": "https://api.github.com/users/NielsRogge" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1
"2021-09-29T14:18:52Z"
"2021-10-01T13:57:45Z"
"2021-10-01T13:57:45Z"
CONTRIBUTOR
null
null
null
## Describe the bug For the ViT and LayoutLMv2 demo notebooks in my [Transformers-Tutorials repo](https://github.com/NielsRogge/Transformers-Tutorials), people reported an ArrowInvalid issue after applying the following function to a Dataset: ``` def preprocess_data(examples): images = [Image.open(path).convert("RGB") for path in examples['image_path']] words = examples['words'] boxes = examples['bboxes'] word_labels = examples['ner_tags'] encoded_inputs = processor(images, words, boxes=boxes, word_labels=word_labels, padding="max_length", truncation=True) return encoded_inputs ``` ``` Full trace: --------------------------------------------------------------------------- ArrowInvalid Traceback (most recent call last) <ipython-input-8-0fc3efc6f0c2> in <module>() 27 28 train_dataset = datasets['train'].map(preprocess_data, batched=True, remove_columns=datasets['train'].column_names, ---> 29 features=features) 30 test_dataset = datasets['test'].map(preprocess_data, batched=True, remove_columns=datasets['test'].column_names, 31 features=features) 13 frames /usr/local/lib/python3.7/dist-packages/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc) 1701 new_fingerprint=new_fingerprint, 1702 disable_tqdm=disable_tqdm, -> 1703 desc=desc, 1704 ) 1705 else: /usr/local/lib/python3.7/dist-packages/datasets/arrow_dataset.py in wrapper(*args, **kwargs) 183 } 184 # apply actual function --> 185 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 186 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 187 # re-apply format to the output /usr/local/lib/python3.7/dist-packages/datasets/fingerprint.py in wrapper(*args, **kwargs) 396 # Call actual function 397 --> 398 out = func(self, *args, **kwargs) 399 400 # Update fingerprint of in-place transforms + update in-place history of transforms /usr/local/lib/python3.7/dist-packages/datasets/arrow_dataset.py in _map_single(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, disable_tqdm, desc, cache_only) 2063 writer.write_table(batch) 2064 else: -> 2065 writer.write_batch(batch) 2066 if update_data and writer is not None: 2067 writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file /usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in write_batch(self, batch_examples, writer_batch_size) 409 typed_sequence = OptimizedTypedSequence(batch_examples[col], type=col_type, try_type=col_try_type, col=col) 410 typed_sequence_examples[col] = typed_sequence --> 411 pa_table = pa.Table.from_pydict(typed_sequence_examples) 412 self.write_table(pa_table, writer_batch_size) 413 /usr/local/lib/python3.7/dist-packages/pyarrow/table.pxi in pyarrow.lib.Table.from_pydict() /usr/local/lib/python3.7/dist-packages/pyarrow/array.pxi in pyarrow.lib.asarray() /usr/local/lib/python3.7/dist-packages/pyarrow/array.pxi in pyarrow.lib.array() /usr/local/lib/python3.7/dist-packages/pyarrow/array.pxi in pyarrow.lib._handle_arrow_array_protocol() /usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in __arrow_array__(self, type) 106 storage = 
numpy_to_pyarrow_listarray(self.data, type=type.value_type) 107 else: --> 108 storage = pa.array(self.data, type.storage_dtype) 109 out = pa.ExtensionArray.from_storage(type, storage) 110 elif isinstance(self.data, np.ndarray): /usr/local/lib/python3.7/dist-packages/pyarrow/array.pxi in pyarrow.lib.array() /usr/local/lib/python3.7/dist-packages/pyarrow/array.pxi in pyarrow.lib._sequence_to_array() /usr/local/lib/python3.7/dist-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status() /usr/local/lib/python3.7/dist-packages/pyarrow/error.pxi in pyarrow.lib.check_status() ArrowInvalid: Can only convert 1-dimensional array values ``` It can be fixed by adding the following line: ```diff def preprocess_data(examples): images = [Image.open(path).convert("RGB") for path in examples['image_path']] words = examples['words'] boxes = examples['bboxes'] word_labels = examples['ner_tags'] encoded_inputs = processor(images, words, boxes=boxes, word_labels=word_labels, padding="max_length", truncation=True) + encoded_inputs["image"] = np.array(encoded_inputs["image"]) return encoded_inputs ``` However, it would be great if this could be fixed within Datasets itself.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2987/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2987/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2986
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2986/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2986/comments
https://api.github.com/repos/huggingface/datasets/issues/2986/events
https://github.com/huggingface/datasets/pull/2986
1,010,792,783
PR_kwDODunzps4scSHR
2,986
Refac module factory + avoid etag requests for hub datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
6
"2021-09-29T10:42:00Z"
"2021-10-11T11:05:53Z"
"2021-10-11T11:05:52Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2986.diff", "html_url": "https://github.com/huggingface/datasets/pull/2986", "merged_at": "2021-10-11T11:05:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/2986.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2986" }
## Refactor the module factory When trying to extend the `data_files` logic to avoid doing unnecessary ETag requests, I noticed that the module preparation mechanism needed a refactor: - the function was 600 lines long - it was not readable - it contained many different cases that made it complex to maintain - it was hard to properly test it - it was hard to extend without breaking anything The module preparation mechanism is in charge of taking the name of a dataset or a metric given by the user (ex: "squad", "accuracy", "lhoestq/test", "path/to/my/script.py", "path/to/my/data/directory", "json", "csv") and returning a module (possibly downloaded from the Hub) that contains the dataset builder or the metric class to use. ### Implementation details I decided to separate all these use cases into different dataset/metric module factories. First, the metric module factories: - **CanonicalMetricModuleFactory**: "accuracy", "rouge", ... - **LocalMetricModuleFactory**: "path/to/my/metric.py" Then, the dataset module factories: - **CanonicalDatasetModuleFactory**: "squad", "glue", ... - **CommunityDatasetModuleFactoryWithScript**: "lhoestq/test" - **CommunityDatasetModuleFactoryWithoutScript**: "lhoestq/demo1" - **PackagedDatasetModuleFactory**: "json", "csv", ... - **LocalDatasetModuleFactoryWithScript**: "path/to/my/script.py" - **LocalDatasetModuleFactoryWithoutScript**: "path/to/my/data/directory" And finally, additional factories when users have no internet: - **CachedDatasetModuleFactory** - **CachedMetricModuleFactory** ### Breaking changes One thing is that I still don't know to what extent we want to keep backward compatibility for `prepare_module`. For now I just kept it (except I removed two parameters) just in case, but it's not used anywhere anymore. ## Avoid etag requests for hub datasets To do this I added a class `DataFilesDict` that can be hashed to define the cache directory of the dataset. It contains the usual data files formatted as `{"train": ["train.txt"]}` for example. But each list of files is a `DataFilesList` that also has an `origin_metadata` attribute that contains metadata about the origin of each file: - for URLs: it stores the ETags of the files - for local files: it stores the last modification date - for files from a Hugging Face repository on the Hub: it stores the pattern (`*`, `*.csv`, "train.txt", etc.) and the commit sha of the repository (so there are no ETag requests!) This way if any file changes, the hash of the `DataFilesDict` changes too! You can instantiate a `DataFilesDict` by using patterns for local/remote files or files in a HF repository: - for local/remote files: `DataFilesDict.from_local_or_remote(patterns)` - for files in a HF repository: `DataFilesDict.from_hf_repo(patterns, dataset_info)` Fix #2859 ## TODO Fix the latest test: - [x] fix the call to dataset_info in offline mode (related to https://github.com/huggingface/huggingface_hub/issues/372) Add some more tests: - [x] test all the factories - [x] test the new data files logic Other: - [x] docstrings - [x] comments
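For illustration, a sketch of how the new API might be exercised, assuming the constructors keep the shapes described above (hypothetical file names):

```python
from datasets.data_files import DataFilesDict

# Resolve patterns into concrete files plus origin metadata (ETags for URLs,
# last modification dates for local files) so the result can be hashed into
# the dataset's cache directory name.
data_files = DataFilesDict.from_local_or_remote(
    {"train": ["https://example.com/train.csv"], "test": ["test.csv"]}
)
print(data_files["train"].origin_metadata)
```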
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 2, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/2986/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2986/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2985
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2985/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2985/comments
https://api.github.com/repos/huggingface/datasets/issues/2985/events
https://github.com/huggingface/datasets/pull/2985
1,010,500,433
PR_kwDODunzps4sbbbo
2,985
add new dataset kan_hope
{ "avatar_url": "https://avatars.githubusercontent.com/u/46108405?v=4", "events_url": "https://api.github.com/users/adeepH/events{/privacy}", "followers_url": "https://api.github.com/users/adeepH/followers", "following_url": "https://api.github.com/users/adeepH/following{/other_user}", "gists_url": "https://api.github.com/users/adeepH/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/adeepH", "id": 46108405, "login": "adeepH", "node_id": "MDQ6VXNlcjQ2MTA4NDA1", "organizations_url": "https://api.github.com/users/adeepH/orgs", "received_events_url": "https://api.github.com/users/adeepH/received_events", "repos_url": "https://api.github.com/users/adeepH/repos", "site_admin": false, "starred_url": "https://api.github.com/users/adeepH/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/adeepH/subscriptions", "type": "User", "url": "https://api.github.com/users/adeepH" }
[]
closed
false
null
[]
null
0
"2021-09-29T05:20:28Z"
"2021-10-01T16:55:19Z"
"2021-10-01T16:55:19Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2985.diff", "html_url": "https://github.com/huggingface/datasets/pull/2985", "merged_at": "2021-10-01T16:55:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/2985.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2985" }
## Adding a Dataset - **Name:** *KanHope* - **Description:** *A code-mixed English-Kannada dataset for Hope speech detection* - **Task:** *Binary Text Classification* - **Paper:** *https://arxiv.org/abs/2108.04616* - **Data:** *https://github.com/adeepH/kan_hope/tree/main/dataset* - **Motivation:** *The dataset is amongst the very few resources available for code-mixed low-resourced Dravidian languages of India*
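Once merged, a usage sketch (dataset id assumed from this PR):

```python
from datasets import load_dataset

# Binary hope-speech detection in code-mixed English-Kannada text.
ds = load_dataset("kan_hope", split="train")
print(ds[0])
```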
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2985/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2985/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2984
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2984/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2984/comments
https://api.github.com/repos/huggingface/datasets/issues/2984/events
https://github.com/huggingface/datasets/issues/2984
1,010,484,326
I_kwDODunzps48OsRm
2,984
Exceeded maximum rows when reading large files
{ "avatar_url": "https://avatars.githubusercontent.com/u/25057983?v=4", "events_url": "https://api.github.com/users/zijwang/events{/privacy}", "followers_url": "https://api.github.com/users/zijwang/followers", "following_url": "https://api.github.com/users/zijwang/following{/other_user}", "gists_url": "https://api.github.com/users/zijwang/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/zijwang", "id": 25057983, "login": "zijwang", "node_id": "MDQ6VXNlcjI1MDU3OTgz", "organizations_url": "https://api.github.com/users/zijwang/orgs", "received_events_url": "https://api.github.com/users/zijwang/received_events", "repos_url": "https://api.github.com/users/zijwang/repos", "site_admin": false, "starred_url": "https://api.github.com/users/zijwang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zijwang/subscriptions", "type": "User", "url": "https://api.github.com/users/zijwang" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1
"2021-09-29T04:49:22Z"
"2021-10-12T06:05:42Z"
"2021-10-12T06:05:42Z"
NONE
null
null
null
## Describe the bug When using `load_dataset` with JSON files, if the files are too large, an "Exceeded maximum rows" error is raised. ## Steps to reproduce the bug ```python dataset = load_dataset('json', data_files=data_files) # data files have 3M rows in a single file ``` ## Expected results No error ## Actual results ``` ~/anaconda3/envs/python/lib/python3.9/site-packages/datasets/packaged_modules/json/json.py in _generate_tables(self, files) 134 with open(file, encoding="utf-8") as f: --> 135 dataset = json.load(f) 136 except json.JSONDecodeError: ~/anaconda3/envs/python/lib/python3.9/json/__init__.py in load(fp, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw) 292 """ --> 293 return loads(fp.read(), 294 cls=cls, object_hook=object_hook, ~/anaconda3/envs/python/lib/python3.9/json/__init__.py in loads(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw) 345 parse_constant is None and object_pairs_hook is None and not kw): --> 346 return _default_decoder.decode(s) 347 if cls is None: ~/anaconda3/envs/python/lib/python3.9/json/decoder.py in decode(self, s, _w) 339 if end != len(s): --> 340 raise JSONDecodeError("Extra data", s, end) 341 return obj JSONDecodeError: Extra data: line 2 column 1 (char 20321) During handling of the above exception, another exception occurred: ArrowInvalid Traceback (most recent call last) <ipython-input-20-ab3718a6482f> in <module> ----> 1 dataset = load_dataset('json', data_files=data_files) ~/anaconda3/envs/python/lib/python3.9/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, streaming, **config_kwargs) 841 842 # Download and prepare data --> 843 builder_instance.download_and_prepare( 844 download_config=download_config, 845 download_mode=download_mode, ~/anaconda3/envs/python/lib/python3.9/site-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 606 logger.warning("HF google storage unreachable. Downloading and preparing it from source") 607 if not downloaded_from_gcs: --> 608 self._download_and_prepare( 609 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 610 ) ~/anaconda3/envs/python/lib/python3.9/site-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 684 try: 685 # Prepare split will record examples associated to the split --> 686 self._prepare_split(split_generator, **prepare_split_kwargs) 687 except OSError as e: 688 raise OSError( ~/anaconda3/envs/python/lib/python3.9/site-packages/datasets/builder.py in _prepare_split(self, split_generator) 1153 generator = self._generate_tables(**split_generator.gen_kwargs) 1154 with ArrowWriter(features=self.info.features, path=fpath) as writer: -> 1155 for key, table in utils.tqdm( 1156 generator, unit=" tables", leave=False, disable=bool(logging.get_verbosity() == logging.NOTSET) 1157 ): ~/anaconda3/envs/python/lib/python3.9/site-packages/datasets/packaged_modules/json/json.py in _generate_tables(self, files) 135 dataset = json.load(f) 136 except json.JSONDecodeError: --> 137 raise e 138 raise ValueError( 139 f"Not able to read records in the JSON file at {file}. 
" ~/anaconda3/envs/python/lib/python3.9/site-packages/datasets/packaged_modules/json/json.py in _generate_tables(self, files) 114 while True: 115 try: --> 116 pa_table = paj.read_json( 117 BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) 118 ) ~/anaconda3/envs/python/lib/python3.9/site-packages/pyarrow/_json.pyx in pyarrow._json.read_json() ~/anaconda3/envs/python/lib/python3.9/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status() ~/anaconda3/envs/python/lib/python3.9/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status() ArrowInvalid: Exceeded maximum rows ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: - Platform: Linux - Python version: 3.9 - PyArrow version: 4.0.1
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2984/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2984/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2983
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2983/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2983/comments
https://api.github.com/repos/huggingface/datasets/issues/2983/events
https://github.com/huggingface/datasets/pull/2983
1,010,263,058
PR_kwDODunzps4saw_v
2,983
added SwissJudgmentPrediction dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/3775944?v=4", "events_url": "https://api.github.com/users/JoelNiklaus/events{/privacy}", "followers_url": "https://api.github.com/users/JoelNiklaus/followers", "following_url": "https://api.github.com/users/JoelNiklaus/following{/other_user}", "gists_url": "https://api.github.com/users/JoelNiklaus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JoelNiklaus", "id": 3775944, "login": "JoelNiklaus", "node_id": "MDQ6VXNlcjM3NzU5NDQ=", "organizations_url": "https://api.github.com/users/JoelNiklaus/orgs", "received_events_url": "https://api.github.com/users/JoelNiklaus/received_events", "repos_url": "https://api.github.com/users/JoelNiklaus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JoelNiklaus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JoelNiklaus/subscriptions", "type": "User", "url": "https://api.github.com/users/JoelNiklaus" }
[]
closed
false
null
[]
null
0
"2021-09-28T22:17:56Z"
"2021-10-01T16:03:05Z"
"2021-10-01T16:03:05Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2983.diff", "html_url": "https://github.com/huggingface/datasets/pull/2983", "merged_at": "2021-10-01T16:03:05Z", "patch_url": "https://github.com/huggingface/datasets/pull/2983.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2983" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2983/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2983/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2982
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2982/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2982/comments
https://api.github.com/repos/huggingface/datasets/issues/2982/events
https://github.com/huggingface/datasets/pull/2982
1,010,118,418
PR_kwDODunzps4saVLh
2,982
Add the Math Aptitude Test of Heuristics dataset.
{ "avatar_url": "https://avatars.githubusercontent.com/u/91226467?v=4", "events_url": "https://api.github.com/users/hacobe/events{/privacy}", "followers_url": "https://api.github.com/users/hacobe/followers", "following_url": "https://api.github.com/users/hacobe/following{/other_user}", "gists_url": "https://api.github.com/users/hacobe/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hacobe", "id": 91226467, "login": "hacobe", "node_id": "MDQ6VXNlcjkxMjI2NDY3", "organizations_url": "https://api.github.com/users/hacobe/orgs", "received_events_url": "https://api.github.com/users/hacobe/received_events", "repos_url": "https://api.github.com/users/hacobe/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hacobe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hacobe/subscriptions", "type": "User", "url": "https://api.github.com/users/hacobe" }
[]
closed
false
null
[]
null
0
"2021-09-28T19:18:37Z"
"2021-10-01T19:51:23Z"
"2021-10-01T12:21:00Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2982.diff", "html_url": "https://github.com/huggingface/datasets/pull/2982", "merged_at": "2021-10-01T12:21:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/2982.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2982" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2982/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2982/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2981
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2981/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2981/comments
https://api.github.com/repos/huggingface/datasets/issues/2981/events
https://github.com/huggingface/datasets/pull/2981
1,009,969,310
PR_kwDODunzps4sZ4ke
2,981
add wit dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nateraw", "id": 32437151, "login": "nateraw", "node_id": "MDQ6VXNlcjMyNDM3MTUx", "organizations_url": "https://api.github.com/users/nateraw/orgs", "received_events_url": "https://api.github.com/users/nateraw/received_events", "repos_url": "https://api.github.com/users/nateraw/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "type": "User", "url": "https://api.github.com/users/nateraw" }
[]
closed
false
null
[]
null
5
"2021-09-28T16:34:49Z"
"2022-05-05T14:26:41Z"
"2022-05-05T14:26:41Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2981.diff", "html_url": "https://github.com/huggingface/datasets/pull/2981", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/2981.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2981" }
Resolves #2902 based on conversation there - would also close #2810. Open to suggestions/help 😀 CC @hassiahk @lhoestq @yjernite
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 2, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2981/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2981/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2980
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2980/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2980/comments
https://api.github.com/repos/huggingface/datasets/issues/2980/events
https://github.com/huggingface/datasets/issues/2980
1,009,873,482
I_kwDODunzps48MXJK
2,980
OpenSLR 25: ASR data for Amharic, Swahili and Wolof
{ "avatar_url": "https://avatars.githubusercontent.com/u/4109253?v=4", "events_url": "https://api.github.com/users/cdleong/events{/privacy}", "followers_url": "https://api.github.com/users/cdleong/followers", "following_url": "https://api.github.com/users/cdleong/following{/other_user}", "gists_url": "https://api.github.com/users/cdleong/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cdleong", "id": 4109253, "login": "cdleong", "node_id": "MDQ6VXNlcjQxMDkyNTM=", "organizations_url": "https://api.github.com/users/cdleong/orgs", "received_events_url": "https://api.github.com/users/cdleong/received_events", "repos_url": "https://api.github.com/users/cdleong/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cdleong/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cdleong/subscriptions", "type": "User", "url": "https://api.github.com/users/cdleong" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
open
false
null
[]
null
3
"2021-09-28T15:04:36Z"
"2021-09-29T17:25:14Z"
null
CONTRIBUTOR
null
null
null
## Adding a Dataset - **Name:** *SLR25* - **Description:** *Subset 25 from OpenSLR. Other subsets have already been added to https://huggingface.co/datasets/openslr; subset 25 covers Amharic, Swahili and Wolof data.* - **Paper:** *https://www.openslr.org/25/ has citations for each of the three subsets.* - **Data:** *Currently, the three links to the .tar.bz2 files can be found at https://www.openslr.org/25/.* - **Motivation:** *Increase ASR data for underrepresented African languages. Also, other subsets of OpenSLR speech recognition data have already been uploaded, so this would be easy.* https://github.com/huggingface/datasets/blob/master/datasets/openslr/openslr.py has already been created for various other OpenSLR subsets, so this should be relatively straightforward to do.
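Assuming subset 25 follows the pattern of the subsets already in the `openslr` loader, usage after it lands would presumably look like the snippet below; the `SLR25` config name is hypothetical until the subset is actually added:

```python
from datasets import load_dataset

# Hypothetical once the subset is added; "SLR25" is not a real config yet
slr25 = load_dataset("openslr", "SLR25")
```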
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2980/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2980/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2979
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2979/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2979/comments
https://api.github.com/repos/huggingface/datasets/issues/2979/events
https://github.com/huggingface/datasets/issues/2979
1,009,634,147
I_kwDODunzps48Lctj
2,979
ValueError when computing f1 metric with average None
{ "avatar_url": "https://avatars.githubusercontent.com/u/74454835?v=4", "events_url": "https://api.github.com/users/asofiaoliveira/events{/privacy}", "followers_url": "https://api.github.com/users/asofiaoliveira/followers", "following_url": "https://api.github.com/users/asofiaoliveira/following{/other_user}", "gists_url": "https://api.github.com/users/asofiaoliveira/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/asofiaoliveira", "id": 74454835, "login": "asofiaoliveira", "node_id": "MDQ6VXNlcjc0NDU0ODM1", "organizations_url": "https://api.github.com/users/asofiaoliveira/orgs", "received_events_url": "https://api.github.com/users/asofiaoliveira/received_events", "repos_url": "https://api.github.com/users/asofiaoliveira/repos", "site_admin": false, "starred_url": "https://api.github.com/users/asofiaoliveira/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/asofiaoliveira/subscriptions", "type": "User", "url": "https://api.github.com/users/asofiaoliveira" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1
"2021-09-28T11:34:53Z"
"2021-10-01T14:17:38Z"
"2021-10-01T14:17:38Z"
CONTRIBUTOR
null
null
null
## Describe the bug When I try to compute the f1 score for each class in a multiclass classification problem, I get a ValueError. The same happens with recall and precision. I traced the error to the `.item()` in these scripts, which is probably there for the other averages. E.g., from f1.py: ```python return { "f1": f1_score( references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, ).item(), } ``` Since the result is an array with more than one item, `.item()` throws the error. I didn't submit a PR because `.item()` might be needed for the other averages, and I'm not very familiar with the library. ## Steps to reproduce the bug ```python from datasets import load_metric metric = load_metric("f1") metric.add_batch(predictions=[2,34,1,34,1,2,3], references=[23,52,1,3,523,5,8]) metric.compute(average=None) ``` ## Expected results `array([0.66666667, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ])` ## Actual results ValueError: can only convert an array of size 1 to a Python scalar ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.12.1 - Platform: Windows-10-10.0.19041-SP0 - Python version: 3.9.5 - PyArrow version: 5.0.0
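A minimal sketch of the fix the reporter suggests, assuming the metric wraps `sklearn.metrics.f1_score` as shown above: convert scalar results to a plain float, but return the per-class array produced by `average=None` as a list (the function name and signature here are illustrative, not the metric script's API):

```python
# Hedged sketch: only averaged results are scalars; average=None yields an
# array of per-class scores, which .item() cannot convert.
import numpy as np
from sklearn.metrics import f1_score

def compute_f1(predictions, references, average="binary", **kwargs):
    score = f1_score(references, predictions, average=average, **kwargs)
    if isinstance(score, np.ndarray):  # average=None: one score per class
        return {"f1": score.tolist()}
    return {"f1": float(score)}
```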
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2979/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2979/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2978
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2978/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2978/comments
https://api.github.com/repos/huggingface/datasets/issues/2978/events
https://github.com/huggingface/datasets/issues/2978
1,009,521,419
I_kwDODunzps48LBML
2,978
Run CI tests against non-production server
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
open
false
null
[]
null
2
"2021-09-28T09:41:26Z"
"2021-09-28T15:23:50Z"
null
MEMBER
null
null
null
Currently, the CI test suite performs requests to the HF production server. As discussed with @elishowk, we should refactor our tests to use the HF staging server instead, like `huggingface_hub` and `transformers`.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2978/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2978/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2977
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2977/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2977/comments
https://api.github.com/repos/huggingface/datasets/issues/2977/events
https://github.com/huggingface/datasets/issues/2977
1,009,378,692
I_kwDODunzps48KeWE
2,977
Impossible to load compressed csv
{ "avatar_url": "https://avatars.githubusercontent.com/u/19476123?v=4", "events_url": "https://api.github.com/users/Valahaar/events{/privacy}", "followers_url": "https://api.github.com/users/Valahaar/followers", "following_url": "https://api.github.com/users/Valahaar/following{/other_user}", "gists_url": "https://api.github.com/users/Valahaar/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Valahaar", "id": 19476123, "login": "Valahaar", "node_id": "MDQ6VXNlcjE5NDc2MTIz", "organizations_url": "https://api.github.com/users/Valahaar/orgs", "received_events_url": "https://api.github.com/users/Valahaar/received_events", "repos_url": "https://api.github.com/users/Valahaar/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Valahaar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Valahaar/subscriptions", "type": "User", "url": "https://api.github.com/users/Valahaar" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1
"2021-09-28T07:18:54Z"
"2021-10-01T15:53:16Z"
"2021-10-01T15:53:15Z"
CONTRIBUTOR
null
null
null
## Describe the bug It is not possible to load a compressed CSV file anymore. ## Steps to reproduce the bug ```python load_dataset('csv', data_files=['/path/to/csv.bz2']) ``` ## Problem and possible solution This used to work, but the commit that broke it is [this one](https://github.com/huggingface/datasets/commit/ad489d4597381fc2d12c77841642cbeaecf7a2e0#diff-6f60f8d0552b75be8b3bfd09994480fd60dcd4e7eb08d02f721218c3acdd2782). `pandas` usually gets the compression information from the filename itself (which was previously passed directly). Now, since it gets a file descriptor, it might be good to auto-infer the compression or let the user pass the `compression` kwarg to `load_dataset` (or maybe warn the user if the file ends with a commonly known compression extension?). ## Environment info - `datasets` version: 1.10.0 (and later) - Platform: Linux-5.8.0-45-generic-x86_64-with-glibc2.17 - Python version: 3.8.10 - PyArrow version: 3.0.0
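A minimal sketch of the auto-inference idea suggested above (an assumption, not the merged fix): map well-known extensions to pandas' `compression` kwarg, since pandas cannot infer compression from a bare file object the way it can from a path:

```python
import pandas as pd

# Extensions and their pandas compression names (illustrative mapping)
EXTENSION_TO_COMPRESSION = {".bz2": "bz2", ".gz": "gzip", ".xz": "xz", ".zip": "zip"}

def read_csv_inferring_compression(path, **kwargs):
    # Derive the compression from the file name, then pass it explicitly,
    # because a file object carries no name for pandas to infer from
    compression = next(
        (c for ext, c in EXTENSION_TO_COMPRESSION.items() if path.endswith(ext)), None
    )
    with open(path, "rb") as f:
        return pd.read_csv(f, compression=compression, **kwargs)
```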
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2977/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2977/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2976
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2976/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2976/comments
https://api.github.com/repos/huggingface/datasets/issues/2976/events
https://github.com/huggingface/datasets/issues/2976
1,008,647,889
I_kwDODunzps48Hr7R
2,976
Can't load dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/77006774?v=4", "events_url": "https://api.github.com/users/mskovalova/events{/privacy}", "followers_url": "https://api.github.com/users/mskovalova/followers", "following_url": "https://api.github.com/users/mskovalova/following{/other_user}", "gists_url": "https://api.github.com/users/mskovalova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mskovalova", "id": 77006774, "login": "mskovalova", "node_id": "MDQ6VXNlcjc3MDA2Nzc0", "organizations_url": "https://api.github.com/users/mskovalova/orgs", "received_events_url": "https://api.github.com/users/mskovalova/received_events", "repos_url": "https://api.github.com/users/mskovalova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mskovalova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mskovalova/subscriptions", "type": "User", "url": "https://api.github.com/users/mskovalova" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
2
"2021-09-27T21:38:14Z"
"2022-12-01T09:12:29Z"
"2021-09-28T06:53:01Z"
NONE
null
null
null
I'm trying to load a wikitext dataset ``` from datasets import load_dataset raw_datasets = load_dataset("wikitext") ``` ValueError: Config name is missing. Please pick one among the available configs: ['wikitext-103-raw-v1', 'wikitext-2-raw-v1', 'wikitext-103-v1', 'wikitext-2-v1'] Example of usage: `load_dataset('wikitext', 'wikitext-103-raw-v1')`. If I try ``` from datasets import load_dataset raw_datasets = load_dataset("wikitext-2-v1") ``` FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.12.1/datasets/wikitext-2-v1/wikitext-2-v1.py #### Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.12.1 - Platform: Linux-5.4.104+-x86_64-with-Ubuntu-18.04-bionic (colab) - Python version: 3.7.12 - PyArrow version: 3.0.0
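Both failures above share one root cause: `wikitext-2-v1` is a config of the `wikitext` dataset, not a dataset name, so it must be passed as the second argument, exactly as the first error message suggests:

```python
from datasets import load_dataset

# Pass the dataset name and the config name separately
raw_datasets = load_dataset("wikitext", "wikitext-2-v1")
```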
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2976/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2976/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2975
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2975/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2975/comments
https://api.github.com/repos/huggingface/datasets/issues/2975/events
https://github.com/huggingface/datasets/pull/2975
1,008,444,654
PR_kwDODunzps4sVAOt
2,975
ignore dummy folder and dataset_infos.json
{ "avatar_url": "https://avatars.githubusercontent.com/u/46553104?v=4", "events_url": "https://api.github.com/users/Ishan-Kumar2/events{/privacy}", "followers_url": "https://api.github.com/users/Ishan-Kumar2/followers", "following_url": "https://api.github.com/users/Ishan-Kumar2/following{/other_user}", "gists_url": "https://api.github.com/users/Ishan-Kumar2/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Ishan-Kumar2", "id": 46553104, "login": "Ishan-Kumar2", "node_id": "MDQ6VXNlcjQ2NTUzMTA0", "organizations_url": "https://api.github.com/users/Ishan-Kumar2/orgs", "received_events_url": "https://api.github.com/users/Ishan-Kumar2/received_events", "repos_url": "https://api.github.com/users/Ishan-Kumar2/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Ishan-Kumar2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ishan-Kumar2/subscriptions", "type": "User", "url": "https://api.github.com/users/Ishan-Kumar2" }
[]
closed
false
null
[]
null
0
"2021-09-27T18:09:03Z"
"2021-09-29T09:45:38Z"
"2021-09-29T09:05:38Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2975.diff", "html_url": "https://github.com/huggingface/datasets/pull/2975", "merged_at": "2021-09-29T09:05:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/2975.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2975" }
Fixes #2877 Added `dataset_infos.json` to the ignored files list and also added a check to ignore files whose parent directory is `dummy`. Let me know if it is correct. Thanks :)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2975/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2975/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2974
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2974/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2974/comments
https://api.github.com/repos/huggingface/datasets/issues/2974/events
https://github.com/huggingface/datasets/pull/2974
1,008,247,787
PR_kwDODunzps4sUZCX
2,974
Actually disable dummy labels by default
{ "avatar_url": "https://avatars.githubusercontent.com/u/12866554?v=4", "events_url": "https://api.github.com/users/Rocketknight1/events{/privacy}", "followers_url": "https://api.github.com/users/Rocketknight1/followers", "following_url": "https://api.github.com/users/Rocketknight1/following{/other_user}", "gists_url": "https://api.github.com/users/Rocketknight1/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Rocketknight1", "id": 12866554, "login": "Rocketknight1", "node_id": "MDQ6VXNlcjEyODY2NTU0", "organizations_url": "https://api.github.com/users/Rocketknight1/orgs", "received_events_url": "https://api.github.com/users/Rocketknight1/received_events", "repos_url": "https://api.github.com/users/Rocketknight1/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Rocketknight1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Rocketknight1/subscriptions", "type": "User", "url": "https://api.github.com/users/Rocketknight1" }
[]
closed
false
null
[]
null
0
"2021-09-27T14:50:20Z"
"2021-09-29T09:04:42Z"
"2021-09-29T09:04:41Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2974.diff", "html_url": "https://github.com/huggingface/datasets/pull/2974", "merged_at": "2021-09-29T09:04:41Z", "patch_url": "https://github.com/huggingface/datasets/pull/2974.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2974" }
So I might have just changed the docstring instead of the actual default argument value and not realized. @lhoestq I'm sorry >.>
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2974/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2974/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2973
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2973/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2973/comments
https://api.github.com/repos/huggingface/datasets/issues/2973/events
https://github.com/huggingface/datasets/pull/2973
1,007,894,592
PR_kwDODunzps4sTRvk
2,973
Fix JSON metadata of masakhaner dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
0
"2021-09-27T09:09:08Z"
"2021-09-27T12:59:59Z"
"2021-09-27T12:59:59Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2973.diff", "html_url": "https://github.com/huggingface/datasets/pull/2973", "merged_at": "2021-09-27T12:59:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/2973.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2973" }
Fix #2971.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2973/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2973/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2972
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2972/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2972/comments
https://api.github.com/repos/huggingface/datasets/issues/2972/events
https://github.com/huggingface/datasets/issues/2972
1,007,808,714
I_kwDODunzps48EfDK
2,972
OSError: Not enough disk space.
{ "avatar_url": "https://avatars.githubusercontent.com/u/24835382?v=4", "events_url": "https://api.github.com/users/qqaatw/events{/privacy}", "followers_url": "https://api.github.com/users/qqaatw/followers", "following_url": "https://api.github.com/users/qqaatw/following{/other_user}", "gists_url": "https://api.github.com/users/qqaatw/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/qqaatw", "id": 24835382, "login": "qqaatw", "node_id": "MDQ6VXNlcjI0ODM1Mzgy", "organizations_url": "https://api.github.com/users/qqaatw/orgs", "received_events_url": "https://api.github.com/users/qqaatw/received_events", "repos_url": "https://api.github.com/users/qqaatw/repos", "site_admin": false, "starred_url": "https://api.github.com/users/qqaatw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/qqaatw/subscriptions", "type": "User", "url": "https://api.github.com/users/qqaatw" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
5
"2021-09-27T07:41:22Z"
"2022-08-29T23:21:36Z"
"2021-09-28T06:43:15Z"
CONTRIBUTOR
null
null
null
## Describe the bug I'm trying to download the `natural_questions` dataset from the Internet, and I've specified a cache_dir that is located on a mounted disk with enough disk space. However, even though there is enough space, the disk space check still reports that the root `/` disk does not have enough space. The file system structure is shown below. The root `/` has `115G` of disk space available, and `sda1` is mounted at `/mnt`, which has `1.2T` of disk space available: ``` / /mnt/sda1/path/to/args.dataset_cache_dir ``` ## Steps to reproduce the bug ```python dataset_config = DownloadConfig( cache_dir=os.path.abspath(args.dataset_cache_dir), resume_download=True, ) dataset = load_dataset("natural_questions", download_config=dataset_config) ``` ## Expected results The dataset downloads without an error. ## Actual results The following error is raised: ``` OSError: Not enough disk space. Needed: 134.92 GiB (download: 41.97 GiB, generated: 92.95 GiB, post-processed: Unknown size) ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.0 - Platform: Ubuntu 18.04 - Python version: 3.8.10 - PyArrow version:
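A sketch of the check the report implies: free space should be measured on the filesystem that actually backs the target directory, not on `/`. The function name and fallback behavior here are assumptions for illustration, not the shipped fix:

```python
import os
import shutil

def has_sufficient_disk_space(needed_bytes, directory="."):
    # Probe the filesystem backing `directory` (e.g. a mount under /mnt), not "/"
    try:
        free = shutil.disk_usage(os.path.abspath(directory)).free
    except OSError:
        return True  # if the probe itself fails, don't block the download
    return needed_bytes <= free
```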
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2972/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2972/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2971
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2971/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2971/comments
https://api.github.com/repos/huggingface/datasets/issues/2971/events
https://github.com/huggingface/datasets/issues/2971
1,007,696,522
I_kwDODunzps48EDqK
2,971
masakhaner dataset load problem
{ "avatar_url": "https://avatars.githubusercontent.com/u/8900094?v=4", "events_url": "https://api.github.com/users/ontocord/events{/privacy}", "followers_url": "https://api.github.com/users/ontocord/followers", "following_url": "https://api.github.com/users/ontocord/following{/other_user}", "gists_url": "https://api.github.com/users/ontocord/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ontocord", "id": 8900094, "login": "ontocord", "node_id": "MDQ6VXNlcjg5MDAwOTQ=", "organizations_url": "https://api.github.com/users/ontocord/orgs", "received_events_url": "https://api.github.com/users/ontocord/received_events", "repos_url": "https://api.github.com/users/ontocord/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ontocord/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ontocord/subscriptions", "type": "User", "url": "https://api.github.com/users/ontocord" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1
"2021-09-27T04:59:07Z"
"2021-09-27T12:59:59Z"
"2021-09-27T12:59:59Z"
CONTRIBUTOR
null
null
null
## Describe the bug The masakhaner dataset does not load. ## Steps to reproduce the bug ``` from datasets import load_dataset dataset = load_dataset("masakhaner",'amh') ``` ## Expected results A dataset to be returned ## Actual results ``` NonMatchingSplitsSizesError Traceback (most recent call last) <ipython-input-3-a6abc1161d4c> in <module>() 1 from datasets import load_dataset 2 ----> 3 dataset = load_dataset("masakhaner",'amh') 3 frames /usr/local/lib/python3.7/dist-packages/datasets/utils/info_utils.py in verify_splits(expected_splits, recorded_splits) 72 ] 73 if len(bad_splits) > 0: ---> 74 raise NonMatchingSplitsSizesError(str(bad_splits)) 75 logger.info("All the splits matched successfully.") 76 NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='train', num_bytes=639927, num_examples=1751, dataset_name='masakhaner'), 'recorded': SplitInfo(name='train', num_bytes=639911, num_examples=1750, dataset_name='masakhaner')}, {'expected': SplitInfo(name='validation', num_bytes=92768, num_examples=251, dataset_name='masakhaner'), 'recorded': SplitInfo(name='validation', num_bytes=92753, num_examples=250, dataset_name='masakhaner')}, {'expected': SplitInfo(name='test', num_bytes=184286, num_examples=501, dataset_name='masakhaner'), 'recorded': SplitInfo(name='test', num_bytes=184271, num_examples=500, dataset_name='masakhaner')}] ``` ## Environment info Google Colab
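Until the stale split metadata is regenerated (PR #2973 above fixes the JSON metadata), one possible workaround is to skip the split-size verification; `ignore_verifications` was the relevant `load_dataset` flag in this `datasets` version:

```python
from datasets import load_dataset

# Workaround sketch: skip the split-size checks until the metadata is fixed
dataset = load_dataset("masakhaner", "amh", ignore_verifications=True)
```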
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2971/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2971/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2970
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2970/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2970/comments
https://api.github.com/repos/huggingface/datasets/issues/2970/events
https://github.com/huggingface/datasets/issues/2970
1,007,340,089
I_kwDODunzps48Cso5
2,970
Magnet’s
{ "avatar_url": "https://avatars.githubusercontent.com/u/90449239?v=4", "events_url": "https://api.github.com/users/rcacho172/events{/privacy}", "followers_url": "https://api.github.com/users/rcacho172/followers", "following_url": "https://api.github.com/users/rcacho172/following{/other_user}", "gists_url": "https://api.github.com/users/rcacho172/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rcacho172", "id": 90449239, "login": "rcacho172", "node_id": "MDQ6VXNlcjkwNDQ5MjM5", "organizations_url": "https://api.github.com/users/rcacho172/orgs", "received_events_url": "https://api.github.com/users/rcacho172/received_events", "repos_url": "https://api.github.com/users/rcacho172/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rcacho172/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rcacho172/subscriptions", "type": "User", "url": "https://api.github.com/users/rcacho172" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
null
0
"2021-09-26T09:50:29Z"
"2021-09-26T10:38:59Z"
"2021-09-26T10:38:59Z"
NONE
null
null
null
## Adding a Dataset - **Name:** *name of the dataset* - **Description:** *short description of the dataset (or link to social media or blog post)* - **Paper:** *link to the dataset paper if available* - **Data:** *link to the Github repository or current dataset location* - **Motivation:** *what are some good reasons to have this dataset* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2970/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2970/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2969
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2969/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2969/comments
https://api.github.com/repos/huggingface/datasets/issues/2969/events
https://github.com/huggingface/datasets/issues/2969
1,007,217,867
I_kwDODunzps48COzL
2,969
medical-dialog error
{ "avatar_url": "https://avatars.githubusercontent.com/u/43877130?v=4", "events_url": "https://api.github.com/users/smeyerhot/events{/privacy}", "followers_url": "https://api.github.com/users/smeyerhot/followers", "following_url": "https://api.github.com/users/smeyerhot/following{/other_user}", "gists_url": "https://api.github.com/users/smeyerhot/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/smeyerhot", "id": 43877130, "login": "smeyerhot", "node_id": "MDQ6VXNlcjQzODc3MTMw", "organizations_url": "https://api.github.com/users/smeyerhot/orgs", "received_events_url": "https://api.github.com/users/smeyerhot/received_events", "repos_url": "https://api.github.com/users/smeyerhot/repos", "site_admin": false, "starred_url": "https://api.github.com/users/smeyerhot/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/smeyerhot/subscriptions", "type": "User", "url": "https://api.github.com/users/smeyerhot" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
3
"2021-09-25T23:08:44Z"
"2024-01-08T09:55:12Z"
"2021-10-11T07:46:42Z"
NONE
null
null
null
## Describe the bug When I attempt to download the Hugging Face dataset `medical_dialog`, it errors out midway through. ## Steps to reproduce the bug ```python raw_datasets = load_dataset("medical_dialog", "en", split="train", download_mode="force_redownload", data_dir="./Medical-Dialogue-Dataset-English") ``` ## Expected results No error ## Actual results ``` 3 frames /usr/local/lib/python3.7/dist-packages/datasets/utils/info_utils.py in verify_splits(expected_splits, recorded_splits) 72 ] 73 if len(bad_splits) > 0: ---> 74 raise NonMatchingSplitsSizesError(str(bad_splits)) 75 logger.info("All the splits matched successfully.") 76 NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='train', num_bytes=0, num_examples=0, dataset_name='medical_dialog'), 'recorded': SplitInfo(name='train', num_bytes=295097913, num_examples=229674, dataset_name='medical_dialog')}] ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.21.1 - Platform: colab - Python version: colab 3.7 - PyArrow version: N/A
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2969/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2969/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2968
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2968/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2968/comments
https://api.github.com/repos/huggingface/datasets/issues/2968/events
https://github.com/huggingface/datasets/issues/2968
1,007,209,488
I_kwDODunzps48CMwQ
2,968
`DatasetDict` cannot be exported to parquet if the splits have different features
{ "avatar_url": "https://avatars.githubusercontent.com/u/30755778?v=4", "events_url": "https://api.github.com/users/LysandreJik/events{/privacy}", "followers_url": "https://api.github.com/users/LysandreJik/followers", "following_url": "https://api.github.com/users/LysandreJik/following{/other_user}", "gists_url": "https://api.github.com/users/LysandreJik/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LysandreJik", "id": 30755778, "login": "LysandreJik", "node_id": "MDQ6VXNlcjMwNzU1Nzc4", "organizations_url": "https://api.github.com/users/LysandreJik/orgs", "received_events_url": "https://api.github.com/users/LysandreJik/received_events", "repos_url": "https://api.github.com/users/LysandreJik/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LysandreJik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LysandreJik/subscriptions", "type": "User", "url": "https://api.github.com/users/LysandreJik" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
9
"2021-09-25T22:18:39Z"
"2021-10-07T22:47:42Z"
"2021-10-07T22:47:26Z"
MEMBER
null
null
null
## Describe the bug I'm trying to use parquet as a means of serialization for both `Dataset` and `DatasetDict` objects. Using `to_parquet` alongside `from_parquet` or `load_dataset` for a `Dataset` works perfectly. For `DatasetDict`, I use `to_parquet` on each split to save the parquet files in individual folders representing individual splits. This works too, as long as the splits have identical features. If a split has different features to neighboring splits, then loading the dataset will fail: a single schema is used to load both splits, resulting in a failure to load the second parquet file. ## Steps to reproduce the bug The following works as expected: ```python from datasets import load_dataset ds = load_dataset("lhoestq/custom_squad") ds['train'].to_parquet("./ds/train/split.parquet") ds['validation'].to_parquet("./ds/validation/split.parquet") brand_new_dataset = load_dataset("ds") ``` Modifying a single split to add a new feature ends up in a crash: ```python from datasets import load_dataset ds = load_dataset("lhoestq/custom_squad") def identical_answers(e): e['identical_answers'] = len(set(e['answers']['text'])) == 1 return e ds['validation'] = ds['validation'].map(identical_answers) ds['train'].to_parquet("./ds/train/split.parquet") ds['validation'].to_parquet("./ds/validation/split.parquet") brand_new_dataset = load_dataset("ds") ``` ``` File "/home/lysandre/.config/JetBrains/PyCharm2021.2/scratches/datasets/upload_dataset.py", line 26, in <module> brand_new_dataset = load_dataset("ds") File "/home/lysandre/Workspaces/Python/datasets/src/datasets/load.py", line 1151, in load_dataset builder_instance.download_and_prepare( File "/home/lysandre/Workspaces/Python/datasets/src/datasets/builder.py", line 642, in download_and_prepare self._download_and_prepare( File "/home/lysandre/Workspaces/Python/datasets/src/datasets/builder.py", line 732, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/builder.py", line 1194, in _prepare_split writer.write_table(table) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/arrow_writer.py", line 428, in write_table pa_table = pa.Table.from_arrays([pa_table[name] for name in self._schema.names], schema=self._schema) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/arrow_writer.py", line 428, in <listcomp> pa_table = pa.Table.from_arrays([pa_table[name] for name in self._schema.names], schema=self._schema) File "pyarrow/table.pxi", line 1257, in pyarrow.lib.Table.__getitem__ File "pyarrow/table.pxi", line 1833, in pyarrow.lib.Table.column File "pyarrow/table.pxi", line 1808, in pyarrow.lib.Table._ensure_integer_index KeyError: 'Field "identical_answers" does not exist in table schema' ``` It does work, however, to use the `save_to_disk` and `load_from_disk` methods: ```py from datasets import load_from_disk ds = load_dataset("lhoestq/custom_squad") def identical_answers(e): e['identical_answers'] = len(set(e['answers']['text'])) == 1 return e ds['validation'] = ds['validation'].map(identical_answers) ds.save_to_disk("local_path") brand_new_dataset = load_from_disk("local_path") ``` ## Expected results The saving works correctly - but the loading fails. I would expect either an error when saving or an error-less instantiation of the dataset through the parquet files. 
If it's helpful, I've traced a possible patch to the `write_table` method here: https://github.com/huggingface/datasets/blob/26ff41aa3a642e46489db9e95be1e9a8c4e64bea/src/datasets/arrow_writer.py#L424-L425 The writer is built only if the parquet writer is `None`, but I expect we would want to build a new writer as the table schema has changed. Furthermore, it relies on having the property `update_features` set to `True` in order to update the features: https://github.com/huggingface/datasets/blob/26ff41aa3a642e46489db9e95be1e9a8c4e64bea/src/datasets/arrow_writer.py#L254-L255 but the `ArrowWriter` is instantiated without that option in the `_prepare_split` method of the `ArrowBasedBuilder`: https://github.com/huggingface/datasets/blob/26ff41aa3a642e46489db9e95be1e9a8c4e64bea/src/datasets/builder.py#L1190 Updating these two parts to recreate a schema on each split results in an error that is, unfortunately, out of my expertise: ``` File "/home/lysandre/.config/JetBrains/PyCharm2021.2/scratches/datasets/upload_dataset.py", line 27, in <module> brand_new_dataset = load_dataset("ds") File "/home/lysandre/Workspaces/Python/datasets/src/datasets/load.py", line 1163, in load_dataset ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/builder.py", line 819, in as_dataset datasets = utils.map_nested( File "/home/lysandre/Workspaces/Python/datasets/src/datasets/utils/py_utils.py", line 207, in map_nested mapped = [ File "/home/lysandre/Workspaces/Python/datasets/src/datasets/utils/py_utils.py", line 208, in <listcomp> _single_map_nested((function, obj, types, None, True)) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/utils/py_utils.py", line 143, in _single_map_nested return function(data_struct) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/builder.py", line 850, in _build_single_dataset ds = self._as_dataset( File "/home/lysandre/Workspaces/Python/datasets/src/datasets/builder.py", line 920, in _as_dataset dataset_kwargs = ArrowReader(self._cache_dir, self.info).read( File "/home/lysandre/Workspaces/Python/datasets/src/datasets/arrow_reader.py", line 217, in read return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/arrow_reader.py", line 238, in read_files pa_table = self._read_files(files, in_memory=in_memory) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/arrow_reader.py", line 173, in _read_files pa_table: Table = self._get_table_from_filename(f_dict, in_memory=in_memory) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/arrow_reader.py", line 308, in _get_table_from_filename table = ArrowReader.read_table(filename, in_memory=in_memory) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/arrow_reader.py", line 327, in read_table return table_cls.from_file(filename) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/table.py", line 458, in from_file table = _memory_mapped_arrow_table_from_file(filename) File "/home/lysandre/Workspaces/Python/datasets/src/datasets/table.py", line 45, in _memory_mapped_arrow_table_from_file pa_table = opened_stream.read_all() File "pyarrow/ipc.pxi", line 563, in pyarrow.lib.RecordBatchReader.read_all File "pyarrow/error.pxi", line 114, in pyarrow.lib.check_status OSError: Header-type of flatbuffer-encoded Message is not RecordBatch. 
``` ## Environment info - `datasets` version: 1.12.2.dev0 - Platform: Linux-5.14.7-arch1-1-x86_64-with-glibc2.33 - Python version: 3.9.7 - PyArrow version: 5.0.0
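A possible workaround until this is fixed, sketched by the editor rather than taken from the report: load each parquet file as its own `Dataset` (so each keeps its own schema) and assemble the `DatasetDict` by hand, bypassing the single shared schema that `load_dataset` applies to both splits:

```python
from datasets import Dataset, DatasetDict

# Each split is read with the schema of its own parquet file; nothing
# forces the two files through one shared schema.
brand_new_dataset = DatasetDict(
    {
        "train": Dataset.from_parquet("./ds/train/split.parquet"),
        "validation": Dataset.from_parquet("./ds/validation/split.parquet"),
    }
)
```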
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2968/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2968/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2967
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2967/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2967/comments
https://api.github.com/repos/huggingface/datasets/issues/2967/events
https://github.com/huggingface/datasets/issues/2967
1,007,194,837
I_kwDODunzps48CJLV
2,967
Adding vision-and-language datasets (e.g., VQA, VCR) to Datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/42200725?v=4", "events_url": "https://api.github.com/users/WadeYin9712/events{/privacy}", "followers_url": "https://api.github.com/users/WadeYin9712/followers", "following_url": "https://api.github.com/users/WadeYin9712/following{/other_user}", "gists_url": "https://api.github.com/users/WadeYin9712/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/WadeYin9712", "id": 42200725, "login": "WadeYin9712", "node_id": "MDQ6VXNlcjQyMjAwNzI1", "organizations_url": "https://api.github.com/users/WadeYin9712/orgs", "received_events_url": "https://api.github.com/users/WadeYin9712/received_events", "repos_url": "https://api.github.com/users/WadeYin9712/repos", "site_admin": false, "starred_url": "https://api.github.com/users/WadeYin9712/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/WadeYin9712/subscriptions", "type": "User", "url": "https://api.github.com/users/WadeYin9712" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
0
"2021-09-25T20:58:15Z"
"2021-10-03T20:34:22Z"
"2021-10-03T20:34:22Z"
NONE
null
null
null
**Is your feature request related to a problem? Please describe.** Would you like to add any vision-and-language datasets (e.g., VQA, VCR) to Hugging Face Datasets? **Describe the solution you'd like** N/A **Describe alternatives you've considered** N/A **Additional context** This is Da Yin at UCLA. Recently, we published an EMNLP 2021 paper on geo-diverse visual commonsense reasoning (https://arxiv.org/abs/2109.06860). We propose a new dataset called GD-VCR, a vision-and-language dataset that evaluates how well V&L models perform on scenarios involving geo-location-specific commonsense. We hope to have our V&L dataset incorporated into Hugging Face to further promote our project, but I haven't seen many V&L datasets in the current package. Is it possible to add V&L datasets, and if so, how should we prepare them for loading? Thank you very much!
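As for how to prepare a dataset for loading, the usual route at the time of this issue is a loading script built on `datasets.GeneratorBasedBuilder`. Below is a rough editor's sketch only; the class name matches GD-VCR, but the URL and feature fields are placeholders, not the dataset's actual layout:

```python
import datasets

_URL = "https://example.org/gd-vcr.zip"  # placeholder, not the real location


class GDVCR(datasets.GeneratorBasedBuilder):
    """Hypothetical loading-script skeleton for a vision-and-language dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image_path": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"data_dir": data_dir}
            )
        ]

    def _generate_examples(self, data_dir):
        # Yield (key, example) pairs that match the features declared above.
        yield 0, {"image_path": "", "question": "", "answer": ""}
```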
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2967/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2967/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2966
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2966/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2966/comments
https://api.github.com/repos/huggingface/datasets/issues/2966/events
https://github.com/huggingface/datasets/pull/2966
1,007,142,233
PR_kwDODunzps4sRRMs
2,966
Upload greek-legal-code dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/9130406?v=4", "events_url": "https://api.github.com/users/christospi/events{/privacy}", "followers_url": "https://api.github.com/users/christospi/followers", "following_url": "https://api.github.com/users/christospi/following{/other_user}", "gists_url": "https://api.github.com/users/christospi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/christospi", "id": 9130406, "login": "christospi", "node_id": "MDQ6VXNlcjkxMzA0MDY=", "organizations_url": "https://api.github.com/users/christospi/orgs", "received_events_url": "https://api.github.com/users/christospi/received_events", "repos_url": "https://api.github.com/users/christospi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/christospi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/christospi/subscriptions", "type": "User", "url": "https://api.github.com/users/christospi" }
[]
closed
false
null
[]
null
1
"2021-09-25T16:52:15Z"
"2021-10-13T13:37:30Z"
"2021-10-13T13:37:30Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2966.diff", "html_url": "https://github.com/huggingface/datasets/pull/2966", "merged_at": "2021-10-13T13:37:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/2966.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2966" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2966/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2966/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2965
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2965/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2965/comments
https://api.github.com/repos/huggingface/datasets/issues/2965/events
https://github.com/huggingface/datasets/issues/2965
1,007,084,153
I_kwDODunzps48BuJ5
2,965
Invalid download URL of WMT17 `zh-en` data
{ "avatar_url": "https://avatars.githubusercontent.com/u/3339950?v=4", "events_url": "https://api.github.com/users/Ririkoo/events{/privacy}", "followers_url": "https://api.github.com/users/Ririkoo/followers", "following_url": "https://api.github.com/users/Ririkoo/following{/other_user}", "gists_url": "https://api.github.com/users/Ririkoo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Ririkoo", "id": 3339950, "login": "Ririkoo", "node_id": "MDQ6VXNlcjMzMzk5NTA=", "organizations_url": "https://api.github.com/users/Ririkoo/orgs", "received_events_url": "https://api.github.com/users/Ririkoo/received_events", "repos_url": "https://api.github.com/users/Ririkoo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Ririkoo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ririkoo/subscriptions", "type": "User", "url": "https://api.github.com/users/Ririkoo" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
null
[]
null
1
"2021-09-25T13:17:32Z"
"2022-08-31T06:47:11Z"
"2022-08-31T06:47:10Z"
NONE
null
null
null
## Describe the bug Part of the data (wmt17 zh-en) cannot be downloaded due to an invalid URL. ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset('wmt17', 'zh-en') ``` ## Actual results ConnectionError: Couldn't reach ftp://cwmt-wmt:[email protected]/parallel/casia2015.zip
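The mirror can be probed directly, independent of `datasets` (editor's sketch; the URL is the one from the error message, and `urllib` handles `ftp://` URLs with inline credentials):

```python
import urllib.request

# FTP mirror from the error message above.
url = "ftp://cwmt-wmt:[email protected]/parallel/casia2015.zip"
try:
    urllib.request.urlopen(url, timeout=30)
except Exception as err:
    print(f"Mirror unreachable: {err}")
```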
{ "+1": 0, "-1": 0, "confused": 1, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2965/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2965/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2964
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2964/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2964/comments
https://api.github.com/repos/huggingface/datasets/issues/2964/events
https://github.com/huggingface/datasets/issues/2964
1,006,605,904
I_kwDODunzps47_5ZQ
2,964
Error when calculating Matthews Correlation Coefficient loaded with `load_metric`
{ "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alvarobartt", "id": 36760800, "login": "alvarobartt", "node_id": "MDQ6VXNlcjM2NzYwODAw", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "repos_url": "https://api.github.com/users/alvarobartt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "type": "User", "url": "https://api.github.com/users/alvarobartt" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
2
"2021-09-24T15:55:21Z"
"2021-09-25T08:06:07Z"
"2021-09-25T08:06:07Z"
CONTRIBUTOR
null
null
null
## Describe the bug After loading the metric named "[Matthews Correlation Coefficient](https://huggingface.co/metrics/matthews_correlation)" from `🤗datasets`, the `.compute` method fails with the following exception `AttributeError: 'float' object has no attribute 'item'` (complete stack trace can be provided if required). ## Steps to reproduce the bug ```python import torch predictions = torch.ones((10,)) references = torch.zeros((10,)) from datasets import load_metric METRIC = load_metric("matthews_correlation") result = METRIC.compute(predictions=predictions, references=references) ``` ## Expected results We should expect a Python `dict` as follows: ``` { "matthews_correlation": float() } ``` as defined in https://github.com/huggingface/datasets/blob/master/metrics/matthews_correlation/matthews_correlation.py, so the fix implies removing `.item()`, since the value returned by the `scikit-learn` function is not a `torch.Tensor` but a `float`, which means that `.item()` fails. ## Actual results ``` Traceback (most recent call last): File "/home/alvaro.bartolome/XXX/xxx/cli.py", line 59, in main app() File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/typer/main.py", line 214, in __call__ return get_command(self)(*args, **kwargs) File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/click/core.py", line 1137, in __call__ return self.main(*args, **kwargs) File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/click/core.py", line 1062, in main rv = self.invoke(ctx) File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/click/core.py", line 1668, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/click/core.py", line 763, in invoke return __callback(*args, **kwargs) File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/typer/main.py", line 500, in wrapper return callback(**use_params) # type: ignore File "/home/alvaro.bartolome/XXX/xxx/cli.py", line 43, in train metrics = trainer.evaluate() File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/transformers/trainer.py", line 2051, in evaluate output = eval_loop( File "/home/alvaro.bartolome/miniconda3/envs/xxx/lib/python3.9/site-packages/transformers/trainer.py", line 2292, in evaluation_loop metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels)) File "/home/alvaro.bartolome/XXX/xxx/metrics.py", line 20, in compute_metrics res = METRIC.compute(predictions=predictions, references=eval_preds.label_ids) File "/home/alvaro.bartolome/miniconda3/envs/lang/lib/python3.9/site-packages/datasets/metric.py", line 402, in compute output = self._compute(predictions=predictions, references=references, **kwargs) File "/home/alvaro.bartolome/.cache/huggingface/modules/datasets_modules/metrics/matthews_correlation/0275f1e9a4d318e3ea8cdd87547ee0d58d894966616052e3d18444ac8ddd2357/matthews_correlation.py", line 88, in _compute "matthews_correlation": matthews_corrcoef(references, predictions, sample_weight=sample_weight).item(), AttributeError: 'float' object has no attribute 'item' ``` ## Environment info - `datasets` version: 1.12.1 - Platform: Linux-4.15.0-1113-azure-x86_64-with-glibc2.23 - Python version: 3.9.7 - PyArrow version: 5.0.0
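The degenerate repro above (all-ones predictions against all-zeros references) is exactly the case where `scikit-learn` returns a plain Python `float`, which is why `.item()` fails. A minimal sketch, noting that the exact return type may vary across `scikit-learn` versions:

```python
from sklearn.metrics import matthews_corrcoef

# With constant inputs the MCC denominator is zero, and scikit-learn
# short-circuits to the plain Python float 0.0.
score = matthews_corrcoef([0] * 10, [1] * 10)
print(type(score))  # <class 'float'>
score.item()        # AttributeError: 'float' object has no attribute 'item'
```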
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2964/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2964/timeline
null
completed
false