| Column | Type | Stats |
| --- | --- | --- |
| url | string | lengths 58 to 61 |
| repository_url | string | 1 value |
| labels_url | string | lengths 72 to 75 |
| comments_url | string | lengths 67 to 70 |
| events_url | string | lengths 65 to 68 |
| html_url | string | lengths 48 to 51 |
| id | int64 | 600M to 3.67B |
| node_id | string | lengths 18 to 24 |
| number | int64 | 2 to 7.88k |
| title | string | lengths 1 to 290 |
| user | dict | |
| labels | list | lengths 0 to 4 |
| state | string | 2 values |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | list | lengths 0 to 4 |
| comments | list | lengths 0 to 30 |
| created_at | timestamp[s] | 2020-04-14 18:18:51 to 2025-11-26 16:16:56 |
| updated_at | timestamp[s] | 2020-04-29 09:23:05 to 2025-11-30 03:52:07 |
| closed_at | timestamp[s] | 2020-04-29 09:23:05 to 2025-11-21 12:31:19 |
| author_association | string | 4 values |
| type | null | |
| active_lock_reason | null | |
| draft | null | |
| pull_request | null | |
| body | string | lengths 0 to 228k |
| closed_by | dict | |
| reactions | dict | |
| timeline_url | string | lengths 67 to 70 |
| performed_via_github_app | null | |
| state_reason | string | 4 values |
| sub_issues_summary | dict | |
| issue_dependencies_summary | dict | |
| is_pull_request | bool | 1 class |
| closed_at_time_taken | duration[s] | |
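The schema above describes one record per GitHub issue in the huggingface/datasets repository; the unformatted rows that follow are sample records with fields in the same column order. As a rough illustration of how a dataset with this schema could be loaded and queried with the `datasets` library, here is a minimal sketch; the repo id is a placeholder, not the actual location of this dataset.

```python
from datasets import load_dataset

# "user/github-issues" is a hypothetical repo id used only for illustration.
issues = load_dataset("user/github-issues", split="train")

print(issues.features)      # column names and types, matching the schema above
print(issues[0]["title"])   # e.g. "Add COCO datasets"

# Example query: open issues that are not pull requests
open_issues = issues.filter(lambda row: row["state"] == "open" and not row["is_pull_request"])
print(open_issues.num_rows)
```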
https://api.github.com/repos/huggingface/datasets/issues/2526
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2526/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2526/comments
https://api.github.com/repos/huggingface/datasets/issues/2526/events
https://github.com/huggingface/datasets/issues/2526
925,929,228
MDU6SXNzdWU5MjU5MjkyMjg=
2,526
Add COCO datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/48327001?v=4", "events_url": "https://api.github.com/users/NielsRogge/events{/privacy}", "followers_url": "https://api.github.com/users/NielsRogge/followers", "following_url": "https://api.github.com/users/NielsRogge/following{/other_user}", "gists_url": "https://api.github.com/users/NielsRogge/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/NielsRogge", "id": 48327001, "login": "NielsRogge", "node_id": "MDQ6VXNlcjQ4MzI3MDAx", "organizations_url": "https://api.github.com/users/NielsRogge/orgs", "received_events_url": "https://api.github.com/users/NielsRogge/received_events", "repos_url": "https://api.github.com/users/NielsRogge/repos", "site_admin": false, "starred_url": "https://api.github.com/users/NielsRogge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NielsRogge/subscriptions", "type": "User", "url": "https://api.github.com/users/NielsRogge", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" }, { "color": "bfdadc", "default": false, "description": "Vision datasets", "id": 3608941089, "name": "vision", "node_id": "LA_kwDODunzps7XHBIh", "url": "https://api.github.com/repos/huggingface/datasets/labels/vision" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/53175384?v=4", "events_url": "https://api.github.com/users/merveenoyan/events{/privacy}", "followers_url": "https://api.github.com/users/merveenoyan/followers", "following_url": "https://api.github.com/users/merveenoyan/following{/other_user}", "gists_url": "https://api.github.com/users/merveenoyan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/merveenoyan", "id": 53175384, "login": "merveenoyan", "node_id": "MDQ6VXNlcjUzMTc1Mzg0", "organizations_url": "https://api.github.com/users/merveenoyan/orgs", "received_events_url": "https://api.github.com/users/merveenoyan/received_events", "repos_url": "https://api.github.com/users/merveenoyan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/merveenoyan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/merveenoyan/subscriptions", "type": "User", "url": "https://api.github.com/users/merveenoyan", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/53175384?v=4", "events_url": "https://api.github.com/users/merveenoyan/events{/privacy}", "followers_url": "https://api.github.com/users/merveenoyan/followers", "following_url": "https://api.github.com/users/merveenoyan/following{/other_user}", "gists_url": "https://api.github.com/users/merveenoyan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/merveenoyan", "id": 53175384, "login": "merveenoyan", "node_id": "MDQ6VXNlcjUzMTc1Mzg0", "organizations_url": "https://api.github.com/users/merveenoyan/orgs", "received_events_url": "https://api.github.com/users/merveenoyan/received_events", "repos_url": "https://api.github.com/users/merveenoyan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/merveenoyan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/merveenoyan/subscriptions", "type": "User", "url": "https://api.github.com/users/merveenoyan", "user_view_type": "public" } ]
[ "I'm currently adding it, the entire dataset is quite big around 30 GB so I add splits separately. You can take a look here https://huggingface.co/datasets/merve/coco", "I talked to @lhoestq and it's best if I download this dataset through TensorFlow datasets instead, so I'll be implementing that one really soon.\r\n@NielsRogge ", "I started adding COCO, will be done tomorrow EOD\r\nmy work so far https://github.com/merveenoyan/datasets (my fork)", "Hi Merve @merveenoyan , thank you so much for your great contribution! May I ask about the current progress of your implementation? Cuz I see the pull request is still in progess here. Or can I just run the COCO scripts in your fork repo?", "Hello @yixuanren I had another prioritized project about to be merged, but I'll start continuing today will finish up soon. ", "> Hello @yixuanren I had another prioritized project about to be merged, but I'll start continuing today will finish up soon.\r\n\r\nIt's really nice of you!! I see you've commited another version just now", "@yixuanren we're working on it, will be available soon, thanks a lot for your patience", "Hi @NielsRogge and @merveenoyan, did you find a way to load a dataset with COCO annotations to HF's hub?\r\nI have a panoptic segmentation dataset in COCO format and would like to share it with the community.\r\nThanks in advance :)", "The COCO format is not supported out of the box in the HF's hub - you'd need to reformat it to an [ImageFolder](https://huggingface.co/docs/datasets/image_dataset#imagefolder) with metadata format, or write a [loading script](https://huggingface.co/docs/datasets/image_dataset#loading-script)", "> The COCO format is not supported out of the box in the HF's hub - you'd need to reformat it to an [ImageFolder](https://huggingface.co/docs/datasets/image_dataset#imagefolder) with metadata format, or write a [loading script](https://huggingface.co/docs/datasets/image_dataset#loading-script)\r\n\r\nHi @lhoestq , thank you for your quick reply.\r\nI've correctly created a metadata.jsonl file for a dataset with instance segmentation annotations [here](https://huggingface.co/datasets/lombardata/data_2017)\r\nbut do not understand how I can integrate panoptic annotations with the metadata format of ImageFolder datasets. The \"problem\" with panoptic annotations is that we have a folder with images, a json file with annotations and another folder with png annotations.\r\n\r\nI checked between all the datasets already published on HuggingFace and, the only one who has uploaded a correct panoptic dataset is @NielsRogge [here](https://huggingface.co/datasets/nielsr/coco-panoptic-val2017) and [here](https://huggingface.co/datasets/nielsr/ade20k-panoptic-demo). 
Indeed he accomplished to have three fields : \r\n1.image (image)\r\n2.label (image)\r\n3.segments_info (list)\r\nbut I not find the corresponding code that allows to upload a panoptic dataset from this 3 sources.\r\nCan you please share an example code?\r\nThanks !", "Both were uploaded using `ds.push_to_hub()` :)\r\n\r\nYou can get a Dataset from a python dictionary using `ds = Dataset.from_dict(...)` and casts the paths to images to the `Image()` type using `ds = ds.cast_column(\"image\", Image())`.\r\n\r\n```python\r\nfrom datasets import Dataset, Image\r\n\r\nds = Dataset.from_dict(...)\r\nds = ds.cast_column(\"image\", Image())\r\nds = ds.cast_column(\"label\", Image())\r\nds.push_to_hub(...)\r\n```", "> Both were uploaded using `ds.push_to_hub()` :)\r\n> \r\n> You can get a Dataset from a python dictionary using `ds = Dataset.from_dict(...)` and casts the paths to images to the `Image()` type using `ds = ds.cast_column(\"image\", Image())`.\r\n> \r\n> ```python\r\n> from datasets import Dataset, Image\r\n> \r\n> ds = Dataset.from_dict(...)\r\n> ds = ds.cast_column(\"image\", Image())\r\n> ds = ds.cast_column(\"label\", Image())\r\n> ds.push_to_hub(...)\r\n> ```\r\n\r\nThank you very much @lhoestq , I succesfully created a hf dataset [here](https://huggingface.co/datasets/lombardata/panoptic_2023_06_21) with the two fields :\r\n1.image (image)\r\n2.label (image)\r\nfollowing your suggestions. Now still remain the problem of uploading **segments_info** information to the dataset.\r\nThere is a function that easily imports the _panoptic_coco_annotation.json_ file to a segment_info field?\r\nI think we must define a **list_of_segment**, i.e. a list of lists of this type : \r\n```python\r\n[ { \"area\": 214858, \"bbox\": [ 0, 0, 511, 760 ], \"category_id\": 0, \"id\": 7895160, \"iscrowd\": 0 }, { \"area\": 73067, \"bbox\": [ 98, 719, 413, 253 ], \"category_id\": 3, \"id\": 3289680, \"iscrowd\": 0 }, { \"area\": 832, \"bbox\": [ 53, 0, 101, 16 ], \"category_id\": 5, \"id\": 5273720, \"iscrowd\": 0 }, { \"area\": 70668, \"bbox\": [ 318, 60, 191, 392 ], \"category_id\": 8, \"id\": 15132390, \"iscrowd\": 0 }, { \"area\": 32696, \"bbox\": [ 0, 100, 78, 872 ], \"category_id\": 18, \"id\": 472063, \"iscrowd\": 0 }, { \"area\": 76045, \"bbox\": [ 42, 48, 264, 924 ], \"category_id\": 37, \"id\": 16713830, \"iscrowd\": 0 }, { \"area\": 27103, \"bbox\": [ 288, 482, 216, 306 ], \"category_id\": 47, \"id\": 16753408, \"iscrowd\": 0 } ]\r\n```\r\nand then apply again the **cast_column** function [here](https://github.com/huggingface/datasets/blob/2.13.0/src/datasets/arrow_dataset.py#L2060) but with a list as a second argument, like : \r\n```python\r\nfrom datasets import Dataset, Image\r\nds = ds.cast_column(\"image\", Image())\r\nds = ds.cast_column(\"label\", Image())\r\nds = ds.cast_column(\"segments_info\", list)\r\n```\r\nbut I do not see how to transfer the information of the _panoptic_coco_annotation.json_ to a list of lists of this type : \r\n```python\r\n[ { \"area\": 214858, \"bbox\": [ 0, 0, 511, 760 ], \"category_id\": 0, \"id\": 7895160, \"iscrowd\": 0 }, { \"area\": 73067, \"bbox\": [ 98, 719, 413, 253 ], \"category_id\": 3, \"id\": 3289680, \"iscrowd\": 0 }, { \"area\": 832, \"bbox\": [ 53, 0, 101, 16 ], \"category_id\": 5, \"id\": 5273720, \"iscrowd\": 0 }, { \"area\": 70668, \"bbox\": [ 318, 60, 191, 392 ], \"category_id\": 8, \"id\": 15132390, \"iscrowd\": 0 }, { \"area\": 32696, \"bbox\": [ 0, 100, 78, 872 ], \"category_id\": 18, \"id\": 472063, \"iscrowd\": 0 }, { \"area\": 76045, 
\"bbox\": [ 42, 48, 264, 924 ], \"category_id\": 37, \"id\": 16713830, \"iscrowd\": 0 }, { \"area\": 27103, \"bbox\": [ 288, 482, 216, 306 ], \"category_id\": 47, \"id\": 16753408, \"iscrowd\": 0 } ]\r\n```\r\nlike @NielsRogge has done [here](https://huggingface.co/datasets/nielsr/coco-panoptic-val2017) and [here](https://huggingface.co/datasets/nielsr/ade20k-panoptic-demo).\r\nThank you again for your help and have a good day !", "You can pass this data in .from_dict() - no need to cast anything for this column\r\n\r\n```python\r\nds = Dataset.from_dict({\r\n \"image\": [...],\r\n \"label\": [...],\r\n \"segments_info\": [...],\r\n)}\r\n```\r\n\r\nwhere `segments_info` is the list of the segment_infos of all the examples in the dataset, and therefore is a list of lists of dicts.", "> You can pass this data in .from_dict() - no need to cast anything for this column\r\n> \r\n> ```python\r\n> ds = Dataset.from_dict({\r\n> \"image\": [...],\r\n> \"label\": [...],\r\n> \"segments_info\": [...],\r\n> )}\r\n> ```\r\n> \r\n> where `segments_info` is the list of the segment_infos of all the examples in the dataset, and therefore is a list of lists of dicts.\r\n\r\nThank you for the quick reply @lhoestq , but then how to generate the `segments_info` list of lists of dicts starting from a _panoptic_coco_annotation.json_ file ?\r\n\r\n\r\n", "You read the JSON file and transform the data yourself. I don't think there's an automatic converter somewhere", "> You read the JSON file and transform the data yourself. I don't think there's an automatic converter somewhere\r\n\r\nPerfect, I've done it and succesfully uploaded a new dataset [here](https://huggingface.co/datasets/lombardata/panoptic_2023_06_22), but I've (I hope) a last problem.\r\nThe dataset has currently 302 images and, when I upload it to the hub, only the first page of images is correctly uploaded.\r\nWhen I try to see the second/third/fourth page of items of my dataset, I can see that the fields **segments_info** and **image_name** are correctly uploaded, while the images are not (the \"null\" string is shown everywhere).\r\n\r\nI've checked the path of images that are not uploaded and they exists, is there a problem with the size of the dataset ?\r\nHow can I upload the whole dataset to the hub ?\r\nThank you again @lhoestq and have a good day !", "Awesome ! Your dataset looks all good 🤗 \r\n\r\nThe `null` in the viewer is a bug on our side, let me investigate" ]
2021-06-21T07:48:32
2023-06-22T14:12:18
null
CONTRIBUTOR
null
null
null
null
## Adding a Dataset - **Name:** COCO - **Description:** COCO is a large-scale object detection, segmentation, and captioning dataset. - **Paper + website:** https://cocodataset.org/#home - **Data:** https://cocodataset.org/#download - **Motivation:** It would be great to have COCO available in HuggingFace datasets, as we are moving beyond just text. COCO includes multi-modalities (images + text), as well as a huge amount of images annotated with objects, segmentation masks, keypoints etc., on which models like DETR (which I recently added to HuggingFace Transformers) are trained. Currently, one needs to download everything from the website and place it in a local folder, but it would be much easier if we can directly access it through the datasets API. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
null
{ "+1": 3, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/2526/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2526/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
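The thread in the record above ends with guidance on publishing a panoptic segmentation dataset (image, label, segments_info) built with `Dataset.from_dict`, cast to `Image()` columns, and uploaded with `push_to_hub`. Below is a minimal sketch of that approach, assuming a standard COCO panoptic annotation file; the folder layout and repo id are illustrative, not taken from the thread.

```python
import json
from datasets import Dataset, Image

# Assumed layout: images/ (RGB inputs), panoptic/ (PNG annotation masks), and
# panoptic_coco_annotation.json in COCO panoptic format.
with open("panoptic_coco_annotation.json") as f:
    coco = json.load(f)

images, labels, segments_info = [], [], []
for ann in coco["annotations"]:
    # Assumption: each .png mask has a matching .jpg input image of the same name.
    images.append(f"images/{ann['file_name'].replace('.png', '.jpg')}")
    labels.append(f"panoptic/{ann['file_name']}")
    segments_info.append(ann["segments_info"])  # list of dicts (area, bbox, category_id, id, iscrowd)

ds = Dataset.from_dict({"image": images, "label": labels, "segments_info": segments_info})
ds = ds.cast_column("image", Image())
ds = ds.cast_column("label", Image())
ds.push_to_hub("user/panoptic-demo")  # placeholder repo id
```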
https://api.github.com/repos/huggingface/datasets/issues/2523
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2523/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2523/comments
https://api.github.com/repos/huggingface/datasets/issues/2523/events
https://github.com/huggingface/datasets/issues/2523
925,421,008
MDU6SXNzdWU5MjU0MjEwMDg=
2,523
Fr
{ "avatar_url": "https://avatars.githubusercontent.com/u/71971234?v=4", "events_url": "https://api.github.com/users/aDrIaNo34500/events{/privacy}", "followers_url": "https://api.github.com/users/aDrIaNo34500/followers", "following_url": "https://api.github.com/users/aDrIaNo34500/following{/other_user}", "gists_url": "https://api.github.com/users/aDrIaNo34500/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/aDrIaNo34500", "id": 71971234, "login": "aDrIaNo34500", "node_id": "MDQ6VXNlcjcxOTcxMjM0", "organizations_url": "https://api.github.com/users/aDrIaNo34500/orgs", "received_events_url": "https://api.github.com/users/aDrIaNo34500/received_events", "repos_url": "https://api.github.com/users/aDrIaNo34500/repos", "site_admin": false, "starred_url": "https://api.github.com/users/aDrIaNo34500/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aDrIaNo34500/subscriptions", "type": "User", "url": "https://api.github.com/users/aDrIaNo34500", "user_view_type": "public" }
[]
closed
false
null
[]
[]
2021-06-19T15:56:32
2021-06-19T18:48:23
2021-06-19T18:48:23
NONE
null
null
null
null
__Originally posted by @lewtun in https://github.com/huggingface/datasets/pull/2469__
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2523/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2523/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2:51:51
https://api.github.com/repos/huggingface/datasets/issues/2522
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2522/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2522/comments
https://api.github.com/repos/huggingface/datasets/issues/2522/events
https://github.com/huggingface/datasets/issues/2522
925,334,379
MDU6SXNzdWU5MjUzMzQzNzk=
2,522
Documentation Mistakes in Dataset: emotion
{ "avatar_url": "https://avatars.githubusercontent.com/u/62606251?v=4", "events_url": "https://api.github.com/users/GDGauravDutta/events{/privacy}", "followers_url": "https://api.github.com/users/GDGauravDutta/followers", "following_url": "https://api.github.com/users/GDGauravDutta/following{/other_user}", "gists_url": "https://api.github.com/users/GDGauravDutta/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/GDGauravDutta", "id": 62606251, "login": "GDGauravDutta", "node_id": "MDQ6VXNlcjYyNjA2MjUx", "organizations_url": "https://api.github.com/users/GDGauravDutta/orgs", "received_events_url": "https://api.github.com/users/GDGauravDutta/received_events", "repos_url": "https://api.github.com/users/GDGauravDutta/repos", "site_admin": false, "starred_url": "https://api.github.com/users/GDGauravDutta/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GDGauravDutta/subscriptions", "type": "User", "url": "https://api.github.com/users/GDGauravDutta", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi,\r\n\r\nthis issue has been already reported in the dataset repo (https://github.com/dair-ai/emotion_dataset/issues/2), so this is a bug on their side.", "The documentation has another bug in the dataset card [here](https://huggingface.co/datasets/emotion). \r\n\r\nIn the dataset summary **six** emotions are mentioned: *\"six basic emotions: anger, fear, joy, love, sadness, and surprise\"*, however, in the datafields section we have only **five**:\r\n```\r\nlabel: a classification label, with possible values including sadness (0), joy (1), love (2), anger (3), fear (4).\r\n```", "@GDGauravDutta the dataset author replied in their issue: https://github.com/dair-ai/emotion_dataset/issues/2\r\n> The dataset released is a preprocessed six emotion variant released for educational and research purposes.\r\n\r\n@albertovilla the dataset card was fixed with 6 emotions." ]
2021-06-19T07:08:57
2023-01-02T12:04:58
2023-01-02T12:04:58
NONE
null
null
null
null
As per documentation, Dataset: emotion Homepage: https://github.com/dair-ai/emotion_dataset Dataset: https://github.com/huggingface/datasets/blob/master/datasets/emotion/emotion.py Permalink: https://huggingface.co/datasets/viewer/?dataset=emotion Emotion is a dataset of English Twitter messages with eight basic emotions: anger, anticipation, disgust, fear, joy, sadness, surprise, and trust. For more detailed information please refer to the paper. But when we view the data, there are only 6 emotions, anger, fear, joy, sadness, surprise, and trust.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2522/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2522/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
562 days, 4:56:01
https://api.github.com/repos/huggingface/datasets/issues/2520
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2520/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2520/comments
https://api.github.com/repos/huggingface/datasets/issues/2520/events
https://github.com/huggingface/datasets/issues/2520
925,015,004
MDU6SXNzdWU5MjUwMTUwMDQ=
2,520
Datasets with tricky task templates
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "72f99f", "default": false, "description": "Discussions on the datasets", "id": 2067401494, "name": "Dataset discussion", "node_id": "MDU6TGFiZWwyMDY3NDAxNDk0", "url": "https://api.github.com/repos/huggingface/datasets/labels/Dataset%20discussion" } ]
closed
false
null
[]
[ "The `task_templates` API is deprecated in favor of the `train-eval-index` YAML field, so I'm closing this issue." ]
2021-06-18T15:33:57
2023-07-20T13:20:32
2023-07-20T13:20:32
MEMBER
null
null
null
null
I'm collecting a list of datasets here that don't follow the "standard" taxonomy and require further investigation to implement task templates for. ## Text classification * [hatexplain](https://huggingface.co/datasets/hatexplain): ostensibly a form of text classification, but not in the standard `(text, target)` format and each sample appears to be tokenized. * [muchocine](https://huggingface.co/datasets/muchocine): contains two candidate text columns (long-form and summary) which in principle requires two `TextClassification` templates which is not currently supported
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2520/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2520/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
761 days, 21:46:35
https://api.github.com/repos/huggingface/datasets/issues/2516
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2516/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2516/comments
https://api.github.com/repos/huggingface/datasets/issues/2516/events
https://github.com/huggingface/datasets/issues/2516
924,597,470
MDU6SXNzdWU5MjQ1OTc0NzA=
2,516
datasets.map pickle issue resulting in invalid mapping function
{ "avatar_url": "https://avatars.githubusercontent.com/u/5028974?v=4", "events_url": "https://api.github.com/users/david-waterworth/events{/privacy}", "followers_url": "https://api.github.com/users/david-waterworth/followers", "following_url": "https://api.github.com/users/david-waterworth/following{/other_user}", "gists_url": "https://api.github.com/users/david-waterworth/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/david-waterworth", "id": 5028974, "login": "david-waterworth", "node_id": "MDQ6VXNlcjUwMjg5NzQ=", "organizations_url": "https://api.github.com/users/david-waterworth/orgs", "received_events_url": "https://api.github.com/users/david-waterworth/received_events", "repos_url": "https://api.github.com/users/david-waterworth/repos", "site_admin": false, "starred_url": "https://api.github.com/users/david-waterworth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david-waterworth/subscriptions", "type": "User", "url": "https://api.github.com/users/david-waterworth", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
[ "Hi ! `map` calls `__getstate__` using `dill` to hash your map function. This is used by the caching mechanism to recover previously computed results. That's why you don't see any `__setstate__` call.\r\n\r\nWhy do you change an attribute of your tokenizer when `__getstate__` is called ?", "@lhoestq because if I try to pickle my custom tokenizer (it contains a pure python pretokenization step in an otherwise rust backed tokenizer) I get\r\n\r\n> Exception: Error while attempting to pickle Tokenizer: Custom PreTokenizer cannot be serialized\r\n\r\nSo I remove the Custom PreTokenizer in `__getstate__` and then restore it in `__setstate__` (since it doesn't contain any state). This is what my `__getstate__` / `__setstate__` looks like:\r\n\r\n def __getstate__(self):\r\n \"\"\"\r\n Removes pre_tokenizer since it cannot be pickled\r\n \"\"\"\r\n logger.debug(\"Copy state dict\")\r\n out = self.__dict__.copy()\r\n logger.debug(\"Detaching pre_tokenizer\")\r\n out['_tokenizer'].pre_tokenizer = tokenizers.pre_tokenizers.Sequence([]) \r\n return out\r\n\r\n def __setstate__(self, d):\r\n \"\"\"\r\n Reinstates pre_tokenizer\r\n \"\"\"\r\n logger.debug(\"Reattaching pre_tokenizer\")\r\n self.__dict__ = d\r\n self.backend_tokenizer.pre_tokenizer = self._pre_tokenizer()\r\n\r\nIf this is the case can you think of another way of avoiding my issue?", "Actually, maybe I need to deep copy `self.__dict__`? That way `self` isn't modified. That was my intention and I thought it was working - I'll double-check after the weekend.", "Doing a deep copy results in the warning:\r\n\r\n> 06/20/2021 16:02:15 - WARNING - datasets.fingerprint - Parameter 'function'=<function tokenize_function at 0x7f1e95f05d40> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.\r\n\r\n\r\n```\r\ndef __getstate__(self):\r\n \"\"\"\r\n Removes pre_tokenizer since it cannot be pickled\r\n \"\"\"\r\n logger.debug(\"Copy state dict\")\r\n out = copy.deepcopy(self.__dict__)\r\n logger.debug(\"Detaching pre_tokenizer\")\r\n out['_tokenizer'].pre_tokenizer = tokenizers.pre_tokenizers.Sequence([]) \r\n return out\r\n```", "Looks like there is still an object that is not pickable in your `tokenize_function` function.\r\n\r\nYou can test if an object can be pickled and hashed by using \r\n```python\r\nfrom datasets.fingerprint import Hasher\r\n\r\nHasher.hash(my_object)\r\n```\r\n\r\nUnder the hood it pickles the object to compute its hash, so it calls `__getstate__` when applicable.", "I figured it out, the problem is deep copy itself uses pickle (unless you implement `__deepcopy__`). So when I changed `__getstate__` it started throwing an error.\r\n\r\nI'm sure there's a better way of doing this, but in order to return the `__dict__` without the non-pikelable pre-tokeniser and without modifying self I removed the pre-tokenizers, did a deep copy and then re-generated it.\r\n\r\nIt does work - although I noticed Hasher doesn't call `__hash__` if the object being hashed implements it which I feel it should? 
If it did I could return a hash of the tokenizers.json file instead.\r\n\r\n```\r\n def __getstate__(self):\r\n \"\"\"\r\n Removes pre_tokenizer since it cannot be pickled\r\n \"\"\"\r\n logger.debug(\"Copy state dict\")\r\n self.backend_tokenizer.pre_tokenizer = tokenizers.pre_tokenizers.Sequence([])\r\n out = copy.deepcopy(self.__dict__) #self.__dict__.copy()\r\n self.backend_tokenizer.pre_tokenizer = self._pre_tokenizer()\r\n\r\n return out\r\n```\r\n", "I'm glad you figured something out :)\r\n\r\nRegarding hashing: we're not using hashing for the same purpose as the python `__hash__` purpose (which is in general for dictionary lookups). For example it is allowed for python hashing to not return the same hash across sessions, while our hashing must return the same hashes across sessions for the caching to work properly." ]
2021-06-18T06:47:26
2021-06-23T13:47:49
null
NONE
null
null
null
null
I trained my own tokenizer, and I needed to use a python custom class. Because of this I have to detach the custom step before saving and reattach after restore. I did this using the standard pickle `__get_state__` / `__set_state__` mechanism. I think it's correct but it fails when I use it inside a function which is mapped to a dataset, i.e. in the manner of run_mlm.py and other huggingface scripts. The following reproduces the issue - most likely I'm missing something A simulated tokeniser which can be pickled ``` class CustomTokenizer: def __init__(self): self.state = "init" def __getstate__(self): print("__getstate__ called") out = self.__dict__.copy() self.state = "pickled" return out def __setstate__(self, d): print("__setstate__ called") self.__dict__ = d self.state = "restored" tokenizer = CustomTokenizer() ``` Test that it actually works - prints "__getstate__ called" and "__setstate__ called" ``` import pickle serialized = pickle.dumps(tokenizer) restored = pickle.loads(serialized) assert restored.state == "restored" ``` Simulate a function that tokenises examples, when dataset.map is called, this function ``` def tokenize_function(examples): assert tokenizer.state == "restored" # this shouldn't fail but it does output = tokenizer(examples) # this will fail as tokenizer isn't really a tokenizer return output ``` Use map to simulate tokenization ``` import glob from datasets import load_dataset assert tokenizer.state == "restored" train_files = glob.glob('train*.csv') validation_files = glob.glob('validation*.csv') datasets = load_dataset("csv", data_files=dict(train=train_files, validation=validation_files)) tokenized_datasets = datasets.map( tokenize_function, batched=True, ) ``` What's happening is I can see that __getstate__ is called but not __setstate__, so the state of `tokenize_function` is invalid at the point that it's actually executed. This doesn't matter as far as I can see for the standard tokenizers as they don't use __getstate__ / __setstate__. I'm not sure if there's another hook I'm supposed to implement as well? 
--------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-22-a2aef4f74aaa> in <module> 8 tokenized_datasets = datasets.map( 9 tokenize_function, ---> 10 batched=True, 11 ) ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/dataset_dict.py in map(self, function, with_indices, input_columns, batched, batch_size, remove_columns, keep_in_memory, load_from_cache_file, cache_file_names, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, desc) 487 desc=desc, 488 ) --> 489 for k, dataset in self.items() 490 } 491 ) ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/dataset_dict.py in <dictcomp>(.0) 487 desc=desc, 488 ) --> 489 for k, dataset in self.items() 490 } 491 ) ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc) 1633 fn_kwargs=fn_kwargs, 1634 new_fingerprint=new_fingerprint, -> 1635 desc=desc, 1636 ) 1637 else: ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in wrapper(*args, **kwargs) 184 } 185 # apply actual function --> 186 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 187 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 188 # re-apply format to the output ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/fingerprint.py in wrapper(*args, **kwargs) 395 # Call actual function 396 --> 397 out = func(self, *args, **kwargs) 398 399 # Update fingerprint of in-place transforms + update in-place history of transforms ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in _map_single(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, desc) 1961 indices, 1962 check_same_num_examples=len(input_dataset.list_indexes()) > 0, -> 1963 offset=offset, 1964 ) 1965 except NumExamplesMismatch: ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples, offset) 1853 effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset 1854 processed_inputs = ( -> 1855 function(*fn_args, effective_indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs) 1856 ) 1857 if update_data is None: <ipython-input-21-8ee4a8ba5b1b> in tokenize_function(examples) 1 def tokenize_function(examples): ----> 2 assert tokenizer.state == "restored" 3 tokenizer(examples) 4 return examples
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2516/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2516/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
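Two points from the thread in the record above are worth making concrete: `datasets.map` only pickles the mapping function (via dill) to compute a cache fingerprint, so `__getstate__` runs but `__setstate__` never does, and hashability can be checked up front with `datasets.fingerprint.Hasher`. A small self-contained sketch of that check, using a stand-in tokenizer class modeled on the reporter's example:

```python
from datasets.fingerprint import Hasher

class CustomTokenizer:
    def __init__(self):
        self.state = "init"

    def __getstate__(self):
        # Return a copy and never mutate self: map() pickles the function only to
        # fingerprint it for caching, so __setstate__ is never called afterwards.
        return self.__dict__.copy()

tokenizer = CustomTokenizer()

def tokenize_function(examples):
    # Stand-in for real tokenization; because it refers to `tokenizer`, the
    # function's fingerprint depends on `tokenizer` being picklable.
    assert tokenizer.state == "init"
    return {"n_chars": [len(t) for t in examples["text"]]}

# If either call raises, or datasets warns that the function "couldn't be hashed
# properly", map() falls back to a random fingerprint and caching is disabled.
print(Hasher.hash(tokenizer))
print(Hasher.hash(tokenize_function))
```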
https://api.github.com/repos/huggingface/datasets/issues/2514
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2514/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2514/comments
https://api.github.com/repos/huggingface/datasets/issues/2514/events
https://github.com/huggingface/datasets/issues/2514
924,417,172
MDU6SXNzdWU5MjQ0MTcxNzI=
2,514
Can datasets remove duplicated rows?
{ "avatar_url": "https://avatars.githubusercontent.com/u/16516583?v=4", "events_url": "https://api.github.com/users/liuxinglan/events{/privacy}", "followers_url": "https://api.github.com/users/liuxinglan/followers", "following_url": "https://api.github.com/users/liuxinglan/following{/other_user}", "gists_url": "https://api.github.com/users/liuxinglan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/liuxinglan", "id": 16516583, "login": "liuxinglan", "node_id": "MDQ6VXNlcjE2NTE2NTgz", "organizations_url": "https://api.github.com/users/liuxinglan/orgs", "received_events_url": "https://api.github.com/users/liuxinglan/received_events", "repos_url": "https://api.github.com/users/liuxinglan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/liuxinglan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/liuxinglan/subscriptions", "type": "User", "url": "https://api.github.com/users/liuxinglan", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
[ "Hi ! For now this is probably the best option.\r\nWe might add a feature like this in the feature as well.\r\n\r\nDo you know any deduplication method that works on arbitrary big datasets without filling up RAM ?\r\nOtherwise we can have do the deduplication in memory like pandas but I feel like this is going to be limiting for some cases", "Yes, I'd like to work on this feature once I'm done with #2500, but first I have to do some research, and see if the implementation wouldn't be too complex.\r\n\r\nIn the meantime, maybe [this lib](https://github.com/TomScheffers/pyarrow_ops) can help. However, note that this lib operates directly on pyarrow tables and relies only on `hash` to find duplicates (e.g. `-1` and `-2` have the same hash in Python 3, so this lib will treat them as duplicates), which doesn't make much sense.", "> Hi ! For now this is probably the best option.\r\n> We might add a feature like this in the feature as well.\r\n> \r\n> Do you know any deduplication method that works on arbitrary big datasets without filling up RAM ?\r\n> Otherwise we can have do the deduplication in memory like pandas but I feel like this is going to be limiting for some cases\r\n\r\nGreat if this is can be done. Thanks!!\r\n\r\nNot sure if you are asking me. In any case I don't know of any unfortunately :( in practice if data is really large we normally do it with spark (only for info. I understand this is not useful in developing this library..)", "Hello,\r\n\r\nI'm also interested in this feature.\r\nHas there been progress on this issue?\r\n\r\nCould we use a similar trick as above, but with a better hashing algorithm like SHA?\r\n\r\nWe could also use a [bloom filter](https://en.wikipedia.org/wiki/Bloom_filter), should we care a lot about collision in this case?", "For reference, we can get a solution fairly easily if we assume that we can hold in memory all unique values. 
\r\n\r\n```python\r\nfrom datasets import Dataset\r\nfrom itertools import cycle\r\nfrom functools import partial\r\n\r\nmemory = set()\r\ndef is_unique(elem:Any , column: str, memory: set) -> bool:\r\n if elem[column] in memory:\r\n return False\r\n else:\r\n memory.add(elem[column])\r\n return True\r\n\r\n# Example dataset\r\nds = Dataset.from_dict({\"col1\" : [sent for i, sent in zip(range(10), cycle([\"apple\", \"orange\", \"pear\"]))],\r\n \"col2\": [i % 5 for i in range(10)]})\r\n\r\n# Drop duplicates in `ds` on \"col1\"\r\nds2 = ds.filter(partial(is_unique, column=\"col1\", memory=memory))\r\n```\r\n\r\nOf course, we can improve the API so that we can introduce `Dataset.drop_duplicates`.\r\nFor the parallel version, we can use a shared memory set.", "An approach that works assuming you can hold the all the unique document hashes in memory:\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndef get_hash(example):\r\n \"\"\"Get hash of content field.\"\"\"\r\n return {\"hash\": hash(example[\"content\"])} # can use any hashing function here\r\n \r\ndef check_uniques(example, uniques):\r\n \"\"\"Check if current hash is still in set of unique hashes and remove if true.\"\"\"\r\n if example[\"hash\"] in uniques:\r\n uniques.remove(example[\"hash\"])\r\n return True\r\n else:\r\n return False\r\n\r\nds = load_dataset(\"some_dataset\")\r\nds = ds.map(get_hash)\r\nuniques = set(ds.unique(\"hash\"))\r\nds_filter = ds.filter(check_uniques, fn_kwargs={\"uniques\": uniques})\r\n```\r\nIf the `uniques` could be stored in arrow then no additional memory would used at all but I don't know if this is possible.\r\n", "@lvwerra hey, could you tell me how reliable is this deduplication method. i am currently using the same deduplication strategy to deduplicate a large text corpus to pretrain LLMs ~ 11B to 20B. just needed to ensure if this strategy would be fine on large datasets for LLMs pretraining. ", "Hi @StephennFernandes I'm also trying to pretrain an llm, and need to do deduplication for my dataset, \r\nwhich method you applied please?", "Hey @Manel-Hik \n\nThe following is a simpler yet really effective deduplication code that i has used in the past. \n\ngiven that I have limited training corpus for the languages I wanted to train i relied on this code. https://huggingface.co/datasets/Finnish-NLP/mc4_fi_cleaned/blob/main/deduplicate.py\n\n\nfor more robust and stronger deduplication, refer to this huggingface repo, that's newly released: https://github.com/huggingface/datatrove\n\n\n\n", "Thanks a lot Sure I will check it @StephennFernandes ", "Hi, is there any updates? Thanks!", "Update July 2024\r\n\r\nPyArrow now supports `first`/`last` aggregation which would allow us to implement this functionality. [Link](https://github.com/apache/arrow/issues/30950)\r\n\r\nso if we want to move in this direction we can :) Is that something we want to do? Would be happy to contribute.", "Hello, just checking to see if any updates are available. Thank you!" ]
2021-06-17T23:35:38
2025-11-05T06:27:43
null
NONE
null
null
null
null
**Is your feature request related to a problem? Please describe.** i find myself more and more relying on datasets just to do all the preprocessing. One thing however, for removing duplicated rows, I couldn't find out how and am always converting datasets to pandas to do that.. **Describe the solution you'd like** have a functionality of " remove duplicated rows" **Describe alternatives you've considered** convert dataset to pandas, remove duplicate, and convert back... **Additional context** no
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2514/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2514/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
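The feature request in the record above describes the current workaround (convert to pandas, drop duplicates, convert back), and the thread adds hash-based filtering for larger corpora. A minimal sketch of the pandas round-trip on a toy dataset; it holds the full table in memory, so it only suits datasets that fit in RAM:

```python
from datasets import Dataset

ds = Dataset.from_dict({
    "text": ["apple", "orange", "apple", "pear", "orange"],
    "label": [0, 1, 0, 2, 1],
})

# Round-trip through pandas, the workaround described in the issue; duplicates are
# defined over all columns here.
deduped = Dataset.from_pandas(ds.to_pandas().drop_duplicates(), preserve_index=False)
print(deduped.num_rows)  # 3
```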
https://api.github.com/repos/huggingface/datasets/issues/2513
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2513/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2513/comments
https://api.github.com/repos/huggingface/datasets/issues/2513/events
https://github.com/huggingface/datasets/issues/2513
924,174,413
MDU6SXNzdWU5MjQxNzQ0MTM=
2,513
Corelation should be Correlation
{ "avatar_url": "https://avatars.githubusercontent.com/u/71514164?v=4", "events_url": "https://api.github.com/users/colbym-MM/events{/privacy}", "followers_url": "https://api.github.com/users/colbym-MM/followers", "following_url": "https://api.github.com/users/colbym-MM/following{/other_user}", "gists_url": "https://api.github.com/users/colbym-MM/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/colbym-MM", "id": 71514164, "login": "colbym-MM", "node_id": "MDQ6VXNlcjcxNTE0MTY0", "organizations_url": "https://api.github.com/users/colbym-MM/orgs", "received_events_url": "https://api.github.com/users/colbym-MM/received_events", "repos_url": "https://api.github.com/users/colbym-MM/repos", "site_admin": false, "starred_url": "https://api.github.com/users/colbym-MM/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/colbym-MM/subscriptions", "type": "User", "url": "https://api.github.com/users/colbym-MM", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "Hi @colbym-MM, thanks for reporting. We are fixing it." ]
2021-06-17T17:28:48
2021-06-18T08:43:55
2021-06-18T08:43:55
NONE
null
null
null
null
https://github.com/huggingface/datasets/blob/0e87e1d053220e8ecddfa679bcd89a4c7bc5af62/metrics/matthews_correlation/matthews_correlation.py#L66
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2513/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2513/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
15:15:07
https://api.github.com/repos/huggingface/datasets/issues/2512
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2512/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2512/comments
https://api.github.com/repos/huggingface/datasets/issues/2512/events
https://github.com/huggingface/datasets/issues/2512
924,069,353
MDU6SXNzdWU5MjQwNjkzNTM=
2,512
seqeval metric does not work with a recent version of sklearn: classification_report() got an unexpected keyword argument 'output_dict'
{ "avatar_url": "https://avatars.githubusercontent.com/u/8642136?v=4", "events_url": "https://api.github.com/users/avidale/events{/privacy}", "followers_url": "https://api.github.com/users/avidale/followers", "following_url": "https://api.github.com/users/avidale/following{/other_user}", "gists_url": "https://api.github.com/users/avidale/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/avidale", "id": 8642136, "login": "avidale", "node_id": "MDQ6VXNlcjg2NDIxMzY=", "organizations_url": "https://api.github.com/users/avidale/orgs", "received_events_url": "https://api.github.com/users/avidale/received_events", "repos_url": "https://api.github.com/users/avidale/repos", "site_admin": false, "starred_url": "https://api.github.com/users/avidale/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/avidale/subscriptions", "type": "User", "url": "https://api.github.com/users/avidale", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Sorry, I was using an old version of sequeval" ]
2021-06-17T15:36:02
2021-06-17T15:46:07
2021-06-17T15:46:07
NONE
null
null
null
null
## Describe the bug A clear and concise description of what the bug is. ## Steps to reproduce the bug ```python from datasets import load_dataset, load_metric seqeval = load_metric("seqeval") seqeval.compute(predictions=[['A']], references=[['A']]) ``` ## Expected results The function computes a dict with metrics ## Actual results ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-39-69a57f5cf06f> in <module> 1 from datasets import load_dataset, load_metric 2 seqeval = load_metric("seqeval") ----> 3 seqeval.compute(predictions=[['A']], references=[['A']]) ~/p3/lib/python3.7/site-packages/datasets/metric.py in compute(self, *args, **kwargs) 396 references = self.data["references"] 397 with temp_seed(self.seed): --> 398 output = self._compute(predictions=predictions, references=references, **kwargs) 399 400 if self.buf_writer is not None: ~/.cache/huggingface/modules/datasets_modules/metrics/seqeval/81eda1ff004361d4fa48754a446ec69bb7aa9cf4d14c7215f407d1475941c5ff/seqeval.py in _compute(self, predictions, references, suffix) 95 96 def _compute(self, predictions, references, suffix=False): ---> 97 report = classification_report(y_true=references, y_pred=predictions, suffix=suffix, output_dict=True) 98 report.pop("macro avg") 99 report.pop("weighted avg") TypeError: classification_report() got an unexpected keyword argument 'output_dict' ``` ## Environment info sklearn=0.24 datasets=1.1.3
{ "avatar_url": "https://avatars.githubusercontent.com/u/8642136?v=4", "events_url": "https://api.github.com/users/avidale/events{/privacy}", "followers_url": "https://api.github.com/users/avidale/followers", "following_url": "https://api.github.com/users/avidale/following{/other_user}", "gists_url": "https://api.github.com/users/avidale/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/avidale", "id": 8642136, "login": "avidale", "node_id": "MDQ6VXNlcjg2NDIxMzY=", "organizations_url": "https://api.github.com/users/avidale/orgs", "received_events_url": "https://api.github.com/users/avidale/received_events", "repos_url": "https://api.github.com/users/avidale/repos", "site_admin": false, "starred_url": "https://api.github.com/users/avidale/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/avidale/subscriptions", "type": "User", "url": "https://api.github.com/users/avidale", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2512/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2512/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:10:05
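The failure in the record above was traced to an outdated `seqeval` installation rather than to `datasets` itself. For reference, a minimal sketch of the expected call once `seqeval` is current; the IOB-style tags are illustrative:

```python
from datasets import load_metric

# Requires a recent seqeval (pip install -U seqeval); the reporter's error came
# from an old version whose classification_report lacked the output_dict argument.
seqeval = load_metric("seqeval")
results = seqeval.compute(
    predictions=[["B-PER", "I-PER", "O", "B-LOC"]],
    references=[["B-PER", "I-PER", "O", "B-LOC"]],
)
print(results["overall_f1"])  # 1.0 for an exact match
```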
https://api.github.com/repos/huggingface/datasets/issues/2511
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2511/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2511/comments
https://api.github.com/repos/huggingface/datasets/issues/2511/events
https://github.com/huggingface/datasets/issues/2511
923,762,133
MDU6SXNzdWU5MjM3NjIxMzM=
2,511
Add C4
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[ "Update on this: I'm computing the checksums of the data files. It will be available soon", "Added in #2575 :)" ]
2021-06-17T10:31:04
2021-07-05T12:36:58
2021-07-05T12:36:57
MEMBER
null
null
null
null
## Adding a Dataset - **Name:** *C4* - **Description:** *https://github.com/allenai/allennlp/discussions/5056* - **Paper:** *https://arxiv.org/abs/1910.10683* - **Data:** *https://huggingface.co/datasets/allenai/c4* - **Motivation:** *Used a lot for pretraining* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md). Should fix https://github.com/huggingface/datasets/issues/1710
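For reference, a hedged usage sketch once the data above is reachable from the Hub; the `allenai/c4` repo id and the `"en"` config come from the linked dataset page, and streaming is used so the very large corpus is not fully downloaded:

```python
from datasets import load_dataset

# Stream the English config instead of materializing the whole corpus on disk.
c4 = load_dataset("allenai/c4", "en", split="train", streaming=True)

for example in c4.take(3):
    print(example["url"], example["text"][:80])
```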
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2511/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2511/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
18 days, 2:05:53
https://api.github.com/repos/huggingface/datasets/issues/2508
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2508/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2508/comments
https://api.github.com/repos/huggingface/datasets/issues/2508/events
https://github.com/huggingface/datasets/issues/2508
921,863,173
MDU6SXNzdWU5MjE4NjMxNzM=
2,508
Load Image Classification Dataset from Local
{ "avatar_url": "https://avatars.githubusercontent.com/u/8428198?v=4", "events_url": "https://api.github.com/users/Jacobsolawetz/events{/privacy}", "followers_url": "https://api.github.com/users/Jacobsolawetz/followers", "following_url": "https://api.github.com/users/Jacobsolawetz/following{/other_user}", "gists_url": "https://api.github.com/users/Jacobsolawetz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Jacobsolawetz", "id": 8428198, "login": "Jacobsolawetz", "node_id": "MDQ6VXNlcjg0MjgxOTg=", "organizations_url": "https://api.github.com/users/Jacobsolawetz/orgs", "received_events_url": "https://api.github.com/users/Jacobsolawetz/received_events", "repos_url": "https://api.github.com/users/Jacobsolawetz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Jacobsolawetz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Jacobsolawetz/subscriptions", "type": "User", "url": "https://api.github.com/users/Jacobsolawetz", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nateraw", "id": 32437151, "login": "nateraw", "node_id": "MDQ6VXNlcjMyNDM3MTUx", "organizations_url": "https://api.github.com/users/nateraw/orgs", "received_events_url": "https://api.github.com/users/nateraw/received_events", "repos_url": "https://api.github.com/users/nateraw/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "type": "User", "url": "https://api.github.com/users/nateraw", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nateraw", "id": 32437151, "login": "nateraw", "node_id": "MDQ6VXNlcjMyNDM3MTUx", "organizations_url": "https://api.github.com/users/nateraw/orgs", "received_events_url": "https://api.github.com/users/nateraw/received_events", "repos_url": "https://api.github.com/users/nateraw/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "type": "User", "url": "https://api.github.com/users/nateraw", "user_view_type": "public" }, { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" } ]
[ "Hi ! Is this folder structure a standard, a bit like imagenet ?\r\nIn this case maybe we can consider having a dataset loader for cifar-like, imagenet-like, squad-like, conll-like etc. datasets ?\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nmy_custom_cifar = load_dataset(\"cifar_like\", data_dir=\"path/to/data/dir\")\r\n```\r\n\r\nLet me know what you think", "Yep that would be sweet - closing for now as we found a workaround. ", "@lhoestq I think we'll want a generic `image-folder` dataset (same as 'imagenet-like'). This is like `torchvision.datasets.ImageFolder`, and is something vision folks are used to seeing.", "Opening this back up, since I'm planning on tackling this. Already posted a quick version of it on my account on the hub.\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nds = load_dataset('nateraw/image-folder', data_files='PetImages/')\r\n```", "Bumping this one following our recent discussion @mariosasko @nateraw :)" ]
2021-06-15T22:43:33
2022-03-01T16:29:44
2022-03-01T16:29:44
NONE
null
null
null
null
**Is your feature request related to a problem? Please describe.** Yes - we would like to load an image classification dataset with datasets without having to write a custom data loader. **Describe the solution you'd like** Given a folder structure with images of each class in each folder, the ability to load these folders into a HuggingFace dataset like "cifar10". **Describe alternatives you've considered** Implement ViT training outside of the HuggingFace Trainer and without datasets (we did this but prefer to stay on the main path), or write custom data loader logic. **Additional context** We're training ViT on a custom dataset.
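For context, `datasets` later gained a generic `imagefolder` builder that covers this request; a minimal sketch, assuming a local `torchvision.datasets.ImageFolder`-style layout (the `pets/` directory is hypothetical):

```python
from datasets import load_dataset

# Hypothetical layout: pets/train/cat/0.png, pets/train/dog/0.png, ...
ds = load_dataset("imagefolder", data_dir="pets")

# Class labels are inferred from the sub-folder names.
print(ds["train"].features["label"].names)
```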
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2508/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2508/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
258 days, 17:46:11
https://api.github.com/repos/huggingface/datasets/issues/2503
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2503/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2503/comments
https://api.github.com/repos/huggingface/datasets/issues/2503/events
https://github.com/huggingface/datasets/issues/2503
920,636,186
MDU6SXNzdWU5MjA2MzYxODY=
2,503
SubjQA wrong boolean values in entries
{ "avatar_url": "https://avatars.githubusercontent.com/u/26485052?v=4", "events_url": "https://api.github.com/users/arnaudstiegler/events{/privacy}", "followers_url": "https://api.github.com/users/arnaudstiegler/followers", "following_url": "https://api.github.com/users/arnaudstiegler/following{/other_user}", "gists_url": "https://api.github.com/users/arnaudstiegler/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/arnaudstiegler", "id": 26485052, "login": "arnaudstiegler", "node_id": "MDQ6VXNlcjI2NDg1MDUy", "organizations_url": "https://api.github.com/users/arnaudstiegler/orgs", "received_events_url": "https://api.github.com/users/arnaudstiegler/received_events", "repos_url": "https://api.github.com/users/arnaudstiegler/repos", "site_admin": false, "starred_url": "https://api.github.com/users/arnaudstiegler/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arnaudstiegler/subscriptions", "type": "User", "url": "https://api.github.com/users/arnaudstiegler", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "Hi @arnaudstiegler, thanks for reporting. I'm investigating it.", "@arnaudstiegler I have just checked that these mismatches are already present in the original dataset: https://github.com/megagonlabs/SubjQA\r\n\r\nWe are going to contact the dataset owners to report this.", "I have:\r\n- opened an issue in their repo: https://github.com/megagonlabs/SubjQA/issues/3\r\n- written an email to all the paper authors", "Please [see my response](https://github.com/megagonlabs/SubjQA/issues/3#issuecomment-905160010). There will be a fix in a couple of days." ]
2021-06-14T17:42:46
2021-08-25T03:52:06
null
NONE
null
null
null
null
## Describe the bug SubjQA seems to have a boolean that's consistently wrong. It defines: - question_subj_level: The subjectivity level of the question (on a 1 to 5 scale with 1 being the most subjective). - is_ques_subjective: A boolean subjectivity label derived from question_subj_level (i.e., scores below 4 are considered subjective) However, `is_ques_subjective` seems to have wrong values in the entire dataset. For instance, in the example in the dataset card, we have: - "question_subj_level": 2 - "is_ques_subjective": false However, according to the description, the question should be subjective since the `question_subj_level` is below 4.
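A hedged sketch of how the mismatch could be checked over a whole split, assuming the column names from the dataset card and using `"books"` as an example SubjQA domain config:

```python
from datasets import load_dataset

ds = load_dataset("subjqa", "books", split="train")

# The card says question_subj_level < 4 should imply is_ques_subjective == True,
# so count the rows where the stored boolean disagrees with that rule.
mismatched = ds.filter(lambda ex: (ex["question_subj_level"] < 4) != ex["is_ques_subjective"])
print(f"{len(mismatched)} of {len(ds)} rows are inconsistent")
```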
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2503/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2503/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2499
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2499/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2499/comments
https://api.github.com/repos/huggingface/datasets/issues/2499/events
https://github.com/huggingface/datasets/issues/2499
920,413,021
MDU6SXNzdWU5MjA0MTMwMjE=
2,499
Python Programming Puzzles
{ "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VictorSanh", "id": 16107619, "login": "VictorSanh", "node_id": "MDQ6VXNlcjE2MTA3NjE5", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "repos_url": "https://api.github.com/users/VictorSanh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "type": "User", "url": "https://api.github.com/users/VictorSanh", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
open
false
null
[]
[ "👀 @TalSchuster", "Thanks @VictorSanh!\r\nThere's also a [notebook](https://aka.ms/python_puzzles) and [demo](https://aka.ms/python_puzzles_study) available now to try out some of the puzzles" ]
2021-06-14T13:27:18
2021-06-15T18:14:14
null
CONTRIBUTOR
null
null
null
null
## Adding a Dataset - **Name:** Python Programming Puzzles - **Description:** A programming challenge called programming puzzles, intended as an objective and comprehensive evaluation of program synthesis - **Paper:** https://arxiv.org/pdf/2106.05784.pdf - **Data:** https://github.com/microsoft/PythonProgrammingPuzzles ([Scrolling through the data](https://github.com/microsoft/PythonProgrammingPuzzles/blob/main/problems/README.md)) - **Motivation:** Spans a large range of difficulty, problems, and domains. A useful resource for evaluation as we don't have a clear understanding of the abilities and skills of extremely large LMs. Note: it's a growing dataset (contributions are welcome), so we'll need careful versioning for this dataset. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 1, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2499/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2499/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2498
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2498/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2498/comments
https://api.github.com/repos/huggingface/datasets/issues/2498/events
https://github.com/huggingface/datasets/issues/2498
920,411,285
MDU6SXNzdWU5MjA0MTEyODU=
2,498
Improve torch formatting performance
{ "avatar_url": "https://avatars.githubusercontent.com/u/458335?v=4", "events_url": "https://api.github.com/users/vblagoje/events{/privacy}", "followers_url": "https://api.github.com/users/vblagoje/followers", "following_url": "https://api.github.com/users/vblagoje/following{/other_user}", "gists_url": "https://api.github.com/users/vblagoje/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/vblagoje", "id": 458335, "login": "vblagoje", "node_id": "MDQ6VXNlcjQ1ODMzNQ==", "organizations_url": "https://api.github.com/users/vblagoje/orgs", "received_events_url": "https://api.github.com/users/vblagoje/received_events", "repos_url": "https://api.github.com/users/vblagoje/repos", "site_admin": false, "starred_url": "https://api.github.com/users/vblagoje/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vblagoje/subscriptions", "type": "User", "url": "https://api.github.com/users/vblagoje", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
[ "That’s interesting thanks, let’s see what we can do. Can you detail your last sentence? I’m not sure I understand it well.", "Hi ! I just re-ran a quick benchmark and using `to_numpy()` seems to be faster now:\r\n\r\n```python\r\nimport pyarrow as pa # I used pyarrow 3.0.0\r\nimport numpy as np\r\n\r\nn, max_length = 1_000, 512\r\nlow, high, size = 0, 2 << 16, (n, max_length)\r\n\r\ntable = pa.Table.from_pydict({\r\n \"input_ids\": np.random.default_rng(42).integers(low=low, high=high, size=size).tolist()\r\n})\r\n\r\n\r\n%%timeit\r\n_ = table.to_pandas()[\"input_ids\"].to_numpy()\r\n# 1.44 ms ± 80.1 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\r\n\r\n%%timeit\r\n_ = table[\"input_ids\"].to_pandas().to_numpy()\r\n# 461 µs ± 14.2 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\r\n\r\n%%timeit\r\n_ = table[\"input_ids\"].to_numpy()\r\n# 317 µs ± 5.06 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\r\n```\r\n\r\nCurrently the conversion from arrow to numpy is done in the NumpyArrowExtractor here:\r\n\r\nhttps://github.com/huggingface/datasets/blob/d6d0ede9486ffad7944642ca9a326e058b676788/src/datasets/formatting/formatting.py#L143-L166\r\n\r\nLet's update the NumpyArrowExtractor to call `to_numpy` directly and see how our github benchmarks evolve ?__", "Sounds like a plan @lhoestq If you create a PR I'll pick it up and try it out right away! ", "@lhoestq I can also prepare the PR, just lmk. ", "I’m not exactly sure how to read the graph but it seems that to_categorical take a lot of time here. Could you share more informations on the features/stats of your datasets so we could maybe design a synthetic datasets that looks more similar for debugging testing?", "I created https://github.com/huggingface/datasets/pull/2505 if you want to play with it @vblagoje ", "> I’m not exactly sure how to read the graph but it seems that to_categorical take a lot of time here. Could you share more informations on the features/stats of your datasets so we could maybe design a synthetic datasets that looks more similar for debugging testing?\r\n\r\n@thomwolf starting from the top, each rectangle represents the cumulative amount of it takes to execute the method call. Therefore, format_batch in torch_formatter.py takes ~20 sec, and the largest portion of that call is taken by to_pandas call and the smaller portion (grey rectangle) by the other method invocation(s) in format_batch (series_to_numpy etc). \r\n\r\nFeatures of the dataset are BERT pre-training model input columns i.e:\r\n```\r\nf = Features({ \r\n \"input_ids\": Sequence(feature=Value(dtype=\"int32\")), \r\n \"attention_mask\": Sequence(feature=Value(dtype=\"int8\")), \r\n \"token_type_ids\": Sequence(feature=Value(dtype=\"int8\")), \r\n \"labels\": Sequence(feature=Value(dtype=\"int32\")), \r\n \"next_sentence_label\": Value(dtype=\"int8\")\r\n})\r\n```\r\n\r\nI'll work with @lhoestq till we get to the bottom of this one. \r\n ", "@lhoestq the proposed branch is faster, but overall training speedup is a few percentage points. I couldn't figure out how to include the GitHub branch into setup.py, so I couldn't start NVidia optimized Docker-based pre-training run. But on bare metal, there is a slight improvement. I'll do some more performance traces. 
", "Hi @vblagoje, to install Datasets from @lhoestq PR reference #2505, you can use:\r\n```shell\r\npip install git+ssh://git@github.com/huggingface/datasets.git@refs/pull/2505/head#egg=datasets\r\n```", "Hey @albertvillanova yes thank you, I am aware, I can easily pull it from a terminal command line but then I can't automate docker image builds as dependencies are picked up from setup.py and for some reason setup.py doesn't accept this string format.", "@vblagoje in that case, you can add this to your `setup.py`:\r\n```python\r\n install_requires=[\r\n \"datasets @ git+ssh://git@github.com/huggingface/datasets.git@refs/pull/2505/head\",\r\n```", "@lhoestq @thomwolf @albertvillanova The new approach is definitely faster, dataloader now takes less than 3% cumulative time (pink rectangle two rectangles to the right of tensor.py backward invocation)\r\n\r\n![Screen Shot 2021-06-16 at 3 05 06 PM](https://user-images.githubusercontent.com/458335/122224432-19de4700-ce82-11eb-982f-d45d4bcc1e41.png)\r\n\r\nWhen we drill down into dataloader next invocation we get:\r\n\r\n![Screen Shot 2021-06-16 at 3 09 56 PM](https://user-images.githubusercontent.com/458335/122224976-a1c45100-ce82-11eb-8d40-59194740d616.png)\r\n\r\nAnd finally format_batch:\r\n\r\n![Screen Shot 2021-06-16 at 3 11 07 PM](https://user-images.githubusercontent.com/458335/122225132-cae4e180-ce82-11eb-8a16-967ab7c1c2aa.png)\r\n\r\n\r\nNot sure this could be further improved but this is definitely a decent step forward.\r\n\r\n", "> ```python\r\n> datasets @ git+ssh://git@github.com/huggingface/datasets.git@refs/pull/2505/head\r\n> ```\r\n\r\n@albertvillanova how would I replace datasets dependency in https://github.com/huggingface/transformers/blob/master/setup.py as the above approach is not working. ", "@vblagoje I tested my proposed approach before posting it here and it worked for me. \r\n\r\nIs it not working in your case because of the SSH protocol? In that case you could try the same approach but using HTTPS:\r\n```\r\n\"datasets @ git+https://github.com/huggingface/datasets.git@refs/pull/2505/head\",\r\n``` ", "Also note the blanks before and after the `@`.", "@albertvillanova of course it works. Apologies. I needed to change datasets in all deps references , like [here](https://github.com/huggingface/transformers/blob/master/setup.py#L235) for example. ", "Is time spent casting an issue here? See https://github.com/huggingface/datasets/issues/4676 that Datasets can spend huge amounts of time repeatedly casting to Python objects." ]
2021-06-14T13:25:24
2022-07-15T17:12:04
null
CONTRIBUTOR
null
null
null
null
**Is your feature request related to a problem? Please describe.** It would be great, if possible, to further improve read performance of raw encoded datasets and their subsequent conversion to torch tensors. A bit more background. I am working on LM pre-training using the HF ecosystem. We use encoded HF Wikipedia and BookCorpus datasets. The training machines are similar to DGX-1 workstations. We use the HF Trainer torch.distributed training approach on a single machine with 8 GPUs. The current performance is about 30% slower than the NVIDIA-optimized BERT [examples](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling) baseline. Quite a bit of customized code and training loop tricks were used to achieve the baseline performance. It would be great to achieve the same performance while using nothing more than the off-the-shelf HF ecosystem. Perhaps, in the future, with @stas00's work on deepspeed integration, it could even be exceeded. **Describe the solution you'd like** Using profiling tools we've observed that approx. 25% of cumulative run time is spent on the data loader next call. ![dataloader_next](https://user-images.githubusercontent.com/458335/121895543-59742a00-ccee-11eb-85fb-f07715e3f1f6.png) As you can observe, most of the data loader next call is spent in the HF datasets torch_formatter.py format_batch call. Digging a bit deeper into format_batch we can see the following profiler data: ![torch_formatter](https://user-images.githubusercontent.com/458335/121895944-c7b8ec80-ccee-11eb-95d5-5875c5716c30.png) Once again, a lot of time is spent in pyarrow table conversion to pandas, which seems like an intermediary step. Offline @lhoestq told me that this approach was, for some unknown reason, faster than direct-to-numpy conversion. **Describe alternatives you've considered** I am not familiar with pyarrow and have not yet considered the alternatives to the current approach. Most of the online advice around data loader performance improvements revolves around increasing the number of workers and using pinned memory for copying tensors from the host device to GPUs, but we've already tried these avenues without much performance improvement. The Weights & Biases dashboard for the pre-training task reports CPU utilization of ~10%, GPUs are completely saturated (GPU utilization is above 95% on all GPUs), while disk utilization is above 90%.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2498/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2498/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2496
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2496/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2496/comments
https://api.github.com/repos/huggingface/datasets/issues/2496/events
https://github.com/huggingface/datasets/issues/2496
920,216,314
MDU6SXNzdWU5MjAyMTYzMTQ=
2,496
Dataset fingerprint changes after moving the cache directory, which prevent cache reload when using `map`
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[]
2021-06-14T09:20:26
2021-06-21T15:05:03
2021-06-21T15:05:03
MEMBER
null
null
null
null
`Dataset.map` uses the dataset fingerprint (a hash) for caching. However, the fingerprint seems to change when someone moves the cache directory of the dataset. This is because it uses the default fingerprint generation: 1. the dataset path is used to get the fingerprint 2. the modification times of the arrow file are also used to get the fingerprint To fix that we could set the fingerprint of the dataset to be a hash of (<dataset_name>, <config_name>, <version>, <script_hash>), i.e. a hash of the cache path relative to the cache directory.
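A minimal sketch of the proposed fingerprint, assuming the listed components are available as strings; the helper name and the choice of SHA-256 are illustrative, not the actual implementation:

```python
import hashlib

def relative_fingerprint(dataset_name: str, config_name: str, version: str, script_hash: str) -> str:
    # Hash only components that stay identical when the cache directory is moved:
    # no absolute paths and no file modification times.
    key = "/".join([dataset_name, config_name, version, script_hash])
    return hashlib.sha256(key.encode("utf-8")).hexdigest()[:16]

print(relative_fingerprint("openwebtext", "plain_text", "1.0.0", "d6d0ede"))
```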
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2496/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2496/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
7 days, 5:44:37
https://api.github.com/repos/huggingface/datasets/issues/2495
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2495/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2495/comments
https://api.github.com/repos/huggingface/datasets/issues/2495/events
https://github.com/huggingface/datasets/issues/2495
920,170,030
MDU6SXNzdWU5MjAxNzAwMzA=
2,495
JAX formatting
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[]
2021-06-14T08:32:07
2021-06-21T16:15:49
2021-06-21T16:15:49
MEMBER
null
null
null
null
We already support PyTorch, TensorFlow, NumPy, pandas and Arrow dataset formatting. Let's add JAX as well.
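Since the issue is marked completed, JAX formatting should be usable like the other formats; a brief sketch, assuming a recent `datasets` version with `jax` installed:

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})

# With the "jax" format, returned columns come back as jax.numpy arrays
# instead of plain Python lists.
ds = ds.with_format("jax")
print(type(ds[0]["x"]))
```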
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2495/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2495/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
7 days, 7:43:42
https://api.github.com/repos/huggingface/datasets/issues/2494
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2494/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2494/comments
https://api.github.com/repos/huggingface/datasets/issues/2494/events
https://github.com/huggingface/datasets/issues/2494
920,149,183
MDU6SXNzdWU5MjAxNDkxODM=
2,494
Improve docs on Enhancing performance
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "0075ca", "default": true, "description": "Improvements or additions to documentation", "id": 1935892861, "name": "documentation", "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation" } ]
open
false
null
[]
[ "Hi @albertvillanova, I hope you are doing well.\r\n\r\nI am interested in this issue, is this still unresolved and open ?\r\n\r\nThe link you have provided in the above message directs to a webpage that does not exist.\r\n\r\nThanks and Regards", "#self-assign" ]
2021-06-14T08:11:48
2025-06-28T18:55:38
null
MEMBER
null
null
null
null
In the ["Enhancing performance"](https://huggingface.co/docs/datasets/loading_datasets.html#enhancing-performance) section of docs, add specific use cases: - How to make datasets the fastest - How to make datasets take the less RAM - How to make datasets take the less hard drive mem cc: @thomwolf
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2494/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2494/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2489
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2489/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2489/comments
https://api.github.com/repos/huggingface/datasets/issues/2489/events
https://github.com/huggingface/datasets/issues/2489
919,569,749
MDU6SXNzdWU5MTk1Njk3NDk=
2,489
Allow latest pyarrow version once segfault bug is fixed
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[]
2021-06-12T14:09:52
2021-06-14T07:53:23
2021-06-14T07:53:23
MEMBER
null
null
null
null
As pointed out by @symeneses (see https://github.com/huggingface/datasets/pull/2268#issuecomment-860048613), pyarrow has fixed the segfault bug present in version 4.0.0 (see https://issues.apache.org/jira/browse/ARROW-12568): - it was fixed on 3 May 2021 - version 4.0.1 was released on 19 May 2021 with the bug fix
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2489/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2489/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 17:43:31
https://api.github.com/repos/huggingface/datasets/issues/2485
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2485/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2485/comments
https://api.github.com/repos/huggingface/datasets/issues/2485/events
https://github.com/huggingface/datasets/issues/2485
919,099,218
MDU6SXNzdWU5MTkwOTkyMTg=
2,485
Implement layered building
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
[]
2021-06-11T18:54:25
2021-06-11T18:54:25
null
MEMBER
null
null
null
null
As discussed with @stas00 and @lhoestq (see also here https://github.com/huggingface/datasets/issues/2481#issuecomment-859712190): > My suggestion for this would be to have this enabled by default. > > Plus I don't know if there should be a dedicated issue for that, as it is another functionality. But I propose layered building rather than all at once. That is: > > 1. uncompress a handful of files via a generator enough to generate one arrow file > 2. process arrow file 1 > 3. delete all the files that went in and aren't needed anymore. > > rinse and repeat. > > 1. This way much less disk space will be required - e.g. on JZ we won't be running into the inode limitation, also it'd help with the collaborative hub training project > 2. The user doesn't need to go and manually clean up all the huge files that were left after pre-processing > 3. It would already include deleting temp files this issue is talking about > > I wonder if the new streaming API would be of help, except here the streaming would be into arrow files as the destination, rather than dataloaders.
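A self-contained sketch of the loop quoted above, with all paths, shard sizes and helper names made up; it only illustrates the extract → write one Arrow file → delete cycle, not the actual builder integration:

```python
import gzip
import os

import pyarrow as pa

def build_in_layers(compressed_paths, files_per_shard=2, out_dir="shards"):
    os.makedirs(out_dir, exist_ok=True)
    for start in range(0, len(compressed_paths), files_per_shard):
        group = compressed_paths[start : start + files_per_shard]
        texts, extracted = [], []
        for path in group:  # 1. uncompress just enough files for one Arrow shard
            out_path = path[:-3]  # assumes "*.txt.gz" inputs
            with gzip.open(path, "rt") as src, open(out_path, "w") as dst:
                dst.write(src.read())
            extracted.append(out_path)
            with open(out_path) as f:
                texts.extend(f.read().splitlines())
        # 2. process the group into a single Arrow file
        table = pa.table({"text": texts})
        with pa.OSFile(os.path.join(out_dir, f"shard-{start}.arrow"), "wb") as sink:
            with pa.ipc.new_file(sink, table.schema) as writer:
                writer.write_table(table)
        for out_path in extracted:  # 3. delete extracted inputs before the next layer
            os.remove(out_path)
```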
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2485/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2485/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2484
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2484/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2484/comments
https://api.github.com/repos/huggingface/datasets/issues/2484/events
https://github.com/huggingface/datasets/issues/2484
919,092,635
MDU6SXNzdWU5MTkwOTI2MzU=
2,484
Implement loading a dataset builder
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" } ]
[ "#self-assign" ]
2021-06-11T18:47:22
2021-07-05T10:45:57
2021-07-05T10:45:57
MEMBER
null
null
null
null
As discussed with @stas00 and @lhoestq, this would allow things like: ```python from datasets import load_dataset_builder dataset_name = "openwebtext" builder = load_dataset_builder(dataset_name) print(builder.cache_dir) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2484/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2484/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
23 days, 15:58:35
https://api.github.com/repos/huggingface/datasets/issues/2481
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2481/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2481/comments
https://api.github.com/repos/huggingface/datasets/issues/2481/events
https://github.com/huggingface/datasets/issues/2481
918,680,168
MDU6SXNzdWU5MTg2ODAxNjg=
2,481
Delete extracted files to save disk space
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "My suggestion for this would be to have this enabled by default.\r\n\r\nPlus I don't know if there should be a dedicated issue to that is another functionality. But I propose layered building rather than all at once. That is:\r\n\r\n1. uncompress a handful of files via a generator enough to generate one arrow file\r\n2. process arrow file 1\r\n3. delete all the files that went in and aren't needed anymore.\r\n\r\nrinse and repeat.\r\n\r\n1. This way much less disc space will be required - e.g. on JZ we won't be running into inode limitation, also it'd help with the collaborative hub training project\r\n2. The user doesn't need to go and manually clean up all the huge files that were left after pre-processing\r\n3. It would already include deleting temp files this issue is talking about\r\n\r\nI wonder if the new streaming API would be of help, except here the streaming would be into arrow files as the destination, rather than dataloaders." ]
2021-06-11T12:21:52
2021-07-19T09:08:18
2021-07-19T09:08:18
MEMBER
null
null
null
null
As discussed with @stas00 and @lhoestq, allowing the deletion of extracted files would save a great amount of disk space for the typical user.
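As a hedged illustration of the requested behaviour, recent versions of `datasets` expose a `delete_extracted` flag on `DownloadConfig` (the flag name and the exact version that introduced it are assumptions here, so double-check against the installed release):

```python
from datasets import DownloadConfig, load_dataset

# Remove extracted archives once the arrow files are built, keeping only the
# compressed downloads and the processed arrow data on disk.
download_config = DownloadConfig(delete_extracted=True)
dataset = load_dataset("openwebtext", download_config=download_config)
```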
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2481/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2481/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
37 days, 20:46:26
https://api.github.com/repos/huggingface/datasets/issues/2480
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2480/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2480/comments
https://api.github.com/repos/huggingface/datasets/issues/2480/events
https://github.com/huggingface/datasets/issues/2480
918,678,578
MDU6SXNzdWU5MTg2Nzg1Nzg=
2,480
Set download/extracted paths configurable
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "For example to be able to send uncompressed and temp build files to another volume/partition, so that the user gets the minimal disk usage on their primary setup - and ends up with just the downloaded compressed data + arrow files, but outsourcing the huge files and building to another partition. e.g. on JZ there is a special partition for fast data, but it's also volatile, so only temp files should go there.\r\n\r\nThink of it as `TMPDIR` so we need the equivalent for `datasets`." ]
2021-06-11T12:20:24
2021-06-15T14:23:49
null
MEMBER
null
null
null
null
As discussed with @stas00 and @lhoestq, making these paths configurable would allow users to overcome disk space limitations by placing them on different partitions/drives. TODO: - [x] Set configurable extracted datasets path: #2487 - [x] Set configurable downloaded datasets path: #2488 - [ ] Set configurable "incomplete" datasets path?
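A sketch of how these paths could be redirected via environment variables, in the spirit of the linked PRs. `HF_DATASETS_CACHE` is the existing cache override; the two path-specific variable names below are assumptions about what #2487/#2488 introduce and may differ between versions. They must be set before `datasets` is imported, since the paths are read at import time.

```python
import os

# Keep the main cache on the primary disk, but send bulky downloaded archives
# and extracted files to a larger (or volatile) scratch partition.
os.environ["HF_DATASETS_CACHE"] = "/home/user/.cache/huggingface/datasets"
os.environ["HF_DATASETS_DOWNLOADED_DATASETS_PATH"] = "/mnt/scratch/datasets/downloads"  # assumed name
os.environ["HF_DATASETS_EXTRACTED_DATASETS_PATH"] = "/mnt/scratch/datasets/extracted"   # assumed name

from datasets import load_dataset  # imported only after the paths are configured
```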
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2480/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2480/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2478
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2478/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2478/comments
https://api.github.com/repos/huggingface/datasets/issues/2478/events
https://github.com/huggingface/datasets/issues/2478
918,507,510
MDU6SXNzdWU5MTg1MDc1MTA=
2,478
Create release script
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "I've aligned the release script with Transformers in #6004, so I think this issue can be closed." ]
2021-06-11T09:38:02
2023-07-20T13:22:23
null
MEMBER
null
null
null
null
Create a script so that releases can be done automatically (as done in `transformers`).
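A hypothetical sketch of what such a script might do, loosely mirroring the `transformers` workflow; the file path, commit/tag conventions, and publishing steps are all assumptions, not the actual release process of this repository.

```python
import re
import subprocess
from pathlib import Path

def release(version: str) -> None:
    """Bump the version string, commit, tag, and build the package."""
    init = Path("src/datasets/__init__.py")  # assumed location of __version__
    init.write_text(
        re.sub(r'__version__ = ".*"', f'__version__ = "{version}"', init.read_text())
    )
    subprocess.run(["git", "commit", "-am", f"Release: {version}"], check=True)
    subprocess.run(["git", "tag", version], check=True)
    subprocess.run(["git", "push", "--tags"], check=True)
    subprocess.run(["python", "-m", "build"], check=True)  # then upload dist/* with twine
```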
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2478/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2478/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2475
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2475/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2475/comments
https://api.github.com/repos/huggingface/datasets/issues/2475/events
https://github.com/huggingface/datasets/issues/2475
917,650,882
MDU6SXNzdWU5MTc2NTA4ODI=
2,475
Issue in timit_asr database
{ "avatar_url": "https://avatars.githubusercontent.com/u/85702107?v=4", "events_url": "https://api.github.com/users/hrahamim/events{/privacy}", "followers_url": "https://api.github.com/users/hrahamim/followers", "following_url": "https://api.github.com/users/hrahamim/following{/other_user}", "gists_url": "https://api.github.com/users/hrahamim/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hrahamim", "id": 85702107, "login": "hrahamim", "node_id": "MDQ6VXNlcjg1NzAyMTA3", "organizations_url": "https://api.github.com/users/hrahamim/orgs", "received_events_url": "https://api.github.com/users/hrahamim/received_events", "repos_url": "https://api.github.com/users/hrahamim/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hrahamim/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hrahamim/subscriptions", "type": "User", "url": "https://api.github.com/users/hrahamim", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "This bug was fixed in #1995. Upgrading datasets to version 1.6 fixes the issue!", "Indeed was a fixed bug.\r\nWorks on version 1.8\r\nThanks " ]
2021-06-10T18:05:29
2021-06-13T08:13:50
2021-06-13T08:13:13
NONE
null
null
null
null
## Describe the bug I am trying to load the timit_asr dataset, but only the first record is shown (duplicated over all the rows). I am using the following line: `dataset = load_dataset("timit_asr", split="test").shuffle().select(range(10))` The above code results in the same sentence duplicated ten times. It also happens when I use the dataset viewer (Streamlit). ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("timit_asr", split="test").shuffle().select(range(10)) data = dataset.to_pandas() ``` ## Expected results A table with different row information. ## Actual results The same sentence is duplicated across all rows. ## Environment info - `datasets` version: 1.4.1 (also occurs in the latest version) - Platform: Linux-4.15.0-143-generic-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.6.9 - PyTorch version (GPU?): 1.8.1+cu102 (False) - Tensorflow version (GPU?): 1.15.3 (False) - Using GPU in script?: No - Using distributed or parallel set-up in script?: No
{ "avatar_url": "https://avatars.githubusercontent.com/u/85702107?v=4", "events_url": "https://api.github.com/users/hrahamim/events{/privacy}", "followers_url": "https://api.github.com/users/hrahamim/followers", "following_url": "https://api.github.com/users/hrahamim/following{/other_user}", "gists_url": "https://api.github.com/users/hrahamim/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hrahamim", "id": 85702107, "login": "hrahamim", "node_id": "MDQ6VXNlcjg1NzAyMTA3", "organizations_url": "https://api.github.com/users/hrahamim/orgs", "received_events_url": "https://api.github.com/users/hrahamim/received_events", "repos_url": "https://api.github.com/users/hrahamim/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hrahamim/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hrahamim/subscriptions", "type": "User", "url": "https://api.github.com/users/hrahamim", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2475/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2475/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2 days, 14:07:44
https://api.github.com/repos/huggingface/datasets/issues/2474
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2474/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2474/comments
https://api.github.com/repos/huggingface/datasets/issues/2474/events
https://github.com/huggingface/datasets/issues/2474
917,622,055
MDU6SXNzdWU5MTc2MjIwNTU=
2,474
cache_dir parameter for load_from_disk?
{ "avatar_url": "https://avatars.githubusercontent.com/u/7063207?v=4", "events_url": "https://api.github.com/users/chbensch/events{/privacy}", "followers_url": "https://api.github.com/users/chbensch/followers", "following_url": "https://api.github.com/users/chbensch/following{/other_user}", "gists_url": "https://api.github.com/users/chbensch/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/chbensch", "id": 7063207, "login": "chbensch", "node_id": "MDQ6VXNlcjcwNjMyMDc=", "organizations_url": "https://api.github.com/users/chbensch/orgs", "received_events_url": "https://api.github.com/users/chbensch/received_events", "repos_url": "https://api.github.com/users/chbensch/repos", "site_admin": false, "starred_url": "https://api.github.com/users/chbensch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chbensch/subscriptions", "type": "User", "url": "https://api.github.com/users/chbensch", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "Hi ! `load_from_disk` doesn't move the data. If you specify a local path to your mounted drive, then the dataset is going to be loaded directly from the arrow file in this directory. The cache files that result from `map` operations are also stored in the same directory by default.\r\n\r\nHowever note than writing data to your google drive actually fills the VM's disk (see https://github.com/huggingface/datasets/issues/643)\r\n\r\nGiven that, I don't think that changing the cache directory changes anything.\r\n\r\nLet me know what you think", "Thanks for your answer! I am a little surprised since I just want to read the dataset.\r\n\r\nAfter debugging a bit, I noticed that the VM’s disk fills up when the tables (generator) are converted to a list:\r\n\r\nhttps://github.com/huggingface/datasets/blob/5ba149773d23369617563d752aca922081277ec2/src/datasets/table.py#L850\r\n\r\nIf I try to iterate through the table’s generator e.g.: \r\n\r\n`length = sum(1 for x in tables)`\r\n\r\nthe VM’s disk fills up as well.\r\n\r\nI’m running out of Ideas 😄 ", "Indeed reading the data shouldn't increase the VM's disk. Not sure what google colab does under the hood for that to happen", "Apparently, Colab uses a local cache of the data files read/written from Google Drive. See:\r\n- https://github.com/googlecolab/colabtools/issues/2087#issuecomment-860818457\r\n- https://github.com/googlecolab/colabtools/issues/1915#issuecomment-804234540\r\n- https://github.com/googlecolab/colabtools/issues/2147#issuecomment-885052636" ]
2021-06-10T17:39:36
2022-02-16T14:55:01
2022-02-16T14:55:00
NONE
null
null
null
null
**Is your feature request related to a problem? Please describe.** When using Google Colab, big datasets can be an issue, as they won't fit on the VM's disk. Therefore mounting Google Drive could be a possible solution. Unfortunately, when loading my own dataset with the _load_from_disk_ function, the data gets cached to the VM's disk: ` from datasets import load_from_disk myPreprocessedData = load_from_disk("/content/gdrive/MyDrive/ASR_data/myPreprocessedData") ` I know that caching on Google Drive could slow down learning, but at least it would run. **Describe the solution you'd like** Add a cache_dir parameter to the load_from_disk function. **Describe alternatives you've considered** It looks like you could write a custom loading script for the load_dataset function, but this seems much too complex for my use case. Is there perhaps a template here that uses the load_from_disk function?
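A hedged sketch of the request next to a workaround that exists today: the commented-out `cache_dir` argument is the hypothetical parameter being asked for (not part of the current API), while `cache_file_name` on `Dataset.map()` is an existing way to redirect processing cache files for a single split.

```python
from datasets import load_from_disk

dataset = load_from_disk(
    "/content/gdrive/MyDrive/ASR_data/myPreprocessedData",
    # cache_dir="/content/local_cache",  # hypothetical: the parameter requested in this issue
)

# Workaround today (assuming `dataset` is a single Dataset split): send map()
# cache files to the VM's local disk instead of the mounted Google Drive.
processed = dataset.map(
    lambda batch: batch,
    batched=True,
    cache_file_name="/content/local_cache/processed.arrow",
)
```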
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2474/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2474/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
250 days, 21:15:24
https://api.github.com/repos/huggingface/datasets/issues/2472
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2472/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2472/comments
https://api.github.com/repos/huggingface/datasets/issues/2472/events
https://github.com/huggingface/datasets/issues/2472
917,463,821
MDU6SXNzdWU5MTc0NjM4MjE=
2,472
Fix automatic generation of Zenodo DOI
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "I have received a reply from Zenodo support:\r\n> We are currently investigating and fixing this issue related to GitHub releases. As soon as we have solved it we will reach back to you.", "Other repo maintainers had the same problem with Zenodo. \r\n\r\nThere is an open issue on their GitHub repo: zenodo/zenodo#2181", "I have received the following request from Zenodo support:\r\n> Could you send us the link to the repository as well as the release tag?\r\n\r\nMy reply:\r\n> Sure, here it is:\r\n> - Link to the repository: https://github.com/huggingface/datasets\r\n> - Link to the repository at the release tag: https://github.com/huggingface/datasets/releases/tag/1.8.0\r\n> - Release tag: 1.8.0", "Zenodo issue has been fixed. The 1.8.0 release DOI can be found here: https://zenodo.org/record/4946100#.YMd6vKj7RPY" ]
2021-06-10T15:15:46
2021-06-14T16:49:42
2021-06-14T16:49:42
MEMBER
null
null
null
null
After the last release of Datasets (1.8.0), the automatic generation of the Zenodo DOI failed: it appears in yellow as "Received", instead of in green as "Published". I have contacted Zenodo support to fix this issue. TODO: - [x] Check with Zenodo to fix the issue - [x] Check BibTeX entry is right
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2472/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2472/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
4 days, 1:33:56
https://api.github.com/repos/huggingface/datasets/issues/2471
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2471/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2471/comments
https://api.github.com/repos/huggingface/datasets/issues/2471/events
https://github.com/huggingface/datasets/issues/2471
917,067,165
MDU6SXNzdWU5MTcwNjcxNjU=
2,471
Fix PermissionError on Windows when using tqdm >=4.50.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[]
2021-06-10T08:31:49
2021-06-11T15:11:50
2021-06-11T15:11:50
MEMBER
null
null
null
null
See: https://app.circleci.com/pipelines/github/huggingface/datasets/235/workflows/cfb6a39f-68eb-4802-8b17-2cd5e8ea7369/jobs/1111 ``` PermissionError: [WinError 32] The process cannot access the file because it is being used by another process ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2471/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2471/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 6:40:01
https://api.github.com/repos/huggingface/datasets/issues/2470
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2470/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2470/comments
https://api.github.com/repos/huggingface/datasets/issues/2470/events
https://github.com/huggingface/datasets/issues/2470
916,724,260
MDU6SXNzdWU5MTY3MjQyNjA=
2,470
Crash when `num_proc` > dataset length for `map()` on a `datasets.Dataset`.
{ "avatar_url": "https://avatars.githubusercontent.com/u/1170062?v=4", "events_url": "https://api.github.com/users/mbforbes/events{/privacy}", "followers_url": "https://api.github.com/users/mbforbes/followers", "following_url": "https://api.github.com/users/mbforbes/following{/other_user}", "gists_url": "https://api.github.com/users/mbforbes/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mbforbes", "id": 1170062, "login": "mbforbes", "node_id": "MDQ6VXNlcjExNzAwNjI=", "organizations_url": "https://api.github.com/users/mbforbes/orgs", "received_events_url": "https://api.github.com/users/mbforbes/received_events", "repos_url": "https://api.github.com/users/mbforbes/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mbforbes/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mbforbes/subscriptions", "type": "User", "url": "https://api.github.com/users/mbforbes", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi ! It looks like the issue comes from pyarrow. What version of pyarrow are you using ? How did you install it ?", "Thank you for the quick reply! I have `pyarrow==4.0.0`, and I am installing with `pip`. It's not one of my explicit dependencies, so I assume it came along with something else.", "Could you trying reinstalling pyarrow with pip ?\r\nI'm not sure why it would check in your multicurtural-sc directory for source files.", "Sure! I tried reinstalling to get latest. pip was mad because it looks like Datasets currently wants <4.0.0 (which is interesting, because apparently I ended up with 4.0.0 already?), but I gave it a shot anyway:\r\n\r\n```bash\r\n$ pip install --upgrade --force-reinstall pyarrow\r\nCollecting pyarrow\r\n Downloading pyarrow-4.0.1-cp39-cp39-manylinux2014_x86_64.whl (21.9 MB)\r\n |████████████████████████████████| 21.9 MB 23.8 MB/s\r\nCollecting numpy>=1.16.6\r\n Using cached numpy-1.20.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.4 MB)\r\nInstalling collected packages: numpy, pyarrow\r\n Attempting uninstall: numpy\r\n Found existing installation: numpy 1.20.3\r\n Uninstalling numpy-1.20.3:\r\n Successfully uninstalled numpy-1.20.3\r\n Attempting uninstall: pyarrow\r\n Found existing installation: pyarrow 3.0.0\r\n Uninstalling pyarrow-3.0.0:\r\n Successfully uninstalled pyarrow-3.0.0\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\ndatasets 1.8.0 requires pyarrow<4.0.0,>=1.0.0, but you have pyarrow 4.0.1 which is incompatible.\r\nSuccessfully installed numpy-1.20.3 pyarrow-4.0.1\r\n```\r\n\r\nTrying it, the same issue:\r\n\r\n![image](https://user-images.githubusercontent.com/1170062/121730226-3f470b80-caa4-11eb-85a5-684c44c816da.png)\r\n\r\nI tried installing `\"pyarrow<4.0.0\"`, which gave me 3.0.0. Running, still, same issue.\r\n\r\nI agree it's weird that pyarrow is checking the source code directory for its files. (There is no `pyarrow/` directory there.) To me, that makes it seem like an issue with how pyarrow is called.\r\n\r\nOut of curiosity, I tried running this with fewer workers to see when the error arises:\r\n\r\n- 1: ✅\r\n- 2: ✅\r\n- 4: ✅\r\n- 8: ✅\r\n- 10: ✅\r\n- 11: ❌ 🤔\r\n- 12: ❌\r\n- 16: ❌\r\n- 32: ❌\r\n\r\nchecking my datasets:\r\n\r\n```python\r\n>>> datasets\r\nDatasetDict({\r\n train: Dataset({\r\n features: ['text'],\r\n num_rows: 389290\r\n })\r\n validation.sc: Dataset({\r\n features: ['text'],\r\n num_rows: 10 # 🤔\r\n })\r\n validation.wvs: Dataset({\r\n features: ['text'],\r\n num_rows: 93928\r\n })\r\n})\r\n```\r\n\r\nNew hypothesis: crash if `num_proc` > length of a dataset? 😅\r\n\r\nIf so, this might be totally my fault, as the caller. Could be a docs fix, or maybe this library could do a check to limit `num_proc` for this case?", "Good catch ! Not sure why it could raise such a weird issue from pyarrow though\r\nWe should definitely reduce num_proc to the length of the dataset if needed and log a warning.", "This has been fixed in #2566, thanks @connor-mccarthy !\r\nWe'll make a new release soon that includes the fix ;)" ]
2021-06-09T22:40:22
2021-07-01T09:34:54
2021-07-01T09:11:13
NONE
null
null
null
null
## Describe the bug Crash when using `num_proc` > 1 (I used 16) for `map()` on a `datasets.Dataset`. I believe I've had cases where `num_proc` > 1 worked before, but now it seems either inconsistent or dependent on my data. I'm not sure whether the issue is on my end, because it's difficult for me to debug! Any tips greatly appreciated, I'm happy to provide more info if it would help us diagnose. ## Steps to reproduce the bug ```python # this function will be applied with map() def tokenize_function(examples): return tokenizer( examples["text"], padding=PaddingStrategy.DO_NOT_PAD, truncation=True, ) # data_files is a Dict[str, str] mapping name -> path datasets = load_dataset("text", data_files={...}) # this is where the error happens if num_proc = 16, # but is fine if num_proc = 1 tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=num_workers, ) ``` ## Expected results The `map()` function succeeds with `num_proc` > 1. ## Actual results ![image](https://user-images.githubusercontent.com/1170062/121404271-a6cc5200-c910-11eb-8e27-5c893bd04042.png) ![image](https://user-images.githubusercontent.com/1170062/121404362-be0b3f80-c910-11eb-9117-658943029aef.png) ## Environment info - `datasets` version: 1.6.2 - Platform: Linux-5.4.0-73-generic-x86_64-with-glibc2.31 - Python version: 3.9.5 - PyTorch version (GPU?): 1.8.1+cu111 (True) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: Yes, but I think N/A for this issue - Using distributed or parallel set-up in script?: Multi-GPU on one machine, but I think also N/A for this issue
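Until the library clamps this itself (the fix referenced in the comments above), a workaround sketch based on the hypothesis in the thread: never request more processes than the smallest split has rows. The names reuse those from the report, so `datasets` here is the DatasetDict whose `validation.sc` split has only 10 rows.

```python
# Clamp num_proc to the size of the smallest split before calling map().
smallest_split = min(len(split) for split in datasets.values())
safe_num_proc = min(num_workers, smallest_split)

tokenized_datasets = datasets.map(
    tokenize_function,
    batched=True,
    num_proc=safe_num_proc,
)
```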
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2470/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2470/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
21 days, 10:30:51
https://api.github.com/repos/huggingface/datasets/issues/2462
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2462/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2462/comments
https://api.github.com/repos/huggingface/datasets/issues/2462/events
https://github.com/huggingface/datasets/issues/2462
915,384,613
MDU6SXNzdWU5MTUzODQ2MTM=
2,462
Merge DatasetDict and Dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "c5def5", "default": false, "description": "Generic discussion on the library", "id": 2067400324, "name": "generic discussion", "node_id": "MDU6TGFiZWwyMDY3NDAwMzI0", "url": "https://api.github.com/repos/huggingface/datasets/labels/generic%20discussion" } ]
open
false
null
[]
[ "Any update on this? @lhoestq ", "Unless there is high demande I don't think we will end up implementing this. This is a lot of work with very few advantages" ]
2021-06-08T19:22:04
2023-08-16T09:34:34
null
MEMBER
null
null
null
null
As discussed in #2424 and #2437 (please see there for the detailed conversation): - It would be desirable to improve UX with respect to the confusion between DatasetDict and Dataset. - The difference between Dataset and DatasetDict is an additional abstraction complexity that confuses "typical" end users. - A user expects a "Dataset" (whether it contains multiple splits or a single one), and it could be interesting to simplify the user-facing API as much as possible to hide this complexity from the end user. Here is a proposal for discussion, to be refined (and potentially abandoned if it's not good enough): - let's consider that a DatasetDict is also a Dataset with the various splits concatenated one after the other - let's disallow the use of integers in split names (probably not a very big breaking change) - when you index with integers you access the examples progressively, one split after the other (in a deterministic order) - when you index with strings/split names you have the same behavior as now (full backward compat) - let's then also have all the methods of a Dataset on the DatasetDict The end goal would be to merge both the Dataset and DatasetDict objects into a single object that would be (pretty much totally) backward compatible with both. There are a few things that we could discuss if we want to merge Dataset and DatasetDict: 1. What happens if you index by a string? Does it return the column or the split? We could disallow conflicts between column names and split names to avoid ambiguities. It can be surprising to be able to get a column or a split using the same indexing feature: ``` from datasets import load_dataset dataset = load_dataset(...) dataset["train"] dataset["input_ids"] ``` 2. What happens when you iterate over the object? I guess it should iterate over the examples as a Dataset object does, but a DatasetDict used to iterate over the splits, as they are the dictionary keys. This is a breaking change that we can discuss. Moreover, regarding your points: - integers are not allowed as split names already - it's definitely doable to have all the methods. Maybe some of them, like train_test_split, which is currently only available for Dataset, can be tweaked to work for a split dataset. A toy sketch of the proposed indexing semantics follows below. cc: @thomwolf @lhoestq
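A toy sketch (not the `datasets` API) of the indexing semantics proposed above: split names and column names must not collide, string indexing returns either a split or a column concatenated across splits, and integer indexing walks the splits one after the other in a deterministic order.

```python
class MergedDataset:
    """Illustration only: one object behaving like the concatenation of its splits."""

    def __init__(self, splits: dict):
        columns = {c for ds in splits.values() for c in ds.column_names}
        if columns & splits.keys():
            raise ValueError("split names must not collide with column names")
        self._splits = splits  # dict preserves insertion order, so iteration is deterministic

    def __len__(self):
        return sum(len(ds) for ds in self._splits.values())

    def __getitem__(self, key):
        if isinstance(key, str):
            if key in self._splits:  # split access, e.g. merged["train"]
                return self._splits[key]
            # column access, e.g. merged["input_ids"]: concatenated across splits
            return [value for ds in self._splits.values() for value in ds[key]]
        # integer access: examples of one split after the other
        for ds in self._splits.values():
            if key < len(ds):
                return ds[int(key)]
            key -= len(ds)
        raise IndexError(key)
```

Iteration semantics (point 2 above) are left out on purpose, since that is the open breaking-change question in the proposal.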
null
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2462/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2462/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2459
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2459/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2459/comments
https://api.github.com/repos/huggingface/datasets/issues/2459/events
https://github.com/huggingface/datasets/issues/2459
915,222,015
MDU6SXNzdWU5MTUyMjIwMTU=
2,459
`Proto_qa` hosting seems to be broken
{ "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VictorSanh", "id": 16107619, "login": "VictorSanh", "node_id": "MDQ6VXNlcjE2MTA3NjE5", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "repos_url": "https://api.github.com/users/VictorSanh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "type": "User", "url": "https://api.github.com/users/VictorSanh", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "@VictorSanh , I think @mariosasko is already working on it. " ]
2021-06-08T16:16:32
2021-06-10T08:31:09
2021-06-10T08:31:09
CONTRIBUTOR
null
null
null
null
## Describe the bug The hosting (on Github) of the `proto_qa` dataset seems broken. I haven't investigated more yet, just flagging it for now. @zaidalyafeai if you want to dive into it, I think it's just a matter of changing the links in `proto_qa.py` ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("proto_qa") ``` ## Actual results ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/load.py", line 751, in load_dataset use_auth_token=use_auth_token, File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 575, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 630, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/hf/.cache/huggingface/modules/datasets_modules/datasets/proto_qa/445346efaad5c5f200ecda4aa7f0fb50ff1b55edde3003be424a2112c3e8102e/proto_qa.py", line 131, in _split_generators train_fpath = dl_manager.download(_URLs[self.config.name]["train"]) File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 199, in download num_proc=download_config.num_proc, File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/py_utils.py", line 195, in map_nested return function(data_struct) File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 218, in _download return cached_path(url_or_filename, download_config=download_config) File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/iesl/protoqa-data/master/data/train/protoqa_train.jsonl ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2459/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2459/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 16:14:37
https://api.github.com/repos/huggingface/datasets/issues/2458
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2458/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2458/comments
https://api.github.com/repos/huggingface/datasets/issues/2458/events
https://github.com/huggingface/datasets/issues/2458
915,199,693
MDU6SXNzdWU5MTUxOTk2OTM=
2,458
Revert default in-memory for small datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "cc: @krandiash (pinged in reverted PR)." ]
2021-06-08T15:51:41
2021-06-08T18:57:11
2021-06-08T17:55:43
MEMBER
null
null
null
null
Users are reporting issues and confusion about setting default in-memory to True for small datasets. We see 2 clear use cases of Datasets: - the "canonical" way, where you can work with very large datasets, as they are memory-mapped and cached (after every transformation) - some edge cases (speed benchmarks, interactive/exploratory analysis,...), where default in-memory can explicitly be enabled, and no caching will be done After discussing with @lhoestq we have agreed to: - revert this feature (implemented in #2182) - explain in the docs how to optimize speed/performance by setting default in-memory cc: @stas00 https://github.com/huggingface/datasets/pull/2409#issuecomment-856210552
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2458/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2458/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2:04:02
https://api.github.com/repos/huggingface/datasets/issues/2452
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2452/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2452/comments
https://api.github.com/repos/huggingface/datasets/issues/2452/events
https://github.com/huggingface/datasets/issues/2452
913,603,877
MDU6SXNzdWU5MTM2MDM4Nzc=
2,452
MRPC test set differences between torch and tensorflow datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/50372080?v=4", "events_url": "https://api.github.com/users/FredericOdermatt/events{/privacy}", "followers_url": "https://api.github.com/users/FredericOdermatt/followers", "following_url": "https://api.github.com/users/FredericOdermatt/following{/other_user}", "gists_url": "https://api.github.com/users/FredericOdermatt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/FredericOdermatt", "id": 50372080, "login": "FredericOdermatt", "node_id": "MDQ6VXNlcjUwMzcyMDgw", "organizations_url": "https://api.github.com/users/FredericOdermatt/orgs", "received_events_url": "https://api.github.com/users/FredericOdermatt/received_events", "repos_url": "https://api.github.com/users/FredericOdermatt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/FredericOdermatt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FredericOdermatt/subscriptions", "type": "User", "url": "https://api.github.com/users/FredericOdermatt", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Realized that `tensorflow_datasets` is not provided by Huggingface and should therefore raise the issue there." ]
2021-06-07T14:20:26
2021-06-07T14:34:32
2021-06-07T14:34:32
NONE
null
null
null
null
## Describe the bug When using `load_dataset("glue", "mrpc")` to load the MRPC dataset, the test set includes the labels. When using `tensorflow_datasets.load('glue/{}'.format('mrpc'))` to load the dataset the test set does not contain the labels. There should be consistency between torch and tensorflow ways of importing the GLUE datasets. ## Steps to reproduce the bug Minimal working code ```python from datasets import load_dataset import tensorflow as tf import tensorflow_datasets # torch dataset = load_dataset("glue", "mrpc") # tf data = tensorflow_datasets.load('glue/{}'.format('mrpc')) data = list(data['test'].as_numpy_iterator()) for i in range(40,50): tf_sentence1 = data[i]['sentence1'].decode("utf-8") tf_sentence2 = data[i]['sentence2'].decode("utf-8") tf_label = data[i]['label'] index = data[i]['idx'] print('Index {}'.format(index)) torch_sentence1 = dataset['test']['sentence1'][index] torch_sentence2 = dataset['test']['sentence2'][index] torch_label = dataset['test']['label'][index] print('Tensorflow: \n\tSentence1 {}\n\tSentence2 {}\n\tLabel {}'.format(tf_sentence1, tf_sentence2, tf_label)) print('Torch: \n\tSentence1 {}\n\tSentence2 {}\n\tLabel {}'.format(torch_sentence1, torch_sentence2, torch_label)) ``` Sample output ``` Index 954 Tensorflow: Sentence1 Sabri Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate yesterday on charges of violating U.S. arms-control laws . Sentence2 The elder Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate Wednesday on charges of violating U.S. arms control laws . Label -1 Torch: Sentence1 Sabri Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate yesterday on charges of violating U.S. arms-control laws . Sentence2 The elder Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate Wednesday on charges of violating U.S. arms control laws . Label 1 Index 711 Tensorflow: Sentence1 Others keep records sealed for as little as five years or as much as 30 . Sentence2 Some states make them available immediately ; others keep them sealed for as much as 30 years . Label -1 Torch: Sentence1 Others keep records sealed for as little as five years or as much as 30 . Sentence2 Some states make them available immediately ; others keep them sealed for as much as 30 years . Label 0 ``` ## Expected results I would expect the datasets to be independent of whether I am working with torch or tensorflow. ## Actual results Test set labels are provided in the `datasets.load_datasets()` for MRPC. However MRPC is the only task where the test set labels are not -1. ## Environment info - `datasets` version: 1.7.0 - Platform: Linux-5.4.109+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.10 - PyArrow version: 3.0.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/50372080?v=4", "events_url": "https://api.github.com/users/FredericOdermatt/events{/privacy}", "followers_url": "https://api.github.com/users/FredericOdermatt/followers", "following_url": "https://api.github.com/users/FredericOdermatt/following{/other_user}", "gists_url": "https://api.github.com/users/FredericOdermatt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/FredericOdermatt", "id": 50372080, "login": "FredericOdermatt", "node_id": "MDQ6VXNlcjUwMzcyMDgw", "organizations_url": "https://api.github.com/users/FredericOdermatt/orgs", "received_events_url": "https://api.github.com/users/FredericOdermatt/received_events", "repos_url": "https://api.github.com/users/FredericOdermatt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/FredericOdermatt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FredericOdermatt/subscriptions", "type": "User", "url": "https://api.github.com/users/FredericOdermatt", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2452/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2452/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:14:06
https://api.github.com/repos/huggingface/datasets/issues/2450
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2450/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2450/comments
https://api.github.com/repos/huggingface/datasets/issues/2450/events
https://github.com/huggingface/datasets/issues/2450
912,890,291
MDU6SXNzdWU5MTI4OTAyOTE=
2,450
BLUE file not found
{ "avatar_url": "https://avatars.githubusercontent.com/u/3822565?v=4", "events_url": "https://api.github.com/users/mirfan899/events{/privacy}", "followers_url": "https://api.github.com/users/mirfan899/followers", "following_url": "https://api.github.com/users/mirfan899/following{/other_user}", "gists_url": "https://api.github.com/users/mirfan899/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mirfan899", "id": 3822565, "login": "mirfan899", "node_id": "MDQ6VXNlcjM4MjI1NjU=", "organizations_url": "https://api.github.com/users/mirfan899/orgs", "received_events_url": "https://api.github.com/users/mirfan899/received_events", "repos_url": "https://api.github.com/users/mirfan899/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mirfan899/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mirfan899/subscriptions", "type": "User", "url": "https://api.github.com/users/mirfan899", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi ! The `blue` metric doesn't exist, but the `bleu` metric does.\r\nYou can get the full list of metrics [here](https://github.com/huggingface/datasets/tree/master/metrics) or by running\r\n```python\r\nfrom datasets import list_metrics\r\n\r\nprint(list_metrics())\r\n```", "Ah, my mistake. Thanks for correcting" ]
2021-06-06T17:01:54
2021-06-07T10:46:15
2021-06-07T10:46:15
NONE
null
null
null
null
Hi, I'm having the following issue when I try to load the `blue` metric. ```shell import datasets metric = datasets.load_metric('blue') Traceback (most recent call last): File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 320, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.7.0/metrics/blue/blue.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 332, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/master/metrics/blue/blue.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<input>", line 1, in <module> File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 605, in load_metric dataset=False, File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 343, in prepare_module combined_path, github_file_path FileNotFoundError: Couldn't find file locally at blue/blue.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.7.0/metrics/blue/blue.py. The file is also not present on the master branch on github. ``` Here is dataset installed version info ```shell pip freeze | grep datasets datasets==1.7.0 ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/3822565?v=4", "events_url": "https://api.github.com/users/mirfan899/events{/privacy}", "followers_url": "https://api.github.com/users/mirfan899/followers", "following_url": "https://api.github.com/users/mirfan899/following{/other_user}", "gists_url": "https://api.github.com/users/mirfan899/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mirfan899", "id": 3822565, "login": "mirfan899", "node_id": "MDQ6VXNlcjM4MjI1NjU=", "organizations_url": "https://api.github.com/users/mirfan899/orgs", "received_events_url": "https://api.github.com/users/mirfan899/received_events", "repos_url": "https://api.github.com/users/mirfan899/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mirfan899/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mirfan899/subscriptions", "type": "User", "url": "https://api.github.com/users/mirfan899", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2450/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2450/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
17:44:21
https://api.github.com/repos/huggingface/datasets/issues/2447
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2447/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2447/comments
https://api.github.com/repos/huggingface/datasets/issues/2447/events
https://github.com/huggingface/datasets/issues/2447
912,299,527
MDU6SXNzdWU5MTIyOTk1Mjc=
2,447
dataset adversarial_qa has no answers in the "test" set
{ "avatar_url": "https://avatars.githubusercontent.com/u/22728060?v=4", "events_url": "https://api.github.com/users/bjascob/events{/privacy}", "followers_url": "https://api.github.com/users/bjascob/followers", "following_url": "https://api.github.com/users/bjascob/following{/other_user}", "gists_url": "https://api.github.com/users/bjascob/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bjascob", "id": 22728060, "login": "bjascob", "node_id": "MDQ6VXNlcjIyNzI4MDYw", "organizations_url": "https://api.github.com/users/bjascob/orgs", "received_events_url": "https://api.github.com/users/bjascob/received_events", "repos_url": "https://api.github.com/users/bjascob/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bjascob/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bjascob/subscriptions", "type": "User", "url": "https://api.github.com/users/bjascob", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi ! I'm pretty sure that the answers are not made available for the test set on purpose because it is part of the DynaBench benchmark, for which you can submit your predictions on the website.\r\nIn any case we should mention this in the dataset card of this dataset.", "Makes sense, but not intuitive for someone searching through the datasets. Thanks for adding the note to clarify." ]
2021-06-05T14:57:38
2021-06-07T11:13:07
2021-06-07T11:13:07
NONE
null
null
null
null
## Describe the bug When loading the adversarial_qa dataset the 'test' portion has no answers. Only the 'train' and 'validation' portions do. This occurs with all four of the configs ('adversarialQA', 'dbidaf', 'dbert', 'droberta') ## Steps to reproduce the bug ``` from datasets import load_dataset examples = load_dataset('adversarial_qa', 'adversarialQA', script_version="master")['test'] print('Loaded {:,} examples'.format(len(examples))) has_answers = 0 for e in examples: if e['answers']['text']: has_answers += 1 print('{:,} have answers'.format(has_answers)) >>> Loaded 3,000 examples >>> 0 have answers examples = load_dataset('adversarial_qa', 'adversarialQA', script_version="master")['validation'] <...code above...> >>> Loaded 3,000 examples >>> 3,000 have answers ``` ## Expected results If 'test' is a valid dataset, it should have answers. Also note that all of the 'train' and 'validation' sets have answers, there are no "no answer" questions with this set (not sure if this is correct or not). ## Environment info - `datasets` version: 1.7.0 - Platform: Linux-5.8.0-53-generic-x86_64-with-glibc2.29 - Python version: 3.8.5 - PyArrow version: 1.0.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/22728060?v=4", "events_url": "https://api.github.com/users/bjascob/events{/privacy}", "followers_url": "https://api.github.com/users/bjascob/followers", "following_url": "https://api.github.com/users/bjascob/following{/other_user}", "gists_url": "https://api.github.com/users/bjascob/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bjascob", "id": 22728060, "login": "bjascob", "node_id": "MDQ6VXNlcjIyNzI4MDYw", "organizations_url": "https://api.github.com/users/bjascob/orgs", "received_events_url": "https://api.github.com/users/bjascob/received_events", "repos_url": "https://api.github.com/users/bjascob/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bjascob/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bjascob/subscriptions", "type": "User", "url": "https://api.github.com/users/bjascob", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2447/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2447/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 20:15:29
https://api.github.com/repos/huggingface/datasets/issues/2446
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2446/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2446/comments
https://api.github.com/repos/huggingface/datasets/issues/2446/events
https://github.com/huggingface/datasets/issues/2446
911,635,399
MDU6SXNzdWU5MTE2MzUzOTk=
2,446
`yelp_polarity` is broken
{ "avatar_url": "https://avatars.githubusercontent.com/u/22514219?v=4", "events_url": "https://api.github.com/users/JetRunner/events{/privacy}", "followers_url": "https://api.github.com/users/JetRunner/followers", "following_url": "https://api.github.com/users/JetRunner/following{/other_user}", "gists_url": "https://api.github.com/users/JetRunner/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JetRunner", "id": 22514219, "login": "JetRunner", "node_id": "MDQ6VXNlcjIyNTE0MjE5", "organizations_url": "https://api.github.com/users/JetRunner/orgs", "received_events_url": "https://api.github.com/users/JetRunner/received_events", "repos_url": "https://api.github.com/users/JetRunner/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JetRunner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JetRunner/subscriptions", "type": "User", "url": "https://api.github.com/users/JetRunner", "user_view_type": "public" }
[]
closed
false
null
[]
[ "```\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/script_runner.py\", line 332, in _run_script\r\n exec(code, module.__dict__)\r\nFile \"/home/sasha/nlp-viewer/run.py\", line 233, in <module>\r\n configs = get_confs(option)\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/caching.py\", line 604, in wrapped_func\r\n return get_or_create_cached_value()\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/caching.py\", line 588, in get_or_create_cached_value\r\n return_value = func(*args, **kwargs)\r\nFile \"/home/sasha/nlp-viewer/run.py\", line 148, in get_confs\r\n builder_cls = nlp.load.import_main_class(module_path[0], dataset=True)\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/datasets/load.py\", line 85, in import_main_class\r\n module = importlib.import_module(module_path)\r\nFile \"/usr/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\nFile \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\nFile \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\nFile \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\nFile \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\nFile \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\nFile \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\nFile \"/home/sasha/.cache/huggingface/modules/datasets_modules/datasets/yelp_polarity/a770787b2526bdcbfc29ac2d9beb8e820fbc15a03afd3ebc4fb9d8529de57544/yelp_polarity.py\", line 36, in <module>\r\n from datasets.tasks import TextClassification\r\n```", "Solved by updating the `nlpviewer`" ]
2021-06-04T15:44:29
2021-06-04T18:56:47
2021-06-04T18:56:47
CONTRIBUTOR
null
null
null
null
![image](https://user-images.githubusercontent.com/22514219/120828150-c4a35b00-c58e-11eb-8083-a537cee4dbb3.png)
{ "avatar_url": "https://avatars.githubusercontent.com/u/22514219?v=4", "events_url": "https://api.github.com/users/JetRunner/events{/privacy}", "followers_url": "https://api.github.com/users/JetRunner/followers", "following_url": "https://api.github.com/users/JetRunner/following{/other_user}", "gists_url": "https://api.github.com/users/JetRunner/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JetRunner", "id": 22514219, "login": "JetRunner", "node_id": "MDQ6VXNlcjIyNTE0MjE5", "organizations_url": "https://api.github.com/users/JetRunner/orgs", "received_events_url": "https://api.github.com/users/JetRunner/received_events", "repos_url": "https://api.github.com/users/JetRunner/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JetRunner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JetRunner/subscriptions", "type": "User", "url": "https://api.github.com/users/JetRunner", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2446/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2446/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3:12:18
https://api.github.com/repos/huggingface/datasets/issues/2444
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2444/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2444/comments
https://api.github.com/repos/huggingface/datasets/issues/2444/events
https://github.com/huggingface/datasets/issues/2444
911,297,139
MDU6SXNzdWU5MTEyOTcxMzk=
2,444
Sentence Boundaries missing in Dataset: xtreme / udpos
{ "avatar_url": "https://avatars.githubusercontent.com/u/50871412?v=4", "events_url": "https://api.github.com/users/cosmeowpawlitan/events{/privacy}", "followers_url": "https://api.github.com/users/cosmeowpawlitan/followers", "following_url": "https://api.github.com/users/cosmeowpawlitan/following{/other_user}", "gists_url": "https://api.github.com/users/cosmeowpawlitan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cosmeowpawlitan", "id": 50871412, "login": "cosmeowpawlitan", "node_id": "MDQ6VXNlcjUwODcxNDEy", "organizations_url": "https://api.github.com/users/cosmeowpawlitan/orgs", "received_events_url": "https://api.github.com/users/cosmeowpawlitan/received_events", "repos_url": "https://api.github.com/users/cosmeowpawlitan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cosmeowpawlitan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cosmeowpawlitan/subscriptions", "type": "User", "url": "https://api.github.com/users/cosmeowpawlitan", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi,\r\n\r\nThis is a known issue. More info on this issue can be found in #2061. If you are looking for an open-source contribution, there are step-by-step instructions in the linked issue that you can follow to fix it.", "Closed by #2466." ]
2021-06-04T09:10:26
2021-06-18T11:53:43
2021-06-18T11:53:43
CONTRIBUTOR
null
null
null
null
I was browsing through the annotation guidelines, as suggested by the datasets introduction. The guidelines say "There must be exactly one blank line after every sentence, including the last sentence in the file. Empty sentences are not allowed." in the [Sentence Boundaries and Comments section](https://universaldependencies.org/format.html#sentence-boundaries-and-comments). But the sentence boundaries do not seem to be well represented by the huggingface datasets features. I found out that multiple sentences are concatenated together as a 1D array, without any delimiter. PAN-x, which is another token classification subset from xtreme, does represent the sentence boundary using a 2D array. You may compare PAN-x.en and udpos.English in the explorer: https://huggingface.co/datasets/viewer/?dataset=xtreme
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2444/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2444/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
14 days, 2:43:17
https://api.github.com/repos/huggingface/datasets/issues/2443
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2443/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2443/comments
https://api.github.com/repos/huggingface/datasets/issues/2443/events
https://github.com/huggingface/datasets/issues/2443
909,983,574
MDU6SXNzdWU5MDk5ODM1NzQ=
2,443
Some tests hang on Windows
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi ! That would be nice indeed to at least have a warning, since we don't handle the max path length limit.\r\nAlso if we could have an error instead of an infinite loop I'm sure windows users will appreciate that", "Unfortunately, I know this problem very well... 😅 \r\n\r\nI remember having proposed to throw an error instead of hanging in an infinite loop #2220: 60c7d1b6b71469599a27147a08100f594e7a3f84, 8c8ab60018b00463edf1eca500e434ff061546fc \r\nbut @lhoestq told me:\r\n> Note that the filelock module comes from this project that hasn't changed in years - while still being used by ten of thousands of projects:\r\nhttps://github.com/benediktschmitt/py-filelock\r\n> \r\n> Unless we have proper tests for this, I wouldn't recommend to change it\r\n\r\nI opened an Issue requesting a warning/error at startup for that case: #2224", "@albertvillanova Thanks for additional info on this issue.\r\n\r\nYes, I think the best option is to throw an error instead of suppressing it in a loop. I've considered 2 more options, but I don't really like them:\r\n1. create a temporary file with a filename longer than 255 characters on import; if this fails, long paths are not enabled and raise a warning. I'm not sure about this approach because I don't like the idea of creating a temporary file on import for this purpose.\r\n2. check if long paths are enabled with [this code](https://stackoverflow.com/a/46546731/14095927). As mentioned in the comment, this code relies on an undocumented function and Win10-specific." ]
2021-06-03T00:27:30
2021-06-28T08:47:39
2021-06-28T08:47:39
COLLABORATOR
null
null
null
null
Currently, several tests hang on Windows if the max path limit of 260 characters is not disabled. This happens due to the changes introduced by #2223 that cause an infinite loop in `WindowsFileLock` described in #2220. This can be very tricky to debug, so I think now is a good time to address these issues/PRs. IMO throwing an error is too harsh, but maybe we can emit a warning in the top-level `__init__.py ` on startup if long paths are not enabled.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2443/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2443/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
25 days, 8:20:09
https://api.github.com/repos/huggingface/datasets/issues/2441
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2441/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2441/comments
https://api.github.com/repos/huggingface/datasets/issues/2441/events
https://github.com/huggingface/datasets/issues/2441
908,554,713
MDU6SXNzdWU5MDg1NTQ3MTM=
2,441
DuplicatedKeysError on personal dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/22605313?v=4", "events_url": "https://api.github.com/users/lucaguarro/events{/privacy}", "followers_url": "https://api.github.com/users/lucaguarro/followers", "following_url": "https://api.github.com/users/lucaguarro/following{/other_user}", "gists_url": "https://api.github.com/users/lucaguarro/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lucaguarro", "id": 22605313, "login": "lucaguarro", "node_id": "MDQ6VXNlcjIyNjA1MzEz", "organizations_url": "https://api.github.com/users/lucaguarro/orgs", "received_events_url": "https://api.github.com/users/lucaguarro/received_events", "repos_url": "https://api.github.com/users/lucaguarro/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lucaguarro/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lucaguarro/subscriptions", "type": "User", "url": "https://api.github.com/users/lucaguarro", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi ! In your dataset script you must be yielding examples like\r\n```python\r\nfor line in file:\r\n ...\r\n yield key, {...}\r\n```\r\n\r\nSince `datasets` 1.7.0 we enforce the keys to be unique.\r\nHowever it looks like your examples generator creates duplicate keys: at least two examples have key 0.\r\n\r\nYou can fix that by making sure that your keys are unique.\r\n\r\nFor example if you use a counter to define the key of each example, make sure that your counter is not reset to 0 in during examples generation (between two open files for examples).\r\n\r\nLet me know if you have other questions :)", "Yup, I indeed was generating duplicate keys. Fixed it and now it's working." ]
2021-06-01T17:59:41
2021-06-04T23:50:03
2021-06-04T23:50:03
NONE
null
null
null
null
## Describe the bug Ever since today, I have been getting a DuplicatedKeysError while trying to load my dataset from my own script. Error returned when running this line: `dataset = load_dataset('/content/drive/MyDrive/Thesis/Datasets/book_preprocessing/goodreads_maharjan_trimmed_and_nered/goodreadsnered.py')` Note that my script was working fine with earlier versions of the Datasets library. Cannot say with 100% certainty if I have been doing something wrong with my dataset script this whole time or if this is simply a bug with the new version of datasets. ## Steps to reproduce the bug I cannot provide code to reproduce the error as I am working with my own dataset. I can however provide my script if requested. ## Expected results For my data to be loaded. ## Actual results **DuplicatedKeysError** exception is raised ``` Downloading and preparing dataset good_reads_practice_dataset/main_domain (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /root/.cache/huggingface/datasets/good_reads_practice_dataset/main_domain/1.1.0/64ff7c3fee2693afdddea75002eb6887d4fedc3d812ae3622128c8504ab21655... --------------------------------------------------------------------------- DuplicatedKeysError Traceback (most recent call last) <ipython-input-6-c342ea0dae9d> in <module>() ----> 1 dataset = load_dataset('/content/drive/MyDrive/Thesis/Datasets/book_preprocessing/goodreads_maharjan_trimmed_and_nered/goodreadsnered.py') 5 frames /usr/local/lib/python3.7/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, **config_kwargs) 749 try_from_hf_gcs=try_from_hf_gcs, 750 base_path=base_path, --> 751 use_auth_token=use_auth_token, 752 ) 753 /usr/local/lib/python3.7/dist-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 573 if not downloaded_from_gcs: 574 self._download_and_prepare( --> 575 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 576 ) 577 # Sync info /usr/local/lib/python3.7/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 650 try: 651 # Prepare split will record examples associated to the split --> 652 self._prepare_split(split_generator, **prepare_split_kwargs) 653 except OSError as e: 654 raise OSError( /usr/local/lib/python3.7/dist-packages/datasets/builder.py in _prepare_split(self, split_generator) 990 writer.write(example, key) 991 finally: --> 992 num_examples, num_bytes = writer.finalize() 993 994 split_generator.split_info.num_examples = num_examples /usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in finalize(self, close_stream) 407 # In case current_examples < writer_batch_size, but user uses finalize() 408 if self._check_duplicates: --> 409 self.check_duplicate_keys() 410 # Re-intializing to empty list for next batch 411 self.hkey_record = [] /usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in check_duplicate_keys(self) 347 for hash, key in self.hkey_record: 348 if hash in tmp_record: --> 349 raise DuplicatedKeysError(key) 350 else: 351 tmp_record.add(hash) DuplicatedKeysError: FAILURE TO GENERATE DATASET ! Found duplicate Key: 0 Keys should be unique and deterministic in nature ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.7.0 - Platform: Windows-10-10.0.19041-SP0 - Python version: 3.7.9 - PyArrow version: 3.0.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/22605313?v=4", "events_url": "https://api.github.com/users/lucaguarro/events{/privacy}", "followers_url": "https://api.github.com/users/lucaguarro/followers", "following_url": "https://api.github.com/users/lucaguarro/following{/other_user}", "gists_url": "https://api.github.com/users/lucaguarro/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lucaguarro", "id": 22605313, "login": "lucaguarro", "node_id": "MDQ6VXNlcjIyNjA1MzEz", "organizations_url": "https://api.github.com/users/lucaguarro/orgs", "received_events_url": "https://api.github.com/users/lucaguarro/received_events", "repos_url": "https://api.github.com/users/lucaguarro/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lucaguarro/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lucaguarro/subscriptions", "type": "User", "url": "https://api.github.com/users/lucaguarro", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2441/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2441/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3 days, 5:50:22
https://api.github.com/repos/huggingface/datasets/issues/2440
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2440/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2440/comments
https://api.github.com/repos/huggingface/datasets/issues/2440/events
https://github.com/huggingface/datasets/issues/2440
908,521,954
MDU6SXNzdWU5MDg1MjE5NTQ=
2,440
Remove `extended` field from dataset tagger
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "The tagger also doesn't insert the value for the `size_categories` field automatically, so this should be fixed too", "Thanks for reporting. Indeed the `extended` tag doesn't exist. Not sure why we had that in the tagger.\r\nThe repo of the tagger is here if someone wants to give this a try: https://github.com/huggingface/datasets-tagging\r\nOtherwise I can probably fix it next week", "I've opened a PR on `datasets-tagging` to fix the issue 🚀 ", "thanks ! this is fixed now" ]
2021-06-01T17:18:42
2021-06-09T09:06:31
2021-06-09T09:06:30
MEMBER
null
null
null
null
## Describe the bug While working on #2435 I used the [dataset tagger](https://huggingface.co/datasets/tagging/) to generate the missing tags for the YAML metadata of each README.md file. However, it seems that our CI raises an error when the `extended` field is included: ``` dataset_name = 'arcd' @pytest.mark.parametrize("dataset_name", get_changed_datasets(repo_path)) def test_changed_dataset_card(dataset_name): card_path = repo_path / "datasets" / dataset_name / "README.md" assert card_path.exists() error_messages = [] try: ReadMe.from_readme(card_path) except Exception as readme_error: error_messages.append(f"The following issues have been found in the dataset cards:\nREADME:\n{readme_error}") try: DatasetMetadata.from_readme(card_path) except Exception as metadata_error: error_messages.append( f"The following issues have been found in the dataset cards:\nYAML tags:\n{metadata_error}" ) if error_messages: > raise ValueError("\n".join(error_messages)) E ValueError: The following issues have been found in the dataset cards: E YAML tags: E __init__() got an unexpected keyword argument 'extended' tests/test_dataset_cards.py:70: ValueError ``` Consider either removing this tag from the tagger or including it as part of the validation step in the CI. cc @yjernite
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2440/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2440/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
7 days, 15:47:48
https://api.github.com/repos/huggingface/datasets/issues/2434
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2434/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2434/comments
https://api.github.com/repos/huggingface/datasets/issues/2434/events
https://github.com/huggingface/datasets/issues/2434
907,503,557
MDU6SXNzdWU5MDc1MDM1NTc=
2,434
Extend QuestionAnsweringExtractive template to handle nested columns
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "this is also the case for the following datasets and configurations:\r\n\r\n* `mlqa` with config `mlqa-translate-train.ar`\r\n\r\n", "The current task API is somewhat deprecated (we plan to align it with `train eval index` at some point), so I think we can close this issue." ]
2021-05-31T14:06:51
2022-10-05T17:06:28
2022-10-05T17:06:28
MEMBER
null
null
null
null
Currently the `QuestionAnsweringExtractive` task template and `prepare_for_task` only support "flat" features. We should extend the functionality to cover QA datasets like: * `iapp_wiki_qa_squad` * `parsinlu_reading_comprehension` where the nested features differ from those of `squad` and trigger an `ArrowNotImplementedError`: ``` --------------------------------------------------------------------------- ArrowNotImplementedError Traceback (most recent call last) <ipython-input-12-50e5b8f69c20> in <module> ----> 1 ds.prepare_for_task("question-answering-extractive")[0] ~/git/datasets/src/datasets/arrow_dataset.py in prepare_for_task(self, task) 1436 # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__` 1437 dataset.info.task_templates = None -> 1438 dataset = dataset.cast(features=template.features) 1439 return dataset 1440 ~/git/datasets/src/datasets/arrow_dataset.py in cast(self, features, batch_size, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, num_proc) 977 format = self.format 978 dataset = self.with_format("arrow") --> 979 dataset = dataset.map( 980 lambda t: t.cast(schema), 981 batched=True, ~/git/datasets/src/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc) 1600 1601 if num_proc is None or num_proc == 1: -> 1602 return self._map_single( 1603 function=function, 1604 with_indices=with_indices, ~/git/datasets/src/datasets/arrow_dataset.py in wrapper(*args, **kwargs) 176 } 177 # apply actual function --> 178 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 179 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 180 # re-apply format to the output ~/git/datasets/src/datasets/fingerprint.py in wrapper(*args, **kwargs) 395 # Call actual function 396 --> 397 out = func(self, *args, **kwargs) 398 399 # Update fingerprint of in-place transforms + update in-place history of transforms ~/git/datasets/src/datasets/arrow_dataset.py in _map_single(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, desc) 1940 ) # Something simpler? 1941 try: -> 1942 batch = apply_function_on_filtered_inputs( 1943 batch, 1944 indices, ~/git/datasets/src/datasets/arrow_dataset.py in apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples, offset) 1836 effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset 1837 processed_inputs = ( -> 1838 function(*fn_args, effective_indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs) 1839 ) 1840 if update_data is None: ~/git/datasets/src/datasets/arrow_dataset.py in <lambda>(t) 978 dataset = self.with_format("arrow") 979 dataset = dataset.map( --> 980 lambda t: t.cast(schema), 981 batched=True, 982 batch_size=batch_size, ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/table.pxi in pyarrow.lib.Table.cast() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/table.pxi in pyarrow.lib.ChunkedArray.cast() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/compute.py in cast(arr, target_type, safe) 241 else: 242 options = CastOptions.unsafe(target_type) --> 243 return call_function("cast", [arr], options) 244 245 ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/_compute.pyx in pyarrow._compute.call_function() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/_compute.pyx in pyarrow._compute.Function.call() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status() ArrowNotImplementedError: Unsupported cast from struct<answer_end: list<item: int32>, answer_start: list<item: int32>, text: list<item: string>> to struct using function cast_struct ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2434/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2434/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
492 days, 2:59:37
https://api.github.com/repos/huggingface/datasets/issues/2431
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2431/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2431/comments
https://api.github.com/repos/huggingface/datasets/issues/2431/events
https://github.com/huggingface/datasets/issues/2431
907,413,691
MDU6SXNzdWU5MDc0MTM2OTE=
2,431
DuplicatedKeysError when trying to load adversarial_qa
{ "avatar_url": "https://avatars.githubusercontent.com/u/21348833?v=4", "events_url": "https://api.github.com/users/hanss0n/events{/privacy}", "followers_url": "https://api.github.com/users/hanss0n/followers", "following_url": "https://api.github.com/users/hanss0n/following{/other_user}", "gists_url": "https://api.github.com/users/hanss0n/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hanss0n", "id": 21348833, "login": "hanss0n", "node_id": "MDQ6VXNlcjIxMzQ4ODMz", "organizations_url": "https://api.github.com/users/hanss0n/orgs", "received_events_url": "https://api.github.com/users/hanss0n/received_events", "repos_url": "https://api.github.com/users/hanss0n/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hanss0n/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hanss0n/subscriptions", "type": "User", "url": "https://api.github.com/users/hanss0n", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Thanks for reporting !\r\n#2433 fixed the issue, thanks @mariosasko :)\r\n\r\nWe'll do a patch release soon of the library.\r\nIn the meantime, you can use the fixed version of adversarial_qa by adding `script_version=\"master\"` in `load_dataset`" ]
2021-05-31T12:11:19
2021-06-01T08:54:03
2021-06-01T08:52:11
NONE
null
null
null
null
## Describe the bug A clear and concise description of what the bug is. ## Steps to reproduce the bug ```python dataset = load_dataset('adversarial_qa', 'adversarialQA') ``` ## Expected results The dataset should be loaded into memory ## Actual results >DuplicatedKeysError: FAILURE TO GENERATE DATASET ! >Found duplicate Key: 4d3cb5677211ee32895ca9c66dad04d7152254d4 >Keys should be unique and deterministic in nature > > >During handling of the above exception, another exception occurred: > >DuplicatedKeysError Traceback (most recent call last) > >/usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in check_duplicate_keys(self) > 347 for hash, key in self.hkey_record: > 348 if hash in tmp_record: >--> 349 raise DuplicatedKeysError(key) > 350 else: > 351 tmp_record.add(hash) > >DuplicatedKeysError: FAILURE TO GENERATE DATASET ! >Found duplicate Key: 4d3cb5677211ee32895ca9c66dad04d7152254d4 >Keys should be unique and deterministic in nature ## Environment info - `datasets` version: 1.7.0 - Platform: Linux-5.4.109+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.10 - PyArrow version: 3.0.0
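The "Keys should be unique and deterministic" requirement in the error above refers to the key each loading script yields from `_generate_examples`. A minimal sketch of that pattern, assuming a JSON Lines input file (the file layout is illustrative, not taken from the adversarial_qa script):

```python
import json

def generate_examples(filepath):
    # The first element of each yielded pair is the key used by the arrow
    # writer; deriving it from the enumeration index keeps it unique and
    # deterministic, which avoids the DuplicatedKeysError reported above.
    with open(filepath, encoding="utf-8") as f:
        for idx, line in enumerate(f):
            yield idx, json.loads(line)
```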
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2431/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2431/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
20:40:52
https://api.github.com/repos/huggingface/datasets/issues/2426
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2426/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2426/comments
https://api.github.com/repos/huggingface/datasets/issues/2426/events
https://github.com/huggingface/datasets/issues/2426
906,473,546
MDU6SXNzdWU5MDY0NzM1NDY=
2,426
Saving Graph/Structured Data in Datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/3295342?v=4", "events_url": "https://api.github.com/users/gsh199449/events{/privacy}", "followers_url": "https://api.github.com/users/gsh199449/followers", "following_url": "https://api.github.com/users/gsh199449/following{/other_user}", "gists_url": "https://api.github.com/users/gsh199449/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gsh199449", "id": 3295342, "login": "gsh199449", "node_id": "MDQ6VXNlcjMyOTUzNDI=", "organizations_url": "https://api.github.com/users/gsh199449/orgs", "received_events_url": "https://api.github.com/users/gsh199449/received_events", "repos_url": "https://api.github.com/users/gsh199449/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gsh199449/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gsh199449/subscriptions", "type": "User", "url": "https://api.github.com/users/gsh199449", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "It should probably work out of the box to save structured data. If you want to show an example we can help you.", "An example of a toy dataset is like:\r\n```json\r\n[\r\n {\r\n \"name\": \"mike\",\r\n \"friends\": [\r\n \"tom\",\r\n \"lily\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"aaaaa\",\r\n \"reader\": [\r\n \"tom\",\r\n \"lucy\"\r\n ]\r\n }\r\n ]\r\n },\r\n {\r\n \"name\": \"tom\",\r\n \"friends\": [\r\n \"mike\",\r\n \"bbb\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"xxxxx\",\r\n \"reader\": [\r\n \"tom\",\r\n \"qqqq\"\r\n ]\r\n }\r\n ]\r\n }\r\n]\r\n```\r\nWe can use the friendship relation to build a directional graph, and a user node can be represented using the articles written by himself. And the relationship between articles can be built when the article has read by the same user.\r\nThis dataset can be used to model the heterogeneous relationship between users and articles, and this graph can be used to build recommendation systems to recommend articles to the user, or potential friends to the user.", "Hi,\r\n\r\nyou can do the following to load this data into a `Dataset`:\r\n```python\r\nfrom datasets import Dataset\r\nexamples = [\r\n {\r\n \"name\": \"mike\",\r\n \"friends\": [\r\n \"tom\",\r\n \"lily\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"aaaaa\",\r\n \"reader\": [\r\n \"tom\",\r\n \"lucy\"\r\n ]\r\n }\r\n ]\r\n },\r\n {\r\n \"name\": \"tom\",\r\n \"friends\": [\r\n \"mike\",\r\n \"bbb\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"xxxxx\",\r\n \"reader\": [\r\n \"tom\",\r\n \"qqqq\"\r\n ]\r\n }\r\n ]\r\n }\r\n]\r\n\r\nkeys = examples[0].keys()\r\nvalues = [ex.values() for ex in examples]\r\ndataset = Dataset.from_dict({k: list(v) for k, v in zip(keys, zip(*values))})\r\n```\r\n\r\nLet us know if this works for you.", "Thank you so much, and that works! I also have a question that if the dataset is very large, that cannot be loaded into the memory. How to create the Dataset?", "If your dataset doesn't fit in memory, store it in a local file and load it from there. Check out [this chapter](https://huggingface.co/docs/datasets/master/loading_datasets.html#from-local-files) in the docs for more info.", "Nice! Thanks for your help." ]
2021-05-29T13:35:21
2021-06-02T01:21:03
2021-06-02T01:21:03
NONE
null
null
null
null
Thanks for this amazing library! My question is about structured data that is organized as a graph: for example, a dataset with users' friendship relations and users' articles. When I try to save a Python dict in the dataset, an error occurs: ``did not recognize Python value type when inferring an Arrow data type''. I know that storing a Python dict in pyarrow datasets is not the best practice, but I have no idea how to save structured data in Datasets. Thank you very much for your help.
{ "avatar_url": "https://avatars.githubusercontent.com/u/3295342?v=4", "events_url": "https://api.github.com/users/gsh199449/events{/privacy}", "followers_url": "https://api.github.com/users/gsh199449/followers", "following_url": "https://api.github.com/users/gsh199449/following{/other_user}", "gists_url": "https://api.github.com/users/gsh199449/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gsh199449", "id": 3295342, "login": "gsh199449", "node_id": "MDQ6VXNlcjMyOTUzNDI=", "organizations_url": "https://api.github.com/users/gsh199449/orgs", "received_events_url": "https://api.github.com/users/gsh199449/received_events", "repos_url": "https://api.github.com/users/gsh199449/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gsh199449/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gsh199449/subscriptions", "type": "User", "url": "https://api.github.com/users/gsh199449", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2426/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2426/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3 days, 11:45:42
https://api.github.com/repos/huggingface/datasets/issues/2424
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2424/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2424/comments
https://api.github.com/repos/huggingface/datasets/issues/2424/events
https://github.com/huggingface/datasets/issues/2424
906,193,679
MDU6SXNzdWU5MDYxOTM2Nzk=
2,424
load_from_disk and save_to_disk are not compatible with each other
{ "avatar_url": "https://avatars.githubusercontent.com/u/7584674?v=4", "events_url": "https://api.github.com/users/roholazandie/events{/privacy}", "followers_url": "https://api.github.com/users/roholazandie/followers", "following_url": "https://api.github.com/users/roholazandie/following{/other_user}", "gists_url": "https://api.github.com/users/roholazandie/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/roholazandie", "id": 7584674, "login": "roholazandie", "node_id": "MDQ6VXNlcjc1ODQ2NzQ=", "organizations_url": "https://api.github.com/users/roholazandie/orgs", "received_events_url": "https://api.github.com/users/roholazandie/received_events", "repos_url": "https://api.github.com/users/roholazandie/repos", "site_admin": false, "starred_url": "https://api.github.com/users/roholazandie/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/roholazandie/subscriptions", "type": "User", "url": "https://api.github.com/users/roholazandie", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi,\r\n\r\n`load_dataset` returns an instance of `DatasetDict` if `split` is not specified, so instead of `Dataset.load_from_disk`, use `DatasetDict.load_from_disk` to load the dataset from disk.", "Thanks it worked!", "Though I see a stream of issues open by people lost between datasets and datasets dicts so maybe there is here something that could be better in terms of UX. Could be better error handling or something else smarter to even avoid said errors but maybe we should think about this. Reopening to use this issue as a discussion place but feel free to open a new open if you prefer @lhoestq @albertvillanova ", "We should probably improve the error message indeed.\r\n\r\nAlso note that there exists a function `load_from_disk` that can load a Dataset or a DatasetDict. Under the hood it calls either `Dataset.load_from_disk` or `DatasetDict.load_from_disk`:\r\n\r\n\r\n```python\r\nfrom datasets import load_from_disk\r\n\r\ndataset_dict = load_from_disk(\"path/to/dataset/dict\")\r\nsingle_dataset = load_from_disk(\"path/to/single/dataset\")\r\n```", "I just opened #2437 to improve the error message", "Superseded by #2462 " ]
2021-05-28T23:07:10
2021-06-08T19:22:32
2021-06-08T19:22:32
NONE
null
null
null
null
## Describe the bug load_from_disk and save_to_disk are not compatible with each other. When I use save_to_disk to save a dataset to disk it works perfectly, but given the same directory load_from_disk throws an error that it can't find state.json. It looks like load_from_disk only works on a single split. ## Steps to reproduce the bug ```python from datasets import load_dataset, Dataset dataset = load_dataset("art") dataset.save_to_disk("mydir") d = Dataset.load_from_disk("mydir") ``` ## Expected results These two functions are expected to be the reverse of each other without further manipulation. ## Actual results FileNotFoundError: [Errno 2] No such file or directory: 'mydir/art/state.json' ## Environment info - `datasets` version: 1.6.2 - Platform: Linux-5.4.0-73-generic-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.10 - PyTorch version (GPU?): 1.8.1+cu102 (True) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: <fill in> - Using distributed or parallel set-up in script?: <fill in>
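As the comments above explain, `load_dataset` without a `split` argument returns a `DatasetDict`, so the reload has to go through `datasets.load_from_disk` (or `DatasetDict.load_from_disk`). A minimal sketch of the round trip, reusing the dataset from the report:

```python
from datasets import load_dataset, load_from_disk

dataset_dict = load_dataset("art")   # no split given -> a DatasetDict
dataset_dict.save_to_disk("mydir")
reloaded = load_from_disk("mydir")   # dispatches to DatasetDict.load_from_disk
```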
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2424/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2424/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
10 days, 20:15:22
https://api.github.com/repos/huggingface/datasets/issues/2415
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2415/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2415/comments
https://api.github.com/repos/huggingface/datasets/issues/2415/events
https://github.com/huggingface/datasets/issues/2415
903,923,097
MDU6SXNzdWU5MDM5MjMwOTc=
2,415
Cached dataset not loaded
{ "avatar_url": "https://avatars.githubusercontent.com/u/715491?v=4", "events_url": "https://api.github.com/users/borisdayma/events{/privacy}", "followers_url": "https://api.github.com/users/borisdayma/followers", "following_url": "https://api.github.com/users/borisdayma/following{/other_user}", "gists_url": "https://api.github.com/users/borisdayma/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/borisdayma", "id": 715491, "login": "borisdayma", "node_id": "MDQ6VXNlcjcxNTQ5MQ==", "organizations_url": "https://api.github.com/users/borisdayma/orgs", "received_events_url": "https://api.github.com/users/borisdayma/received_events", "repos_url": "https://api.github.com/users/borisdayma/repos", "site_admin": false, "starred_url": "https://api.github.com/users/borisdayma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borisdayma/subscriptions", "type": "User", "url": "https://api.github.com/users/borisdayma", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "It actually seems to happen all the time in above configuration:\r\n* the function `filter_by_duration` correctly loads cached processed dataset\r\n* the function `prepare_dataset` is always reexecuted\r\n\r\nI end up solving the issue by saving to disk my dataset at the end but I'm still wondering if it's a bug or limitation here.", "Hi ! The hash used for caching `map` results is the fingerprint of the resulting dataset. It is computed using three things:\r\n- the old fingerprint of the dataset\r\n- the hash of the function\r\n- the hash of the other parameters passed to `map`\r\n\r\nYou can compute the hash of your function (or any python object) with\r\n```python\r\nfrom datasets.fingerprint import Hasher\r\n\r\nmy_func = lambda x: x + 1\r\nprint(Hasher.hash(my_func))\r\n```\r\n\r\nIf `prepare_dataset` is always executed, maybe this is because your `processor` has a different hash each time you want to execute it.", "> If `prepare_dataset` is always executed, maybe this is because your `processor` has a different hash each time you want to execute it.\r\n\r\nYes I think that was the issue.\r\n\r\nFor the hash of the function:\r\n* does it consider just the name or the actual code of the function\r\n* does it consider variables that are not passed explicitly as parameters to the functions (such as the processor here)", "> does it consider just the name or the actual code of the function\r\n\r\nIt looks at the name and the actual code and all variables such as recursively. It uses `dill` to do so, which is based on `pickle`.\r\nBasically the hash is computed using the pickle bytes of your function (computed using `dill` to support most python objects).\r\n\r\n> does it consider variables that are not passed explicitly as parameters to the functions (such as the processor here)\r\n\r\nYes it does thanks to recursive pickling.", "Thanks for these explanations. I'm closing the issue." ]
2021-05-27T15:40:06
2021-06-02T13:15:47
2021-06-02T13:15:47
CONTRIBUTOR
null
null
null
null
## Describe the bug I have a large dataset (common_voice, english) where I use several map and filter functions. Sometimes my cached datasets after specific functions are not loaded. I always use the same arguments, same functions, no seed… ## Steps to reproduce the bug ```python def filter_by_duration(batch): return ( batch["duration"] <= 10 and batch["duration"] >= 1 and len(batch["target_text"]) > 5 ) def prepare_dataset(batch): batch["input_values"] = processor( batch["speech"], sampling_rate=batch["sampling_rate"][0] ).input_values with processor.as_target_processor(): batch["labels"] = processor(batch["target_text"]).input_ids return batch train_dataset = train_dataset.filter( filter_by_duration, remove_columns=["duration"], num_proc=data_args.preprocessing_num_workers, ) # PROBLEM HERE -> below function is reexecuted and cache is not loaded train_dataset = train_dataset.map( prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) # Later in script set_caching_enabled(False) # apply map on trained model to eval/test sets ``` ## Expected results The cached dataset should always be reloaded. ## Actual results The function is reexecuted. I have access to cached files `cache-xxxxx.arrow`. Is there a way I can somehow load manually 2 versions and see how the hash was created for debug purposes (to know if it's an issue with dataset or function)? ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2 - Platform: Linux-5.8.0-45-generic-x86_64-with-glibc2.29 - Python version: 3.8.5 - PyTorch version (GPU?): 1.8.1+cu102 (True) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: Yes - Using distributed or parallel set-up in script?: No
{ "avatar_url": "https://avatars.githubusercontent.com/u/715491?v=4", "events_url": "https://api.github.com/users/borisdayma/events{/privacy}", "followers_url": "https://api.github.com/users/borisdayma/followers", "following_url": "https://api.github.com/users/borisdayma/following{/other_user}", "gists_url": "https://api.github.com/users/borisdayma/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/borisdayma", "id": 715491, "login": "borisdayma", "node_id": "MDQ6VXNlcjcxNTQ5MQ==", "organizations_url": "https://api.github.com/users/borisdayma/orgs", "received_events_url": "https://api.github.com/users/borisdayma/received_events", "repos_url": "https://api.github.com/users/borisdayma/repos", "site_admin": false, "starred_url": "https://api.github.com/users/borisdayma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borisdayma/subscriptions", "type": "User", "url": "https://api.github.com/users/borisdayma", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2415/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2415/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
5 days, 21:35:41
https://api.github.com/repos/huggingface/datasets/issues/2413
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2413/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2413/comments
https://api.github.com/repos/huggingface/datasets/issues/2413/events
https://github.com/huggingface/datasets/issues/2413
903,777,557
MDU6SXNzdWU5MDM3Nzc1NTc=
2,413
AttributeError: 'DatasetInfo' object has no attribute 'task_templates'
{ "avatar_url": "https://avatars.githubusercontent.com/u/53588015?v=4", "events_url": "https://api.github.com/users/jungwhank/events{/privacy}", "followers_url": "https://api.github.com/users/jungwhank/followers", "following_url": "https://api.github.com/users/jungwhank/following{/other_user}", "gists_url": "https://api.github.com/users/jungwhank/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jungwhank", "id": 53588015, "login": "jungwhank", "node_id": "MDQ6VXNlcjUzNTg4MDE1", "organizations_url": "https://api.github.com/users/jungwhank/orgs", "received_events_url": "https://api.github.com/users/jungwhank/received_events", "repos_url": "https://api.github.com/users/jungwhank/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jungwhank/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jungwhank/subscriptions", "type": "User", "url": "https://api.github.com/users/jungwhank", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi ! Can you try using a more up-to-date version ? We added the task_templates in `datasets` 1.7.0.\r\n\r\nIdeally when you're working on new datasets, you should install and use the local version of your fork of `datasets`. Here I think you tried to run the 1.7.0 tests with the 1.6.2 code" ]
2021-05-27T13:44:28
2021-06-01T01:05:47
2021-06-01T01:05:47
CONTRIBUTOR
null
null
null
null
## Describe the bug Hello, I'm trying to add a dataset and contribute, but the test keeps failing with the CLI below. ` RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_all_configs_<my_dataset>` ## Steps to reproduce the bug It seems like a bug, since I see the same error with an existing dataset, not only with the dataset I'm trying to add. ` RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_all_configs_<any_dataset>` ## Expected results All tests pass. ## Actual results ``` # check that dataset is not empty self.parent.assertListEqual(sorted(dataset_builder.info.splits.keys()), sorted(dataset)) for split in dataset_builder.info.splits.keys(): # check that loaded datset is not empty self.parent.assertTrue(len(dataset[split]) > 0) # check that we can cast features for each task template > task_templates = dataset_builder.info.task_templates E AttributeError: 'DatasetInfo' object has no attribute 'task_templates' tests/test_dataset_common.py:175: AttributeError ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2 - Platform: Darwin-20.4.0-x86_64-i386-64bit - Python version: 3.7.7 - PyTorch version (GPU?): 1.7.0 (False) - Tensorflow version (GPU?): 2.3.0 (False) - Using GPU in script?: No - Using distributed or parallel set-up in script?: No
{ "avatar_url": "https://avatars.githubusercontent.com/u/53588015?v=4", "events_url": "https://api.github.com/users/jungwhank/events{/privacy}", "followers_url": "https://api.github.com/users/jungwhank/followers", "following_url": "https://api.github.com/users/jungwhank/following{/other_user}", "gists_url": "https://api.github.com/users/jungwhank/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jungwhank", "id": 53588015, "login": "jungwhank", "node_id": "MDQ6VXNlcjUzNTg4MDE1", "organizations_url": "https://api.github.com/users/jungwhank/orgs", "received_events_url": "https://api.github.com/users/jungwhank/received_events", "repos_url": "https://api.github.com/users/jungwhank/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jungwhank/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jungwhank/subscriptions", "type": "User", "url": "https://api.github.com/users/jungwhank", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2413/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2413/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
4 days, 11:21:19
https://api.github.com/repos/huggingface/datasets/issues/2412
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2412/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2412/comments
https://api.github.com/repos/huggingface/datasets/issues/2412/events
https://github.com/huggingface/datasets/issues/2412
903,769,151
MDU6SXNzdWU5MDM3NjkxNTE=
2,412
Docstring mistake: dataset vs. metric
{ "avatar_url": "https://avatars.githubusercontent.com/u/229382?v=4", "events_url": "https://api.github.com/users/PhilipMay/events{/privacy}", "followers_url": "https://api.github.com/users/PhilipMay/followers", "following_url": "https://api.github.com/users/PhilipMay/following{/other_user}", "gists_url": "https://api.github.com/users/PhilipMay/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PhilipMay", "id": 229382, "login": "PhilipMay", "node_id": "MDQ6VXNlcjIyOTM4Mg==", "organizations_url": "https://api.github.com/users/PhilipMay/orgs", "received_events_url": "https://api.github.com/users/PhilipMay/received_events", "repos_url": "https://api.github.com/users/PhilipMay/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PhilipMay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PhilipMay/subscriptions", "type": "User", "url": "https://api.github.com/users/PhilipMay", "user_view_type": "public" }
[]
closed
false
null
[]
[ "> I can provide a PR l8er...\r\n\r\nSee #2425 " ]
2021-05-27T13:39:11
2021-06-01T08:18:04
2021-06-01T08:18:04
CONTRIBUTOR
null
null
null
null
This: https://github.com/huggingface/datasets/blob/d95b95f8cf3cb0cff5f77a675139b584dcfcf719/src/datasets/load.py#L582 should rather be something like: `a metric identifier on HuggingFace AWS bucket (list all available metrics and ids with ``datasets.list_metrics()``)` I can provide a PR later...
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2412/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2412/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
4 days, 18:38:53
https://api.github.com/repos/huggingface/datasets/issues/2407
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2407/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2407/comments
https://api.github.com/repos/huggingface/datasets/issues/2407/events
https://github.com/huggingface/datasets/issues/2407
903,111,755
MDU6SXNzdWU5MDMxMTE3NTU=
2,407
.map() function got an unexpected keyword argument 'cache_file_name'
{ "avatar_url": "https://avatars.githubusercontent.com/u/7390482?v=4", "events_url": "https://api.github.com/users/cindyxinyiwang/events{/privacy}", "followers_url": "https://api.github.com/users/cindyxinyiwang/followers", "following_url": "https://api.github.com/users/cindyxinyiwang/following{/other_user}", "gists_url": "https://api.github.com/users/cindyxinyiwang/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cindyxinyiwang", "id": 7390482, "login": "cindyxinyiwang", "node_id": "MDQ6VXNlcjczOTA0ODI=", "organizations_url": "https://api.github.com/users/cindyxinyiwang/orgs", "received_events_url": "https://api.github.com/users/cindyxinyiwang/received_events", "repos_url": "https://api.github.com/users/cindyxinyiwang/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cindyxinyiwang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cindyxinyiwang/subscriptions", "type": "User", "url": "https://api.github.com/users/cindyxinyiwang", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi @cindyxinyiwang,\r\nDid you try adding `.arrow` after `cache_file_name` argument? Here I think they're expecting something like that only for a cache file:\r\nhttps://github.com/huggingface/datasets/blob/e08362256fb157c0b3038437fc0d7a0bbb50de5c/src/datasets/arrow_dataset.py#L1556-L1558", "Hi ! `cache_file_name` is an argument of the `Dataset.map` method. Can you check that your `dataset` is indeed a `Dataset` object ?\r\n\r\nIf you loaded several splits, then it would actually be a `DatasetDict` (one dataset per split, in a dictionary).\r\nIn this case, since there are several datasets in the dict, the `DatasetDict.map` method requires a `cache_file_names` argument (with an 's'), so that you can provide one file name per split.", "I think you are right. I used cache_file_names={data1: name1, data2: name2} and it works. Thank you!" ]
2021-05-27T01:54:26
2021-05-27T13:46:40
2021-05-27T13:46:40
NONE
null
null
null
null
## Describe the bug I'm trying to save the result of datasets.map() to a specific file, so that I can easily share it among multiple computers without reprocessing the dataset. However, when I try to pass an argument 'cache_file_name' to the .map() function, it throws an error that ".map() function got an unexpected keyword argument 'cache_file_name'". I believe I'm using the latest datasets 1.6.2. It also seems like the documentation and the actual code indicate that there is an argument 'cache_file_name' for the .map() function. Here is the code I use. ## Steps to reproduce the bug ```datasets = load_from_disk(dataset_path=my_path) [...] def tokenize_function(examples): return tokenizer(examples[text_column_name]) logger.info("Mapping dataset to tokenized dataset.") tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=True, cache_file_name="my_tokenized_file" ) ``` ## Actual results tokenized_datasets = datasets.map( TypeError: map() got an unexpected keyword argument 'cache_file_name' ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version:1.6.2 - Platform:Linux-4.18.0-193.28.1.el8_2.x86_64-x86_64-with-glibc2.10 - Python version:3.8.5 - PyArrow version:3.0.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/7390482?v=4", "events_url": "https://api.github.com/users/cindyxinyiwang/events{/privacy}", "followers_url": "https://api.github.com/users/cindyxinyiwang/followers", "following_url": "https://api.github.com/users/cindyxinyiwang/following{/other_user}", "gists_url": "https://api.github.com/users/cindyxinyiwang/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cindyxinyiwang", "id": 7390482, "login": "cindyxinyiwang", "node_id": "MDQ6VXNlcjczOTA0ODI=", "organizations_url": "https://api.github.com/users/cindyxinyiwang/orgs", "received_events_url": "https://api.github.com/users/cindyxinyiwang/received_events", "repos_url": "https://api.github.com/users/cindyxinyiwang/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cindyxinyiwang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cindyxinyiwang/subscriptions", "type": "User", "url": "https://api.github.com/users/cindyxinyiwang", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2407/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2407/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
11:52:14
https://api.github.com/repos/huggingface/datasets/issues/2406
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2406/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2406/comments
https://api.github.com/repos/huggingface/datasets/issues/2406/events
https://github.com/huggingface/datasets/issues/2406
902,643,844
MDU6SXNzdWU5MDI2NDM4NDQ=
2,406
Add guide on using task templates to documentation
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" } ]
[]
2021-05-26T16:28:26
2022-10-05T17:07:00
2022-10-05T17:07:00
MEMBER
null
null
null
null
Once we have a stable API on the text classification and question answering task templates, add a guide on how to use them in the documentation.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2406/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2406/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
497 days, 0:38:34
https://api.github.com/repos/huggingface/datasets/issues/2402
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2402/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2402/comments
https://api.github.com/repos/huggingface/datasets/issues/2402/events
https://github.com/huggingface/datasets/issues/2402
900,025,329
MDU6SXNzdWU5MDAwMjUzMjk=
2,402
PermissionError on Windows when using temp dir for caching
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[]
2021-05-24T21:22:59
2021-05-26T16:39:29
2021-05-26T16:39:29
COLLABORATOR
null
null
null
null
Currently, the following code raises a PermissionError on master if working on Windows: ```python # run as a script or call exit() in REPL to initiate the temp dir cleanup from datasets import * d = load_dataset("sst", split="train", keep_in_memory=False) set_caching_enabled(False) d.map(lambda ex: ex) ``` Error stack trace: ``` Traceback (most recent call last): File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\weakref.py", line 624, in _exitfunc f() File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\weakref.py", line 548, in __call__ return info.func(*info.args, **(info.kwargs or {})) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\tempfile.py", line 799, in _cleanup _shutil.rmtree(name) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\shutil.py", line 500, in rmtree return _rmtree_unsafe(path, onerror) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\shutil.py", line 395, in _rmtree_unsafe onerror(os.unlink, fullname, sys.exc_info()) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\shutil.py", line 393, in _rmtree_unsafe os.unlink(fullname) PermissionError: [WinError 5] Access is denied: 'C:\\Users\\Mario\\AppData\\Local\\Temp\\tmp20epyhmq\\cache-87a87ffb5a956e68.arrow' ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2402/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2402/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 19:16:30
https://api.github.com/repos/huggingface/datasets/issues/2401
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2401/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2401/comments
https://api.github.com/repos/huggingface/datasets/issues/2401/events
https://github.com/huggingface/datasets/issues/2401
899,910,521
MDU6SXNzdWU4OTk5MTA1MjE=
2,401
load_dataset('natural_questions') fails with "ValueError: External features info don't match the dataset"
{ "avatar_url": "https://avatars.githubusercontent.com/u/15602718?v=4", "events_url": "https://api.github.com/users/jonrbates/events{/privacy}", "followers_url": "https://api.github.com/users/jonrbates/followers", "following_url": "https://api.github.com/users/jonrbates/following{/other_user}", "gists_url": "https://api.github.com/users/jonrbates/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jonrbates", "id": 15602718, "login": "jonrbates", "node_id": "MDQ6VXNlcjE1NjAyNzE4", "organizations_url": "https://api.github.com/users/jonrbates/orgs", "received_events_url": "https://api.github.com/users/jonrbates/received_events", "repos_url": "https://api.github.com/users/jonrbates/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jonrbates/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jonrbates/subscriptions", "type": "User", "url": "https://api.github.com/users/jonrbates", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[ "I faced the similar problem. Downgrading datasets to 1.5.0 fixed it.", "Thanks for reporting, I'm looking into it", "I just opened #2438 to fix this :)", "Hi ! This has been fixed in the 1.8.0 release of `datasets`" ]
2021-05-24T18:38:53
2021-06-09T09:07:25
2021-06-09T09:07:25
NONE
null
null
null
null
## Describe the bug load_dataset('natural_questions') throws ValueError ## Steps to reproduce the bug ```python from datasets import load_dataset datasets = load_dataset('natural_questions', split='validation[:10]') ``` ## Expected results Call to load_dataset returns data. ## Actual results ``` Using custom data configuration default Reusing dataset natural_questions (/mnt/d/huggingface/datasets/natural_questions/default/0.0.2/19bc04755018a3ad02ee74f7045cde4ba9b4162cb64450a87030ab786b123b76) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-2-d55ab8a8cc1c> in <module> ----> 1 datasets = load_dataset('natural_questions', split='validation[:10]', cache_dir='/mnt/d/huggingface/datasets') ~/miniconda3/lib/python3.8/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, **config_kwargs) 756 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 757 ) --> 758 ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory) 759 if save_infos: 760 builder_instance._save_infos() ~/miniconda3/lib/python3.8/site-packages/datasets/builder.py in as_dataset(self, split, run_post_process, ignore_verifications, in_memory) 735 736 # Create a dataset for each of the given splits --> 737 datasets = utils.map_nested( 738 partial( 739 self._build_single_dataset, ~/miniconda3/lib/python3.8/site-packages/datasets/utils/py_utils.py in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, types) 193 # Singleton 194 if not isinstance(data_struct, dict) and not isinstance(data_struct, types): --> 195 return function(data_struct) 196 197 disable_tqdm = bool(logger.getEffectiveLevel() > INFO) ~/miniconda3/lib/python3.8/site-packages/datasets/builder.py in _build_single_dataset(self, split, run_post_process, ignore_verifications, in_memory) 762 763 # Build base dataset --> 764 ds = self._as_dataset( 765 split=split, 766 in_memory=in_memory, ~/miniconda3/lib/python3.8/site-packages/datasets/builder.py in _as_dataset(self, split, in_memory) 838 in_memory=in_memory, 839 ) --> 840 return Dataset(**dataset_kwargs) 841 842 def _post_process(self, dataset: Dataset, resources_paths: Dict[str, str]) -> Optional[Dataset]: ~/miniconda3/lib/python3.8/site-packages/datasets/arrow_dataset.py in __init__(self, arrow_table, info, split, indices_table, fingerprint) 271 assert self._fingerprint is not None, "Fingerprint can't be None in a Dataset object" 272 if self.info.features.type != inferred_features.type: --> 273 raise ValueError( 274 "External features info don't match the dataset:\nGot\n{}\nwith type\n{}\n\nbut expected something like\n{}\nwith type\n{}".format( 275 self.info.features, self.info.features.type, inferred_features, inferred_features.type ValueError: External features info don't match the dataset: Got {'id': Value(dtype='string', id=None), 'document': {'title': Value(dtype='string', id=None), 'url': Value(dtype='string', id=None), 'html': Value(dtype='string', id=None), 'tokens': Sequence(feature={'token': Value(dtype='string', id=None), 'is_html': Value(dtype='bool', id=None)}, length=-1, id=None)}, 'question': {'text': Value(dtype='string', id=None), 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 
'annotations': Sequence(feature={'id': Value(dtype='string', id=None), 'long_answer': {'start_token': Value(dtype='int64', id=None), 'end_token': Value(dtype='int64', id=None), 'start_byte': Value(dtype='int64', id=None), 'end_byte': Value(dtype='int64', id=None)}, 'short_answers': Sequence(feature={'start_token': Value(dtype='int64', id=None), 'end_token': Value(dtype='int64', id=None), 'start_byte': Value(dtype='int64', id=None), 'end_byte': Value(dtype='int64', id=None), 'text': Value(dtype='string', id=None)}, length=-1, id=None), 'yes_no_answer': ClassLabel(num_classes=2, names=['NO', 'YES'], names_file=None, id=None)}, length=-1, id=None)} with type struct<annotations: struct<id: list<item: string>, long_answer: list<item: struct<start_token: int64, end_token: int64, start_byte: int64, end_byte: int64>>, short_answers: list<item: struct<end_byte: list<item: int64>, end_token: list<item: int64>, start_byte: list<item: int64>, start_token: list<item: int64>, text: list<item: string>>>, yes_no_answer: list<item: int64>>, document: struct<title: string, url: string, html: string, tokens: struct<is_html: list<item: bool>, token: list<item: string>>>, id: string, question: struct<text: string, tokens: list<item: string>>> but expected something like {'id': Value(dtype='string', id=None), 'document': {'html': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'tokens': {'is_html': Sequence(feature=Value(dtype='bool', id=None), length=-1, id=None), 'token': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 'url': Value(dtype='string', id=None)}, 'question': {'text': Value(dtype='string', id=None), 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 'annotations': {'id': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'long_answer': [{'end_byte': Value(dtype='int64', id=None), 'end_token': Value(dtype='int64', id=None), 'start_byte': Value(dtype='int64', id=None), 'start_token': Value(dtype='int64', id=None)}], 'short_answers': [{'end_byte': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'end_token': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'start_byte': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'start_token': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}], 'yes_no_answer': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None)}} with type struct<annotations: struct<id: list<item: string>, long_answer: list<item: struct<end_byte: int64, end_token: int64, start_byte: int64, start_token: int64>>, short_answers: list<item: struct<end_byte: list<item: int64>, end_token: list<item: int64>, start_byte: list<item: int64>, start_token: list<item: int64>, text: list<item: string>>>, yes_no_answer: list<item: int64>>, document: struct<html: string, title: string, tokens: struct<is_html: list<item: bool>, token: list<item: string>>, url: string>, id: string, question: struct<text: string, tokens: list<item: string>>> ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2 - Platform: Linux-5.4.72-microsoft-standard-WSL2-x86_64-with-glibc2.10 - Python version: 3.8.3 - PyTorch version (GPU?): 1.6.0 (False) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: No - Using distributed or parallel set-up in script?: No
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2401/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2401/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
15 days, 14:28:32
https://api.github.com/repos/huggingface/datasets/issues/2400
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2400/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2400/comments
https://api.github.com/repos/huggingface/datasets/issues/2400/events
https://github.com/huggingface/datasets/issues/2400
899,867,212
MDU6SXNzdWU4OTk4NjcyMTI=
2,400
Concatenate several datasets with removed columns is not working.
{ "avatar_url": "https://avatars.githubusercontent.com/u/32632186?v=4", "events_url": "https://api.github.com/users/philschmid/events{/privacy}", "followers_url": "https://api.github.com/users/philschmid/followers", "following_url": "https://api.github.com/users/philschmid/following{/other_user}", "gists_url": "https://api.github.com/users/philschmid/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/philschmid", "id": 32632186, "login": "philschmid", "node_id": "MDQ6VXNlcjMyNjMyMTg2", "organizations_url": "https://api.github.com/users/philschmid/orgs", "received_events_url": "https://api.github.com/users/philschmid/received_events", "repos_url": "https://api.github.com/users/philschmid/repos", "site_admin": false, "starred_url": "https://api.github.com/users/philschmid/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/philschmid/subscriptions", "type": "User", "url": "https://api.github.com/users/philschmid", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi,\r\n\r\ndid you fill out the env info section manually or by copy-pasting the output of the `datasets-cli env` command?\r\n\r\nThis code should work without issues on 1.6.2 version (I'm working on master (1.6.2.dev0 version) and can't reproduce this error).", "@mariosasko you are right I was still on `1.5.0`. " ]
2021-05-24T17:40:15
2021-05-25T05:52:01
2021-05-25T05:51:59
CONTRIBUTOR
null
null
null
null
## Describe the bug Datasets cannot be concatenated after columns have been removed from them. ## Steps to reproduce the bug ```python from datasets import load_dataset, concatenate_datasets wikiann = load_dataset("wikiann","en") wikiann["train"] = wikiann["train"].remove_columns(["langs","spans"]) wikiann["test"] = wikiann["test"].remove_columns(["langs","spans"]) assert wikiann["train"].features.type == wikiann["test"].features.type concate = concatenate_datasets([wikiann["train"],wikiann["test"]]) ``` ## Expected results A merged dataset. ## Actual results ```python ValueError: External features info don't match the dataset: Got {'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'ner_tags': Sequence(feature=ClassLabel(num_classes=7, names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'], names_file=None, id=None), length=-1, id=None), 'langs': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'spans': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)} with type struct<langs: list<item: string>, ner_tags: list<item: int64>, spans: list<item: string>, tokens: list<item: string>> but expected something like {'ner_tags': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)} with type struct<ner_tags: list<item: int64>, tokens: list<item: string>> ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: ~1.6.2~ 1.5.0 - Platform: macos - Python version: 3.8.5 - PyArrow version: 3.0.0
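A self-contained sketch (not from the report) that runs the same remove-columns-then-concatenate flow on two tiny in-memory datasets; on `datasets>=1.6.2` this completes without the error, which is consistent with the version mix-up noted in the comments.

```python
from datasets import Dataset, concatenate_datasets

# Hypothetical toy rows with the same column names as the wikiann example above.
a = Dataset.from_dict({"tokens": [["a", "b"]], "ner_tags": [[0, 1]], "langs": [["en", "en"]]})
b = Dataset.from_dict({"tokens": [["c"]], "ner_tags": [[2]], "langs": [["en"]]})

a = a.remove_columns(["langs"])
b = b.remove_columns(["langs"])

# Same schema check as in the report, then the concatenation that used to fail on 1.5.0.
assert a.features.type == b.features.type
merged = concatenate_datasets([a, b])
print(merged)
```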
{ "avatar_url": "https://avatars.githubusercontent.com/u/32632186?v=4", "events_url": "https://api.github.com/users/philschmid/events{/privacy}", "followers_url": "https://api.github.com/users/philschmid/followers", "following_url": "https://api.github.com/users/philschmid/following{/other_user}", "gists_url": "https://api.github.com/users/philschmid/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/philschmid", "id": 32632186, "login": "philschmid", "node_id": "MDQ6VXNlcjMyNjMyMTg2", "organizations_url": "https://api.github.com/users/philschmid/orgs", "received_events_url": "https://api.github.com/users/philschmid/received_events", "repos_url": "https://api.github.com/users/philschmid/repos", "site_admin": false, "starred_url": "https://api.github.com/users/philschmid/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/philschmid/subscriptions", "type": "User", "url": "https://api.github.com/users/philschmid", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2400/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2400/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
12:11:44
https://api.github.com/repos/huggingface/datasets/issues/2398
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2398/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2398/comments
https://api.github.com/repos/huggingface/datasets/issues/2398/events
https://github.com/huggingface/datasets/issues/2398
899,511,837
MDU6SXNzdWU4OTk1MTE4Mzc=
2,398
News_commentary Dataset Translation Pairs are of Incorrect Language Specified Pairs
{ "avatar_url": "https://avatars.githubusercontent.com/u/8571003?v=4", "events_url": "https://api.github.com/users/anassalamah/events{/privacy}", "followers_url": "https://api.github.com/users/anassalamah/followers", "following_url": "https://api.github.com/users/anassalamah/following{/other_user}", "gists_url": "https://api.github.com/users/anassalamah/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/anassalamah", "id": 8571003, "login": "anassalamah", "node_id": "MDQ6VXNlcjg1NzEwMDM=", "organizations_url": "https://api.github.com/users/anassalamah/orgs", "received_events_url": "https://api.github.com/users/anassalamah/received_events", "repos_url": "https://api.github.com/users/anassalamah/repos", "site_admin": false, "starred_url": "https://api.github.com/users/anassalamah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/anassalamah/subscriptions", "type": "User", "url": "https://api.github.com/users/anassalamah", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "These ranges seem to be valid English. Closing." ]
2021-05-24T10:03:34
2022-10-05T17:13:49
2022-10-05T17:13:49
NONE
null
null
null
null
I used `load_dataset` to load the news_commentary dataset for "ar-en" translation pairs, but found translations from Arabic to Hindi. ``` from itertools import chain from datasets import load_dataset train_ds = load_dataset("news_commentary", "ar-en", split='train[:98%]') val_ds = load_dataset("news_commentary", "ar-en", split='train[98%:]') # filtering out examples that are not ar-en translations but ar-hi val_ds = val_ds.filter(lambda example, indice: indice not in chain(range(1312,1327), range(1384,1399), range(1030,1042)), with_indices=True) ``` * I'm fairly new to using datasets, so I might be doing something wrong
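A small follow-up sketch (not part of the original report): printing a few rows from the index ranges listed above is a quick way to confirm that they are ar-hi rather than ar-en pairs. It assumes the `news_commentary` rows expose the usual `translation` dict keyed by language code.

```python
from itertools import chain

from datasets import load_dataset

# Same validation slice as in the report above.
val_ds = load_dataset("news_commentary", "ar-en", split="train[98%:]")

# Index ranges copied from the report; assumed to contain the mis-paired examples.
suspect_indices = list(chain(range(1312, 1327), range(1384, 1399), range(1030, 1042)))

for i in suspect_indices[:5]:
    # Translation datasets typically expose a `translation` dict keyed by language code (assumption).
    print(i, val_ds[i]["translation"])
```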
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2398/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2398/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
499 days, 7:10:15
https://api.github.com/repos/huggingface/datasets/issues/2396
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2396/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2396/comments
https://api.github.com/repos/huggingface/datasets/issues/2396/events
https://github.com/huggingface/datasets/issues/2396
899,016,308
MDU6SXNzdWU4OTkwMTYzMDg=
2,396
strange datasets from OSCAR corpus
{ "avatar_url": "https://avatars.githubusercontent.com/u/50871412?v=4", "events_url": "https://api.github.com/users/cosmeowpawlitan/events{/privacy}", "followers_url": "https://api.github.com/users/cosmeowpawlitan/followers", "following_url": "https://api.github.com/users/cosmeowpawlitan/following{/other_user}", "gists_url": "https://api.github.com/users/cosmeowpawlitan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cosmeowpawlitan", "id": 50871412, "login": "cosmeowpawlitan", "node_id": "MDQ6VXNlcjUwODcxNDEy", "organizations_url": "https://api.github.com/users/cosmeowpawlitan/orgs", "received_events_url": "https://api.github.com/users/cosmeowpawlitan/received_events", "repos_url": "https://api.github.com/users/cosmeowpawlitan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cosmeowpawlitan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cosmeowpawlitan/subscriptions", "type": "User", "url": "https://api.github.com/users/cosmeowpawlitan", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
[ "Hi ! Thanks for reporting\r\ncc @pjox is this an issue from the data ?\r\n\r\nAnyway we should at least mention that OSCAR could contain such contents in the dataset card, you're totally right @jerryIsHere ", "Hi @jerryIsHere , sorry for the late response! Sadly this is normal, the problem comes form fasttext's classifier which we used to create the original corpus. In general the classifier is not really capable of properly recognizing Yue Chineese so the file ends un being just noise from Common Crawl. Some of these problems with OSCAR were already discussed [here](https://arxiv.org/pdf/2103.12028.pdf) but we are working on explicitly documenting the problems by language on our website. In fact, could please you open an issue on [our repo](https://github.com/oscar-corpus/oscar-website/issues) as well so that we can track it?" ]
2021-05-23T13:06:02
2021-06-17T13:54:37
null
CONTRIBUTOR
null
null
null
null
![image](https://user-images.githubusercontent.com/50871412/119260850-4f876b80-bc07-11eb-8894-124302600643.png) ![image](https://user-images.githubusercontent.com/50871412/119260875-675eef80-bc07-11eb-9da4-ee27567054ac.png) From the [official site](https://oscar-corpus.com/), the Yue Chinese dataset should have 2.2KB of data. 7 training instances is obviously not the right number. As I can read Yue Chinese, I can tell the last instance is definitely not something that would appear on Common Crawl. And even if you don't read Yue Chinese, you can tell the first six instances are problematic. (It is embarrassing, as the 7 training instances look exactly like something from a pornographic novel or flirting messages in a chat of a dating app.) It might not be a problem with the huggingface/datasets implementation, because when I tried to download the dataset from the official site, I found out that the zip file is corrupted. I will try to inform the host of the OSCAR corpus later. Anyway, a remake of this dataset in huggingface/datasets is needed, perhaps after the host of the dataset fixes the issue. > Hi @jerryIsHere , sorry for the late response! Sadly this is normal, the problem comes form fasttext's classifier which we used to create the original corpus. In general the classifier is not really capable of properly recognizing Yue Chineese so the file ends un being just noise from Common Crawl. Some of these problems with OSCAR were already discussed [here](https://arxiv.org/pdf/2103.12028.pdf) but we are working on explicitly documenting the problems by language on our website. In fact, could please you open an issue on [our repo](https://github.com/oscar-corpus/oscar-website/issues) as well so that we can track it? Thanks a lot, the new post is here: https://github.com/oscar-corpus/oscar-website/issues/11
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2396/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2396/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2391
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2391/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2391/comments
https://api.github.com/repos/huggingface/datasets/issues/2391/events
https://github.com/huggingface/datasets/issues/2391
898,128,099
MDU6SXNzdWU4OTgxMjgwOTk=
2,391
Missing original answers in kilt-TriviaQA
{ "avatar_url": "https://avatars.githubusercontent.com/u/25532159?v=4", "events_url": "https://api.github.com/users/PaulLerner/events{/privacy}", "followers_url": "https://api.github.com/users/PaulLerner/followers", "following_url": "https://api.github.com/users/PaulLerner/following{/other_user}", "gists_url": "https://api.github.com/users/PaulLerner/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PaulLerner", "id": 25532159, "login": "PaulLerner", "node_id": "MDQ6VXNlcjI1NTMyMTU5", "organizations_url": "https://api.github.com/users/PaulLerner/orgs", "received_events_url": "https://api.github.com/users/PaulLerner/received_events", "repos_url": "https://api.github.com/users/PaulLerner/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PaulLerner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PaulLerner/subscriptions", "type": "User", "url": "https://api.github.com/users/PaulLerner", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "That could be useful indeed! Feel free to open a PR on the dataset card if you already have some code that runs, otherwise we'll take care of it soon :) ", "I can open a PR but there is 2 details to fix:\r\n- the name for the corresponding key (e.g. `original_answer`)\r\n- how to implement it: I’m not sure what happens when you map `lambda x: {'input': ...}` as it keeps the other keys (e.g. `output`) intact but here since we want to set a nested value (e.g. `x['output']['original_answer']`) I implemented it with a regular function (not lambda), see below\r\n\r\n```py\r\ndef add_original_answer(x, trivia_qa, triviaqa_map):\r\n i = triviaqa_map[x['id']]\r\n x['output']['original_answer'] = trivia_qa['validation'][i]['answer']['value']\r\n return x\r\n```" ]
2021-05-21T14:57:07
2021-06-14T17:29:11
2021-06-14T17:29:11
CONTRIBUTOR
null
null
null
null
I previously opened an issue at https://github.com/facebookresearch/KILT/issues/42, but from the answer of @fabiopetroni it seems that the problem comes from HF-datasets. ## Describe the bug The `answer` field in kilt-TriviaQA, e.g. `kilt_tasks['train_triviaqa'][0]['output']['answer']`, contains a list of alternative answers that are accepted for the question. However, it'd be nice to know the original answer to the question (the only fields in `output` are `'answer', 'meta', 'provenance'`). ## How to fix It can be fixed by retrieving the original answer from the original TriviaQA (e.g. `trivia_qa['train'][0]['answer']['value']`), perhaps in the same place where one retrieves the questions: https://github.com/huggingface/datasets/blob/master/datasets/kilt_tasks/README.md#loading-the-kilt-knowledge-source-and-task-data cc @yjernite who previously answered an issue about KILT and TriviaQA :)
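A hedged sketch of the proposed fix along the lines of the comments on this issue. The TriviaQA config name, the `validation_triviaqa` split key, the `original_answer` field name, and the assumption that KILT ids line up with TriviaQA `question_id`s all follow the KILT dataset card referenced above and are not verified here.

```python
from datasets import load_dataset

kilt_tasks = load_dataset("kilt_tasks")
# Config name taken from the KILT dataset card (assumption).
trivia_qa = load_dataset("trivia_qa", "unfiltered.nocontext")

# Assumption: KILT example ids can be mapped back to TriviaQA question_ids.
triviaqa_map = {qid: i for i, qid in enumerate(trivia_qa["validation"]["question_id"])}

def add_original_answer(x):
    # `original_answer` is a hypothetical field name for the restored value.
    i = triviaqa_map[x["id"]]
    x["output"]["original_answer"] = trivia_qa["validation"][i]["answer"]["value"]
    return x

kilt_validation = kilt_tasks["validation_triviaqa"].map(add_original_answer)
```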
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2391/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2391/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
24 days, 2:32:04
https://api.github.com/repos/huggingface/datasets/issues/2388
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2388/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2388/comments
https://api.github.com/repos/huggingface/datasets/issues/2388/events
https://github.com/huggingface/datasets/issues/2388
897,767,470
MDU6SXNzdWU4OTc3Njc0NzA=
2,388
Incorrect URLs for some datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" } ]
[]
2021-05-21T07:22:35
2021-06-04T17:39:45
2021-06-04T17:39:45
MEMBER
null
null
null
null
## Describe the bug It seems that the URLs for the following datasets are invalid: - [ ] `bn_hate_speech` has been renamed: https://github.com/rezacsedu/Bengali-Hate-Speech-Dataset/commit/c67ecfc4184911e12814f6b36901f9828df8a63a - [ ] `covid_tweets_japanese` has been renamed: http://www.db.info.gifu-u.ac.jp/covid-19-twitter-dataset/ As a result we can no longer load these datasets using `load_dataset`. The simple fix is to rename the URL in the dataset script - will do this asap. ## Steps to reproduce the bug ```python from datasets import load_dataset # pick one of the datasets from the list above ds = load_dataset("bn_hate_speech") ``` ## Expected results Dataset loads without error. ## Actual results ``` Downloading: 3.36kB [00:00, 1.07MB/s] Downloading: 2.03kB [00:00, 678kB/s] Using custom data configuration default Downloading and preparing dataset bn_hate_speech/default (download: 951.48 KiB, generated: 949.84 KiB, post-processed: Unknown size, total: 1.86 MiB) to /Users/lewtun/.cache/huggingface/datasets/bn_hate_speech/default/0.0.0/a2dc726e511a2177523301bcad196af05d4d8a2cff30d2769ba8aacc1f5fdb5c... Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/load.py", line 744, in load_dataset builder_instance.download_and_prepare( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/builder.py", line 574, in download_and_prepare self._download_and_prepare( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/builder.py", line 630, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/Users/lewtun/.cache/huggingface/modules/datasets_modules/datasets/bn_hate_speech/a2dc726e511a2177523301bcad196af05d4d8a2cff30d2769ba8aacc1f5fdb5c/bn_hate_speech.py", line 76, in _split_generators train_path = dl_manager.download_and_extract(_URL) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 287, in download_and_extract return self.extract(self.download(url_or_urls)) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 195, in download downloaded_path_or_paths = map_nested( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/py_utils.py", line 195, in map_nested return function(data_struct) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 218, in _download return cached_path(url_or_filename, download_config=download_config) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 281, in cached_path output_path = get_from_cache( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/rezacsedu/Bengali-Hate-Speech-Dataset/main/Bengali_%20Hate_Speech_Dataset_Subset.csv ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2.dev0 - Platform: macOS-10.16-x86_64-i386-64bit - Python version: 3.8.8 - PyArrow version: 3.0.0
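A small, generic sketch (not part of the issue) for checking whether a dataset URL is still reachable before editing the dataset script; the URL shown is the broken one from the traceback above, and the `requests` usage is standard.

```python
import requests

# Broken URL copied from the traceback above.
url = (
    "https://raw.githubusercontent.com/rezacsedu/Bengali-Hate-Speech-Dataset/"
    "main/Bengali_%20Hate_Speech_Dataset_Subset.csv"
)
resp = requests.head(url, allow_redirects=True)
print(resp.status_code)  # a 404 here confirms the upstream file was renamed
```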
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2388/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2388/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
14 days, 10:17:10
https://api.github.com/repos/huggingface/datasets/issues/2387
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2387/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2387/comments
https://api.github.com/repos/huggingface/datasets/issues/2387/events
https://github.com/huggingface/datasets/issues/2387
897,566,666
MDU6SXNzdWU4OTc1NjY2NjY=
2,387
datasets 1.6 ignores cache
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "Looks like there are multiple issues regarding this (#2386, #2322) and it's a WIP #2329. Currently these datasets are being loaded in-memory which is causing this issue. Quoting @mariosasko here for a quick fix:\r\n\r\n> set `keep_in_memory` to `False` when loading a dataset (`sst = load_dataset(\"sst\", keep_in_memory=False)`) to prevent it from loading in-memory. Currently, in-memory datasets fail to find cached files due to this check (always False for them)\r\n\r\n", "Hi ! Since `datasets` 1.6.0 we no longer keep small datasets (<250MB) on disk and load them in RAM instead by default. This makes data processing and iterating on data faster. However datasets in RAM currently have no way to reload previous results from the cache (since nothing is written on disk). We are working on making the caching work for datasets in RAM.\r\n\r\nUntil then, I'd recommend passing `keep_in_memory=False` to the calls to `load_dataset` like here:\r\n\r\nhttps://github.com/huggingface/transformers/blob/223943872e8c9c3fc11db3c6e93da07f5177423f/examples/pytorch/language-modeling/run_clm.py#L233\r\n\r\nThis way you say explicitly that you want your dataset to stay on the disk, and it will be able to recover previously computed results from the cache.", "gotcha! thanks Quentin", "OK, It doesn't look like we can use the proposed workaround - see https://github.com/huggingface/transformers/issues/11801\r\n\r\nCould you please add an env var for us to be able to turn off this unwanted in our situation behavior? It is really problematic for dev work, when one needs to restart the training very often and needs a quick startup time. Manual editing of standard scripts is not a practical option when one uses examples.\r\n\r\nThis could also be a problem for tests, which will be slower because of lack of cache, albeit usually we use tiny datasets there. I think we want caching for tests.\r\n\r\nThank you.", "Hi @stas00, \r\n\r\nYou are right: an env variable is needed to turn off this behavior. I am adding it.\r\n\r\nFor the moment there is a config parameter to turn off this behavior: `datasets.config.MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES = None`\r\n\r\nYou can find this info in the docs:\r\n- in the docstring of the parameter `keep_in_memory` of the function [`load_datasets`](https://huggingface.co/docs/datasets/package_reference/loading_methods.html#datasets.load_dataset):\r\n- in a Note in the docs about [Loading a Dataset](https://huggingface.co/docs/datasets/loading_datasets.html#from-the-huggingface-hub)\r\n\r\n> The default in 🤗Datasets is to memory-map the dataset on drive if its size is larger than datasets.config.MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES (default 250 MiB); otherwise, the dataset is copied in-memory. This behavior can be disabled by setting datasets.config.MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES = None, and in this case the dataset is not loaded in memory.", "Yes, but this still requires one to edit the standard example scripts, so if I'm doing that already I just as well can add `keep_in_memory=False`.\r\n\r\nMay be the low hanging fruit is to add `MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES` env var to match the config, and if the user sets it to 0, then it'll be the same as `keep_in_memory=False` or `datasets.config.MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES=0`?", "@stas00, however, for the moment, setting the value to `0` is equivalent to the opposite, i.e. `keep_in_memory=True`. 
This means the max size until which I load in memory is 0 bytes.\r\n\r\nTell me if this is logical/convenient, or I should change it.", "In my PR, to turn off current default bahavior, you should set env variable to one of: `{\"\", \"OFF\", \"NO\", \"FALSE\"}`.\r\n\r\nFor example:\r\n```\r\nMAX_IN_MEMORY_DATASET_SIZE_IN_BYTES=\r\n```", "IMHO, this behaviour is not very intuitive, as 0 is a normal quantity of bytes. So `MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES=0` to me reads as don't cache ever.\r\n\r\nAlso \"SIZE_IN_BYTES\" that can take one of `{\"\", \"OFF\", \"NO\", \"FALSE\"}` is also quite odd.\r\n\r\nI think supporting a very simple `MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES` that can accept any numerical value to match the name of the variable, requires minimal logic and is very straightforward. \r\n\r\nSo if you could adjust this logic - then `MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES=0` is all that's needed to not do in-memory datasets.\r\n\r\nDoes it make sense?", "I understand your point @stas00, as I am not very convinced with current implementation.\r\n\r\nMy concern is: which numerical value should then pass a user who wants `keep_in_memory=True` by default, independently of dataset size? Currently it is `0` for this case.", "That's a good question, and again the normal bytes can be used for that:\r\n```\r\nMAX_IN_MEMORY_DATASET_SIZE_IN_BYTES=1e12 # (~2**40)\r\n```\r\nSince it's unlikely that anybody will have more than 1TB RAM.\r\n\r\nIt's also silly that it uses BYTES and not MBYTES - that level of refinement doesn't seem to be of a practical use in this context.\r\n\r\nNot sure when it was added and if there are back-compat issues here, but perhaps it could be renamed `MAX_IN_MEMORY_DATASET_SIZE` and support 1M, 1G, 1T, etc. \r\n\r\nBut scientific notation is quite intuitive too, as each 000 zeros is the next M, G, T multiplier. Minus the discrepancy of 1024 vs 1000, which adds up. And it is easy to write down `1e12`, as compared to `1099511627776` (2**40). (`1.1e12` is more exact).\r\n", "Great! Thanks, @stas00.\r\n\r\nI am implementing your suggestion to turn off default value when set to `0`.\r\n\r\nFor the other suggestion (allowing different metric prefixes), I will discuss with @lhoestq to agree on its implementation.", "Awesome! Thank you, @albertvillanova!!!\r\n\r\n" ]
2021-05-21T00:12:58
2021-05-26T16:07:54
2021-05-26T16:07:54
CONTRIBUTOR
null
null
null
null
Moving from https://github.com/huggingface/transformers/issues/11801#issuecomment-845546612 Quoting @VictorSanh: > > I downgraded datasets to `1.5.0` and printed `tokenized_datasets.cache_files` (L335): > > > `{'train': [{'filename': '/home/victor/.cache/huggingface/datasets/openwebtext10k/plain_text/1.0.0/3a8df094c671b4cb63ed0b41f40fb3bd855e9ce2e3765e5df50abcdfb5ec144b/cache-c6aefe81ca4e5152.arrow'}], 'validation': [{'filename': '/home/victor/.cache/huggingface/datasets/openwebtext10k/plain_text/1.0.0/3a8df094c671b4cb63ed0b41f40fb3bd855e9ce2e3765e5df50abcdfb5ec144b/cache-97cf4c813e6469c6.arrow'}]}` > > while the same command with the latest version of datasets (actually starting at `1.6.0`) gives: > > `{'train': [], 'validation': []}` > I also confirm that downgrading to `datasets==1.5.0` makes things fast again - i.e. cache is used. To reproduce: ``` USE_TF=0 python examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path gpt2 \ --dataset_name "stas/openwebtext-10k" \ --output_dir output_dir \ --overwrite_output_dir \ --do_train \ --do_eval \ --max_train_samples 1000 \ --max_eval_samples 200 \ --per_device_train_batch_size 4 \ --per_device_eval_batch_size 4 \ --num_train_epochs 1 \ --warmup_steps 8 \ --block_size 64 \ --fp16 \ --report_to none ``` The first time, the startup is slow and shows some 5 tqdm bars. It shouldn't do that on subsequent runs, but with `datasets>1.5.0` it rebuilds on every run. @lhoestq
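A short sketch of the two workarounds described in the comments on this issue (both are quoted there, not introduced here): either disable the small-dataset in-memory behaviour globally, or request an on-disk, memory-mapped dataset explicitly so cached results are found again.

```python
import datasets
from datasets import load_dataset

# Workaround 1 (from the comments): disable the in-memory behaviour for small datasets,
# set before any load_dataset call.
datasets.config.MAX_IN_MEMORY_DATASET_SIZE_IN_BYTES = None

# Workaround 2 (from the comments): ask explicitly for an on-disk, memory-mapped dataset
# so previously computed map() results can be reloaded from the cache.
ds = load_dataset("stas/openwebtext-10k", keep_in_memory=False)
```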
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2387/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2387/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
5 days, 15:54:56
https://api.github.com/repos/huggingface/datasets/issues/2386
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2386/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2386/comments
https://api.github.com/repos/huggingface/datasets/issues/2386/events
https://github.com/huggingface/datasets/issues/2386
897,560,049
MDU6SXNzdWU4OTc1NjAwNDk=
2,386
Accessing Arrow dataset cache_files
{ "avatar_url": "https://avatars.githubusercontent.com/u/28717374?v=4", "events_url": "https://api.github.com/users/Mehrad0711/events{/privacy}", "followers_url": "https://api.github.com/users/Mehrad0711/followers", "following_url": "https://api.github.com/users/Mehrad0711/following{/other_user}", "gists_url": "https://api.github.com/users/Mehrad0711/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Mehrad0711", "id": 28717374, "login": "Mehrad0711", "node_id": "MDQ6VXNlcjI4NzE3Mzc0", "organizations_url": "https://api.github.com/users/Mehrad0711/orgs", "received_events_url": "https://api.github.com/users/Mehrad0711/received_events", "repos_url": "https://api.github.com/users/Mehrad0711/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Mehrad0711/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mehrad0711/subscriptions", "type": "User", "url": "https://api.github.com/users/Mehrad0711", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Thanks @bhavitvyamalik for referencing the workaround. Setting `keep_in_memory=False` is working." ]
2021-05-20T23:57:43
2021-05-21T19:18:03
2021-05-21T19:18:03
NONE
null
null
null
null
## Describe the bug In datasets 1.5.0 the following code snippet would have printed the cache_files: ``` from datasets import load_dataset train_data = load_dataset('conll2003', split='train', cache_dir='data') print(train_data.cache_files[0]['filename']) ``` However, in the newest release (1.6.1), it prints an empty list. I also tried loading the dataset with the `keep_in_memory=True` argument, but `cache_files` is still empty. I was wondering whether this is a bug or whether I need to pass additional arguments to access the cache_files.
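A minimal sketch of the workaround confirmed in the comments above: forcing the dataset to stay on disk repopulates `cache_files`.

```python
from datasets import load_dataset

# keep_in_memory=False memory-maps the Arrow files from disk instead of copying them into RAM,
# so the dataset keeps references to its cache files (workaround from the comments above).
train_data = load_dataset("conll2003", split="train", cache_dir="data", keep_in_memory=False)
print(train_data.cache_files[0]["filename"])
```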
{ "avatar_url": "https://avatars.githubusercontent.com/u/28717374?v=4", "events_url": "https://api.github.com/users/Mehrad0711/events{/privacy}", "followers_url": "https://api.github.com/users/Mehrad0711/followers", "following_url": "https://api.github.com/users/Mehrad0711/following{/other_user}", "gists_url": "https://api.github.com/users/Mehrad0711/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Mehrad0711", "id": 28717374, "login": "Mehrad0711", "node_id": "MDQ6VXNlcjI4NzE3Mzc0", "organizations_url": "https://api.github.com/users/Mehrad0711/orgs", "received_events_url": "https://api.github.com/users/Mehrad0711/received_events", "repos_url": "https://api.github.com/users/Mehrad0711/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Mehrad0711/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mehrad0711/subscriptions", "type": "User", "url": "https://api.github.com/users/Mehrad0711", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2386/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2386/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
19:20:20
https://api.github.com/repos/huggingface/datasets/issues/2382
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2382/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2382/comments
https://api.github.com/repos/huggingface/datasets/issues/2382/events
https://github.com/huggingface/datasets/issues/2382
895,610,216
MDU6SXNzdWU4OTU2MTAyMTY=
2,382
DuplicatedKeysError: FAILURE TO GENERATE DATASET ! load_dataset('head_qa', 'en')
{ "avatar_url": "https://avatars.githubusercontent.com/u/75953751?v=4", "events_url": "https://api.github.com/users/helloworld123-lab/events{/privacy}", "followers_url": "https://api.github.com/users/helloworld123-lab/followers", "following_url": "https://api.github.com/users/helloworld123-lab/following{/other_user}", "gists_url": "https://api.github.com/users/helloworld123-lab/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/helloworld123-lab", "id": 75953751, "login": "helloworld123-lab", "node_id": "MDQ6VXNlcjc1OTUzNzUx", "organizations_url": "https://api.github.com/users/helloworld123-lab/orgs", "received_events_url": "https://api.github.com/users/helloworld123-lab/received_events", "repos_url": "https://api.github.com/users/helloworld123-lab/repos", "site_admin": false, "starred_url": "https://api.github.com/users/helloworld123-lab/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/helloworld123-lab/subscriptions", "type": "User", "url": "https://api.github.com/users/helloworld123-lab", "user_view_type": "public" }
[]
closed
false
null
[]
[]
2021-05-19T15:49:48
2021-05-30T13:26:16
2021-05-30T13:26:16
NONE
null
null
null
null
Hello everyone, I am trying to use the head_qa dataset from https://huggingface.co/datasets/viewer/?dataset=head_qa&config=en ``` !pip install datasets from datasets import load_dataset dataset = load_dataset( 'head_qa', 'en') ``` When I run the load_dataset call above, it throws the following: ``` DuplicatedKeysError Traceback (most recent call last) <ipython-input-6-ea87002d32f0> in <module>() 2 from datasets import load_dataset 3 dataset = load_dataset( ----> 4 'head_qa', 'en') 5 frames /usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in check_duplicate_keys(self) 347 for hash, key in self.hkey_record: 348 if hash in tmp_record: --> 349 raise DuplicatedKeysError(key) 350 else: 351 tmp_record.add(hash) DuplicatedKeysError: FAILURE TO GENERATE DATASET ! Found duplicate Key: 1 Keys should be unique and deterministic in nature ``` How can I fix the error? Thanks
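For context, a generic, hypothetical sketch of what the error points at (not the actual `head_qa` script): the keys a loading script yields from `_generate_examples` must be unique, and deriving them from a running index is the usual fix.

```python
import json

def generate_examples(filepath):
    # Mirrors a loading script's _generate_examples; file layout and field names are illustrative.
    with open(filepath, encoding="utf-8") as f:
        data = json.load(f)
    for idx, example in enumerate(data):
        # Yielding the enumeration index (or combining it with an id) keeps keys unique,
        # which is what the DuplicatedKeysError above is asking for.
        yield idx, example
```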
{ "avatar_url": "https://avatars.githubusercontent.com/u/75953751?v=4", "events_url": "https://api.github.com/users/helloworld123-lab/events{/privacy}", "followers_url": "https://api.github.com/users/helloworld123-lab/followers", "following_url": "https://api.github.com/users/helloworld123-lab/following{/other_user}", "gists_url": "https://api.github.com/users/helloworld123-lab/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/helloworld123-lab", "id": 75953751, "login": "helloworld123-lab", "node_id": "MDQ6VXNlcjc1OTUzNzUx", "organizations_url": "https://api.github.com/users/helloworld123-lab/orgs", "received_events_url": "https://api.github.com/users/helloworld123-lab/received_events", "repos_url": "https://api.github.com/users/helloworld123-lab/repos", "site_admin": false, "starred_url": "https://api.github.com/users/helloworld123-lab/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/helloworld123-lab/subscriptions", "type": "User", "url": "https://api.github.com/users/helloworld123-lab", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2382/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2382/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
10 days, 21:36:28
https://api.github.com/repos/huggingface/datasets/issues/2378
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2378/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2378/comments
https://api.github.com/repos/huggingface/datasets/issues/2378/events
https://github.com/huggingface/datasets/issues/2378
895,131,774
MDU6SXNzdWU4OTUxMzE3NzQ=
2,378
Add missing dataset_infos.json files
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" } ]
[]
2021-05-19T08:11:12
2021-05-19T08:11:12
null
MEMBER
null
null
null
null
Some of the datasets in `datasets` are missing a `dataset_infos.json` file, e.g. ``` [PosixPath('datasets/chr_en/chr_en.py'), PosixPath('datasets/chr_en/README.md')] [PosixPath('datasets/telugu_books/README.md'), PosixPath('datasets/telugu_books/telugu_books.py')] [PosixPath('datasets/reclor/README.md'), PosixPath('datasets/reclor/reclor.py')] [PosixPath('datasets/json/README.md')] [PosixPath('datasets/csv/README.md')] [PosixPath('datasets/wikihow/wikihow.py'), PosixPath('datasets/wikihow/README.md')] [PosixPath('datasets/c4/c4.py'), PosixPath('datasets/c4/README.md')] [PosixPath('datasets/text/README.md')] [PosixPath('datasets/lm1b/README.md'), PosixPath('datasets/lm1b/lm1b.py')] [PosixPath('datasets/pandas/README.md')] ``` For `json`, `text`, `csv`, and `pandas` this is expected, but not for the others, which should be fixed.
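One possible way to regenerate a missing file, sketched with the `save_infos` flag visible in the `load_dataset` signatures quoted elsewhere in these issues; the canonical route in the contributing docs is `datasets-cli test datasets/<name> --save_infos --all_configs`, so treat this snippet as an assumption rather than the official recipe.

```python
from datasets import load_dataset

# save_infos=True writes dataset_infos.json after a successful build
# (flag visible in the load_dataset signature; relying on it this way is an assumption).
load_dataset("chr_en", save_infos=True)
```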
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2378/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2378/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2377
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2377/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2377/comments
https://api.github.com/repos/huggingface/datasets/issues/2377/events
https://github.com/huggingface/datasets/issues/2377
894,918,927
MDU6SXNzdWU4OTQ5MTg5Mjc=
2,377
ArrowDataset.save_to_disk produces files that cannot be read using pyarrow.feather
{ "avatar_url": "https://avatars.githubusercontent.com/u/1829149?v=4", "events_url": "https://api.github.com/users/Ark-kun/events{/privacy}", "followers_url": "https://api.github.com/users/Ark-kun/followers", "following_url": "https://api.github.com/users/Ark-kun/following{/other_user}", "gists_url": "https://api.github.com/users/Ark-kun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Ark-kun", "id": 1829149, "login": "Ark-kun", "node_id": "MDQ6VXNlcjE4MjkxNDk=", "organizations_url": "https://api.github.com/users/Ark-kun/orgs", "received_events_url": "https://api.github.com/users/Ark-kun/received_events", "repos_url": "https://api.github.com/users/Ark-kun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Ark-kun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ark-kun/subscriptions", "type": "User", "url": "https://api.github.com/users/Ark-kun", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
[ "Hi ! This is because we are actually using the arrow streaming format. We plan to switch to the arrow IPC format.\r\nMore info at #1933 ", "Not sure if this was resolved, but I am getting a similar error when trying to load a dataset.arrow file directly: `ArrowInvalid: Not an Arrow file`", "Since we're using the streaming format, you need to use `open_stream`:\r\n\r\n```python\r\nimport pyarrow as pa\r\n\r\ndef in_memory_arrow_table_from_file(filename: str) -> pa.Table:\r\n in_memory_stream = pa.input_stream(filename)\r\n opened_stream = pa.ipc.open_stream(in_memory_stream)\r\n pa_table = opened_stream.read_all()\r\n return pa_table\r\n\r\ndef memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:\r\n memory_mapped_stream = pa.memory_map(filename)\r\n opened_stream = pa.ipc.open_stream(memory_mapped_stream)\r\n pa_table = opened_stream.read_all()\r\n return pa_table\r\n```", "> 由于我们使用流格式,因此您需要使用`open_stream`:\r\n> \r\n> ```python\r\n> import pyarrow as pa\r\n> \r\n> def in_memory_arrow_table_from_file(filename: str) -> pa.Table:\r\n> in_memory_stream = pa.input_stream(filename)\r\n> opened_stream = pa.ipc.open_stream(in_memory_stream)\r\n> pa_table = opened_stream.read_all()\r\n> return pa_table\r\n> \r\n> def memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:\r\n> memory_mapped_stream = pa.memory_map(filename)\r\n> opened_stream = pa.ipc.open_stream(memory_mapped_stream)\r\n> pa_table = opened_stream.read_all()\r\n> return pa_table\r\n> ```\r\nThank you very much for providing the code that can read arrow file to pa_table and finally to dict, but how to implement the reverse process, how to restore a dict to arrow file?\r\n" ]
2021-05-19T02:04:37
2024-01-18T08:06:15
null
NONE
null
null
null
null
## Describe the bug A clear and concise description of what the bug is. ## Steps to reproduce the bug ```python from datasets import load_dataset from pyarrow import feather dataset = load_dataset('imdb', split='train') dataset.save_to_disk('dataset_dir') table = feather.read_table('dataset_dir/dataset.arrow') ``` ## Expected results I expect that the saved dataset can be read by the official Apache Arrow methods. ## Actual results ``` File "/usr/local/lib/python3.7/site-packages/pyarrow/feather.py", line 236, in read_table reader.open(source, use_memory_map=memory_map) File "pyarrow/feather.pxi", line 67, in pyarrow.lib.FeatherReader.open File "pyarrow/error.pxi", line 123, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 85, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Not a Feather V1 or Arrow IPC file ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: datasets-1.6.2 - Platform: Linux - Python version: 3.7 - PyArrow version: 0.17.1, also 2.0.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2377/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2377/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2373
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2373/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2373/comments
https://api.github.com/repos/huggingface/datasets/issues/2373/events
https://github.com/huggingface/datasets/issues/2373
894,499,909
MDU6SXNzdWU4OTQ0OTk5MDk=
2,373
Loading dataset from local path
{ "avatar_url": "https://avatars.githubusercontent.com/u/34172905?v=4", "events_url": "https://api.github.com/users/kolakows/events{/privacy}", "followers_url": "https://api.github.com/users/kolakows/followers", "following_url": "https://api.github.com/users/kolakows/following{/other_user}", "gists_url": "https://api.github.com/users/kolakows/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kolakows", "id": 34172905, "login": "kolakows", "node_id": "MDQ6VXNlcjM0MTcyOTA1", "organizations_url": "https://api.github.com/users/kolakows/orgs", "received_events_url": "https://api.github.com/users/kolakows/received_events", "repos_url": "https://api.github.com/users/kolakows/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kolakows/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kolakows/subscriptions", "type": "User", "url": "https://api.github.com/users/kolakows", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Version below works, checked again in the docs, and data_files should be a path.\r\n```\r\nds = datasets.load_dataset('my_script.py', \r\n data_files='/data/dir/corpus.txt', \r\n cache_dir='.')\r\n```" ]
2021-05-18T15:20:50
2021-05-18T15:36:36
2021-05-18T15:36:35
NONE
null
null
null
null
I'm trying to load a local dataset with the code below ``` ds = datasets.load_dataset('my_script.py', data_files='corpus.txt', data_dir='/data/dir', cache_dir='.') ``` But internally a BuilderConfig is created, which tries to use getmtime on the data_files string, without using data_dir. Is this a bug or am I not using the load_dataset correctly? https://github.com/huggingface/datasets/blob/bc61954083f74e6460688202e9f77dde2475319c/src/datasets/builder.py#L153
{ "avatar_url": "https://avatars.githubusercontent.com/u/34172905?v=4", "events_url": "https://api.github.com/users/kolakows/events{/privacy}", "followers_url": "https://api.github.com/users/kolakows/followers", "following_url": "https://api.github.com/users/kolakows/following{/other_user}", "gists_url": "https://api.github.com/users/kolakows/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kolakows", "id": 34172905, "login": "kolakows", "node_id": "MDQ6VXNlcjM0MTcyOTA1", "organizations_url": "https://api.github.com/users/kolakows/orgs", "received_events_url": "https://api.github.com/users/kolakows/received_events", "repos_url": "https://api.github.com/users/kolakows/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kolakows/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kolakows/subscriptions", "type": "User", "url": "https://api.github.com/users/kolakows", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2373/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2373/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:15:45
https://api.github.com/repos/huggingface/datasets/issues/2371
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2371/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2371/comments
https://api.github.com/repos/huggingface/datasets/issues/2371/events
https://github.com/huggingface/datasets/issues/2371
894,193,403
MDU6SXNzdWU4OTQxOTM0MDM=
2,371
Align question answering tasks with sub-domains
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" } ]
[ "Closing this issue as the `task_templates` API has been deprecated." ]
2021-05-18T09:47:59
2023-07-25T16:52:05
2023-07-25T16:52:04
MEMBER
null
null
null
null
As pointed out by @thomwolf in #2255 we should consider breaking with the pipeline taxonomy of `transformers` to account for the various types of question-answering domains: > `question-answering` exists in two forms: abstractive and extractive question answering. > > we can keep a generic `question-answering` but then it will probably mean diferrent schema of input/output for both (abstractive will have text for both while extractive can use spans indication as well as text). > > Or we can also propose to use `abstractive-question-answering` and `extractive-question-answering` for instance. > Maybe we could have `question-answering-abstractive` and `question-answering-extractive` if somehow we can use a for a completion or search in the future (detail). > Actually I see that people are more organizing in terms of general and sub-tasks, for instance on paperwithcode: https://paperswithcode.com/area/natural-language-processing and on nlpprogress: https://github.com/sebastianruder/NLP-progress/blob/master/english/question_answering.md#squad > > Probably the best is to align with one of these in terms of denomination, PaperWithCode is probably the most active and maintained and we work with them as well. > Maybe you want to check with a few QA datasets that this schema make sense. Typically NaturalQuestions, TriviaQA and can be good second datasets to compare to and be sure of the generality of the schema. > > A good recent list of QA datasets to compare the schemas among, is for instance in the UnitedQA paper: https://arxiv.org/abs/2101.00178 Investigate which grouping of QA is best suited for `datasets` and adapt / extend the QA task template accordingly.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2371/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2371/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
798 days, 7:04:05
https://api.github.com/repos/huggingface/datasets/issues/2366
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2366/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2366/comments
https://api.github.com/repos/huggingface/datasets/issues/2366/events
https://github.com/huggingface/datasets/issues/2366
893,185,266
MDU6SXNzdWU4OTMxODUyNjY=
2,366
Json loader fails if user-specified features don't match the json data fields order
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[]
2021-05-17T10:26:08
2021-06-16T10:47:49
2021-06-16T10:47:49
MEMBER
null
null
null
null
If you do ```python dataset = load_dataset("json", data_files=data_files, features=features) ``` Then depending on the order of the features in the json data field it fails: ```python [...] ~/Desktop/hf/datasets/src/datasets/packaged_modules/json/json.py in _generate_tables(self, files) 94 if self.config.schema: 95 # Cast allows str <-> int/float, while parse_option explicit_schema does NOT ---> 96 pa_table = pa_table.cast(self.config.schema) 97 yield i, pa_table [...] ValueError: Target schema's field names are not matching the table's field names: ['tokens', 'ner_tags'], ['ner_tags', 'tokens'] ``` This is because one must first re-order the columns of the table to match the `self.config.schema` before calling cast. One way to fix the `cast` would be to replace it with: ```python # reorder the arrays if necessary + cast to schema # we can't simply use .cast here because we may need to change the order of the columns pa_table = pa.Table.from_arrays([pa_table[name] for name in schema.names], schema=schema) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2366/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2366/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
30 days, 0:21:41
https://api.github.com/repos/huggingface/datasets/issues/2365
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2365/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2365/comments
https://api.github.com/repos/huggingface/datasets/issues/2365/events
https://github.com/huggingface/datasets/issues/2365
893,179,697
MDU6SXNzdWU4OTMxNzk2OTc=
2,365
Missing ClassLabel encoding in Json loader
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[]
2021-05-17T10:19:10
2021-06-28T15:05:34
2021-06-28T15:05:34
MEMBER
null
null
null
null
Currently if you want to load a json dataset this way ```python dataset = load_dataset("json", data_files=data_files, features=features) ``` Then if your features has ClassLabel types and if your json data needs class label encoding (i.e. if the labels in the json files are strings and not integers), then it would fail: ```python [...] ~/Desktop/hf/datasets/src/datasets/packaged_modules/json/json.py in _generate_tables(self, files) 94 if self.config.schema: 95 # Cast allows str <-> int/float, while parse_option explicit_schema does NOT ---> 96 pa_table = pa_table.cast(self.config.schema) 97 yield i, pa_table [...] ArrowInvalid: Failed to parse string: 'O' as a scalar of type int64 ``` This is because it just tries to cast the string data to integers, without applying the mapping str->int first The current workaround is to do instead ```python dataset = load_dataset("json", data_files=data_files) dataset = dataset.map(features.encode_example, features=features) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2365/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2365/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
42 days, 4:46:24
https://api.github.com/repos/huggingface/datasets/issues/2360
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2360/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2360/comments
https://api.github.com/repos/huggingface/datasets/issues/2360/events
https://github.com/huggingface/datasets/issues/2360
891,965,964
MDU6SXNzdWU4OTE5NjU5NjQ=
2,360
Automatically detect datasets with compatible task schemas
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" } ]
[]
2021-05-14T14:23:40
2021-05-14T14:23:40
null
MEMBER
null
null
null
null
See description of #2255 for details.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2360/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2360/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2359
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2359/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2359/comments
https://api.github.com/repos/huggingface/datasets/issues/2359/events
https://github.com/huggingface/datasets/issues/2359
891,946,017
MDU6SXNzdWU4OTE5NDYwMTc=
2,359
Allow model labels to be passed during task preparation
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[]
closed
false
null
[]
[ "We now have the `align_labels_with_mapping` method in the API for this purpose." ]
2021-05-14T13:58:28
2022-10-05T17:37:22
2022-10-05T17:37:22
MEMBER
null
null
null
null
Models have a config with label2id. And we have the same for datasets with the ClassLabel feature type. At one point either the model or the dataset must sync with the other. It would be great to do that on the dataset side. For example for sentiment classification on amazon reviews with you could have these labels: - "1 star", "2 stars", "3 stars", "4 stars", "5 stars" - "1", "2", "3", "4", "5" Some models may use the first set, while other models use the second set. Here in the `TextClassification` class, the user can only specify one set of labels, while many models could actually be compatible but have different sets of labels. Should we allow users to pass a list of compatible labels sets ? Then in terms of API, users could use `dataset.prepare_for_task("text-classification", labels=model.labels)` or something like that. The label set could also be the same but not in the same order. For NLI for example, some models use `["neutral", "entailment", "contradiction"]` and some others use `["neutral", "contradiction", "entailment"]`, so we should take care of updating the order of the labels in the dataset to match the labels order of the model. Let me know what you think ! This can be done in a future PR _Originally posted by @lhoestq in https://github.com/huggingface/datasets/pull/2255#discussion_r632412792_
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2359/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2359/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
509 days, 3:38:54
https://api.github.com/repos/huggingface/datasets/issues/2354
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2354/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2354/comments
https://api.github.com/repos/huggingface/datasets/issues/2354/events
https://github.com/huggingface/datasets/issues/2354
890,439,523
MDU6SXNzdWU4OTA0Mzk1MjM=
2,354
Document DatasetInfo attributes
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" } ]
[]
2021-05-12T20:01:29
2021-05-22T09:26:14
2021-05-22T09:26:14
MEMBER
null
null
null
null
**Is your feature request related to a problem? Please describe.** As noted in PR #2255, the attributes of `DatasetInfo` are not documented in the [docs](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=datasetinfo#datasetinfo). It would be nice to do so :)
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2354/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2354/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
9 days, 13:24:45
https://api.github.com/repos/huggingface/datasets/issues/2350
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2350/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2350/comments
https://api.github.com/repos/huggingface/datasets/issues/2350/events
https://github.com/huggingface/datasets/issues/2350
889,580,247
MDU6SXNzdWU4ODk1ODAyNDc=
2,350
`FaissIndex.save` throws error on GPU
{ "avatar_url": "https://avatars.githubusercontent.com/u/2821124?v=4", "events_url": "https://api.github.com/users/Guitaricet/events{/privacy}", "followers_url": "https://api.github.com/users/Guitaricet/followers", "following_url": "https://api.github.com/users/Guitaricet/following{/other_user}", "gists_url": "https://api.github.com/users/Guitaricet/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Guitaricet", "id": 2821124, "login": "Guitaricet", "node_id": "MDQ6VXNlcjI4MjExMjQ=", "organizations_url": "https://api.github.com/users/Guitaricet/orgs", "received_events_url": "https://api.github.com/users/Guitaricet/received_events", "repos_url": "https://api.github.com/users/Guitaricet/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Guitaricet/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Guitaricet/subscriptions", "type": "User", "url": "https://api.github.com/users/Guitaricet", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Just in case, this is a workaround that I use in my code and it seems to do the job.\r\n\r\n```python\r\nif use_gpu_index:\r\n data[\"train\"]._indexes[\"text_emb\"].faiss_index = faiss.index_gpu_to_cpu(data[\"train\"]._indexes[\"text_emb\"].faiss_index)\r\n```" ]
2021-05-12T03:41:56
2021-05-17T13:41:41
2021-05-17T13:41:41
CONTRIBUTOR
null
null
null
null
## Describe the bug After training an index with a factory string `OPQ16_128,IVF512,PQ32` on GPU, `.save_faiss_index` throws this error. ``` File "index_wikipedia.py", line 119, in <module> data["train"].save_faiss_index("text_emb", index_save_path) File "/home/vlialin/miniconda3/envs/cat/lib/python3.8/site-packages/datasets/search.py", line 470, in save_faiss_index index.save(file) File "/home/vlialin/miniconda3/envs/cat/lib/python3.8/site-packages/datasets/search.py", line 334, in save faiss.write_index(index, str(file)) File "/home/vlialin/miniconda3/envs/cat/lib/python3.8/site-packages/faiss/swigfaiss_avx2.py", line 5654, in write_index return _swigfaiss.write_index(*args) RuntimeError: Error in void faiss::write_index(const faiss::Index*, faiss::IOWriter*) at /root/miniconda3/conda-bld/faiss-pkg_1613235005464/work/faiss/impl/index_write.cpp:453: don't know how to serialize this type of index ``` ## Steps to reproduce the bug Any dataset will do, I just selected a familiar one. ```python import numpy as np import datasets INDEX_STR = "OPQ16_128,IVF512,PQ32" INDEX_SAVE_PATH = "will_not_save.faiss" data = datasets.load_dataset("Fraser/news-category-dataset", split=f"train[:10000]") def encode(item): return {"text_emb": np.random.randn(768).astype(np.float32)} data = data.map(encode) data.add_faiss_index(column="text_emb", string_factory=INDEX_STR, train_size=10_000, device=0) data.save_faiss_index("text_emb", INDEX_SAVE_PATH) ``` ## Expected results Saving the index ## Actual results Error in void faiss::write_index(const faiss::Index*, faiss::IOWriter*) ... don't know how to serialize this type of index ## Environment info - `datasets` version: 1.6.2 - Platform: Linux-4.15.0-142-generic-x86_64-with-glibc2.10 - Python version: 3.8.8 - PyTorch version (GPU?): 1.8.1+cu111 (True) - Tensorflow version (GPU?): 2.2.0 (False) - Using GPU in script?: Yes - Using distributed or parallel set-up in script?: No I will be proposing a fix in a couple of minutes
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2350/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2350/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
5 days, 9:59:45
https://api.github.com/repos/huggingface/datasets/issues/2347
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2347/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2347/comments
https://api.github.com/repos/huggingface/datasets/issues/2347/events
https://github.com/huggingface/datasets/issues/2347
887,404,868
MDU6SXNzdWU4ODc0MDQ4Njg=
2,347
Add an API to access the language and pretty name of a dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/35901082?v=4", "events_url": "https://api.github.com/users/sgugger/events{/privacy}", "followers_url": "https://api.github.com/users/sgugger/followers", "following_url": "https://api.github.com/users/sgugger/following{/other_user}", "gists_url": "https://api.github.com/users/sgugger/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sgugger", "id": 35901082, "login": "sgugger", "node_id": "MDQ6VXNlcjM1OTAxMDgy", "organizations_url": "https://api.github.com/users/sgugger/orgs", "received_events_url": "https://api.github.com/users/sgugger/received_events", "repos_url": "https://api.github.com/users/sgugger/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sgugger/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sgugger/subscriptions", "type": "User", "url": "https://api.github.com/users/sgugger", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "Hi ! With @bhavitvyamalik we discussed about having something like\r\n```python\r\nfrom datasets import load_dataset_card\r\n\r\ndataset_card = load_dataset_card(\"squad\")\r\nprint(dataset_card.metadata.pretty_name)\r\n# Stanford Question Answering Dataset (SQuAD)\r\nprint(dataset_card.metadata.languages)\r\n# [\"en\"]\r\n\r\n```\r\nWhat do you think ?\r\n\r\nI don't know if you already have a way to load the model tags in `transformers` but we can agree on the API to have something consistent.\r\n\r\nAlso note that the pretty name would only be used to show users something prettier than a dataset id, but in the end the source of truth will stay the dataset id (here `squad`).", "That works for me!", "maybe use the hub-backed dataset_info method? (so there's only one parser of README.md metadata)?", "What dataset_info method are you talking about @julien-c ? In `huggingface_hub` I can only see `model_info`.", "hmm the equivalent method in `datasets` (which could go into `huggingface_hub` at some point)", "Indeed, this info can now be fetched with `huggingface_hub.dataset_info`, so I think we can close this issue." ]
2021-05-11T14:10:08
2022-10-05T17:16:54
2022-10-05T17:16:53
CONTRIBUTOR
null
null
null
null
It would be super nice to have an API to get some metadata of the dataset from the name and args passed to `load_dataset`. This way we could programmatically infer the language and the name of a dataset when creating model cards automatically in the Transformers examples scripts.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2347/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2347/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
512 days, 3:06:45
https://api.github.com/repos/huggingface/datasets/issues/2345
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2345/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2345/comments
https://api.github.com/repos/huggingface/datasets/issues/2345/events
https://github.com/huggingface/datasets/issues/2345
886,586,872
MDU6SXNzdWU4ODY1ODY4NzI=
2,345
[Question] How to move and reuse preprocessed dataset?
{ "avatar_url": "https://avatars.githubusercontent.com/u/15045402?v=4", "events_url": "https://api.github.com/users/AtmaHou/events{/privacy}", "followers_url": "https://api.github.com/users/AtmaHou/followers", "following_url": "https://api.github.com/users/AtmaHou/following{/other_user}", "gists_url": "https://api.github.com/users/AtmaHou/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/AtmaHou", "id": 15045402, "login": "AtmaHou", "node_id": "MDQ6VXNlcjE1MDQ1NDAy", "organizations_url": "https://api.github.com/users/AtmaHou/orgs", "received_events_url": "https://api.github.com/users/AtmaHou/received_events", "repos_url": "https://api.github.com/users/AtmaHou/repos", "site_admin": false, "starred_url": "https://api.github.com/users/AtmaHou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/AtmaHou/subscriptions", "type": "User", "url": "https://api.github.com/users/AtmaHou", "user_view_type": "public" }
[]
closed
false
null
[]
[ "@lhoestq @LysandreJik", "<s>Hi :) Can you share with us the code you used ?</s>\r\n\r\nEDIT: from https://github.com/huggingface/transformers/issues/11665#issuecomment-838348291 I understand you're using the run_clm.py script. Can you share your logs ?\r\n", "Also note that for the caching to work, you must reuse the exact same parameters as in the first run. Did you change any parameter ? The `preprocessing_num_workers` should also stay the same", "> Also note that for the caching to work, you must reuse the exact same parameters as in the first run. Did you change any parameter ? The `preprocessing_num_workers` should also stay the same\r\n\r\nI only changed the `preprocessing_num_workers` maybe it is the problem~ I will try again~" ]
2021-05-11T09:09:17
2021-06-11T04:39:11
2021-06-11T04:39:11
NONE
null
null
null
null
Hi, I am training a gpt-2 from scratch using run_clm.py. I want to move and reuse the preprocessed dataset (it takes 2 hours to preprocess). I tried to copy path_to_cache_dir/datasets to new_cache_dir/datasets and set export HF_DATASETS_CACHE="new_cache_dir/", but the program still re-preprocesses the whole dataset without loading the cache. I also tried torch.save(lm_datasets, fw), but the saved file is only 14M. What is the proper way to do this?
{ "avatar_url": "https://avatars.githubusercontent.com/u/15045402?v=4", "events_url": "https://api.github.com/users/AtmaHou/events{/privacy}", "followers_url": "https://api.github.com/users/AtmaHou/followers", "following_url": "https://api.github.com/users/AtmaHou/following{/other_user}", "gists_url": "https://api.github.com/users/AtmaHou/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/AtmaHou", "id": 15045402, "login": "AtmaHou", "node_id": "MDQ6VXNlcjE1MDQ1NDAy", "organizations_url": "https://api.github.com/users/AtmaHou/orgs", "received_events_url": "https://api.github.com/users/AtmaHou/received_events", "repos_url": "https://api.github.com/users/AtmaHou/repos", "site_admin": false, "starred_url": "https://api.github.com/users/AtmaHou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/AtmaHou/subscriptions", "type": "User", "url": "https://api.github.com/users/AtmaHou", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2345/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2345/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
30 days, 19:29:54
https://api.github.com/repos/huggingface/datasets/issues/2344
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2344/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2344/comments
https://api.github.com/repos/huggingface/datasets/issues/2344/events
https://github.com/huggingface/datasets/issues/2344
885,331,505
MDU6SXNzdWU4ODUzMzE1MDU=
2,344
Is there a way to join multiple datasets in one?
{ "avatar_url": "https://avatars.githubusercontent.com/u/35173563?v=4", "events_url": "https://api.github.com/users/avacaondata/events{/privacy}", "followers_url": "https://api.github.com/users/avacaondata/followers", "following_url": "https://api.github.com/users/avacaondata/following{/other_user}", "gists_url": "https://api.github.com/users/avacaondata/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/avacaondata", "id": 35173563, "login": "avacaondata", "node_id": "MDQ6VXNlcjM1MTczNTYz", "organizations_url": "https://api.github.com/users/avacaondata/orgs", "received_events_url": "https://api.github.com/users/avacaondata/received_events", "repos_url": "https://api.github.com/users/avacaondata/repos", "site_admin": false, "starred_url": "https://api.github.com/users/avacaondata/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/avacaondata/subscriptions", "type": "User", "url": "https://api.github.com/users/avacaondata", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
[ "Hi ! We don't have `join`/`merge` on a certain column as in pandas.\r\nMaybe you can just use the [concatenate_datasets](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=concatenate#datasets.concatenate_datasets) function.\r\n", "Hi! You can use `datasets_sql` for that now. As of recently, PyArrow also supports querying tables via Substrait, so I think we can start adding these methods to the API soon." ]
2021-05-10T23:16:10
2022-10-05T17:27:05
null
NONE
null
null
null
null
**Is your feature request related to a problem? Please describe.** I need to join 2 datasets, one that is in the hub and another I've created from my files. Is there an easy way to join these 2? **Describe the solution you'd like** I'd like to join them with a merge or join method, just like pandas dataframes. **Additional context** If you want to extend an existing dataset with more data, for example for training a language model, you need that functionality. I've not found it in the documentation.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2344/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2344/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2343
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2343/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2343/comments
https://api.github.com/repos/huggingface/datasets/issues/2343/events
https://github.com/huggingface/datasets/issues/2343
883,208,539
MDU6SXNzdWU4ODMyMDg1Mzk=
2,343
Columns are removed before or after map function applied?
{ "avatar_url": "https://avatars.githubusercontent.com/u/8199406?v=4", "events_url": "https://api.github.com/users/taghizad3h/events{/privacy}", "followers_url": "https://api.github.com/users/taghizad3h/followers", "following_url": "https://api.github.com/users/taghizad3h/following{/other_user}", "gists_url": "https://api.github.com/users/taghizad3h/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/taghizad3h", "id": 8199406, "login": "taghizad3h", "node_id": "MDQ6VXNlcjgxOTk0MDY=", "organizations_url": "https://api.github.com/users/taghizad3h/orgs", "received_events_url": "https://api.github.com/users/taghizad3h/received_events", "repos_url": "https://api.github.com/users/taghizad3h/repos", "site_admin": false, "starred_url": "https://api.github.com/users/taghizad3h/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/taghizad3h/subscriptions", "type": "User", "url": "https://api.github.com/users/taghizad3h", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
[ "Hi! Columns are removed **after** applying the function and **before** updating the examples with the function's output (as per the docs [here](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.map.remove_columns)). I agree the docs on this should be more clear." ]
2021-05-10T02:36:20
2022-10-24T11:31:55
null
NONE
null
null
null
null
## Describe the bug According to the documentation, when applying a map function the columns in [remove_columns](https://huggingface.co/docs/datasets/processing.html#removing-columns) will be removed after they are passed to the function, but the [source code](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map) documents that they are removed before applying the function. I think the source code doc is more accurate, right?
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2343/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2343/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2337
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2337/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2337/comments
https://api.github.com/repos/huggingface/datasets/issues/2337/events
https://github.com/huggingface/datasets/issues/2337
881,610,567
MDU6SXNzdWU4ODE2MTA1Njc=
2,337
NonMatchingChecksumError for web_of_science dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/24982805?v=4", "events_url": "https://api.github.com/users/nbroad1881/events{/privacy}", "followers_url": "https://api.github.com/users/nbroad1881/followers", "following_url": "https://api.github.com/users/nbroad1881/following{/other_user}", "gists_url": "https://api.github.com/users/nbroad1881/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nbroad1881", "id": 24982805, "login": "nbroad1881", "node_id": "MDQ6VXNlcjI0OTgyODA1", "organizations_url": "https://api.github.com/users/nbroad1881/orgs", "received_events_url": "https://api.github.com/users/nbroad1881/received_events", "repos_url": "https://api.github.com/users/nbroad1881/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nbroad1881/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nbroad1881/subscriptions", "type": "User", "url": "https://api.github.com/users/nbroad1881", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "I've raised a PR for this. Should work with `dataset = load_dataset(\"web_of_science\", \"WOS11967\", ignore_verifications=True)`once it gets merged into the main branch. Thanks for reporting this! " ]
2021-05-09T02:02:02
2021-05-10T13:35:53
2021-05-10T13:35:53
NONE
null
null
null
null
NonMatchingChecksumError when trying to download the web_of_science dataset. >NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://data.mendeley.com/datasets/9rw3vkcfy4/6/files/c9ea673d-5542-44c0-ab7b-f1311f7d61df/WebOfScience.zip?dl=1'] Setting `ignore_verifications=True` results in OSError. >OSError: Cannot find data file. Original error: [Errno 20] Not a directory: '/root/.cache/huggingface/datasets/downloads/37ab2c42f50d553c1d0ea432baca3e9e11fedea4aeec63a81e6b7e25dd10d4e7/WOS5736/X.txt' ```python dataset = load_dataset('web_of_science', 'WOS5736') ``` There are 3 data instances ('WOS5736', 'WOS11967', 'WOS46985') and none of them work. datasets 1.6.2 python 3.7.10 Ubuntu 18.04.5 LTS
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2337/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2337/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 11:33:51
https://api.github.com/repos/huggingface/datasets/issues/2335
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2335/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2335/comments
https://api.github.com/repos/huggingface/datasets/issues/2335/events
https://github.com/huggingface/datasets/issues/2335
881,291,887
MDU6SXNzdWU4ODEyOTE4ODc=
2,335
Index error in Dataset.map
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[]
2021-05-08T20:44:57
2021-05-10T13:26:12
2021-05-10T13:26:12
COLLABORATOR
null
null
null
null
The following code, if executed on master, raises an IndexError (due to overflow): ```python >>> from datasets import * >>> d = load_dataset("bookcorpus", split="train") Reusing dataset bookcorpus (C:\Users\Mario\.cache\huggingface\datasets\bookcorpus\plain_text\1.0.0\44662c4a114441c35200992bea923b170e6f13f2f0beb7c14e43759cec498700) 2021-05-08 21:23:46.859818: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library cudart64_101.dll >>> d.map(lambda ex: ex) 0%|▎ | 289430/74004228 [00:13<58:41, 20935.33ex/s]c:\users\mario\desktop\projects\datasets-1\src\datasets\table.py:84: RuntimeWarning: overflow encountered in int_scalars k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i])) 0%|▎ | 290162/74004228 [00:13<59:11, 20757.23ex/s] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "c:\users\mario\desktop\projects\datasets-1\src\datasets\arrow_dataset.py", line 1498, in map new_fingerprint=new_fingerprint, File "c:\users\mario\desktop\projects\datasets-1\src\datasets\arrow_dataset.py", line 174, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "c:\users\mario\desktop\projects\datasets-1\src\datasets\fingerprint.py", line 340, in wrapper out = func(self, *args, **kwargs) File "c:\users\mario\desktop\projects\datasets-1\src\datasets\arrow_dataset.py", line 1799, in _map_single for i, example in enumerate(pbar): File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\site-packages\tqdm\std.py", line 1133, in __iter__ for obj in iterable: File "c:\users\mario\desktop\projects\datasets-1\src\datasets\arrow_dataset.py", line 1145, in __iter__ format_kwargs=format_kwargs, File "c:\users\mario\desktop\projects\datasets-1\src\datasets\arrow_dataset.py", line 1337, in _getitem pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None) File "c:\users\mario\desktop\projects\datasets-1\src\datasets\formatting\formatting.py", line 368, in query_table pa_subtable = _query_table(table, key) File "c:\users\mario\desktop\projects\datasets-1\src\datasets\formatting\formatting.py", line 79, in _query_table return table.fast_slice(key % table.num_rows, 1) File "c:\users\mario\desktop\projects\datasets-1\src\datasets\table.py", line 128, in fast_slice i = _interpolation_search(self._offsets, offset) File "c:\users\mario\desktop\projects\datasets-1\src\datasets\table.py", line 91, in _interpolation_search raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.") IndexError: Invalid query '290162' for size 74004228. ``` Tested on Windows, can run on Linux if needed. EDIT: It seems like for this to happen, the default NumPy dtype has to be np.int32.
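A toy reproduction of the overflow hinted at by the `RuntimeWarning` in the traceback, assuming a 32-bit default integer dtype as noted in the edit; the offsets array and query index below are illustrative, not the actual bookcorpus values.

```python
import numpy as np

# Per-batch row offsets for a ~74M-row table, stored as int32 (the Windows default).
arr = np.arange(0, 74_005_000, 1_000, dtype=np.int32)
i, j, x = 0, len(arr) - 1, 290_162

# (j - i) * (x - arr[i]) exceeds 2**31 - 1, wraps negative, and the interpolation
# index k becomes invalid, which is what later surfaces as the IndexError.
k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
print(k)  # negative on int32 instead of a valid position
```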
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2335/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2335/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 16:41:15
https://api.github.com/repos/huggingface/datasets/issues/2331
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2331/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2331/comments
https://api.github.com/repos/huggingface/datasets/issues/2331/events
https://github.com/huggingface/datasets/issues/2331
879,031,427
MDU6SXNzdWU4NzkwMzE0Mjc=
2,331
Add Topical-Chat
{ "avatar_url": "https://avatars.githubusercontent.com/u/22266659?v=4", "events_url": "https://api.github.com/users/ktangri/events{/privacy}", "followers_url": "https://api.github.com/users/ktangri/followers", "following_url": "https://api.github.com/users/ktangri/following{/other_user}", "gists_url": "https://api.github.com/users/ktangri/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ktangri", "id": 22266659, "login": "ktangri", "node_id": "MDQ6VXNlcjIyMjY2NjU5", "organizations_url": "https://api.github.com/users/ktangri/orgs", "received_events_url": "https://api.github.com/users/ktangri/received_events", "repos_url": "https://api.github.com/users/ktangri/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ktangri/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ktangri/subscriptions", "type": "User", "url": "https://api.github.com/users/ktangri", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
open
false
null
[]
[]
2021-05-07T13:43:59
2021-05-07T13:43:59
null
NONE
null
null
null
null
## Adding a Dataset - **Name:** Topical-Chat - **Description:** a knowledge-grounded human-human conversation dataset where the underlying knowledge spans 8 broad topics and conversation partners don’t have explicitly defined roles - **Paper:** https://www.isca-speech.org/archive/Interspeech_2019/pdfs/3079.pdf - **Data:** https://github.com/alexa/Topical-Chat - **Motivation:** Good quality, knowledge-grounded dataset that spans a broad range of topics Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2331/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2331/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2330
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2330/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2330/comments
https://api.github.com/repos/huggingface/datasets/issues/2330/events
https://github.com/huggingface/datasets/issues/2330
878,490,927
MDU6SXNzdWU4Nzg0OTA5Mjc=
2,330
Allow passing `desc` to `tqdm` in `Dataset.map()`
{ "avatar_url": "https://avatars.githubusercontent.com/u/31893406?v=4", "events_url": "https://api.github.com/users/changjonathanc/events{/privacy}", "followers_url": "https://api.github.com/users/changjonathanc/followers", "following_url": "https://api.github.com/users/changjonathanc/following{/other_user}", "gists_url": "https://api.github.com/users/changjonathanc/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/changjonathanc", "id": 31893406, "login": "changjonathanc", "node_id": "MDQ6VXNlcjMxODkzNDA2", "organizations_url": "https://api.github.com/users/changjonathanc/orgs", "received_events_url": "https://api.github.com/users/changjonathanc/received_events", "repos_url": "https://api.github.com/users/changjonathanc/repos", "site_admin": false, "starred_url": "https://api.github.com/users/changjonathanc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/changjonathanc/subscriptions", "type": "User", "url": "https://api.github.com/users/changjonathanc", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" } ]
closed
false
null
[]
[ "Hi @lhoestq,\r\nShould we change `desc` in [pbar](https://github.com/huggingface/datasets/blob/81fcf88172ed5e3026ef68aed4c0ec6980372333/src/datasets/arrow_dataset.py#L1860) to something meaningful?", "I think the user could pass the `desc` parameter to `map` so that it can be displayed in the tqdm progress bar, as suggested by @cccntu.\r\n\r\nWhen there's no multiprocessing, the `desc` of the progress bar could be the `desc` passed by the user.\r\nIn multiprocessing, we were already using a `desc` equal to `\"#\" + str(rank)`.\r\nWe can change it to be `(desc or \"\") + \"#\" + str(rank)` instead.\r\n\r\nIn the end, since both `desc` and `rank` could be None, we can have:\r\n```python\r\npbar_desc = (desc or \"\") + \"#\" + str(rank) if rank is not None else desc\r\n```\r\n\r\nFinally let's remember that if we add `desc` as a new parameter to `map`, we should add it to the `ignore_kwargs` list of the `@fingerprint_transform` decorator of `Dataset._map_single` since we don't want this parameter to affect the fingerprint of the resulting dataset." ]
2021-05-07T05:52:54
2021-05-26T14:59:21
2021-05-26T14:59:21
CONTRIBUTOR
null
null
null
null
It's normal to have many `map()` calls, and some of them can take a few minutes, so it would be nice to have a description on the progress bar. Alternative solution: print the description before/after the `map()` call.
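A sketch of the requested usage, assuming a `datasets` release that ships the `desc` parameter added for this issue; the dataset and function are placeholders.

```python
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"]})

# The description labels the tqdm progress bar for this specific map() call.
ds = ds.map(lambda ex: {"n_chars": len(ex["text"])}, desc="counting characters")
```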
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2330/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2330/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
19 days, 9:06:27
https://api.github.com/repos/huggingface/datasets/issues/2327
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2327/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2327/comments
https://api.github.com/repos/huggingface/datasets/issues/2327/events
https://github.com/huggingface/datasets/issues/2327
877,565,831
MDU6SXNzdWU4Nzc1NjU4MzE=
2,327
A syntax error in example
{ "avatar_url": "https://avatars.githubusercontent.com/u/6883957?v=4", "events_url": "https://api.github.com/users/mymusise/events{/privacy}", "followers_url": "https://api.github.com/users/mymusise/followers", "following_url": "https://api.github.com/users/mymusise/following{/other_user}", "gists_url": "https://api.github.com/users/mymusise/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mymusise", "id": 6883957, "login": "mymusise", "node_id": "MDQ6VXNlcjY4ODM5NTc=", "organizations_url": "https://api.github.com/users/mymusise/orgs", "received_events_url": "https://api.github.com/users/mymusise/received_events", "repos_url": "https://api.github.com/users/mymusise/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mymusise/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mymusise/subscriptions", "type": "User", "url": "https://api.github.com/users/mymusise", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "cc @beurkinger but I think this has been fixed internally and will soon be updated right ?", "This issue has been fixed." ]
2021-05-06T14:34:44
2021-05-20T03:04:19
2021-05-20T03:04:19
NONE
null
null
null
null
![image](https://user-images.githubusercontent.com/6883957/117315905-b47a5c00-aeba-11eb-91eb-b2a4a0212a56.png) Sorry to report with an image, but I can't find the template source code of this snippet.
{ "avatar_url": "https://avatars.githubusercontent.com/u/6883957?v=4", "events_url": "https://api.github.com/users/mymusise/events{/privacy}", "followers_url": "https://api.github.com/users/mymusise/followers", "following_url": "https://api.github.com/users/mymusise/following{/other_user}", "gists_url": "https://api.github.com/users/mymusise/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mymusise", "id": 6883957, "login": "mymusise", "node_id": "MDQ6VXNlcjY4ODM5NTc=", "organizations_url": "https://api.github.com/users/mymusise/orgs", "received_events_url": "https://api.github.com/users/mymusise/received_events", "repos_url": "https://api.github.com/users/mymusise/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mymusise/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mymusise/subscriptions", "type": "User", "url": "https://api.github.com/users/mymusise", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2327/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2327/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
13 days, 12:29:35
https://api.github.com/repos/huggingface/datasets/issues/2323
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2323/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2323/comments
https://api.github.com/repos/huggingface/datasets/issues/2323/events
https://github.com/huggingface/datasets/issues/2323
876,438,507
MDU6SXNzdWU4NzY0Mzg1MDc=
2,323
load_dataset("timit_asr") gives back duplicates of just one sample text
{ "avatar_url": "https://avatars.githubusercontent.com/u/33647474?v=4", "events_url": "https://api.github.com/users/ekeleshian/events{/privacy}", "followers_url": "https://api.github.com/users/ekeleshian/followers", "following_url": "https://api.github.com/users/ekeleshian/following{/other_user}", "gists_url": "https://api.github.com/users/ekeleshian/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ekeleshian", "id": 33647474, "login": "ekeleshian", "node_id": "MDQ6VXNlcjMzNjQ3NDc0", "organizations_url": "https://api.github.com/users/ekeleshian/orgs", "received_events_url": "https://api.github.com/users/ekeleshian/received_events", "repos_url": "https://api.github.com/users/ekeleshian/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ekeleshian/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ekeleshian/subscriptions", "type": "User", "url": "https://api.github.com/users/ekeleshian", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Upgrading datasets to version 1.6 fixes the issue", "This bug was fixed in #1995. Upgrading the `datasets` should work! ", "Thanks @ekeleshian for having reported.\r\n\r\nI am closing this issue once that you updated `datasets`. Feel free to reopen it if the problem persists." ]
2021-05-05T13:14:48
2021-05-07T10:32:30
2021-05-07T10:32:30
NONE
null
null
null
null
## Describe the bug When you look up the key ["train"] and then ['text'], you get back a list with just one sentence duplicated 4620 times. Namely, the sentence "Would such an act of refusal be useful?". Similarly, when you look up ['test'] and then ['text'], the list is one sentence, "The bungalow was pleasantly situated near the shore.", repeated 1680 times. I tried to work around the issue by downgrading to datasets version 1.3.0, inspired by [this post](https://www.gitmemory.com/issue/huggingface/datasets/2052/798904836) and removing the entire huggingface directory from ~/.cache, but I still get the same issue. ## Steps to reproduce the bug ```python from datasets import load_dataset timit = load_dataset("timit_asr") print(timit['train']['text']) print(timit['test']['text']) ``` ## Expected Result Rows of diverse text, like how it is shown in the [wav2vec2.0 tutorial](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tuning_Wav2Vec2_for_English_ASR.ipynb) <img width="485" alt="Screen Shot 2021-05-05 at 9 09 57 AM" src="https://user-images.githubusercontent.com/33647474/117146094-d9b77f00-ad81-11eb-8306-f281850c127a.png"> ## Actual results Rows of repeated text. <img width="319" alt="Screen Shot 2021-05-05 at 9 11 53 AM" src="https://user-images.githubusercontent.com/33647474/117146231-f8b61100-ad81-11eb-834a-fc10410b0c9c.png"> ## Versions - Datasets: 1.3.0 - Python: 3.9.1 - Platform: macOS-11.2.1-x86_64-i386-64bit
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2323/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2323/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 21:17:42
https://api.github.com/repos/huggingface/datasets/issues/2322
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2322/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2322/comments
https://api.github.com/repos/huggingface/datasets/issues/2322/events
https://github.com/huggingface/datasets/issues/2322
876,383,853
MDU6SXNzdWU4NzYzODM4NTM=
2,322
Calls to map are not cached.
{ "avatar_url": "https://avatars.githubusercontent.com/u/2743060?v=4", "events_url": "https://api.github.com/users/villmow/events{/privacy}", "followers_url": "https://api.github.com/users/villmow/followers", "following_url": "https://api.github.com/users/villmow/following{/other_user}", "gists_url": "https://api.github.com/users/villmow/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/villmow", "id": 2743060, "login": "villmow", "node_id": "MDQ6VXNlcjI3NDMwNjA=", "organizations_url": "https://api.github.com/users/villmow/orgs", "received_events_url": "https://api.github.com/users/villmow/received_events", "repos_url": "https://api.github.com/users/villmow/repos", "site_admin": false, "starred_url": "https://api.github.com/users/villmow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/villmow/subscriptions", "type": "User", "url": "https://api.github.com/users/villmow", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "I tried upgrading to `datasets==1.6.2` and downgrading to `1.6.0`. Both versions produce the same output.\r\n\r\nDowngrading to `1.5.0` works and produces the following output for me:\r\n\r\n```bash\r\nDownloading: 9.20kB [00:00, 3.94MB/s] \r\nDownloading: 5.99kB [00:00, 3.29MB/s] \r\nNo config specified, defaulting to: sst/default\r\nDownloading and preparing dataset sst/default (download: 6.83 MiB, generated: 3.73 MiB, post-processed: Unknown size, total: 10.56 MiB) to /home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b...\r\n Dataset sst downloaded and prepared to /home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b. Subsequent calls will reuse this data.\r\nexecuted [0, 1]\r\n#0: 0%| | 0/5 [00:00<?, ?ba/s]\r\n#1: 0%| | 0/5 [00:00<?, ?ba/s]\r\nexecuted [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\nexecuted [4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281]\r\nexecuted [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009]\r\nexecuted [5272, 5273, 5274, 5275, 5276, 5277, 5278, 5279, 5280, 5281]\r\nexecuted [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009]\r\nexecuted [6272, 6273, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6281]\r\nexecuted [3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009]\r\nexecuted [7272, 7273, 7274, 7275, 7276, 7277, 7278, 7279, 7280, 7281]\r\nexecuted [4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009]\r\n#0: 100%|██████████| 5/5 [00:00<00:00, 94.83ba/s]\r\nexecuted [8272, 8273, 8274, 8275, 8276, 8277, 8278, 8279, 8280, 8281]\r\n#1: 100%|██████████| 5/5 [00:00<00:00, 92.75ba/s]\r\nexecuted [0, 1]\r\n#0: 0%| | 0/1 [00:00<?, ?ba/s]\r\n#1: 0%| | 0/1 [00:00<?, ?ba/s]\r\nexecuted [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\nexecuted [551, 552, 553, 554, 555, 556, 557, 558, 559, 560]\r\n#0: 100%|██████████| 1/1 [00:00<00:00, 118.81ba/s]\r\n#1: 100%|██████████| 1/1 [00:00<00:00, 123.06ba/s]\r\nexecuted [0, 1]\r\n#0: 0%| | 0/2 [00:00<?, ?ba/s]\r\n#1: 0%| | 0/2 [00:00<?, ?ba/s]\r\nexecuted [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\nexecuted [1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114]\r\nexecuted [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009]\r\n#0: 100%|██████████| 2/2 [00:00<00:00, 119.42ba/s]\r\nexecuted [2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114]\r\n#1: 100%|██████████| 2/2 [00:00<00:00, 123.33ba/s]\r\n\r\n\r\n\r\n ############################## \r\n\r\n\r\n\r\nexecuted [0, 1]\r\nLoading cached processed dataset at /home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b/cache-6079777aa097c8f8.arrow\r\nLoading cached processed dataset at /home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b/cache-2dc05c46f68eda6e.arrow\r\nexecuted [0, 1]\r\nLoading cached processed dataset at /home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b/cache-1ca347e7430b98f1.arrow\r\nLoading cached processed dataset at /home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b/cache-c0f1a73ce3ba40cd.arrow\r\nexecuted [0, 1]\r\nLoading cached processed dataset at 
/home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b/cache-832a1407bf1ac5b7.arrow\r\nLoading cached processed dataset at /home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/a16a45566b63b2c3179e6c1d0f8edadde56e45570ee8cf99394fbb738491d34b/cache-036316a259b773c4.arrow\r\n- Datasets: 1.5.0\r\n- Python: 3.8.3 (default, May 19 2020, 18:47:26) \r\n[GCC 7.3.0]\r\n- Platform: Linux-5.4.0-72-generic-x86_64-with-glibc2.10\r\n```", "Hi,\r\n\r\nset `keep_in_memory` to False when loading a dataset (`sst = load_dataset(\"sst\", keep_in_memory=False)`) to prevent it from loading in-memory. Currently, in-memory datasets fail to find cached files due to this check (always False for them):\r\n\r\nhttps://github.com/huggingface/datasets/blob/241a0b4a3a868778ee91e767ad406f9da7610df2/src/datasets/arrow_dataset.py#L1718\r\n\r\n@albertvillanova It seems like this behavior was overlooked in #2182.\r\n\r\n", "Hi @villmow, thanks for reporting. \r\n\r\nAs @mariosasko has pointed out, we did not consider this case when introducing the feature of automatic in-memory for small datasets. This needs to be fixed.", "Hi ! Currently a dataset that is in memory doesn't know doesn't know in which directory it has to read/write cache files.\r\nOn the other hand, a dataset that loaded from the disk (via memory mapping) uses the directory from which the dataset is located to read/write cache files.\r\n\r\nBecause of that, currently in-memory datasets simply don't use caching.\r\n\r\nMaybe a Dataset object could have a `cache_dir` that is set to the directory where the arrow files are created during `load_dataset` ?", "Fixed once reverted the default in-memory feature:\r\nClosed by #2460 (to close issue #2458).", "Please @villmow, feel free to update to `Datasets` latest version (1.8)." ]
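A sketch of the `keep_in_memory=False` workaround suggested in the comments, mirroring the reproduction script from the report; it assumes a `datasets` version from that era, when small datasets were automatically loaded in memory.

```python
import datasets

# Load from disk (memory-mapped) so map() can locate and reuse its cache files;
# in-memory datasets skipped the cache lookup entirely at the time of this issue.
sst = datasets.load_dataset("sst", keep_in_memory=False)

def foo(samples, i):
    return samples

x = sst.map(foo, batched=True, with_indices=True, num_proc=2)  # computes and writes cache files
y = sst.map(foo, batched=True, with_indices=True, num_proc=2)  # should reuse the cached results
```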
2021-05-05T12:11:27
2021-06-08T19:10:02
2021-06-08T19:08:21
NONE
null
null
null
null
## Describe the bug Somehow caching does not work for me anymore. Am I doing something wrong, or is there anything that I missed? ## Steps to reproduce the bug ```python import datasets datasets.set_caching_enabled(True) sst = datasets.load_dataset("sst") def foo(samples, i): print("executed", i[:10]) return samples # first call x = sst.map(foo, batched=True, with_indices=True, num_proc=2) print('\n'*3, "#" * 30, '\n'*3) # second call y = sst.map(foo, batched=True, with_indices=True, num_proc=2) # print version import sys import platform print(f""" - Datasets: {datasets.__version__} - Python: {sys.version} - Platform: {platform.platform()} """) ``` ## Actual results This code prints the following output for me: ```bash No config specified, defaulting to: sst/default Reusing dataset sst (/home/johannes/.cache/huggingface/datasets/sst/default/1.0.0/b8a7889ef01c5d3ae8c379b84cc4080f8aad3ac2bc538701cbe0ac6416fb76ff) #0: 0%| | 0/5 [00:00<?, ?ba/s] #1: 0%| | 0/5 [00:00<?, ?ba/s] executed [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] executed [4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281] executed [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009] executed [5272, 5273, 5274, 5275, 5276, 5277, 5278, 5279, 5280, 5281] executed [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009] executed [6272, 6273, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6281] executed [3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009] executed [7272, 7273, 7274, 7275, 7276, 7277, 7278, 7279, 7280, 7281] executed [4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009] #0: 100%|██████████| 5/5 [00:00<00:00, 59.85ba/s] executed [8272, 8273, 8274, 8275, 8276, 8277, 8278, 8279, 8280, 8281] #1: 100%|██████████| 5/5 [00:00<00:00, 60.85ba/s] #0: 0%| | 0/1 [00:00<?, ?ba/s] #1: 0%| | 0/1 [00:00<?, ?ba/s]executed [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] #0: 100%|██████████| 1/1 [00:00<00:00, 69.32ba/s] executed [551, 552, 553, 554, 555, 556, 557, 558, 559, 560] #1: 100%|██████████| 1/1 [00:00<00:00, 70.93ba/s] #0: 0%| | 0/2 [00:00<?, ?ba/s] #1: 0%| | 0/2 [00:00<?, ?ba/s]executed [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] executed [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009] #0: 100%|██████████| 2/2 [00:00<00:00, 63.25ba/s] executed [1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114] executed [2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114] #1: 100%|██████████| 2/2 [00:00<00:00, 57.69ba/s] ############################## #0: 0%| | 0/5 [00:00<?, ?ba/s] #1: 0%| | 0/5 [00:00<?, ?ba/s] executed [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] executed [4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281] executed [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009] executed [5272, 5273, 5274, 5275, 5276, 5277, 5278, 5279, 5280, 5281] executed [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009] executed [6272, 6273, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6281] executed [3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009] executed [4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009] #0: 100%|██████████| 5/5 [00:00<00:00, 58.10ba/s] executed [7272, 7273, 7274, 7275, 7276, 7277, 7278, 7279, 7280, 7281] executed [8272, 8273, 8274, 8275, 8276, 8277, 8278, 8279, 8280, 8281] #1: 100%|██████████| 5/5 [00:00<00:00, 57.19ba/s] #0: 0%| | 0/1 [00:00<?, ?ba/s] #1: 0%| | 0/1 [00:00<?, ?ba/s] executed [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] #0: 100%|██████████| 1/1 [00:00<00:00, 60.10ba/s] executed [551, 552, 553, 554, 555, 556, 557, 558, 559, 560] #1: 100%|██████████| 1/1 [00:00<00:00, 53.82ba/s] 
#0: 0%| | 0/2 [00:00<?, ?ba/s] #1: 0%| | 0/2 [00:00<?, ?ba/s] executed [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] executed [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009] executed [1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114] #0: 100%|██████████| 2/2 [00:00<00:00, 72.76ba/s] executed [2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114] #1: 100%|██████████| 2/2 [00:00<00:00, 71.55ba/s] - Datasets: 1.6.1 - Python: 3.8.3 (default, May 19 2020, 18:47:26) [GCC 7.3.0] - Platform: Linux-5.4.0-72-generic-x86_64-with-glibc2.10 ``` ## Expected results Caching should work.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2322/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2322/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
34 days, 6:56:54
https://api.github.com/repos/huggingface/datasets/issues/2319
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2319/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2319/comments
https://api.github.com/repos/huggingface/datasets/issues/2319/events
https://github.com/huggingface/datasets/issues/2319
876,251,376
MDU6SXNzdWU4NzYyNTEzNzY=
2,319
UnicodeDecodeError for OSCAR (Afrikaans)
{ "avatar_url": "https://avatars.githubusercontent.com/u/8904453?v=4", "events_url": "https://api.github.com/users/sgraaf/events{/privacy}", "followers_url": "https://api.github.com/users/sgraaf/followers", "following_url": "https://api.github.com/users/sgraaf/following{/other_user}", "gists_url": "https://api.github.com/users/sgraaf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sgraaf", "id": 8904453, "login": "sgraaf", "node_id": "MDQ6VXNlcjg5MDQ0NTM=", "organizations_url": "https://api.github.com/users/sgraaf/orgs", "received_events_url": "https://api.github.com/users/sgraaf/received_events", "repos_url": "https://api.github.com/users/sgraaf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sgraaf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sgraaf/subscriptions", "type": "User", "url": "https://api.github.com/users/sgraaf", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "Thanks for reporting, @sgraaf.\r\n\r\nI am going to have a look at it. \r\n\r\nI guess the expected codec is \"UTF-8\". Normally, when no explicitly codec is passed, Python uses one which is platform-dependent. For Linux machines, the default codec is `utf_8`, which is OK. However for Windows machine, the default codec is `cp1252`, which causes the problem.", "Awesome, thank you. 😃 ", "@sgraaf, I have just merged the fix in the master branch.\r\n\r\nYou can either:\r\n- install `datasets` from source code\r\n- wait until we make the next release of `datasets`\r\n- set the `utf-8` codec as your default instead of `cp1252`. This can be done by activating the Python [UTF-8 mode](https://www.python.org/dev/peps/pep-0540) either by passing the command-line option `-X utf8` or by setting the environment variable `PYTHONUTF8=1`." ]
2021-05-05T09:22:52
2021-05-05T10:57:31
2021-05-05T10:50:55
NONE
null
null
null
null
## Describe the bug When loading the [OSCAR dataset](https://huggingface.co/datasets/oscar) (specifically `unshuffled_deduplicated_af`), I encounter a `UnicodeDecodeError`. ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("oscar", "unshuffled_deduplicated_af") ``` ## Expected results Anything but an error, really. ## Actual results ```python >>> from datasets import load_dataset >>> dataset = load_dataset("oscar", "unshuffled_deduplicated_af") Downloading: 14.7kB [00:00, 4.91MB/s] Downloading: 3.07MB [00:00, 32.6MB/s] Downloading and preparing dataset oscar/unshuffled_deduplicated_af (download: 62.93 MiB, generated: 163.38 MiB, post-processed: Unknown size, total: 226.32 MiB) to C:\Users\sgraaf\.cache\huggingface\datasets\oscar\unshuffled_deduplicated_af\1.0.0\bd4f96df5b4512007ef9fd17bbc1ecde459fa53d2fc0049cf99392ba2efcc464... Downloading: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 81.0/81.0 [00:00<00:00, 40.5kB/s] Downloading: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 66.0M/66.0M [00:18<00:00, 3.50MB/s] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Users\sgraaf\AppData\Local\Programs\Python\Python39\lib\site-packages\datasets\load.py", line 745, in load_dataset builder_instance.download_and_prepare( File "C:\Users\sgraaf\AppData\Local\Programs\Python\Python39\lib\site-packages\datasets\builder.py", line 574, in download_and_prepare self._download_and_prepare( File "C:\Users\sgraaf\AppData\Local\Programs\Python\Python39\lib\site-packages\datasets\builder.py", line 652, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "C:\Users\sgraaf\AppData\Local\Programs\Python\Python39\lib\site-packages\datasets\builder.py", line 979, in _prepare_split for key, record in utils.tqdm( File "C:\Users\sgraaf\AppData\Local\Programs\Python\Python39\lib\site-packages\tqdm\std.py", line 1133, in __iter__ for obj in iterable: File "C:\Users\sgraaf\.cache\huggingface\modules\datasets_modules\datasets\oscar\bd4f96df5b4512007ef9fd17bbc1ecde459fa53d2fc0049cf99392ba2efcc464\oscar.py", line 359, in _generate_examples for line in f: File "C:\Users\sgraaf\AppData\Local\Programs\Python\Python39\lib\encodings\cp1252.py", line 23, in decode return codecs.charmap_decode(input,self.errors,decoding_table)[0] UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 7454: character maps to <undefined> ``` ## Versions Paste the output of the following code: ```python import datasets import sys import platform print(f""" - Datasets: {datasets.__version__} - Python: {sys.version} - Platform: {platform.platform()} """) ``` - Datasets: 1.6.2 - Python: 3.9.4 (tags/v3.9.4:1f2e308, Apr 6 2021, 13:40:21) [MSC v.1928 64 bit (AMD64)] - Platform: Windows-10-10.0.19041-SP0
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2319/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2319/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1:28:03
https://api.github.com/repos/huggingface/datasets/issues/2318
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2318/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2318/comments
https://api.github.com/repos/huggingface/datasets/issues/2318/events
https://github.com/huggingface/datasets/issues/2318
876,212,460
MDU6SXNzdWU4NzYyMTI0NjA=
2,318
[api request] API to obtain "dataset_module" dynamic path?
{ "avatar_url": "https://avatars.githubusercontent.com/u/4529381?v=4", "events_url": "https://api.github.com/users/richardliaw/events{/privacy}", "followers_url": "https://api.github.com/users/richardliaw/followers", "following_url": "https://api.github.com/users/richardliaw/following{/other_user}", "gists_url": "https://api.github.com/users/richardliaw/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/richardliaw", "id": 4529381, "login": "richardliaw", "node_id": "MDQ6VXNlcjQ1MjkzODE=", "organizations_url": "https://api.github.com/users/richardliaw/orgs", "received_events_url": "https://api.github.com/users/richardliaw/received_events", "repos_url": "https://api.github.com/users/richardliaw/repos", "site_admin": false, "starred_url": "https://api.github.com/users/richardliaw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/richardliaw/subscriptions", "type": "User", "url": "https://api.github.com/users/richardliaw", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "Hi @richardliaw, \r\n\r\nFirst, thanks for the compliments.\r\n\r\nIn relation with your request, currently, the dynamic modules path is obtained this way:\r\n```python\r\nfrom datasets.load import init_dynamic_modules, MODULE_NAME_FOR_DYNAMIC_MODULES\r\n\r\ndynamic_modules_path = init_dynamic_modules(MODULE_NAME_FOR_DYNAMIC_MODULES)\r\n```\r\n\r\nLet me know if it is OK for you this way. \r\n\r\nI could set `MODULE_NAME_FOR_DYNAMIC_MODULES` as default value, so that you could instead obtain the path with:\r\n```\r\ndynamic_modules_path = datasets.load.init_dynamic_modules()\r\n```", "Hi @albertvillanova, the default value proposal seems great :) Looking forward to this!", "I like the idea as well ! thanks @albertvillanova ", "Hi @richardliaw, the feature is on the master branch and will be included in the next release in a couple of weeks.", "awesome work @albertvillanova !" ]
2021-05-05T08:40:48
2021-05-06T08:45:45
2021-05-06T07:57:54
NONE
null
null
null
null
**Is your feature request related to a problem? Please describe.** This is an awesome library. It seems like the dynamic module path in this library has broken some of the hyperparameter tuning functionality: https://discuss.huggingface.co/t/using-hyperparameter-search-in-trainer/785/34 This is because Ray will spawn new processes, and each process will load modules by path. However, we need to explicitly inform Ray to load the right modules, or else it will error upon import. I'd like an API to obtain the dynamic paths. This will allow us to support this functionality in this awesome library while being future-proof. **Describe the solution you'd like** `datasets.get_dynamic_paths -> List[str]` will be sufficient for my use case. By offering this API, we will be able to address the following issues (by patching the Ray integration sufficiently): https://github.com/huggingface/blog/issues/106 https://github.com/huggingface/transformers/issues/11565 https://discuss.huggingface.co/t/using-hyperparameter-search-in-trainer/785/34 https://discuss.huggingface.co/t/using-hyperparameter-search-in-trainer/785/35
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2318/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2318/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
23:17:06
https://api.github.com/repos/huggingface/datasets/issues/2316
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2316/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2316/comments
https://api.github.com/repos/huggingface/datasets/issues/2316/events
https://github.com/huggingface/datasets/issues/2316
875,756,353
MDU6SXNzdWU4NzU3NTYzNTM=
2,316
Incorrect version specification for pyarrow
{ "avatar_url": "https://avatars.githubusercontent.com/u/32267027?v=4", "events_url": "https://api.github.com/users/cemilcengiz/events{/privacy}", "followers_url": "https://api.github.com/users/cemilcengiz/followers", "following_url": "https://api.github.com/users/cemilcengiz/following{/other_user}", "gists_url": "https://api.github.com/users/cemilcengiz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cemilcengiz", "id": 32267027, "login": "cemilcengiz", "node_id": "MDQ6VXNlcjMyMjY3MDI3", "organizations_url": "https://api.github.com/users/cemilcengiz/orgs", "received_events_url": "https://api.github.com/users/cemilcengiz/received_events", "repos_url": "https://api.github.com/users/cemilcengiz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cemilcengiz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cemilcengiz/subscriptions", "type": "User", "url": "https://api.github.com/users/cemilcengiz", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Fixed by #2317." ]
2021-05-04T19:15:11
2021-05-05T10:10:03
2021-05-05T10:10:03
CONTRIBUTOR
null
null
null
null
## Describe the bug The pyarrow dependency is incorrectly specified in setup.py file, in [this line](https://github.com/huggingface/datasets/blob/3a3e5a4da20bfcd75f8b6a6869b240af8feccc12/setup.py#L77). Also as a snippet: ```python "pyarrow>=1.0.0<4.0.0", ``` ## Steps to reproduce the bug ```bash pip install "pyarrow>=1.0.0<4.0.0" ``` ## Expected results It is expected to get a pyarrow version between 1.0.0 (inclusive) and 4.0.0 (exclusive). ## Actual results pip ignores the specified versions since there is a missing comma between the lower and upper limits. Therefore, pip installs the latest pyarrow version from PYPI, which is 4.0.0. This is especially problematic since "conda env export" fails due to incorrect version specification. Here is the conda error as well: ```bash conda env export InvalidVersionSpec: Invalid version '1.0.0<4.0.0': invalid character(s) ``` ## Fix suggestion Put a comma between the version limits which means replacing the line in setup.py file with the following: ```python "pyarrow>=1.0.0,<4.0.0", ``` ## Versions Paste the output of the following code: ```python - Datasets: 1.6.2 - Python: 3.7.10 (default, Feb 26 2021, 18:47:35) [GCC 7.3.0] - Platform: Linux-5.4.0-42-generic-x86_64-with-debian-buster-sid ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2316/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2316/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
14:54:52
https://api.github.com/repos/huggingface/datasets/issues/2301
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2301/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2301/comments
https://api.github.com/repos/huggingface/datasets/issues/2301/events
https://github.com/huggingface/datasets/issues/2301
873,941,266
MDU6SXNzdWU4NzM5NDEyNjY=
2,301
Unable to setup dev env on Windows
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @gchhablani, \r\n\r\nThere are some 3rd-party dependencies that require to build code in C. In this case, it is the library `python-Levenshtein`.\r\n\r\nOn Windows, in order to be able to build C code, you need to install at least `Microsoft C++ Build Tools` version 14. You can find more info here: https://visualstudio.microsoft.com/visual-cpp-build-tools/", "Hi @albertvillanova \r\n\r\nSorry for such a trivial issue ;-; \r\n\r\nThanks a lot." ]
2021-05-02T13:20:42
2021-05-03T15:18:01
2021-05-03T15:17:34
CONTRIBUTOR
null
null
null
null
Hi I tried installing the `".[dev]"` version on Windows 10 after cloning. Here is the error I'm facing: ```bat (env) C:\testing\datasets>pip install -e ".[dev]" Obtaining file:///C:/testing/datasets Requirement already satisfied: numpy>=1.17 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.19.5) Collecting pyarrow>=0.17.1 Using cached pyarrow-4.0.0-cp37-cp37m-win_amd64.whl (13.3 MB) Requirement already satisfied: dill in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (0.3.1.1) Collecting pandas Using cached pandas-1.2.4-cp37-cp37m-win_amd64.whl (9.1 MB) Requirement already satisfied: requests>=2.19.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (2.25.1) Requirement already satisfied: tqdm<4.50.0,>=4.27 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (4.49.0) Requirement already satisfied: xxhash in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (2.0.2) Collecting multiprocess Using cached multiprocess-0.70.11.1-py37-none-any.whl (108 kB) Requirement already satisfied: fsspec in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (2021.4.0) Collecting huggingface_hub<0.1.0 Using cached huggingface_hub-0.0.8-py3-none-any.whl (34 kB) Requirement already satisfied: importlib_metadata in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (4.0.1) Requirement already satisfied: absl-py in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (0.12.0) Requirement already satisfied: pytest in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (6.2.3) Collecting pytest-xdist Using cached pytest_xdist-2.2.1-py3-none-any.whl (37 kB) Collecting apache-beam>=2.24.0 Using cached apache_beam-2.29.0-cp37-cp37m-win_amd64.whl (3.7 MB) Collecting elasticsearch Using cached elasticsearch-7.12.1-py2.py3-none-any.whl (339 kB) Requirement already satisfied: boto3==1.16.43 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.16.43) Requirement already satisfied: botocore==1.19.43 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.19.43) Collecting moto[s3]==1.3.16 Using cached moto-1.3.16-py2.py3-none-any.whl (879 kB) Collecting rarfile>=4.0 Using cached rarfile-4.0-py3-none-any.whl (28 kB) Collecting tensorflow>=2.3 Using cached tensorflow-2.4.1-cp37-cp37m-win_amd64.whl (370.7 MB) Requirement already satisfied: torch in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.8.1) Requirement already satisfied: transformers in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (4.5.1) Collecting bs4 Using cached bs4-0.0.1-py3-none-any.whl Collecting conllu Using cached conllu-4.4-py2.py3-none-any.whl (15 kB) Collecting langdetect Using cached langdetect-1.0.8-py3-none-any.whl Collecting lxml Using cached lxml-4.6.3-cp37-cp37m-win_amd64.whl (3.5 MB) Collecting mwparserfromhell Using cached mwparserfromhell-0.6-cp37-cp37m-win_amd64.whl (101 kB) Collecting nltk Using cached nltk-3.6.2-py3-none-any.whl (1.5 MB) Collecting openpyxl Using cached openpyxl-3.0.7-py2.py3-none-any.whl (243 kB) Collecting py7zr Using cached py7zr-0.15.2-py3-none-any.whl (66 kB) Collecting tldextract Using cached tldextract-3.1.0-py2.py3-none-any.whl (87 kB) Collecting zstandard Using cached zstandard-0.15.2-cp37-cp37m-win_amd64.whl (582 
kB) Collecting bert_score>=0.3.6 Using cached bert_score-0.3.9-py3-none-any.whl (59 kB) Collecting rouge_score Using cached rouge_score-0.0.4-py2.py3-none-any.whl (22 kB) Collecting sacrebleu Using cached sacrebleu-1.5.1-py3-none-any.whl (54 kB) Requirement already satisfied: scipy in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.6.3) Collecting seqeval Using cached seqeval-1.2.2-py3-none-any.whl Collecting sklearn Using cached sklearn-0.0-py2.py3-none-any.whl Collecting jiwer Using cached jiwer-2.2.0-py3-none-any.whl (13 kB) Requirement already satisfied: toml>=0.10.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (0.10.2) Requirement already satisfied: requests_file>=1.5.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.5.1) Requirement already satisfied: texttable>=1.6.3 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.6.3) Requirement already satisfied: s3fs>=0.4.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (0.4.2) Requirement already satisfied: Werkzeug>=1.0.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from datasets==1.5.0.dev0) (1.0.1) Collecting black Using cached black-21.4b2-py3-none-any.whl (130 kB) Collecting isort Using cached isort-5.8.0-py3-none-any.whl (103 kB) Collecting flake8==3.7.9 Using cached flake8-3.7.9-py2.py3-none-any.whl (69 kB) Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from boto3==1.16.43->datasets==1.5.0.dev0) (0.10.0) Requirement already satisfied: s3transfer<0.4.0,>=0.3.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from boto3==1.16.43->datasets==1.5.0.dev0) (0.3.7) Requirement already satisfied: urllib3<1.27,>=1.25.4 in c:\programdata\anaconda3\envs\env\lib\site-packages (from botocore==1.19.43->datasets==1.5.0.dev0) (1.26.4) Requirement already satisfied: python-dateutil<3.0.0,>=2.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from botocore==1.19.43->datasets==1.5.0.dev0) (2.8.1) Collecting entrypoints<0.4.0,>=0.3.0 Using cached entrypoints-0.3-py2.py3-none-any.whl (11 kB) Collecting pyflakes<2.2.0,>=2.1.0 Using cached pyflakes-2.1.1-py2.py3-none-any.whl (59 kB) Collecting pycodestyle<2.6.0,>=2.5.0 Using cached pycodestyle-2.5.0-py2.py3-none-any.whl (51 kB) Collecting mccabe<0.7.0,>=0.6.0 Using cached mccabe-0.6.1-py2.py3-none-any.whl (8.6 kB) Requirement already satisfied: jsondiff>=1.1.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.3.0) Requirement already satisfied: pytz in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (2021.1) Requirement already satisfied: mock in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (4.0.3) Requirement already satisfied: MarkupSafe<2.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.1.1) Requirement already satisfied: python-jose[cryptography]<4.0.0,>=3.1.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (3.2.0) Requirement already satisfied: aws-xray-sdk!=0.96,>=0.93 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (2.8.0) Requirement already satisfied: cryptography>=2.3.0 in 
c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (3.4.7) Requirement already satisfied: more-itertools in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (8.7.0) Requirement already satisfied: PyYAML>=5.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (5.4.1) Requirement already satisfied: boto>=2.36.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (2.49.0) Requirement already satisfied: idna<3,>=2.5 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (2.10) Requirement already satisfied: sshpubkeys>=3.1.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (3.3.1) Requirement already satisfied: responses>=0.9.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (0.13.3) Requirement already satisfied: xmltodict in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (0.12.0) Requirement already satisfied: setuptools in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (52.0.0.post20210125) Requirement already satisfied: Jinja2>=2.10.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (2.11.3) Requirement already satisfied: zipp in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (3.4.1) Requirement already satisfied: six>1.9 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.15.0) Requirement already satisfied: ecdsa<0.15 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (0.14.1) Requirement already satisfied: docker>=2.5.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (5.0.0) Requirement already satisfied: cfn-lint>=0.4.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from moto[s3]==1.3.16->datasets==1.5.0.dev0) (0.49.0) Requirement already satisfied: grpcio<2,>=1.29.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from apache-beam>=2.24.0->datasets==1.5.0.dev0) (1.32.0) Collecting hdfs<3.0.0,>=2.1.0 Using cached hdfs-2.6.0-py3-none-any.whl (33 kB) Collecting pyarrow>=0.17.1 Using cached pyarrow-3.0.0-cp37-cp37m-win_amd64.whl (12.6 MB) Collecting fastavro<2,>=0.21.4 Using cached fastavro-1.4.0-cp37-cp37m-win_amd64.whl (394 kB) Requirement already satisfied: httplib2<0.18.0,>=0.8 in c:\programdata\anaconda3\envs\env\lib\site-packages (from apache-beam>=2.24.0->datasets==1.5.0.dev0) (0.17.4) Collecting pymongo<4.0.0,>=3.8.0 Using cached pymongo-3.11.3-cp37-cp37m-win_amd64.whl (382 kB) Collecting crcmod<2.0,>=1.7 Using cached crcmod-1.7-py3-none-any.whl Collecting avro-python3!=1.9.2,<1.10.0,>=1.8.1 Using cached avro_python3-1.9.2.1-py3-none-any.whl Requirement already satisfied: typing-extensions<3.8.0,>=3.7.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from apache-beam>=2.24.0->datasets==1.5.0.dev0) (3.7.4.3) Requirement already satisfied: future<1.0.0,>=0.18.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from apache-beam>=2.24.0->datasets==1.5.0.dev0) (0.18.2) Collecting oauth2client<5,>=2.0.1 Using cached oauth2client-4.1.3-py2.py3-none-any.whl (98 kB) 
Collecting pydot<2,>=1.2.0 Using cached pydot-1.4.2-py2.py3-none-any.whl (21 kB) Requirement already satisfied: protobuf<4,>=3.12.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from apache-beam>=2.24.0->datasets==1.5.0.dev0) (3.15.8) Requirement already satisfied: wrapt in c:\programdata\anaconda3\envs\env\lib\site-packages (from aws-xray-sdk!=0.96,>=0.93->moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.12.1) Collecting matplotlib Using cached matplotlib-3.4.1-cp37-cp37m-win_amd64.whl (7.1 MB) Requirement already satisfied: junit-xml~=1.9 in c:\programdata\anaconda3\envs\env\lib\site-packages (from cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.9) Requirement already satisfied: jsonpatch in c:\programdata\anaconda3\envs\env\lib\site-packages (from cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.32) Requirement already satisfied: jsonschema~=3.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (3.2.0) Requirement already satisfied: networkx~=2.4 in c:\programdata\anaconda3\envs\env\lib\site-packages (from cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (2.5.1) Requirement already satisfied: aws-sam-translator>=1.35.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.35.0) Requirement already satisfied: cffi>=1.12 in c:\programdata\anaconda3\envs\env\lib\site-packages (from cryptography>=2.3.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (1.14.5) Requirement already satisfied: pycparser in c:\programdata\anaconda3\envs\env\lib\site-packages (from cffi>=1.12->cryptography>=2.3.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (2.20) Requirement already satisfied: pywin32==227 in c:\programdata\anaconda3\envs\env\lib\site-packages (from docker>=2.5.1->moto[s3]==1.3.16->datasets==1.5.0.dev0) (227) Requirement already satisfied: websocket-client>=0.32.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from docker>=2.5.1->moto[s3]==1.3.16->datasets==1.5.0.dev0) (0.58.0) Requirement already satisfied: docopt in c:\programdata\anaconda3\envs\env\lib\site-packages (from hdfs<3.0.0,>=2.1.0->apache-beam>=2.24.0->datasets==1.5.0.dev0) (0.6.2) Requirement already satisfied: filelock in c:\programdata\anaconda3\envs\env\lib\site-packages (from huggingface_hub<0.1.0->datasets==1.5.0.dev0) (3.0.12) Requirement already satisfied: pyrsistent>=0.14.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from jsonschema~=3.0->cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (0.17.3) Requirement already satisfied: attrs>=17.4.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from jsonschema~=3.0->cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (20.3.0) Requirement already satisfied: decorator<5,>=4.3 in c:\programdata\anaconda3\envs\env\lib\site-packages (from networkx~=2.4->cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (4.4.2) Requirement already satisfied: rsa>=3.1.4 in c:\programdata\anaconda3\envs\env\lib\site-packages (from oauth2client<5,>=2.0.1->apache-beam>=2.24.0->datasets==1.5.0.dev0) (4.7.2) Requirement already satisfied: pyasn1-modules>=0.0.5 in c:\programdata\anaconda3\envs\env\lib\site-packages (from oauth2client<5,>=2.0.1->apache-beam>=2.24.0->datasets==1.5.0.dev0) (0.2.8) Requirement already satisfied: pyasn1>=0.1.7 in c:\programdata\anaconda3\envs\env\lib\site-packages (from oauth2client<5,>=2.0.1->apache-beam>=2.24.0->datasets==1.5.0.dev0) (0.4.8) Requirement already satisfied: 
pyparsing>=2.1.4 in c:\programdata\anaconda3\envs\env\lib\site-packages (from pydot<2,>=1.2.0->apache-beam>=2.24.0->datasets==1.5.0.dev0) (2.4.7) Requirement already satisfied: certifi>=2017.4.17 in c:\programdata\anaconda3\envs\env\lib\site-packages (from requests>=2.19.0->datasets==1.5.0.dev0) (2020.12.5) Requirement already satisfied: chardet<5,>=3.0.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from requests>=2.19.0->datasets==1.5.0.dev0) (4.0.0) Collecting keras-preprocessing~=1.1.2 Using cached Keras_Preprocessing-1.1.2-py2.py3-none-any.whl (42 kB) Requirement already satisfied: termcolor~=1.1.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorflow>=2.3->datasets==1.5.0.dev0) (1.1.0) Requirement already satisfied: tensorboard~=2.4 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorflow>=2.3->datasets==1.5.0.dev0) (2.5.0) Requirement already satisfied: wheel~=0.35 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorflow>=2.3->datasets==1.5.0.dev0) (0.36.2) Collecting opt-einsum~=3.3.0 Using cached opt_einsum-3.3.0-py3-none-any.whl (65 kB) Collecting gast==0.3.3 Using cached gast-0.3.3-py2.py3-none-any.whl (9.7 kB) Collecting google-pasta~=0.2 Using cached google_pasta-0.2.0-py3-none-any.whl (57 kB) Requirement already satisfied: tensorflow-estimator<2.5.0,>=2.4.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorflow>=2.3->datasets==1.5.0.dev0) (2.4.0) Collecting astunparse~=1.6.3 Using cached astunparse-1.6.3-py2.py3-none-any.whl (12 kB) Collecting flatbuffers~=1.12.0 Using cached flatbuffers-1.12-py2.py3-none-any.whl (15 kB) Collecting h5py~=2.10.0 Using cached h5py-2.10.0-cp37-cp37m-win_amd64.whl (2.5 MB) Requirement already satisfied: markdown>=2.6.8 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (3.3.4) Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (1.8.0) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (0.4.4) Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (0.6.0) Requirement already satisfied: google-auth<2,>=1.6.3 in c:\programdata\anaconda3\envs\env\lib\site-packages (from tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (1.30.0) Requirement already satisfied: cachetools<5.0,>=2.0.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from google-auth<2,>=1.6.3->tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (4.2.2) Requirement already satisfied: requests-oauthlib>=0.7.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (1.3.0) Requirement already satisfied: oauthlib>=3.0.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.4->tensorflow>=2.3->datasets==1.5.0.dev0) (3.1.0) Requirement already satisfied: regex!=2019.12.17 in c:\programdata\anaconda3\envs\env\lib\site-packages (from transformers->datasets==1.5.0.dev0) (2021.4.4) Requirement already satisfied: tokenizers<0.11,>=0.10.1 in 
c:\programdata\anaconda3\envs\env\lib\site-packages (from transformers->datasets==1.5.0.dev0) (0.10.2) Requirement already satisfied: sacremoses in c:\programdata\anaconda3\envs\env\lib\site-packages (from transformers->datasets==1.5.0.dev0) (0.0.45) Requirement already satisfied: packaging in c:\programdata\anaconda3\envs\env\lib\site-packages (from transformers->datasets==1.5.0.dev0) (20.9) Collecting pathspec<1,>=0.8.1 Using cached pathspec-0.8.1-py2.py3-none-any.whl (28 kB) Requirement already satisfied: click>=7.1.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from black->datasets==1.5.0.dev0) (7.1.2) Collecting appdirs Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB) Collecting mypy-extensions>=0.4.3 Using cached mypy_extensions-0.4.3-py2.py3-none-any.whl (4.5 kB) Requirement already satisfied: typed-ast>=1.4.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from black->datasets==1.5.0.dev0) (1.4.3) Collecting beautifulsoup4 Using cached beautifulsoup4-4.9.3-py3-none-any.whl (115 kB) Requirement already satisfied: soupsieve>1.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from beautifulsoup4->bs4->datasets==1.5.0.dev0) (2.2.1) Collecting python-Levenshtein Using cached python-Levenshtein-0.12.2.tar.gz (50 kB) Requirement already satisfied: jsonpointer>=1.9 in c:\programdata\anaconda3\envs\env\lib\site-packages (from jsonpatch->cfn-lint>=0.4.0->moto[s3]==1.3.16->datasets==1.5.0.dev0) (2.1) Requirement already satisfied: pillow>=6.2.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from matplotlib->bert_score>=0.3.6->datasets==1.5.0.dev0) (8.2.0) Requirement already satisfied: cycler>=0.10 in c:\programdata\anaconda3\envs\env\lib\site-packages (from matplotlib->bert_score>=0.3.6->datasets==1.5.0.dev0) (0.10.0) Requirement already satisfied: kiwisolver>=1.0.1 in c:\programdata\anaconda3\envs\env\lib\site-packages (from matplotlib->bert_score>=0.3.6->datasets==1.5.0.dev0) (1.3.1) Collecting multiprocess Using cached multiprocess-0.70.11-py3-none-any.whl (98 kB) Using cached multiprocess-0.70.10.zip (2.4 MB) Using cached multiprocess-0.70.9-py3-none-any.whl Requirement already satisfied: joblib in c:\programdata\anaconda3\envs\env\lib\site-packages (from nltk->datasets==1.5.0.dev0) (1.0.1) Collecting et-xmlfile Using cached et_xmlfile-1.1.0-py3-none-any.whl (4.7 kB) Requirement already satisfied: pyzstd<0.15.0,>=0.14.4 in c:\programdata\anaconda3\envs\env\lib\site-packages (from py7zr->datasets==1.5.0.dev0) (0.14.4) Collecting pyppmd<0.13.0,>=0.12.1 Using cached pyppmd-0.12.1-cp37-cp37m-win_amd64.whl (32 kB) Collecting pycryptodome>=3.6.6 Using cached pycryptodome-3.10.1-cp35-abi3-win_amd64.whl (1.6 MB) Collecting bcj-cffi<0.6.0,>=0.5.1 Using cached bcj_cffi-0.5.1-cp37-cp37m-win_amd64.whl (21 kB) Collecting multivolumefile<0.3.0,>=0.2.0 Using cached multivolumefile-0.2.3-py3-none-any.whl (17 kB) Requirement already satisfied: iniconfig in c:\programdata\anaconda3\envs\env\lib\site-packages (from pytest->datasets==1.5.0.dev0) (1.1.1) Requirement already satisfied: py>=1.8.2 in c:\programdata\anaconda3\envs\env\lib\site-packages (from pytest->datasets==1.5.0.dev0) (1.10.0) Requirement already satisfied: pluggy<1.0.0a1,>=0.12 in c:\programdata\anaconda3\envs\env\lib\site-packages (from pytest->datasets==1.5.0.dev0) (0.13.1) Requirement already satisfied: atomicwrites>=1.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from pytest->datasets==1.5.0.dev0) (1.4.0) Requirement already satisfied: colorama in 
c:\programdata\anaconda3\envs\env\lib\site-packages (from pytest->datasets==1.5.0.dev0) (0.4.4) Collecting pytest-forked Using cached pytest_forked-1.3.0-py2.py3-none-any.whl (4.7 kB) Collecting execnet>=1.1 Using cached execnet-1.8.0-py2.py3-none-any.whl (39 kB) Requirement already satisfied: apipkg>=1.4 in c:\programdata\anaconda3\envs\env\lib\site-packages (from execnet>=1.1->pytest-xdist->datasets==1.5.0.dev0) (1.5) Collecting portalocker==2.0.0 Using cached portalocker-2.0.0-py2.py3-none-any.whl (11 kB) Requirement already satisfied: scikit-learn>=0.21.3 in c:\programdata\anaconda3\envs\env\lib\site-packages (from seqeval->datasets==1.5.0.dev0) (0.24.2) Requirement already satisfied: threadpoolctl>=2.0.0 in c:\programdata\anaconda3\envs\env\lib\site-packages (from scikit-learn>=0.21.3->seqeval->datasets==1.5.0.dev0) (2.1.0) Building wheels for collected packages: python-Levenshtein Building wheel for python-Levenshtein (setup.py) ... error ERROR: Command errored out with exit status 1: command: 'C:\ProgramData\Anaconda3\envs\env\python.exe' -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'C:\\Users\\VKC~1\\AppData\\Local\\Temp\\pip-install-ynt_dbm4\\python-levenshtein_c02e7e6f9def4629a475349654670ae9\\setup.py'"'"'; __file__='"'"'C:\\Users\\VKC~1\\AppData\\Local\\Temp\\pip-install-ynt_dbm4\\python-levenshtein_c02e7e6f9def4629a475349654670ae9\\setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' bdist_wheel -d 'C:\Users\VKC~1\AppData\Local\Temp\pip-wheel-8jh7fm18' cwd: C:\Users\VKC~1\AppData\Local\Temp\pip-install-ynt_dbm4\python-levenshtein_c02e7e6f9def4629a475349654670ae9\ Complete output (27 lines): running bdist_wheel running build running build_py creating build creating build\lib.win-amd64-3.7 creating build\lib.win-amd64-3.7\Levenshtein copying Levenshtein\StringMatcher.py -> build\lib.win-amd64-3.7\Levenshtein copying Levenshtein\__init__.py -> build\lib.win-amd64-3.7\Levenshtein running egg_info writing python_Levenshtein.egg-info\PKG-INFO writing dependency_links to python_Levenshtein.egg-info\dependency_links.txt writing entry points to python_Levenshtein.egg-info\entry_points.txt writing namespace_packages to python_Levenshtein.egg-info\namespace_packages.txt writing requirements to python_Levenshtein.egg-info\requires.txt writing top-level names to python_Levenshtein.egg-info\top_level.txt reading manifest file 'python_Levenshtein.egg-info\SOURCES.txt' reading manifest template 'MANIFEST.in' warning: no previously-included files matching '*pyc' found anywhere in distribution warning: no previously-included files matching '*so' found anywhere in distribution warning: no previously-included files matching '.project' found anywhere in distribution warning: no previously-included files matching '.pydevproject' found anywhere in distribution writing manifest file 'python_Levenshtein.egg-info\SOURCES.txt' copying Levenshtein\_levenshtein.c -> build\lib.win-amd64-3.7\Levenshtein copying Levenshtein\_levenshtein.h -> build\lib.win-amd64-3.7\Levenshtein running build_ext building 'Levenshtein._levenshtein' extension error: Microsoft Visual C++ 14.0 or greater is required. 
Get it with "Microsoft C++ Build Tools": https://visualstudio.microsoft.com/visual-cpp-build-tools/ ---------------------------------------- ERROR: Failed building wheel for python-Levenshtein Running setup.py clean for python-Levenshtein Failed to build python-Levenshtein Installing collected packages: python-Levenshtein, pytest-forked, pyppmd, pymongo, pyflakes, pydot, pycryptodome, pycodestyle, pyarrow, portalocker, pathspec, pandas, opt-einsum, oauth2client, nltk, mypy-extensions, multivolumefile, multiprocess, moto, mccabe, matplotlib, keras-preprocessing, huggingface-hub, hdfs, h5py, google-pasta, gast, flatbuffers, fastavro, execnet, et-xmlfile, entrypoints, crcmod, beautifulsoup4, bcj-cffi, avro-python3, astunparse, appdirs, zstandard, tldextract, tensorflow, sklearn, seqeval, sacrebleu, rouge-score, rarfile, pytest-xdist, py7zr, openpyxl, mwparserfromhell, lxml, langdetect, jiwer, isort, flake8, elasticsearch, datasets, conllu, bs4, black, bert-score, apache-beam Running setup.py install for python-Levenshtein ... error ERROR: Command errored out with exit status 1: command: 'C:\ProgramData\Anaconda3\envs\env\python.exe' -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'C:\\Users\\VKC~1\\AppData\\Local\\Temp\\pip-install-ynt_dbm4\\python-levenshtein_c02e7e6f9def4629a475349654670ae9\\setup.py'"'"'; __file__='"'"'C:\\Users\\VKC~1\\AppData\\Local\\Temp\\pip-install-ynt_dbm4\\python-levenshtein_c02e7e6f9def4629a475349654670ae9\\setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record 'C:\Users\VKC~1\AppData\Local\Temp\pip-record-v7l7zitb\install-record.txt' --single-version-externally-managed --compile --install-headers 'C:\ProgramData\Anaconda3\envs\env\Include\python-Levenshtein' cwd: C:\Users\VKC~1\AppData\Local\Temp\pip-install-ynt_dbm4\python-levenshtein_c02e7e6f9def4629a475349654670ae9\ Complete output (27 lines): running install running build running build_py creating build creating build\lib.win-amd64-3.7 creating build\lib.win-amd64-3.7\Levenshtein copying Levenshtein\StringMatcher.py -> build\lib.win-amd64-3.7\Levenshtein copying Levenshtein\__init__.py -> build\lib.win-amd64-3.7\Levenshtein running egg_info writing python_Levenshtein.egg-info\PKG-INFO writing dependency_links to python_Levenshtein.egg-info\dependency_links.txt writing entry points to python_Levenshtein.egg-info\entry_points.txt writing namespace_packages to python_Levenshtein.egg-info\namespace_packages.txt writing requirements to python_Levenshtein.egg-info\requires.txt writing top-level names to python_Levenshtein.egg-info\top_level.txt reading manifest file 'python_Levenshtein.egg-info\SOURCES.txt' reading manifest template 'MANIFEST.in' warning: no previously-included files matching '*pyc' found anywhere in distribution warning: no previously-included files matching '*so' found anywhere in distribution warning: no previously-included files matching '.project' found anywhere in distribution warning: no previously-included files matching '.pydevproject' found anywhere in distribution writing manifest file 'python_Levenshtein.egg-info\SOURCES.txt' copying Levenshtein\_levenshtein.c -> build\lib.win-amd64-3.7\Levenshtein copying Levenshtein\_levenshtein.h -> build\lib.win-amd64-3.7\Levenshtein running build_ext building 'Levenshtein._levenshtein' extension error: Microsoft Visual C++ 14.0 or greater is required. 
Get it with "Microsoft C++ Build Tools": https://visualstudio.microsoft.com/visual-cpp-build-tools/ ---------------------------------------- ERROR: Command errored out with exit status 1: 'C:\ProgramData\Anaconda3\envs\env\python.exe' -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'C:\\Users\\VKC~1\\AppData\\Local\\Temp\\pip-install-ynt_dbm4\\python-levenshtein_c02e7e6f9def4629a475349654670ae9\\setup.py'"'"'; __file__='"'"'C:\\Users\\VKC~1\\AppData\\Local\\Temp\\pip-install-ynt_dbm4\\python-levenshtein_c02e7e6f9def4629a475349654670ae9\\setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record 'C:\Users\VKC~1\AppData\Local\Temp\pip-record-v7l7zitb\install-record.txt' --single-version-externally-managed --compile --install-headers 'C:\ProgramData\Anaconda3\envs\env\Include\python-Levenshtein' Check the logs for full command output. ``` Here are conda and python versions: ```bat (env) C:\testing\datasets>conda --version conda 4.9.2 (env) C:\testing\datasets>python --version Python 3.7.10 ``` Please help me out. Thanks.
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2301/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2301/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 1:56:52
https://api.github.com/repos/huggingface/datasets/issues/2300
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2300/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2300/comments
https://api.github.com/repos/huggingface/datasets/issues/2300/events
https://github.com/huggingface/datasets/issues/2300
873,928,169
MDU6SXNzdWU4NzM5MjgxNjk=
2,300
Add VoxPopuli
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" }, { "color": "d93f0b", "default": false, "description": "", "id": 2725241052, "name": "speech", "node_id": "MDU6TGFiZWwyNzI1MjQxMDUy", "url": "https://api.github.com/repos/huggingface/datasets/labels/speech" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna", "user_view_type": "public" } ]
[ "I'm happy to take this on:) One question: The original unlabelled data is stored unsegmented (see e.g. https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L30), but segmenting the audio in the dataset would require a dependency on something like soundfile or torchaudio. An alternative could be to provide the segments start and end times as a Sequence and then it's up to the user to perform the segmentation on-the-fly if they wish?", "Hey @jfainberg,\r\n\r\nThis sounds great! I think adding a dependency would not be a big problem, however automatically segmenting the data probably means that it would take a very long time to do:\r\n\r\n```python\r\ndataset = load_dataset(\"voxpopuli\", \"french\")\r\n```\r\n\r\n=> so as a start I think your option 2 is the way to go!", "@polinaeterna VoxPopuli is available [here](https://huggingface.co/datasets/facebook/voxpopuli), so we can close this issue, right?\r\n", "@mariosasko yes, sure, closing it" ]
2021-05-02T12:17:40
2023-02-28T17:43:52
2023-02-28T17:43:51
CONTRIBUTOR
null
null
null
null
## Adding a Dataset - **Name:** Voxpopuli - **Description:** VoxPopuli consists of raw data collected from 2009-2020 European Parliament event recordings - **Paper:** https://arxiv.org/abs/2101.00390 - **Data:** https://github.com/facebookresearch/voxpopuli - **Motivation:** biggest unlabeled speech dataset **Note**: Since the dataset is so huge, we should only add the config `10k` in the beginning. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2300/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2300/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
667 days, 5:26:11
https://api.github.com/repos/huggingface/datasets/issues/2299
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2299/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2299/comments
https://api.github.com/repos/huggingface/datasets/issues/2299/events
https://github.com/huggingface/datasets/issues/2299
873,914,717
MDU6SXNzdWU4NzM5MTQ3MTc=
2,299
My iPhone
{ "avatar_url": "https://avatars.githubusercontent.com/u/82856229?v=4", "events_url": "https://api.github.com/users/Jasonbuchanan1983/events{/privacy}", "followers_url": "https://api.github.com/users/Jasonbuchanan1983/followers", "following_url": "https://api.github.com/users/Jasonbuchanan1983/following{/other_user}", "gists_url": "https://api.github.com/users/Jasonbuchanan1983/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Jasonbuchanan1983", "id": 82856229, "login": "Jasonbuchanan1983", "node_id": "MDQ6VXNlcjgyODU2MjI5", "organizations_url": "https://api.github.com/users/Jasonbuchanan1983/orgs", "received_events_url": "https://api.github.com/users/Jasonbuchanan1983/received_events", "repos_url": "https://api.github.com/users/Jasonbuchanan1983/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Jasonbuchanan1983/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Jasonbuchanan1983/subscriptions", "type": "User", "url": "https://api.github.com/users/Jasonbuchanan1983", "user_view_type": "public" }
[]
closed
false
null
[]
[]
2021-05-02T11:11:11
2021-07-23T09:24:16
2021-05-03T08:17:38
NONE
null
null
null
null
## Adding a Dataset - **Name:** *name of the dataset* - **Description:** *short description of the dataset (or link to social media or blog post)* - **Paper:** *link to the dataset paper if available* - **Data:** *link to the Github repository or current dataset location* - **Motivation:** *what are some good reasons to have this dataset* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2299/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2299/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
21:06:27
https://api.github.com/repos/huggingface/datasets/issues/2296
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2296/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2296/comments
https://api.github.com/repos/huggingface/datasets/issues/2296/events
https://github.com/huggingface/datasets/issues/2296
872,974,907
MDU6SXNzdWU4NzI5NzQ5MDc=
2,296
1
{ "avatar_url": "https://avatars.githubusercontent.com/u/82880142?v=4", "events_url": "https://api.github.com/users/zinnyi/events{/privacy}", "followers_url": "https://api.github.com/users/zinnyi/followers", "following_url": "https://api.github.com/users/zinnyi/following{/other_user}", "gists_url": "https://api.github.com/users/zinnyi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/zinnyi", "id": 82880142, "login": "zinnyi", "node_id": "MDQ6VXNlcjgyODgwMTQy", "organizations_url": "https://api.github.com/users/zinnyi/orgs", "received_events_url": "https://api.github.com/users/zinnyi/received_events", "repos_url": "https://api.github.com/users/zinnyi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/zinnyi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zinnyi/subscriptions", "type": "User", "url": "https://api.github.com/users/zinnyi", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
[]
2021-04-30T17:53:49
2021-05-03T08:17:31
2021-05-03T08:17:31
NONE
null
null
null
null
## Adding a Dataset - **Name:** *name of the dataset* - **Description:** *short description of the dataset (or link to social media or blog post)* - **Paper:** *link to the dataset paper if available* - **Data:** *link to the Github repository or current dataset location* - **Motivation:** *what are some good reasons to have this dataset* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2296/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2296/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2 days, 14:23:42
https://api.github.com/repos/huggingface/datasets/issues/2294
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2294/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2294/comments
https://api.github.com/repos/huggingface/datasets/issues/2294/events
https://github.com/huggingface/datasets/issues/2294
872,136,075
MDU6SXNzdWU4NzIxMzYwNzU=
2,294
Slow #0 when using map to tokenize.
{ "avatar_url": "https://avatars.githubusercontent.com/u/31714566?v=4", "events_url": "https://api.github.com/users/VerdureChen/events{/privacy}", "followers_url": "https://api.github.com/users/VerdureChen/followers", "following_url": "https://api.github.com/users/VerdureChen/following{/other_user}", "gists_url": "https://api.github.com/users/VerdureChen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VerdureChen", "id": 31714566, "login": "VerdureChen", "node_id": "MDQ6VXNlcjMxNzE0NTY2", "organizations_url": "https://api.github.com/users/VerdureChen/orgs", "received_events_url": "https://api.github.com/users/VerdureChen/received_events", "repos_url": "https://api.github.com/users/VerdureChen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VerdureChen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VerdureChen/subscriptions", "type": "User", "url": "https://api.github.com/users/VerdureChen", "user_view_type": "public" }
[]
open
false
null
[]
[ "Hi ! Have you tried other values for `preprocessing_num_workers` ? Is it always process 0 that is slower ?\r\nThere are no difference between process 0 and the others except that it processes the first shard of the dataset.", "Hi, I have found the reason of it. Before using the map function to tokenize the data, I concatenate the wikipedia and bookcorpus first, like this:\r\n```if args.dataset_name1 is not None:\r\n dataset1 = load_dataset(args.dataset_name1, args.dataset_config_name1, split=\"train\")\r\n dataset1 = dataset1.remove_columns('title')\r\n if args.dataset_name2 is not None:\r\n dataset2 = load_dataset(args.dataset_name2, args.dataset_config_name2,split=\"train\")\r\n assert dataset1.features.type == dataset2.features.type, str(dataset1.features.type)+';'+str(dataset2.features.type)\r\n datasets12 = concatenate_datasets([dataset1, dataset2], split='train')\r\n```\r\nWhen I just use one datasets, e.g. wikipedia, the problem seems no longer exist:\r\n![image](https://user-images.githubusercontent.com/31714566/116967059-13d24380-ace4-11eb-8d14-b7b9c9a275cc.png)\r\n\r\nBookcorpus has more row numbers than Wikipedia, however, it takes much more time to process each batch of wiki than that of bookcorpus. When we first concatenate two datasets and then use _map_ to process the concatenated datasets, e.g. `num_proc=5`, process 0 has to process all of the wikipedia data, causing the problem that #0 takes a longer time to finish the job. \r\n\r\nThe problem is caused by the different characteristic of different datasets. One solution might be using _map_ first to process two datasets seperately, then concatenate the tokenized and processed datasets before input to the `Dataloader`.\r\n\r\n", "That makes sense ! You can indeed use `map` on both datasets separately and then concatenate.\r\nAnother option is to concatenate, then shuffle, and then `map`." ]
2021-04-30T08:00:33
2021-05-04T11:00:11
null
NONE
null
null
null
null
Hi, _datasets_ is really amazing! I am following [run_mlm_no_trainer.py](url) to pre-train BERT, and it uses `tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, )` to tokenize by multiprocessing. However, I have found that when `num_proc`>1, process _#0_ is much slower than the others. It looks like this: ![image](https://user-images.githubusercontent.com/31714566/116665555-81246280-a9cc-11eb-8a37-6e608ab310d0.png) It takes more than 12 hours for #0, while the others take just about half an hour. Could anyone tell me whether this is normal or not, and whether there are any methods to speed it up?
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2294/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2294/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2288
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2288/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2288/comments
https://api.github.com/repos/huggingface/datasets/issues/2288/events
https://github.com/huggingface/datasets/issues/2288
871,111,235
MDU6SXNzdWU4NzExMTEyMzU=
2,288
Load_dataset for local CSV files
{ "avatar_url": "https://avatars.githubusercontent.com/u/17052700?v=4", "events_url": "https://api.github.com/users/sstojanoska/events{/privacy}", "followers_url": "https://api.github.com/users/sstojanoska/followers", "following_url": "https://api.github.com/users/sstojanoska/following{/other_user}", "gists_url": "https://api.github.com/users/sstojanoska/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sstojanoska", "id": 17052700, "login": "sstojanoska", "node_id": "MDQ6VXNlcjE3MDUyNzAw", "organizations_url": "https://api.github.com/users/sstojanoska/orgs", "received_events_url": "https://api.github.com/users/sstojanoska/received_events", "repos_url": "https://api.github.com/users/sstojanoska/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sstojanoska/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sstojanoska/subscriptions", "type": "User", "url": "https://api.github.com/users/sstojanoska", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi,\r\n\r\nthis is not a standard CSV file (requires additional preprocessing) so I wouldn't label this as s bug. You could parse the examples with the regex module or the string API to extract the data, but the following approach is probably the easiest (once you load the data):\r\n```python\r\nimport ast\r\n# load the dataset and copy the features\r\ndef process(ex):\r\n return {\"tokens\": ast.literal_eval(ex[\"tokens\"]), \"labels\": ast.literal_eval(ex[\"labels\"])}\r\ndataset = dataset.map(process, features=new_features)\r\n```\r\n", "Hi,\r\n\r\nThanks for the reply.\r\nI have already used ```ast.literal_eval``` to evaluate the string into list, but I was getting another error:\r\n```\r\nArrowInvalid: Could not convert X with type str: tried to convert to int\r\n```\r\nWhy this happens ? Should labels be mapped to their ids and use int instead of str ?", "Yes, just map the labels to their ids." ]
2021-04-29T15:01:10
2021-06-15T13:49:26
2021-06-15T13:49:26
NONE
null
null
null
null
The method load_dataset fails to correctly load a dataset from CSV. Moreover, I am working on a token-classification task (POS tagging), where each row in my CSV contains two columns, each containing a list of strings. Row example: ```tokens | labels ['I' , 'am', 'John'] | ['PRON', 'AUX', 'PROPN' ] ``` The method loads each list as a string (e.g. "['I' , 'am', 'John']"). To solve this issue, I copied the Datasets.Features, created Sequence types (instead of Value) and tried to cast the feature types ``` new_features['tokens'] = Sequence(feature=Value(dtype='string', id=None)) new_features['labels'] = Sequence(feature=ClassLabel(num_classes=len(tag2idx), names=list(unique_tags))) dataset = dataset.cast(new_features) ``` but I got the following error ``` ArrowNotImplementedError: Unsupported cast from string to list using function cast_list ``` Moreover, I tried to set the features parameter in the load_dataset method to my new_features, but this fails as well. How can this be solved?
{ "avatar_url": "https://avatars.githubusercontent.com/u/17052700?v=4", "events_url": "https://api.github.com/users/sstojanoska/events{/privacy}", "followers_url": "https://api.github.com/users/sstojanoska/followers", "following_url": "https://api.github.com/users/sstojanoska/following{/other_user}", "gists_url": "https://api.github.com/users/sstojanoska/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sstojanoska", "id": 17052700, "login": "sstojanoska", "node_id": "MDQ6VXNlcjE3MDUyNzAw", "organizations_url": "https://api.github.com/users/sstojanoska/orgs", "received_events_url": "https://api.github.com/users/sstojanoska/received_events", "repos_url": "https://api.github.com/users/sstojanoska/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sstojanoska/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sstojanoska/subscriptions", "type": "User", "url": "https://api.github.com/users/sstojanoska", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2288/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2288/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
46 days, 22:48:16
https://api.github.com/repos/huggingface/datasets/issues/2285
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2285/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2285/comments
https://api.github.com/repos/huggingface/datasets/issues/2285/events
https://github.com/huggingface/datasets/issues/2285
871,005,236
MDU6SXNzdWU4NzEwMDUyMzY=
2,285
Help understanding how to build a dataset for language modeling as with the old TextDataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/46021411?v=4", "events_url": "https://api.github.com/users/danieldiezmallo/events{/privacy}", "followers_url": "https://api.github.com/users/danieldiezmallo/followers", "following_url": "https://api.github.com/users/danieldiezmallo/following{/other_user}", "gists_url": "https://api.github.com/users/danieldiezmallo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/danieldiezmallo", "id": 46021411, "login": "danieldiezmallo", "node_id": "MDQ6VXNlcjQ2MDIxNDEx", "organizations_url": "https://api.github.com/users/danieldiezmallo/orgs", "received_events_url": "https://api.github.com/users/danieldiezmallo/received_events", "repos_url": "https://api.github.com/users/danieldiezmallo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/danieldiezmallo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/danieldiezmallo/subscriptions", "type": "User", "url": "https://api.github.com/users/danieldiezmallo", "user_view_type": "public" }
[]
closed
false
null
[]
[ "\r\nI received an answer for this question on the HuggingFace Datasets forum by @lhoestq\r\n\r\nHi !\r\n\r\nIf you want to tokenize line by line, you can use this:\r\n\r\n```\r\nmax_seq_length = 512\r\nnum_proc = 4\r\n\r\ndef tokenize_function(examples):\r\n# Remove empty lines\r\nexamples[\"text\"] = [line for line in examples[\"text\"] if len(line) > 0 and not line.isspace()]\r\nreturn tokenizer(\r\n examples[\"text\"],\r\n truncation=True,\r\n max_length=max_seq_length,\r\n)\r\n\r\ntokenized_dataset = dataset.map(\r\ntokenize_function,\r\nbatched=True,\r\nnum_proc=num_proc,\r\nremove_columns=[\"text\"],\r\n)\r\n```\r\n\r\nThough the TextDataset was doing a different processing by concatenating all the texts and building blocks of size 512. If you need this behavior, then you must apply an additional map function after the tokenization:\r\n\r\n```\r\n# Main data processing function that will concatenate all texts from\r\n# our dataset and generate chunks of max_seq_length.\r\ndef group_texts(examples):\r\n# Concatenate all texts.\r\nconcatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\r\ntotal_length = len(concatenated_examples[list(examples.keys())[0]])\r\n# We drop the small remainder, we could add padding if the model supported it instead of this drop,\r\n# you can customize this part to your needs.\r\ntotal_length = (total_length // max_seq_length) * max_seq_length\r\n# Split by chunks of max_len.\r\nresult = {\r\n k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]\r\n for k, t in concatenated_examples.items()\r\n}\r\nreturn result\r\n\r\n# Note that with `batched=True`, this map processes 1,000 texts together,\r\n# so group_texts throws away a remainder for each of those groups of 1,000 texts.\r\n# You can adjust that batch_size here but a higher value might be slower to preprocess.\r\n\r\ntokenized_dataset = tokenized_dataset.map(\r\ngroup_texts,\r\nbatched=True,\r\nnum_proc=num_proc,\r\n)\r\n```\r\n\r\nThis code comes from the processing of the run_mlm.py example script of transformers\r\n\r\n", "Resolved" ]
2021-04-29T13:16:45
2021-05-19T07:22:45
2021-05-19T07:22:39
NONE
null
null
null
null
Hello, I am trying to load a custom dataset that I will then use for language modeling. The dataset consists of a text file that has a whole document in each line, meaning that each line exceeds the normal 512-token limit of most tokenizers. I would like to understand the process of building a text dataset that tokenizes each line, having previously split the documents in the dataset into lines of a "tokenizable" size, as the old TextDataset class would do, where you only had to do the following, and a tokenized dataset without text loss would be available to pass to a DataCollator: ``` model_checkpoint = 'distilbert-base-uncased' from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) from transformers import TextDataset dataset = TextDataset( tokenizer=tokenizer, file_path="path/to/text_file.txt", block_size=512, ) ``` For now, what I have is the following, which, of course, throws an error because each line is longer than the maximum block size in the tokenizer: ``` import datasets dataset = datasets.load_dataset('path/to/text_file.txt') model_checkpoint = 'distilbert-base-uncased' tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def tokenize_function(examples): return tokenizer(examples["text"]) tokenized_datasets = dataset.map(tokenize_function, batched=True, num_proc=4, remove_columns=["text"]) tokenized_datasets ``` So what would be the "standard" way of creating a dataset in the way it was done before? Thank you very much for the help :))
{ "avatar_url": "https://avatars.githubusercontent.com/u/46021411?v=4", "events_url": "https://api.github.com/users/danieldiezmallo/events{/privacy}", "followers_url": "https://api.github.com/users/danieldiezmallo/followers", "following_url": "https://api.github.com/users/danieldiezmallo/following{/other_user}", "gists_url": "https://api.github.com/users/danieldiezmallo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/danieldiezmallo", "id": 46021411, "login": "danieldiezmallo", "node_id": "MDQ6VXNlcjQ2MDIxNDEx", "organizations_url": "https://api.github.com/users/danieldiezmallo/orgs", "received_events_url": "https://api.github.com/users/danieldiezmallo/received_events", "repos_url": "https://api.github.com/users/danieldiezmallo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/danieldiezmallo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/danieldiezmallo/subscriptions", "type": "User", "url": "https://api.github.com/users/danieldiezmallo", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2285/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2285/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
19 days, 18:05:54
https://api.github.com/repos/huggingface/datasets/issues/2279
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2279/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2279/comments
https://api.github.com/repos/huggingface/datasets/issues/2279/events
https://github.com/huggingface/datasets/issues/2279
870,431,662
MDU6SXNzdWU4NzA0MzE2NjI=
2,279
Compatibility with Ubuntu 18 and GLIBC 2.27?
{ "avatar_url": "https://avatars.githubusercontent.com/u/11379648?v=4", "events_url": "https://api.github.com/users/tginart/events{/privacy}", "followers_url": "https://api.github.com/users/tginart/followers", "following_url": "https://api.github.com/users/tginart/following{/other_user}", "gists_url": "https://api.github.com/users/tginart/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tginart", "id": 11379648, "login": "tginart", "node_id": "MDQ6VXNlcjExMzc5NjQ4", "organizations_url": "https://api.github.com/users/tginart/orgs", "received_events_url": "https://api.github.com/users/tginart/received_events", "repos_url": "https://api.github.com/users/tginart/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tginart/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tginart/subscriptions", "type": "User", "url": "https://api.github.com/users/tginart", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "From the trace this seems like an error in the tokenizer library instead.\r\n\r\nDo you mind opening an issue at https://github.com/huggingface/tokenizers instead?", "Hi @tginart, thanks for reporting.\r\n\r\nI think this issue is already open at `tokenizers` library: https://github.com/huggingface/tokenizers/issues/685" ]
2021-04-28T22:08:07
2021-04-29T07:42:42
2021-04-29T07:42:42
NONE
null
null
null
null
## Describe the bug For use on Ubuntu systems, it seems that datasets requires GLIBC 2.29. However, Ubuntu 18 runs with GLIBC 2.27 and it seems [non-trivial to upgrade GLIBC to 2.29 for Ubuntu 18 users](https://www.digitalocean.com/community/questions/how-install-glibc-2-29-or-higher-in-ubuntu-18-04). I'm not sure if there is anything that can be done about this, but I'd like to confirm that using huggingface/datasets requires either an upgrade to Ubuntu 19/20 or a hand-rolled install of a higher version of GLIBC. ## Steps to reproduce the bug 1. clone the transformers repo 2. move to examples/pytorch/language-modeling 3. run example command: ```python run_clm.py --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --do_eval --output_dir /tmp/test-clm``` ## Expected results As described in the transformers repo. ## Actual results ```Traceback (most recent call last): File "run_clm.py", line 34, in <module> from transformers import ( File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/__init__.py", line 2487, in __getattr__ return super().__getattr__(name) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/file_utils.py", line 1699, in __getattr__ module = self._get_module(self._class_to_module[name]) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/__init__.py", line 2481, in _get_module return importlib.import_module("." + module_name, self.__name__) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/__init__.py", line 19, in <module> from . import ( File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/layoutlm/__init__.py", line 23, in <module> from .tokenization_layoutlm import LayoutLMTokenizer File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/layoutlm/tokenization_layoutlm.py", line 19, in <module> from ..bert.tokenization_bert import BertTokenizer File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/bert/tokenization_bert.py", line 23, in <module> from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/tokenization_utils.py", line 26, in <module> from .tokenization_utils_base import ( File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 68, in <module> from tokenizers import AddedToken File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/tokenizers/__init__.py", line 79, in <module> from .tokenizers import ( ImportError: /lib/x86_64-linux-gnu/libm.so.6: version `GLIBC_2.29' not found (required by /home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/tokenizers/tokenizers.cpython-37m-x86_64-linux-gnu.so) ``` ## Versions Paste the output of the following code: ``` - Datasets: 1.6.1 - Python: 3.7.10 (default, Feb 26 2021, 18:47:35) [GCC 7.3.0] - Platform: Linux-4.15.0-128-generic-x86_64-with-debian-buster-sid ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2279/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2279/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
9:34:35
https://api.github.com/repos/huggingface/datasets/issues/2278
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2278/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2278/comments
https://api.github.com/repos/huggingface/datasets/issues/2278/events
https://github.com/huggingface/datasets/issues/2278
870,088,059
MDU6SXNzdWU4NzAwODgwNTk=
2,278
Loss result in GPTNeoForCausalLM
{ "avatar_url": "https://avatars.githubusercontent.com/u/51174606?v=4", "events_url": "https://api.github.com/users/Yossillamm/events{/privacy}", "followers_url": "https://api.github.com/users/Yossillamm/followers", "following_url": "https://api.github.com/users/Yossillamm/following{/other_user}", "gists_url": "https://api.github.com/users/Yossillamm/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Yossillamm", "id": 51174606, "login": "Yossillamm", "node_id": "MDQ6VXNlcjUxMTc0NjA2", "organizations_url": "https://api.github.com/users/Yossillamm/orgs", "received_events_url": "https://api.github.com/users/Yossillamm/received_events", "repos_url": "https://api.github.com/users/Yossillamm/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Yossillamm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Yossillamm/subscriptions", "type": "User", "url": "https://api.github.com/users/Yossillamm", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "Hi ! I think you might have to ask on the `transformers` repo on or the forum at https://discuss.huggingface.co/\r\n\r\nClosing since it's not related to this library" ]
2021-04-28T15:39:52
2021-05-06T16:14:23
2021-05-06T16:14:23
NONE
null
null
null
null
Is there any way to get the "loss" and "logits" results in the GPT-Neo API?
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2278/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2278/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
8 days, 0:34:31
https://api.github.com/repos/huggingface/datasets/issues/2276
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2276/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2276/comments
https://api.github.com/repos/huggingface/datasets/issues/2276/events
https://github.com/huggingface/datasets/issues/2276
870,010,511
MDU6SXNzdWU4NzAwMTA1MTE=
2,276
concatenate_datasets loads all the data into memory
{ "avatar_url": "https://avatars.githubusercontent.com/u/7063207?v=4", "events_url": "https://api.github.com/users/chbensch/events{/privacy}", "followers_url": "https://api.github.com/users/chbensch/followers", "following_url": "https://api.github.com/users/chbensch/following{/other_user}", "gists_url": "https://api.github.com/users/chbensch/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/chbensch", "id": 7063207, "login": "chbensch", "node_id": "MDQ6VXNlcjcwNjMyMDc=", "organizations_url": "https://api.github.com/users/chbensch/orgs", "received_events_url": "https://api.github.com/users/chbensch/received_events", "repos_url": "https://api.github.com/users/chbensch/repos", "site_admin": false, "starred_url": "https://api.github.com/users/chbensch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chbensch/subscriptions", "type": "User", "url": "https://api.github.com/users/chbensch", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[ "Therefore, when I try to concatenate larger datasets (5x 35GB data sets) I also get an out of memory error, since over 90GB of swap space was used at the time of the crash:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nMemoryError Traceback (most recent call last)\r\n<ipython-input-6-9766d77530b9> in <module>\r\n 20 print(file_name)\r\n 21 cv_batch = load_from_disk(file_name)\r\n---> 22 cv_sampled_train = concatenate_datasets([cv_sampled_train, cv_batch])\r\n 23 \r\n 24 print(\"Saving to disk!\")\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\arrow_dataset.py in concatenate_datasets(dsets, info, split, axis)\r\n 2891 \r\n 2892 # Concatenate tables\r\n-> 2893 table = concat_tables([dset._data for dset in dsets if len(dset._data) > 0], axis=axis)\r\n 2894 table = update_metadata_with_features(table, None)\r\n 2895 \r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in concat_tables(tables, axis)\r\n 837 if len(tables) == 1:\r\n 838 return tables[0]\r\n--> 839 return ConcatenationTable.from_tables(tables, axis=axis)\r\n 840 \r\n 841 \r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in from_tables(cls, tables, axis)\r\n 697 return result\r\n 698 \r\n--> 699 blocks = to_blocks(tables[0])\r\n 700 for table in tables[1:]:\r\n 701 table_blocks = to_blocks(table)\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in to_blocks(table)\r\n 669 return [[InMemoryTable(table)]]\r\n 670 elif isinstance(table, ConcatenationTable):\r\n--> 671 return copy.deepcopy(table.blocks)\r\n 672 else:\r\n 673 return [[table]]\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 151 copier = getattr(x, \"__deepcopy__\", None)\r\n 152 if copier is not None:\r\n--> 153 y = copier(memo)\r\n 154 else:\r\n 155 reductor = dispatch_table.get(cls)\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in __deepcopy__(self, memo)\r\n 143 # by adding it to the memo, self.table won't be copied\r\n 144 memo[id(self.table)] = self.table\r\n--> 145 return _deepcopy(self, memo)\r\n 146 \r\n 147 def __getstate__(self):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in _deepcopy(x, memo)\r\n 62 memo[id(x)] = result\r\n 63 for k, v in x.__dict__.items():\r\n---> 64 setattr(result, k, copy.deepcopy(v, memo))\r\n 65 return result\r\n 66 \r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if 
issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 170 y = x\r\n 171 else:\r\n--> 172 y = _reconstruct(x, memo, *rv)\r\n 173 \r\n 174 # If is its own copy, don't memoize.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)\r\n 262 if deep and args:\r\n 263 args = (deepcopy(arg, memo) for arg in args)\r\n--> 264 y = func(*args)\r\n 265 if deep:\r\n 266 memo[id(x)] = y\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <genexpr>(.0)\r\n 261 deep = memo is not None\r\n 262 if deep and args:\r\n--> 263 args = (deepcopy(arg, memo) for arg in args)\r\n 264 y = func(*args)\r\n 265 if deep:\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 170 y = x\r\n 171 else:\r\n--> 172 y = _reconstruct(x, memo, *rv)\r\n 173 \r\n 174 # If is its own copy, don't memoize.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)\r\n 262 if deep and args:\r\n 263 args = (deepcopy(arg, memo) for arg in args)\r\n--> 264 y = func(*args)\r\n 265 if deep:\r\n 266 memo[id(x)] = y\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <genexpr>(.0)\r\n 261 deep = memo is not None\r\n 262 if deep and args:\r\n--> 263 args = (deepcopy(arg, memo) for arg in args)\r\n 264 y = func(*args)\r\n 265 if deep:\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_tuple(x, memo, deepcopy)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <listcomp>(.0)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = 
_deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_tuple(x, memo, deepcopy)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <listcomp>(.0)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 159 reductor = getattr(x, \"__reduce_ex__\", None)\r\n 160 if reductor is not None:\r\n--> 161 rv = reductor(4)\r\n 162 else:\r\n 163 reductor = getattr(x, \"__reduce__\", None)\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\pyarrow\\io.pxi in pyarrow.lib.Buffer.__reduce_ex__()\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\pyarrow\\io.pxi in pyarrow.lib.Buffer.to_pybytes()\r\n\r\nMemoryError: \r\n\r\n```", "Hi ! this looks like an important issue. Let me try to reproduce this.\r\nCc @samsontmr this might be related to the memory issue you have in #2134 ", "@lhoestq Just went to open a similar issue.\r\n\r\nIt seems like deep copying (tested on master) the dataset object writes the table's record batches (`dset._data._batches`) into RAM.\r\n\r\nTo find the bug, I modified the `_deepcopy` function in `table.py` as follows:\r\n```python\r\ndef _deepcopy(x, memo: dict):\r\n \"\"\"deepcopy a regular class instance\"\"\"\r\n import psutil # pip install this package\r\n import time\r\n cls = x.__class__\r\n result = cls.__new__(cls)\r\n memo[id(x)] = result\r\n for k, v in x.__dict__.items():\r\n print(\"=\"* 50)\r\n print(\"Current memory:\", psutil.virtual_memory().percent)\r\n print(f\"Saving object {k} with value {v}\")\r\n setattr(result, k, copy.deepcopy(v, memo))\r\n time.sleep(5)\r\n print(\"Memory after copy:\", psutil.virtual_memory().percent)\r\n return result\r\n```\r\nTest script:\r\n```python\r\nimport copy\r\nfrom datasets import load_dataset\r\nbk = load_dataset(\"bookcorpus\", split=\"train\")\r\nbk_copy = copy.deepcopy(bk)\r\n```", "Thanks for the insights @mariosasko ! I'm working on a fix.\r\nSince this is a big issue I'll make a patch release as soon as this is fixed", "Hi @samsontmr @TaskManager91 the fix is on the master branch, feel free to install `datasets` from source and let us know if you still have issues", "We just released `datasets` 1.6.2 that includes the fix :)", "thanks it works like a charm! :)" ]
2021-04-28T14:27:21
2021-05-03T08:41:55
2021-05-03T08:41:55
NONE
null
null
null
null
## Describe the bug When I try to concatenate 2 datasets (10 GB each), the entire data is loaded into memory instead of being written directly to disk. Interestingly, this happens when trying to save the new dataset to disk or concatenating it again. ![image](https://user-images.githubusercontent.com/7063207/116420321-2b21b480-a83e-11eb-9006-8f6ca729fb6f.png) ## Steps to reproduce the bug ```python from datasets import concatenate_datasets, load_from_disk test_sampled_pro = load_from_disk("test_sampled_pro") val_sampled_pro = load_from_disk("val_sampled_pro") big_set = concatenate_datasets([test_sampled_pro, val_sampled_pro]) # Loaded to memory big_set.save_to_disk("big_set") # Loaded to memory big_set = concatenate_datasets([big_set, val_sampled_pro]) ``` ## Expected results The data should be loaded into memory in batches and then saved directly to disk. ## Actual results The entire dataset is loaded into memory and then saved to the hard disk. ## Versions Paste the output of the following code: ```python - Datasets: 1.6.1 - Python: 3.8.8 (default, Apr 13 2021, 19:58:26) [GCC 7.3.0] - Platform: Linux-5.4.72-microsoft-standard-WSL2-x86_64-with-glibc2.10 ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/7063207?v=4", "events_url": "https://api.github.com/users/chbensch/events{/privacy}", "followers_url": "https://api.github.com/users/chbensch/followers", "following_url": "https://api.github.com/users/chbensch/following{/other_user}", "gists_url": "https://api.github.com/users/chbensch/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/chbensch", "id": 7063207, "login": "chbensch", "node_id": "MDQ6VXNlcjcwNjMyMDc=", "organizations_url": "https://api.github.com/users/chbensch/orgs", "received_events_url": "https://api.github.com/users/chbensch/received_events", "repos_url": "https://api.github.com/users/chbensch/repos", "site_admin": false, "starred_url": "https://api.github.com/users/chbensch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chbensch/subscriptions", "type": "User", "url": "https://api.github.com/users/chbensch", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2276/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2276/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
4 days, 18:14:34
https://api.github.com/repos/huggingface/datasets/issues/2275
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2275/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2275/comments
https://api.github.com/repos/huggingface/datasets/issues/2275/events
https://github.com/huggingface/datasets/issues/2275
869,378,311
MDU6SXNzdWU4NjkzNzgzMTE=
2,275
SNLI dataset has labels of -1
{ "avatar_url": "https://avatars.githubusercontent.com/u/17426779?v=4", "events_url": "https://api.github.com/users/puzzler10/events{/privacy}", "followers_url": "https://api.github.com/users/puzzler10/followers", "following_url": "https://api.github.com/users/puzzler10/following{/other_user}", "gists_url": "https://api.github.com/users/puzzler10/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/puzzler10", "id": 17426779, "login": "puzzler10", "node_id": "MDQ6VXNlcjE3NDI2Nzc5", "organizations_url": "https://api.github.com/users/puzzler10/orgs", "received_events_url": "https://api.github.com/users/puzzler10/received_events", "repos_url": "https://api.github.com/users/puzzler10/repos", "site_admin": false, "starred_url": "https://api.github.com/users/puzzler10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/puzzler10/subscriptions", "type": "User", "url": "https://api.github.com/users/puzzler10", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @puzzler10, \r\nThose examples where `gold_label` field was empty, -1 label was alloted to it. In order to remove it you can filter the samples from train/val/test splits. Here's how you can drop those rows from the dataset:\r\n`dataset = load_dataset(\"snli\")`\r\n`dataset_test_filter = dataset['test'].filter(lambda example: example['label'] != -1)`\r\n\r\nI agree it should have been mentioned in the documentation. I'll raise a PR regarding the same. Thanks for pointing out!" ]
2021-04-28T00:32:25
2021-05-17T13:34:18
2021-05-17T13:34:18
NONE
null
null
null
null
There are a number of rows with a label of -1 in the SNLI dataset. The dataset descriptions [here](https://nlp.stanford.edu/projects/snli/) and [here](https://github.com/huggingface/datasets/tree/master/datasets/snli) don't list -1 as a label possibility, and neither does the dataset viewer. As examples, see index 107 or 124 of the test set. It isn't clear what these labels mean. I found a [line of code](https://github.com/huggingface/datasets/blob/80e59ef178d3bb2090d091bc32315c655eb0633d/datasets/snli/snli.py#L94) that seems to add them, but it is still unclear why they are there. The current workaround is to just drop these rows from any model being trained. Perhaps the documentation should be updated.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2275/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2275/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
19 days, 13:01:53
https://api.github.com/repos/huggingface/datasets/issues/2272
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2272/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2272/comments
https://api.github.com/repos/huggingface/datasets/issues/2272/events
https://github.com/huggingface/datasets/issues/2272
869,017,977
MDU6SXNzdWU4NjkwMTc5Nzc=
2,272
Bug in Dataset.class_encode_column
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "This has been fixed in this commit: https://github.com/huggingface/datasets/pull/2254/commits/88676c930216cd4cc31741b99827b477d2b46cb6\r\n\r\nIt was introduced in #2246 : using map with `input_columns` doesn't return the other columns anymore" ]
2021-04-27T16:13:18
2021-04-30T12:54:27
2021-04-30T12:54:27
MEMBER
null
null
null
null
## Describe the bug All the columns except the one passed to `Dataset.class_encode_column` are discarded. ## Expected results All the original columns should be kept. This needs regression tests.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2272/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2272/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2 days, 20:41:09
https://api.github.com/repos/huggingface/datasets/issues/2271
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2271/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2271/comments
https://api.github.com/repos/huggingface/datasets/issues/2271/events
https://github.com/huggingface/datasets/issues/2271
869,002,141
MDU6SXNzdWU4NjkwMDIxNDE=
2,271
Synchronize table metadata with features
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "See PR #2274 " ]
2021-04-27T15:55:13
2022-06-01T17:13:21
2022-06-01T17:13:21
MEMBER
null
null
null
null
**Is your feature request related to a problem? Please describe.** As pointed out in this [comment](https://github.com/huggingface/datasets/pull/2145#discussion_r621326767): > Metadata stored in the schema is just redundant information regarding the feature types. It is used when calling Dataset.from_file to know which feature types to use. These metadata are stored in the schema of the pyarrow table by using `update_metadata_with_features`. However, this is something that's almost never tested properly. **Describe the solution you'd like** We should find a way to always make sure that the metadata (in `self.data.schema.metadata`) are synced with the actual feature types (in `self.info.features`).
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2271/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2271/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
400 days, 1:18:08
https://api.github.com/repos/huggingface/datasets/issues/2267
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2267/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2267/comments
https://api.github.com/repos/huggingface/datasets/issues/2267/events
https://github.com/huggingface/datasets/issues/2267
868,291,129
MDU6SXNzdWU4NjgyOTExMjk=
2,267
DatasetDict save/load: failing test in 1.6 but not in 1.5
{ "avatar_url": "https://avatars.githubusercontent.com/u/2000204?v=4", "events_url": "https://api.github.com/users/timothyjlaurent/events{/privacy}", "followers_url": "https://api.github.com/users/timothyjlaurent/followers", "following_url": "https://api.github.com/users/timothyjlaurent/following{/other_user}", "gists_url": "https://api.github.com/users/timothyjlaurent/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/timothyjlaurent", "id": 2000204, "login": "timothyjlaurent", "node_id": "MDQ6VXNlcjIwMDAyMDQ=", "organizations_url": "https://api.github.com/users/timothyjlaurent/orgs", "received_events_url": "https://api.github.com/users/timothyjlaurent/received_events", "repos_url": "https://api.github.com/users/timothyjlaurent/repos", "site_admin": false, "starred_url": "https://api.github.com/users/timothyjlaurent/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/timothyjlaurent/subscriptions", "type": "User", "url": "https://api.github.com/users/timothyjlaurent", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
[ "Thanks for reporting ! We're looking into it", "I'm not able to reproduce this, do you think you can provide a code that creates a DatasetDict that has this issue when saving and reloading ?", "Hi, I just ran into a similar error. Here is the minimal code to reproduce:\r\n```python\r\nfrom datasets import load_dataset, DatasetDict\r\nds = load_dataset('super_glue', 'multirc')\r\n\r\nds.save_to_disk('tempds')\r\n\r\nds = DatasetDict.load_from_disk('tempds')\r\n\r\n```\r\n\r\n```bash\r\nReusing dataset super_glue (/home/idahl/.cache/huggingface/datasets/super_glue/multirc/1.0.2/2fb163bca9085c1deb906aff20f00c242227ff704a4e8c9cfdfe820be3abfc83)\r\nTraceback (most recent call last):\r\n File \"/home/idahl/eval-util-expl/multirc/tmp.py\", line 7, in <module>\r\n ds = DatasetDict.load_from_disk('tempds')\r\n File \"/home/idahl/miniconda3/envs/eval-util-expl/lib/python3.9/site-packages/datasets/dataset_dict.py\", line 710, in load_from_disk\r\n dataset_dict[k] = Dataset.load_from_disk(dataset_dict_split_path, fs, keep_in_memory=keep_in_memory)\r\n File \"/home/idahl/miniconda3/envs/eval-util-expl/lib/python3.9/site-packages/datasets/arrow_dataset.py\", line 687, in load_from_disk\r\n return Dataset(\r\n File \"/home/idahl/miniconda3/envs/eval-util-expl/lib/python3.9/site-packages/datasets/arrow_dataset.py\", line 274, in __init__\r\n raise ValueError(\r\nValueError: External features info don't match the dataset:\r\nGot\r\n{'answer': Value(dtype='string', id=None), 'idx': {'answer': Value(dtype='int32', id=None), 'paragraph': Value(dtype='int32', id=None), 'question': Value(dtype='int32', id=None)}, 'label': ClassLabel(num_classes=2, names=['False', 'True'], names_file=None, id=None), 'paragraph': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)}\r\nwith type\r\nstruct<answer: string, idx: struct<answer: int32, paragraph: int32, question: int32>, label: int64, paragraph: string, question: string>\r\n\r\nbut expected something like\r\n{'answer': Value(dtype='string', id=None), 'idx': {'paragraph': Value(dtype='int32', id=None), 'question': Value(dtype='int32', id=None), 'answer': Value(dtype='int32', id=None)}, 'label': Value(dtype='int64', id=None), 'paragraph': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)}\r\nwith type\r\nstruct<answer: string, idx: struct<paragraph: int32, question: int32, answer: int32>, label: int64, paragraph: string, question: string>\r\n\r\n```\r\n\r\nThe non-matching part seems to be\r\n`'label': ClassLabel(num_classes=2, names=['False', 'True'], names_file=None, id=None),`\r\nvs \r\n`'label': Value(dtype='int64', id=None),`\r\n\r\nAnd the order in the `<struct...` being different, which might cause the [features.type != inferred_features.type](https://github.com/huggingface/datasets/blob/master/src/datasets/arrow_dataset.py#L274) condition to become true and raise this ValueError.\r\n\r\n\r\nI am using datasets version 1.6.2.\r\n\r\nEdit: can confirm, this works without error in version 1.5.0", "My current workaround is to remove the idx feature:\r\n\r\n```\r\n\r\nfrom datasets import load_dataset, DatasetDict, Value\r\nds = load_dataset('super_glue', 'multirc')\r\nds = ds.remove_columns('idx')\r\n\r\nds.save_to_disk('tempds')\r\n\r\nds = DatasetDict.load_from_disk('tempds')\r\n\r\n```\r\n\r\nworks.", "It looks like this issue comes from the order of the fields in the 'idx' struct that is different for some reason.\r\nI'm looking into it. 
Note that as a workaround you can also flatten the nested features with `ds = ds.flatten()`", "I just pushed a fix on `master`. We'll do a new release soon !\r\n\r\nThanks for reporting" ]
2021-04-27T00:03:25
2021-05-28T15:27:34
null
NONE
null
null
null
null
## Describe the bug We have a test that saves a DatasetDict to disk and then loads it from disk. In 1.6 there is an incompatibility in the schema. Downgrading to `>1.6` -- fixes the problem. ## Steps to reproduce the bug ```python ### Load a dataset dict from jsonl path = '/test/foo' ds_dict.save_to_disk(path) ds_from_disk = DatasetDict.load_from_disk(path). ## <-- this is where I see the error on 1.6 ``` ## Expected results Upgrading to 1.6 shouldn't break that test. We should be able to serialize to and from disk. ## Actual results ``` # Infer features if None inferred_features = Features.from_arrow_schema(arrow_table.schema) if self.info.features is None: self.info.features = inferred_features # Infer fingerprint if None if self._fingerprint is None: self._fingerprint = generate_fingerprint(self) # Sanity checks assert self.features is not None, "Features can't be None in a Dataset object" assert self._fingerprint is not None, "Fingerprint can't be None in a Dataset object" if self.info.features.type != inferred_features.type: > raise ValueError( "External features info don't match the dataset:\nGot\n{}\nwith type\n{}\n\nbut expected something like\n{}\nwith type\n{}".format( self.info.features, self.info.features.type, inferred_features, inferred_features.type ) ) E ValueError: External features info don't match the dataset: E Got E {'_input_hash': Value(dtype='int64', id=None), '_task_hash': Value(dtype='int64', id=None), '_view_id': Value(dtype='string', id=None), 'answer': Value(dtype='string', id=None), 'encoding__ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'encoding__offsets': Sequence(feature=Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), length=-1, id=None), 'encoding__overflowing': Sequence(feature=Value(dtype='null', id=None), length=-1, id=None), 'encoding__tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'encoding__words': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'ner_ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'ner_labels': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'relations': [{'child': Value(dtype='int64', id=None), 'child_span': {'end': Value(dtype='int64', id=None), 'label': Value(dtype='string', id=None), 'start': Value(dtype='int64', id=None), 'token_end': Value(dtype='int64', id=None), 'token_start': Value(dtype='int64', id=None)}, 'color': Value(dtype='string', id=None), 'head': Value(dtype='int64', id=None), 'head_span': {'end': Value(dtype='int64', id=None), 'label': Value(dtype='string', id=None), 'start': Value(dtype='int64', id=None), 'token_end': Value(dtype='int64', id=None), 'token_start': Value(dtype='int64', id=None)}, 'label': Value(dtype='string', id=None)}], 'spans': [{'end': Value(dtype='int64', id=None), 'label': Value(dtype='string', id=None), 'start': Value(dtype='int64', id=None), 'text': Value(dtype='string', id=None), 'token_end': Value(dtype='int64', id=None), 'token_start': Value(dtype='int64', id=None), 'type': Value(dtype='string', id=None)}], 'text': Value(dtype='string', id=None), 'tokens': [{'disabled': Value(dtype='bool', id=None), 'end': Value(dtype='int64', id=None), 'id': Value(dtype='int64', id=None), 'start': Value(dtype='int64', id=None), 'text': Value(dtype='string', id=None), 'ws': Value(dtype='bool', id=None)}]} E with type E struct<_input_hash: int64, _task_hash: int64, _view_id: string, answer: string, encoding__ids: list<item: int64>, encoding__offsets: 
list<item: list<item: int64>>, encoding__overflowing: list<item: null>, encoding__tokens: list<item: string>, encoding__words: list<item: int64>, ner_ids: list<item: int64>, ner_labels: list<item: string>, relations: list<item: struct<child: int64, child_span: struct<end: int64, label: string, start: int64, token_end: int64, token_start: int64>, color: string, head: int64, head_span: struct<end: int64, label: string, start: int64, token_end: int64, token_start: int64>, label: string>>, spans: list<item: struct<end: int64, label: string, start: int64, text: string, token_end: int64, token_start: int64, type: string>>, text: string, tokens: list<item: struct<disabled: bool, end: int64, id: int64, start: int64, text: string, ws: bool>>> E E but expected something like E {'_input_hash': Value(dtype='int64', id=None), '_task_hash': Value(dtype='int64', id=None), '_view_id': Value(dtype='string', id=None), 'answer': Value(dtype='string', id=None), 'encoding__ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'encoding__offsets': Sequence(feature=Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), length=-1, id=None), 'encoding__overflowing': Sequence(feature=Value(dtype='null', id=None), length=-1, id=None), 'encoding__tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'encoding__words': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'ner_ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'ner_labels': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'relations': [{'head': Value(dtype='int64', id=None), 'child': Value(dtype='int64', id=None), 'head_span': {'start': Value(dtype='int64', id=None), 'end': Value(dtype='int64', id=None), 'token_start': Value(dtype='int64', id=None), 'token_end': Value(dtype='int64', id=None), 'label': Value(dtype='string', id=None)}, 'child_span': {'start': Value(dtype='int64', id=None), 'end': Value(dtype='int64', id=None), 'token_start': Value(dtype='int64', id=None), 'token_end': Value(dtype='int64', id=None), 'label': Value(dtype='string', id=None)}, 'color': Value(dtype='string', id=None), 'label': Value(dtype='string', id=None)}], 'spans': [{'text': Value(dtype='string', id=None), 'start': Value(dtype='int64', id=None), 'token_start': Value(dtype='int64', id=None), 'token_end': Value(dtype='int64', id=None), 'end': Value(dtype='int64', id=None), 'type': Value(dtype='string', id=None), 'label': Value(dtype='string', id=None)}], 'text': Value(dtype='string', id=None), 'tokens': [{'text': Value(dtype='string', id=None), 'start': Value(dtype='int64', id=None), 'end': Value(dtype='int64', id=None), 'id': Value(dtype='int64', id=None), 'ws': Value(dtype='bool', id=None), 'disabled': Value(dtype='bool', id=None)}]} E with type E struct<_input_hash: int64, _task_hash: int64, _view_id: string, answer: string, encoding__ids: list<item: int64>, encoding__offsets: list<item: list<item: int64>>, encoding__overflowing: list<item: null>, encoding__tokens: list<item: string>, encoding__words: list<item: int64>, ner_ids: list<item: int64>, ner_labels: list<item: string>, relations: list<item: struct<head: int64, child: int64, head_span: struct<start: int64, end: int64, token_start: int64, token_end: int64, label: string>, child_span: struct<start: int64, end: int64, token_start: int64, token_end: int64, label: string>, color: string, label: string>>, spans: list<item: struct<text: string, start: int64, token_start: int64, token_end: int64, end: 
int64, type: string, label: string>>, text: string, tokens: list<item: struct<text: string, start: int64, end: int64, id: int64, ws: bool, disabled: bool>>> ../../../../../.virtualenvs/tf_ner_rel_lib/lib/python3.8/site-packages/datasets/arrow_dataset.py:274: ValueError ``` ## Versions - Datasets: 1.6.1 - Python: 3.8.5 (default, Jan 26 2021, 10:01:04) [Clang 12.0.0 (clang-1200.0.32.2)] - Platform: macOS-10.15.7-x86_64-i386-64bit ```
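A minimal sketch of the round trip described above, using a toy DatasetDict in place of the reporter's jsonl-derived data (the columns and the path are placeholders, not the original data); on datasets 1.6.x the reload step is where the schema comparison happens and where a mismatch in nested struct field order raises the ValueError shown above:

```python
from datasets import Dataset, DatasetDict

# Toy stand-in for the reporter's jsonl-derived DatasetDict (hypothetical columns).
ds_dict = DatasetDict(
    {
        "train": Dataset.from_dict(
            {
                "text": ["foo", "bar"],
                "spans": [[{"start": 0, "end": 3, "label": "X"}], []],
            }
        )
    }
)

path = "/tmp/test_foo"  # placeholder path
ds_dict.save_to_disk(path)

# On 1.6.x this reload can fail with "External features info don't match the dataset"
# when the struct field order inferred from the Arrow schema differs from the
# features metadata that was saved alongside it.
ds_from_disk = DatasetDict.load_from_disk(path)
print(ds_from_disk["train"].features)
```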
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2267/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2267/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2262
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2262/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2262/comments
https://api.github.com/repos/huggingface/datasets/issues/2262/events
https://github.com/huggingface/datasets/issues/2262
867,325,351
MDU6SXNzdWU4NjczMjUzNTE=
2,262
NewsPH NLI dataset script fails to access test data.
{ "avatar_url": "https://avatars.githubusercontent.com/u/37775784?v=4", "events_url": "https://api.github.com/users/jinmang2/events{/privacy}", "followers_url": "https://api.github.com/users/jinmang2/followers", "following_url": "https://api.github.com/users/jinmang2/following{/other_user}", "gists_url": "https://api.github.com/users/jinmang2/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jinmang2", "id": 37775784, "login": "jinmang2", "node_id": "MDQ6VXNlcjM3Nzc1Nzg0", "organizations_url": "https://api.github.com/users/jinmang2/orgs", "received_events_url": "https://api.github.com/users/jinmang2/received_events", "repos_url": "https://api.github.com/users/jinmang2/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jinmang2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jinmang2/subscriptions", "type": "User", "url": "https://api.github.com/users/jinmang2", "user_view_type": "public" }
[ { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
null
[]
[ "Thanks @bhavitvyamalik for the fix !\r\nThe fix will be available in the next release.\r\nIt's already available on the `master` branch. For now you can either install `datasets` from source or use `script_version=\"master\"` in `load_dataset` to use the fixed version of this dataset." ]
2021-04-26T06:44:41
2021-04-29T09:32:03
2021-04-29T09:30:20
NONE
null
null
null
null
In Newsph-NLI Dataset (#1192), it fails to access test data. According to the script below, the download manager will download the train data when trying to download the test data. https://github.com/huggingface/datasets/blob/2a2dd6316af2cc7fdf24e4779312e8ee0c7ed98b/datasets/newsph_nli/newsph_nli.py#L71 If you download it according to the script above, you can see that train and test receive the same data as shown below. ```python >>> from datasets import load_dataset >>> newsph_nli = load_dataset(path="./datasets/newsph_nli.py") >>> newsph_nli DatasetDict({ train: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 420000 }) test: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 420000 }) validation: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 90000 }) }) >>> newsph_nli["train"][0] {'hypothesis': 'Ito ang dineklara ni Atty. Romulo Macalintal, abogado ni Robredo, kaugnay ng pagsisimula ng preliminary conference ngayong hapon sa Presidential Electoral Tribunal (PET).', 'label': 1, 'premise': '"Hindi ko ugali ang mamulitika; mas gusto kong tahimik na magtrabaho. Pero sasabihin ko ito ngayon: ang tapang, lakas, at diskarte, hindi nadadaan sa mapanirang salita. Ang kailangan ng taumbayan ay tapang sa gawa," ayon kay Robredo sa inilabas nitong statement.'} >>> newsph_nli["test"][0] {'hypothesis': 'Ito ang dineklara ni Atty. Romulo Macalintal, abogado ni Robredo, kaugnay ng pagsisimula ng preliminary conference ngayong hapon sa Presidential Electoral Tribunal (PET).', 'label': 1, 'premise': '"Hindi ko ugali ang mamulitika; mas gusto kong tahimik na magtrabaho. Pero sasabihin ko ito ngayon: ang tapang, lakas, at diskarte, hindi nadadaan sa mapanirang salita. Ang kailangan ng taumbayan ay tapang sa gawa," ayon kay Robredo sa inilabas nitong statement.'} ``` In local, I modified the code of the source as below and got the correct result. ```python 71 test_path = os.path.join(download_path, "test.csv") ``` ```python >>> from datasets import load_dataset >>> newsph_nli = load_dataset(path="./datasets/newsph_nli.py") >>> newsph_nli DatasetDict({ train: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 420000 }) test: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 9000 }) validation: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 90000 }) }) >>> newsph_nli["train"][0] {'hypothesis': 'Ito ang dineklara ni Atty. Romulo Macalintal, abogado ni Robredo, kaugnay ng pagsisimula ng preliminary conference ngayong hapon sa Presidential Electoral Tribunal (PET).', 'label': 1, 'premise': '"Hindi ko ugali ang mamulitika; mas gusto kong tahimik na magtrabaho. Pero sasabihin ko ito ngayon: ang tapang, lakas, at diskarte, hindi nadadaan sa mapanirang salita. Ang kailangan ng taumbayan ay tapang sa gawa," ayon kay Robredo sa inilabas nitong statement.'} >>> newsph_nli["test"][0] {'hypothesis': '-- JAI (@JaiPaller) September 13, 2019', 'label': 1, 'premise': 'Pinag-iingat ng Konsulado ng Pilipinas sa Dubai ang publiko, partikular ang mga donor, laban sa mga scam na gumagamit ng mga charitable organization.'} ``` I don't have experience with open source pull requests, so I suggest that you reflect them in the source. Thank you for reading :)
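Until the one-line fix above lands in a release, the maintainer's suggestion in the comment above (loading the script from the master branch) can be applied directly; a sketch, assuming the parameter is still called `script_version` as in the 1.x releases of the time (later versions rename it to `revision`):

```python
from datasets import load_dataset

# Load the newsph_nli script from the master branch instead of the pinned release,
# so the corrected test.csv path is used.
newsph_nli = load_dataset("newsph_nli", script_version="master")

# With the fix, the test split should report ~9,000 rows instead of duplicating train.
print(newsph_nli["test"].num_rows)
```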
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2262/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2262/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3 days, 2:45:39
https://api.github.com/repos/huggingface/datasets/issues/2256
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2256/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2256/comments
https://api.github.com/repos/huggingface/datasets/issues/2256/events
https://github.com/huggingface/datasets/issues/2256
866,708,609
MDU6SXNzdWU4NjY3MDg2MDk=
2,256
Running `dataset.map` with `num_proc > 1` uses a lot of memory
{ "avatar_url": "https://avatars.githubusercontent.com/u/8143425?v=4", "events_url": "https://api.github.com/users/roskoN/events{/privacy}", "followers_url": "https://api.github.com/users/roskoN/followers", "following_url": "https://api.github.com/users/roskoN/following{/other_user}", "gists_url": "https://api.github.com/users/roskoN/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/roskoN", "id": 8143425, "login": "roskoN", "node_id": "MDQ6VXNlcjgxNDM0MjU=", "organizations_url": "https://api.github.com/users/roskoN/orgs", "received_events_url": "https://api.github.com/users/roskoN/received_events", "repos_url": "https://api.github.com/users/roskoN/repos", "site_admin": false, "starred_url": "https://api.github.com/users/roskoN/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/roskoN/subscriptions", "type": "User", "url": "https://api.github.com/users/roskoN", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[ "Thanks for reporting ! We are working on this and we'll do a patch release very soon.", "We did a patch release to fix this issue.\r\nIt should be fixed in the new version 1.6.1\r\n\r\nThanks again for reporting and for the details :)" ]
2021-04-24T09:56:20
2021-04-26T17:12:15
2021-04-26T17:12:15
NONE
null
null
null
null
## Describe the bug Running `dataset.map` with `num_proc > 1` leads to tremendous memory usage that requires swapping to disk, and it becomes very slow. ## Steps to reproduce the bug ```python from datasets import load_dataset dstc8_dataset = load_dataset("roskoN/dstc8-reddit-corpus", keep_in_memory=False) def _prepare_sample(batch): return {"input_ids": list(), "attention_mask": list()} for split_name, dataset_split in list(dstc8_dataset.items()): print(f"Processing {split_name}") encoded_dataset_split = dataset_split.map( function=_prepare_sample, batched=True, num_proc=4, remove_columns=dataset_split.column_names, batch_size=10, writer_batch_size=10, keep_in_memory=False, ) print(encoded_dataset_split) path = f"./data/encoded_{split_name}" encoded_dataset_split.save_to_disk(path) ``` ## Expected results Memory usage should stay within reasonable boundaries. ## Actual results This is the htop output from running the provided script. ![image](https://user-images.githubusercontent.com/8143425/115954836-66954980-a4f3-11eb-8340-0153bdc3a475.png) ## Versions ``` - Datasets: 1.6.0 - Python: 3.8.8 (default, Apr 13 2021, 19:58:26) [GCC 7.3.0] - Platform: Linux-4.19.128-microsoft-standard-x86_64-with-glibc2.10 ``` Running on WSL2
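To put a number on the growth instead of reading it off htop, one option is to sample system-wide memory around the `map` call; a rough sketch under the same setup as the report (the lambda mirrors `_prepare_sample`, and system memory is sampled because the extra usage sits in the worker processes, so this is only a coarse lower bound on the peak):

```python
import psutil
from datasets import load_dataset

dstc8_dataset = load_dataset("roskoN/dstc8-reddit-corpus", keep_in_memory=False)
train_split = dstc8_dataset["train"]

used_before = psutil.virtual_memory().used

encoded = train_split.map(
    lambda batch: {"input_ids": [], "attention_mask": []},
    batched=True,
    num_proc=4,  # the problematic setting; 1.6.1 shipped a patch for this
    remove_columns=train_split.column_names,
    batch_size=10,
    writer_batch_size=10,
    keep_in_memory=False,
)

used_after = psutil.virtual_memory().used
# Workers have already exited here, so peak usage during map can be higher still.
print(f"System memory grew by {(used_after - used_before) / 1024 ** 3:.2f} GiB")
```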
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2256/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2256/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2 days, 7:15:55
https://api.github.com/repos/huggingface/datasets/issues/2252
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2252/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2252/comments
https://api.github.com/repos/huggingface/datasets/issues/2252/events
https://github.com/huggingface/datasets/issues/2252
865,870,710
MDU6SXNzdWU4NjU4NzA3MTA=
2,252
Slow dataloading with big datasets issue persists
{ "avatar_url": "https://avatars.githubusercontent.com/u/29157715?v=4", "events_url": "https://api.github.com/users/hwijeen/events{/privacy}", "followers_url": "https://api.github.com/users/hwijeen/followers", "following_url": "https://api.github.com/users/hwijeen/following{/other_user}", "gists_url": "https://api.github.com/users/hwijeen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hwijeen", "id": 29157715, "login": "hwijeen", "node_id": "MDQ6VXNlcjI5MTU3NzE1", "organizations_url": "https://api.github.com/users/hwijeen/orgs", "received_events_url": "https://api.github.com/users/hwijeen/received_events", "repos_url": "https://api.github.com/users/hwijeen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hwijeen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hwijeen/subscriptions", "type": "User", "url": "https://api.github.com/users/hwijeen", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[ "Hi ! Sorry to hear that. This may come from another issue then.\r\n\r\nFirst can we check if this latency comes from the dataset itself ?\r\nYou can try to load your dataset and benchmark the speed of querying random examples inside it ?\r\n```python\r\nimport time\r\nimport numpy as np\r\n\r\nfrom datasets import load_from_disk\r\n\r\ndataset = load_from_disk(...) # or from load_dataset...\r\n\r\n_start = time.time()\r\nn = 100\r\nfor i in np.random.default_rng(42).integers(0, len(dataset), size=n):\r\n _ = dataset[i]\r\nprint(time.time() - _start)\r\n```\r\n\r\nIf we see a significant speed difference between your two datasets then it would mean that there's an issue somewhere", "Hi @lhoestq, here is the result. I additionally measured time to `load_from_disk`:\r\n* 60GB\r\n```\r\nloading took: 22.618776321411133\r\nramdom indexing 100 times took: 0.10214924812316895\r\n```\r\n\r\n* 600GB\r\n```\r\nloading took: 1176.1764674186707\r\nramdom indexing 100 times took: 2.853600025177002\r\n```\r\n\r\nHmm.. I double checked that it's version 1.6.0. The difference seems quite big, could it be related to the running environment? \r\n", "I'm surprised by the speed change. Can you give more details about your dataset ?\r\nThe speed depends on the number of batches in the arrow tables and the distribution of the lengths of the batches.\r\nYou can access the batches by doing `dataset.data.to_batches()` (use only for debugging) (it doesn't bring data in memory).\r\n\r\nAlso can you explain what parameters you used if you used `map` calls ?\r\nAlso if you have some code that reproduces the issue I'd be happy to investigate it.", "Also if you could give us more info about your env like your OS, version of pyarrow and if you're using an HDD or a SSD", "Here are some details of my 600GB dataset. This is a dataset AFTER the `map` function and once I load this dataset, I do not use `map` anymore in the training. Regarding the distribution of the lengths, it is almost uniform (90% is 512 tokens, and 10% is randomly shorter than that -- typical setting for language modeling).\r\n```\r\nlen(batches):\r\n492763\r\n\r\nbatches[0]: \r\npyarrow.RecordBatch\r\nattention_mask: list<item: uint8>\r\n child 0, item: uint8\r\ninput_ids: list<item: int16>\r\n child 0, item: int16\r\nspecial_tokens_mask: list<item: uint8>\r\n child 0, item: uint8\r\ntoken_type_ids: list<item: uint8>\r\n child 0, item: uint8\r\n```\r\n\r\nHere the some parameters to `map` function just in case it is relevant:\r\n```\r\nnum_proc=1 # as multi processing is slower in my case\r\nload_from_cache_file=False\r\n```\r\n", "Regarding the environment, I am running the code on a cloud server. Here are some info:\r\n```\r\nUbuntu 18.04.5 LTS # cat /etc/issue\r\npyarrow 3.0.0 # pip list | grep pyarrow\r\n```\r\nThe data is stored in SSD and it is mounted to the machine via Network File System.\r\n\r\nIf you could point me to some of the commands to check the details of the environment, I would be happy to provide relevant information @lhoestq !", "I am not sure how I could provide you with the reproducible code, since the problem only arises when the data is big. For the moment, I would share the part that I think is relevant. 
Feel free to ask me for more info.\r\n\r\n```python\r\nclass MyModel(pytorch_lightning.LightningModule)\r\n def setup(self, stage):\r\n self.dataset = datasets.load_from_disk(path)\r\n self.dataset.set_format(\"torch\")\r\n\r\n def train_dataloader(self):\r\n collate_fn = transformers.DataCollatorForLanguageModeling(\r\n tokenizer=transformers.ElectraTokenizerFast.from_pretrained(tok_path)\r\n )\r\n dataloader = torch.utils.DataLoader(\r\n self.dataset,\r\n batch_size=32,\r\n collate_fn=collate_fn,\r\n num_workers=8,\r\n pin_memory=True,\r\n )\r\n```", "Hi ! Sorry for the delay I haven't had a chance to take a look at this yet. Are you still experiencing this issue ?\r\nI'm asking because the latest patch release 1.6.2 fixed a few memory issues that could have lead to slow downs", "Hi! I just ran the same code with different datasets (one is 60 GB and another 600 GB), and the latter runs much slower. ETA differs by 10x.", "@lhoestq and @hwijeen\r\n\r\nDespite upgrading to datasets 1.6.2, still experiencing extremely slow (2h00) loading for a 300Gb local dataset shard size 1.1Gb on local HDD (40Mb/s read speed). This corresponds almost exactly to total data divided by reading speed implying that it reads the entire dataset at each load.\r\n\r\nStack details:\r\n=========\r\n\r\n> GCC version: Could not collect\r\n> Clang version: Could not collect\r\n> CMake version: Could not collect\r\n> \r\n> Python version: 3.7 (64-bit runtime)\r\n> Is CUDA available: True\r\n> CUDA runtime version: 10.2.89\r\n> GPU models and configuration: GPU 0: GeForce GTX 1050\r\n> Nvidia driver version: 457.63\r\n> cuDNN version: C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin\\cudnn64_7.dll\r\n> HIP runtime version: N/A\r\n> MIOpen runtime version: N/A\r\n> \r\n> Versions of relevant libraries:\r\n> [pip3] datasets==1.6.2\r\n> [pip3] transformers==4.5.1\r\n> [pip3] numpy==1.19.1\r\n> [pip3] numpydoc==1.1.0\r\n> [pip3] pytorch-metric-learning==0.9.98\r\n> [pip3] torch==1.8.1\r\n> [pip3] torchaudio==0.8.1\r\n> [pip3] torchvision==0.2.2\r\n> [conda] blas 2.16 mkl conda-forge\r\n> [conda] cudatoolkit 10.2.89 hb195166_8 conda-forge\r\n> [conda] libblas 3.8.0 16_mkl conda-forge\r\n> [conda] libcblas 3.8.0 16_mkl conda-forge\r\n> [conda] liblapack 3.8.0 16_mkl conda-forge\r\n> [conda] liblapacke 3.8.0 16_mkl conda-forge\r\n> [conda] mkl 2020.1 216\r\n> [conda] numpy 1.19.1 py37hae9e721_0 conda-forge\r\n> [conda] numpydoc 1.1.0 py_1 conda-forge\r\n> [conda] pytorch 1.8.1 py3.7_cuda10.2_cudnn7_0 pytorch\r\n> [conda] pytorch-metric-learning 0.9.98 pyh39e3cac_0 metric-learning\r\n> [conda] torchaudio 0.8.1 py37 pytorch\r\n> [conda] torchvision 0.2.2 py_3 pytorch", "Hi @BenoitDalFerro how do your load your dataset ?", "Hi @lhoestq thanks for the quick turn-around, actually the plain vanilla way, without an particular knack or fashion, I tried to look into the documentation for some alternative but couldn't find any\r\n\r\n> dataset = load_from_disk(dataset_path=os.path.join(datasets_dir,dataset_dir))", "I’m facing the same issue when loading a 900GB dataset (stored via `save_to_disk`): `load_from_disk(path_to_dir)` takes 1.5 hours and htop consistently shows high IO rates > 120 M/s.", "@tsproisl same here, smells like ~~teen spirit~~ intended generator inadvertently ending up iterator\r\n\r\n@lhoestq perhaps solution to detect bug location in code is to track its signature via HD read usage monitoring, option is to add tracking decorator on top each function and sequentially close all hatches from top to 
bottom, suggest PySmart https://pypi.org/project/pySMART/ a Smartmontools implementation", "I wasn't able to reproduce this on a toy dataset of around 300GB:\r\n\r\n```python\r\nimport datasets as ds\r\n\r\ns = ds.load_dataset(\"squad\", split=\"train\")\r\ns4000 = ds.concatenate_datasets([s] * 4000)\r\nprint(ds.utils.size_str(s4000.data.nbytes)) # '295.48 GiB'\r\n\r\ns4000.save_to_disk(\"tmp/squad_4000\")\r\n```\r\n\r\n```python\r\nimport psutil\r\nimport time\r\nfrom datasets import load_from_disk\r\n\r\ndisk = \"disk0\" # You may have to change your disk here\r\niocnt1 = psutil.disk_io_counters(perdisk=True)[disk]\r\ntime1 = time.time()\r\n\r\ns4000_reloaded = load_from_disk(\"tmp/squad_4000\")\r\n\r\ntime2 = time.time()\r\niocnt2 = psutil.disk_io_counters(perdisk=True)[disk]\r\n\r\nprint(f\"Blocks read {iocnt2.read_count - iocnt1.read_count}\") # Blocks read 18\r\nprint(f\"Elapsed time: {time2 - time1:.02f}s\") # Elapsed time: 14.60s\r\n```\r\n\r\nCould you run this on your side and tell me if how much time it takes ? Please run this when your machine is idle so that other processes don't interfere.\r\n\r\nI got these results on my macbook pro on datasets 1.6.2", "@lhoestq thanks, test running as we speak, bear with me", "Just tried on google colab and got ~1min for a 15GB dataset (only 200 times SQuAD), while it should be instantaneous. The time is spent reading the Apache Arrow table from the memory mapped file. This might come a virtual disk management issue. I'm trying to see if I can still speed it up on colab.", "@lhoestq what is Google Colab's HD read speed, is it possible to introspect incl. make like SSD or HDD ?", "@lhoestq Thank you! The issue is getting more interesting. The second script is still running, but it's definitely taking much longer than 15 seconds.", "Okay, here’s the ouput:\r\nBlocks read 158396\r\nElapsed time: 529.10s\r\n\r\nAlso using datasets 1.6.2. Do you have any ideas, how to pinpoint the problem?", "@lhoestq, @tsproisl mmmh still writing on my side about 1h to go, thinking on it are your large datasets all monoblock unsharded ? mine is 335 times 1.18Gb shards.", "The 529.10s was a bit too optimistic. 
I cancelled the reading process once before running it completely, therefore the harddrive cache probably did its work.\r\n\r\nHere are three consecutive runs\r\nFirst run (freshly written to disk):\r\nBlocks read 309702\r\nElapsed time: 1267.74s\r\nSecond run (immediately after):\r\nBlocks read 113944\r\nElapsed time: 417.55s\r\nThird run (immediately after):\r\nBlocks read 42518\r\nElapsed time: 199.19s\r\n", "@lhoestq \r\nFirst test\r\n> elapsed time: 11219.05s\r\n\r\nSecond test running bear with me, for Windows users slight trick to modify original \"disk0\" string:\r\n\r\nFirst find physical unit relevant key in dictionnary\r\n```\r\nimport psutil\r\npsutil.disk_io_counters(perdisk=True)\r\n```\r\n\r\n> {'PhysicalDrive0': sdiskio(read_count=18453286, write_count=4075333, read_bytes=479546467840, write_bytes=161590275072, read_time=20659, write_time=2464),\r\n> 'PhysicalDrive1': sdiskio(read_count=1495778, write_count=388781, read_bytes=548628622336, write_bytes=318234849280, read_time=426066, write_time=19085)}\r\n\r\nIn my case it's _PhysicalDrive1_\r\n\r\nThen insert relevant key's string as _disk_ variable\r\n\r\n```\r\npsutil.disk_io_counters()\r\ndisk = 'PhysicalDrive1' # You may have to change your disk here\r\niocnt1 = psutil.disk_io_counters(perdisk=True)[disk]\r\ntime1 = time.time()\r\ns4000_reloaded = load_from_disk(\"your path here\")\r\ntime2 = time.time()\r\niocnt2 = psutil.disk_io_counters(perdisk=True)[disk]\r\nprint(f\"Blocks read {iocnt2.read_count - iocnt1.read_count}\") # Blocks read 18\r\nprint(f\"Elapsed time: {time2 - time1:.02f}s\") # Elapsed time: 14.60s\r\n```", "@lhoestq\r\nSecond test\r\n\r\n> Blocks read 1265609\r\n> Elapsed time: 11216.55s", "@lhoestq any luck ?", "Unfortunately no. Thanks for running the benchmark though, it shows that you machine does a lot of read operations. This is not expected: in other machines it does almost no read operations which enables a very fast loading.\r\n\r\nI did some tests on google colab and have the same issue. The first time the dataset arrow file is memory mapped takes always a lot of time (time seems linear with respect to the dataset size). Reloading the dataset is then instantaneous since the arrow file has already been memory mapped.\r\n\r\nI also tried using the Arrow IPC file format (see #1933) instead of the current streaming format that we use but it didn't help.\r\n\r\nMemory mapping is handled by the OS and depends on the disk you're using, so I'm not sure we can do much about it. I'll continue to investigate anyway, because I still don't know why in some cases it would go through the entire file (high `Blocks read ` as in your tests) and in other cases it would do almost no reading.", "@lhoestq thanks for the effort, let's stay in touch", "Just want to say that I am seeing the same issue. Dataset size if 268GB and it takes **3 hours** to load `load_from_disk`, using dataset version `1.9.0`. Filesystem underneath is `Lustre` ", "Hi @lhoestq, confirmed Windows issue, exact same code running on Linux OS total loading time about 3 minutes.", "Hmm that's different from what I got. I was on Ubuntu when reporting the initial issue." ]
2021-04-23T08:18:20
2024-01-26T15:10:28
2024-01-26T15:10:28
NONE
null
null
null
null
Hi, I reported too slow data fetching when data is large(#2210) a couple of weeks ago, and @lhoestq referred me to the fix (#2122). However, the problem seems to persist. Here is the profiled results: 1) Running with 60GB ``` Action | Mean duration (s) |Num calls | Total time (s) | Percentage % | ------------------------------------------------------------------------------------------------------------------------------------ Total | - |_ | 517.96 | 100 % | ------------------------------------------------------------------------------------------------------------------------------------ model_backward | 0.26144 |100 | 26.144 | 5.0475 | model_forward | 0.11123 |100 | 11.123 | 2.1474 | get_train_batch | 0.097121 |100 | 9.7121 | 1.8751 | ``` 3) Running with 600GB, datasets==1.6.0 ``` Action | Mean duration (s) |Num calls | Total time (s) | Percentage % | ------------------------------------------------------------------------------------------------------------------------------------ Total | - |_ | 4563.2 | 100 % | ------------------------------------------------------------------------------------------------------------------------------------ get_train_batch | 5.1279 |100 | 512.79 | 11.237 | model_backward | 4.8394 |100 | 483.94 | 10.605 | model_forward | 0.12162 |100 | 12.162 | 0.26652 | ``` I see that `get_train_batch` lags when data is large. Could this be related to different issues? I would be happy to provide necessary information to investigate.
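To separate dataset-side latency from the rest of the training step, the random-access benchmark suggested in the maintainer's comment above can be run against both dataset sizes; a sketch with a placeholder path:

```python
import time

import numpy as np
from datasets import load_from_disk

t0 = time.time()
dataset = load_from_disk("/path/to/dataset")  # placeholder: point at the 60GB or 600GB copy
print(f"load_from_disk took {time.time() - t0:.1f}s")

t0 = time.time()
for i in np.random.default_rng(42).integers(0, len(dataset), size=100):
    _ = dataset[int(i)]  # cast to plain int for older datasets versions
print(f"100 random lookups took {time.time() - t0:.3f}s")
```

If the 600GB copy is much slower on both timings while the 60GB copy is not, the latency sits in the dataset itself rather than in the dataloader or collate function.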
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 5, "-1": 0, "confused": 0, "eyes": 4, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 9, "url": "https://api.github.com/repos/huggingface/datasets/issues/2252/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2252/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1008 days, 6:52:08
https://api.github.com/repos/huggingface/datasets/issues/2251
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2251/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2251/comments
https://api.github.com/repos/huggingface/datasets/issues/2251/events
https://github.com/huggingface/datasets/issues/2251
865,848,705
MDU6SXNzdWU4NjU4NDg3MDU=
2,251
While running run_qa.py, ran into a ValueError
{ "avatar_url": "https://avatars.githubusercontent.com/u/44570724?v=4", "events_url": "https://api.github.com/users/nlee0212/events{/privacy}", "followers_url": "https://api.github.com/users/nlee0212/followers", "following_url": "https://api.github.com/users/nlee0212/following{/other_user}", "gists_url": "https://api.github.com/users/nlee0212/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nlee0212", "id": 44570724, "login": "nlee0212", "node_id": "MDQ6VXNlcjQ0NTcwNzI0", "organizations_url": "https://api.github.com/users/nlee0212/orgs", "received_events_url": "https://api.github.com/users/nlee0212/received_events", "repos_url": "https://api.github.com/users/nlee0212/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nlee0212/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nlee0212/subscriptions", "type": "User", "url": "https://api.github.com/users/nlee0212", "user_view_type": "public" }
[]
open
false
null
[]
[]
2021-04-23T07:51:03
2021-04-23T07:51:03
null
NONE
null
null
null
null
command: python3 run_qa.py --model_name_or_path hyunwoongko/kobart --dataset_name squad_kor_v2 --do_train --do_eval --per_device_train_batch_size 8 --learning_rate 3e-5 --num_train_epochs 3 --max_seq_length 512 --doc_stride 128 --output_dir /tmp/debug_squad/ error: ValueError: External features info don't match the dataset: Got {'id': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'context': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'answer': {'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None), 'html_answer_start': Value(dtype='int32', id=None)}, 'url': Value(dtype='string', id=None), 'raw_html': Value(dtype='string', id=None)} with type struct<answer: struct<text: string, answer_start: int32, html_answer_start: int32>, context: string, id: string, question: string, raw_html: string, title: string, url: string> but expected something like {'answer': {'answer_start': Value(dtype='int32', id=None), 'html_answer_start': Value(dtype='int32', id=None), 'text': Value(dtype='string', id=None)}, 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'raw_html': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'url': Value(dtype='string', id=None)} with type struct<answer: struct<answer_start: int32, html_answer_start: int32, text: string>, context: string, id: string, question: string, raw_html: string, title: string, url: string> I didn't encounter this error 4 hours ago. any solutions for this kind of issue? looks like gained dataset format refers to 'Data Fields', while expected refers to 'Data Instances'.
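The two feature dicts in the error differ only in field order, which usually points at a cached copy of squad_kor_v2 prepared with an older version of the loading script. A hypothetical workaround (the cache path and glob are assumptions about the default layout, not part of the report) is to drop the stale cache and regenerate it:

```python
import shutil
from pathlib import Path

from datasets import load_dataset

# Assumed default cache location; adjust if HF_DATASETS_CACHE is set.
cache_dir = Path.home() / ".cache" / "huggingface" / "datasets"
for stale in cache_dir.glob("squad_kor_v2*"):
    shutil.rmtree(stale, ignore_errors=True)

# Rebuilding the cache makes the stored features match the current script again.
squad_kor_v2 = load_dataset("squad_kor_v2")
print(squad_kor_v2["train"].features)
```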
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2251/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2251/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/2250
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2250/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2250/comments
https://api.github.com/repos/huggingface/datasets/issues/2250/events
https://github.com/huggingface/datasets/issues/2250
865,402,449
MDU6SXNzdWU4NjU0MDI0NDk=
2,250
Issue loading local txt files as a Dataset for run_mlm.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/14968123?v=4", "events_url": "https://api.github.com/users/alighofrani95/events{/privacy}", "followers_url": "https://api.github.com/users/alighofrani95/followers", "following_url": "https://api.github.com/users/alighofrani95/following{/other_user}", "gists_url": "https://api.github.com/users/alighofrani95/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alighofrani95", "id": 14968123, "login": "alighofrani95", "node_id": "MDQ6VXNlcjE0OTY4MTIz", "organizations_url": "https://api.github.com/users/alighofrani95/orgs", "received_events_url": "https://api.github.com/users/alighofrani95/received_events", "repos_url": "https://api.github.com/users/alighofrani95/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alighofrani95/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alighofrani95/subscriptions", "type": "User", "url": "https://api.github.com/users/alighofrani95", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi,\r\n\r\n1. try\r\n ```python\r\n dataset = load_dataset(\"text\", data_files={\"train\": [\"a1.txt\", \"b1.txt\"], \"test\": [\"c1.txt\"]})\r\n ```\r\n instead.\r\n\r\n Sadly, I can't reproduce the error on my machine. If the above code doesn't resolve the issue, try to update the library to the \r\n newest version (`pip install datasets --upgrade`).\r\n\r\n2. https://github.com/huggingface/transformers/blob/3ed5e97ba04ce9b24b4a7161ea74572598a4c480/examples/pytorch/language-modeling/run_mlm.py#L258-L259\r\nThis is the original code. You'll have to modify the example source to work with multiple train files. To make it easier, let's say \"|\" will act as a delimiter between files:\r\n ```python\r\n if data_args.train_file is not None:\r\n data_files[\"train\"] = data_args.train_file.split(\"|\") # + .split(\"|\")\r\n ```\r\n Then call the script as follows (**dataset_name must be None**):\r\n ```bash\r\n python run_mlm.py [... other args] --train_file a1.txt|b1.txt\r\n ```", "i meet the same error with datasets 1.11.0, is there any insight about this?" ]
2021-04-22T19:39:13
2022-03-30T08:29:47
2022-03-30T08:29:47
NONE
null
null
null
null
![image](https://user-images.githubusercontent.com/14968123/115773877-18cef300-a3c6-11eb-8e58-a9cbfd1001ec.png) First of all, I tried to load 3 .txt files as a dataset (the directory and permissions are definitely OK), but I ran into the error below. > FileNotFoundError: [Errno 2] No such file or directory: 'c' Removing one of the training .txt files fixes it, and it also works if I put all the files under training. ![image](https://user-images.githubusercontent.com/14968123/115774207-867b1f00-a3c6-11eb-953b-905cfb112d25.png) ![image](https://user-images.githubusercontent.com/14968123/115774264-9b57b280-a3c6-11eb-9f36-7b109f0e5a31.png) After this, my question is how I can use this locally defined Dataset with run_mlm.py for pretraining from scratch. With --train_file path_to_train_file, only a single .txt, .csv, or .json file can be used. I tried to pass my defined dataset as --dataset_name, but the issue below occurs. > Traceback (most recent call last): File "/usr/local/lib/python3.7/dist-packages/datasets/load.py", line 336, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/master/datasets/dataset/dataset.py > During handling of the above exception, another exception occurred: > Traceback (most recent call last): File "run_mlm.py", line 486, in <module> main() File "run_mlm.py", line 242, in main datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) File "/usr/local/lib/python3.7/dist-packages/datasets/load.py", line 719, in load_dataset use_auth_token=use_auth_token, File "/usr/local/lib/python3.7/dist-packages/datasets/load.py", line 347, in prepare_module combined_path, github_file_path FileNotFoundError: Couldn't find file locally at dataset/dataset.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.6.0/datasets/dataset/dataset.py. The file is also not present on the master branch on github.
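Following the dict-of-lists form suggested in the comment above, a sketch of loading the three local text files as explicit splits (the file names are the placeholders used in that comment, not real paths):

```python
from datasets import load_dataset

# Each split accepts a list of files, so several .txt files can feed one split.
dataset = load_dataset(
    "text",
    data_files={
        "train": ["a1.txt", "b1.txt"],   # placeholder file names
        "validation": ["c1.txt"],
    },
)
print(dataset)
```

For run_mlm.py itself, the comment's suggestion of splitting --train_file on a delimiter remains a separate, script-level change.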
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2250/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2250/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
341 days, 12:50:34