Dataset schema (column: type, observed range or values):

url: stringlengths (58 to 61)
repository_url: stringclasses (1 value)
labels_url: stringlengths (72 to 75)
comments_url: stringlengths (67 to 70)
events_url: stringlengths (65 to 68)
html_url: stringlengths (48 to 51)
id: int64 (600M to 3.67B)
node_id: stringlengths (18 to 24)
number: int64 (2 to 7.88k)
title: stringlengths (1 to 290)
user: dict
labels: listlengths (0 to 4)
state: stringclasses (2 values)
locked: bool (1 class)
assignee: dict
assignees: listlengths (0 to 4)
comments: listlengths (0 to 30)
created_at: timestamp[s]date (2020-04-14 18:18:51 to 2025-11-26 16:16:56)
updated_at: timestamp[s]date (2020-04-29 09:23:05 to 2025-11-30 03:52:07)
closed_at: timestamp[s]date (2020-04-29 09:23:05 to 2025-11-21 12:31:19)
author_association: stringclasses (4 values)
type: null
active_lock_reason: null
draft: null
pull_request: null
body: stringlengths (0 to 228k)
closed_by: dict
reactions: dict
timeline_url: stringlengths (67 to 70)
performed_via_github_app: null
state_reason: stringclasses (4 values)
sub_issues_summary: dict
issue_dependencies_summary: dict
is_pull_request: bool (1 class)
closed_at_time_taken: duration[s]
https://api.github.com/repos/huggingface/datasets/issues/1774
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1774/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1774/comments
https://api.github.com/repos/huggingface/datasets/issues/1774/events
https://github.com/huggingface/datasets/issues/1774
792,730,559
MDU6SXNzdWU3OTI3MzA1NTk=
1,774
is it possible to make slice to be more compatible like python list and numpy?
{ "avatar_url": "https://avatars.githubusercontent.com/u/7607120?v=4", "events_url": "https://api.github.com/users/world2vec/events{/privacy}", "followers_url": "https://api.github.com/users/world2vec/followers", "following_url": "https://api.github.com/users/world2vec/following{/other_user}", "gists_url": "https://api.github.com/users/world2vec/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/world2vec", "id": 7607120, "login": "world2vec", "node_id": "MDQ6VXNlcjc2MDcxMjA=", "organizations_url": "https://api.github.com/users/world2vec/orgs", "received_events_url": "https://api.github.com/users/world2vec/received_events", "repos_url": "https://api.github.com/users/world2vec/repos", "site_admin": false, "starred_url": "https://api.github.com/users/world2vec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/world2vec/subscriptions", "type": "User", "url": "https://api.github.com/users/world2vec", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" } ]
[ "Hi ! Thanks for reporting.\r\nI am working on changes in the way data are sliced from arrow. I can probably fix your issue with the changes I'm doing.\r\nIf you have some code to reproduce the issue it would be nice so I can make sure that this case will be supported :)\r\nI'll make a PR in a few days ", "Good if you can take care at your side.\r\nHere is the [colab notebook](https://colab.research.google.com/drive/19c-abm87RTRYgW9G1D8ktfwRW95zDYBZ?usp=sharing)" ]
2021-01-24T06:15:52
2024-01-31T15:54:18
2024-01-31T15:54:18
NONE
null
null
null
null
Hi, see below error: ``` AssertionError: Requested slice [:10000000000000000] incompatible with 20 examples. ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1774/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1774/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1102 days, 9:38:26
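Issue 1774 above reports an AssertionError when the slice stop far exceeds the dataset length, whereas Python lists and NumPy arrays clamp out-of-range bounds silently. Below is a minimal sketch of the clamping workaround for older `datasets` releases; the 20-row dataset built with `Dataset.from_dict` is a stand-in for the reporter's data, not taken from the issue.

```python
from datasets import Dataset

# 20-row stand-in dataset, mirroring the "20 examples" in the report.
ds = Dataset.from_dict({"text": [f"example {i}" for i in range(20)]})

# Python lists (and NumPy arrays) clamp an out-of-range slice stop:
assert [0, 1, 2][:10**16] == [0, 1, 2]

# Workaround for older `datasets` releases that rejected oversized slice stops:
stop = 10_000_000_000_000_000
batch = ds[: min(stop, len(ds))]  # clamp the bound explicitly
print(len(batch["text"]))  # -> 20
```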
https://api.github.com/repos/huggingface/datasets/issues/1773
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1773/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1773/comments
https://api.github.com/repos/huggingface/datasets/issues/1773/events
https://github.com/huggingface/datasets/issues/1773
792,708,160
MDU6SXNzdWU3OTI3MDgxNjA=
1,773
bug in loading datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Looks like an issue with your csv file. Did you use the right delimiter ?\r\nApparently at line 37 the CSV reader from pandas reads 2 fields instead of 1.", "Note that you can pass any argument you would pass to `pandas.read_csv` as kwargs to `load_dataset`. For example you can do\r\n```python\r\nfrom datasets import load_dataset\r\ndataset = load_dataset('csv', data_files=data_files, sep=\"\\t\")\r\n```\r\n\r\nfor example to use a tab separator.\r\n\r\nYou can see the full list of arguments here: https://github.com/huggingface/datasets/blob/master/src/datasets/packaged_modules/csv/csv.py\r\n\r\n(I've not found the list in the documentation though, we definitely must add them !)", "You can try to convert the file to (CSV UTF-8)" ]
2021-01-24T02:53:45
2021-09-06T08:54:46
2021-08-04T18:13:01
NONE
null
null
null
null
Hi, I need to load a dataset, I use these commands: ``` from datasets import load_dataset dataset = load_dataset('csv', data_files={'train': 'sick/train.csv', 'test': 'sick/test.csv', 'validation': 'sick/validation.csv'}) print(dataset['validation']) ``` the dataset in sick/train.csv are simple csv files representing the data. I am getting this error, do you have an idea how I can solve this? thank you @lhoestq ``` Using custom data configuration default Downloading and preparing dataset csv/default-61468fc71a743ec1 (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /julia/cache_home_2/datasets/csv/default-61468fc71a743ec1/0.0.0/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2... Traceback (most recent call last): File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 485, in incomplete_dir yield tmp_dir File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 527, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 604, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 959, in _prepare_split for key, table in utils.tqdm(generator, unit=" tables", leave=False, disable=not_verbose): File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/tqdm-4.49.0-py3.7.egg/tqdm/std.py", line 1133, in __iter__ for obj in iterable: File "/julia/cache_home_2/modules/datasets_modules/datasets/csv/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2/csv.py", line 129, in _generate_tables for batch_idx, df in enumerate(csv_file_reader): File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 1029, in __next__ return self.get_chunk() File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 1079, in get_chunk return self.read(nrows=size) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 1052, in read index, columns, col_dict = self._engine.read(nrows) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 2056, in read data = self._reader.read(nrows) File "pandas/_libs/parsers.pyx", line 756, in pandas._libs.parsers.TextReader.read File "pandas/_libs/parsers.pyx", line 783, in pandas._libs.parsers.TextReader._read_low_memory File "pandas/_libs/parsers.pyx", line 827, in pandas._libs.parsers.TextReader._read_rows File "pandas/_libs/parsers.pyx", line 814, in pandas._libs.parsers.TextReader._tokenize_rows File "pandas/_libs/parsers.pyx", line 1951, in pandas._libs.parsers.raise_parser_error pandas.errors.ParserError: Error tokenizing data. 
C error: Expected 1 fields in line 37, saw 2 During handling of the above exception, another exception occurred: Traceback (most recent call last): File "write_sick.py", line 19, in <module> 'validation': 'sick/validation.csv'}) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/load.py", line 612, in load_dataset ignore_verifications=ignore_verifications, File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 534, in download_and_prepare self._save_info() File "/julia/libs/anaconda3/envs/success/lib/python3.7/contextlib.py", line 130, in __exit__ self.gen.throw(type, value, traceback) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 491, in incomplete_dir shutil.rmtree(tmp_dir) File "/julia/libs/anaconda3/envs/success/lib/python3.7/shutil.py", line 498, in rmtree onerror(os.rmdir, path, sys.exc_info()) File "/julia/libs/anaconda3/envs/success/lib/python3.7/shutil.py", line 496, in rmtree os.rmdir(path) OSError: [Errno 39] Directory not empty: '/julia/cache_home_2/datasets/csv/default-61468fc71a743ec1/0.0.0/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2.incomplete' ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1773/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1773/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
192 days, 15:19:16
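The comments on issue 1773 above note that extra keyword arguments to `load_dataset('csv', ...)` are forwarded to `pandas.read_csv`, so a wrong-delimiter ParserError can be fixed by passing the correct separator. A sketch of that suggestion follows; the `sick/*.csv` paths come from the issue body, and the tab separator is an assumption about the reporter's files.

```python
from datasets import load_dataset

# File layout taken from the issue body; the tab separator is an assumption.
data_files = {
    "train": "sick/train.csv",
    "test": "sick/test.csv",
    "validation": "sick/validation.csv",
}

# Extra keyword arguments to the "csv" builder are forwarded to pandas.read_csv,
# so the delimiter (and other parser options) can be set here.
dataset = load_dataset("csv", data_files=data_files, sep="\t")
print(dataset["validation"])
```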
https://api.github.com/repos/huggingface/datasets/issues/1772
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1772/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1772/comments
https://api.github.com/repos/huggingface/datasets/issues/1772/events
https://github.com/huggingface/datasets/issues/1772
792,703,797
MDU6SXNzdWU3OTI3MDM3OTc=
1,772
Adding SICK dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
[]
2021-01-24T02:15:31
2021-02-05T15:49:25
2021-02-05T15:49:25
NONE
null
null
null
null
Hi It would be great to include SICK dataset. ## Adding a Dataset - **Name:** SICK - **Description:** a well known entailment dataset - **Paper:** http://marcobaroni.org/composes/sick.html - **Data:** http://marcobaroni.org/composes/sick.html - **Motivation:** this is an important NLI benchmark Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md). thanks
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1772/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1772/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
12 days, 13:33:54
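Issue 1772 above is a dataset request that was later closed. Assuming the request was fulfilled under the identifier `sick` (not confirmed by this row), loading it would follow the usual pattern:

```python
from datasets import load_dataset

# "sick" is the name used in the request; adjust if the published identifier differs.
sick = load_dataset("sick")
print(sick)
```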
https://api.github.com/repos/huggingface/datasets/issues/1771
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1771/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1771/comments
https://api.github.com/repos/huggingface/datasets/issues/1771/events
https://github.com/huggingface/datasets/issues/1771
792,701,276
MDU6SXNzdWU3OTI3MDEyNzY=
1,771
Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/datasets/csv/csv.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/7607120?v=4", "events_url": "https://api.github.com/users/world2vec/events{/privacy}", "followers_url": "https://api.github.com/users/world2vec/followers", "following_url": "https://api.github.com/users/world2vec/following{/other_user}", "gists_url": "https://api.github.com/users/world2vec/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/world2vec", "id": 7607120, "login": "world2vec", "node_id": "MDQ6VXNlcjc2MDcxMjA=", "organizations_url": "https://api.github.com/users/world2vec/orgs", "received_events_url": "https://api.github.com/users/world2vec/received_events", "repos_url": "https://api.github.com/users/world2vec/repos", "site_admin": false, "starred_url": "https://api.github.com/users/world2vec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/world2vec/subscriptions", "type": "User", "url": "https://api.github.com/users/world2vec", "user_view_type": "public" }
[]
closed
false
null
[]
[ "I temporary manually download csv.py as custom dataset loading script", "Indeed in 1.2.1 the script to process csv file is downloaded. Starting from the next release though we include the csv processing directly in the library.\r\nSee PR #1726 \r\nWe'll do a new release soon :)", "Thanks." ]
2021-01-24T01:53:52
2021-01-24T23:06:29
2021-01-24T23:06:29
NONE
null
null
null
null
Hi, When I load_dataset from local csv files, below error happened, looks raw.githubusercontent.com was blocked by the chinese government. But why it need to download csv.py? should it include when pip install the dataset? ``` Traceback (most recent call last): File "/home/tom/pyenv/pystory/lib/python3.6/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/tom/pyenv/pystory/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 343, in cached_path max_retries=download_config.max_retries, File "/home/tom/pyenv/pystory/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 617, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/datasets/csv/csv.py ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/7607120?v=4", "events_url": "https://api.github.com/users/world2vec/events{/privacy}", "followers_url": "https://api.github.com/users/world2vec/followers", "following_url": "https://api.github.com/users/world2vec/following{/other_user}", "gists_url": "https://api.github.com/users/world2vec/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/world2vec", "id": 7607120, "login": "world2vec", "node_id": "MDQ6VXNlcjc2MDcxMjA=", "organizations_url": "https://api.github.com/users/world2vec/orgs", "received_events_url": "https://api.github.com/users/world2vec/received_events", "repos_url": "https://api.github.com/users/world2vec/repos", "site_admin": false, "starred_url": "https://api.github.com/users/world2vec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/world2vec/subscriptions", "type": "User", "url": "https://api.github.com/users/world2vec", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1771/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1771/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
21:12:37
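For issue 1771 above, the comments explain that datasets 1.2.1 fetched the `csv` builder script from raw.githubusercontent.com at runtime, which fails on networks blocking that host, and that the release after 1.2.1 ships the CSV code inside the package. Below is a sketch of both options mentioned in the thread, assuming a manually downloaded `csv.py` and a placeholder `train.csv` path.

```python
from datasets import load_dataset

data_files = {"train": "train.csv"}  # placeholder path

# Workaround on datasets==1.2.1 behind a blocked network: point load_dataset at a
# manually downloaded copy of the csv loading script instead of the remote one.
dataset = load_dataset("./csv.py", data_files=data_files)

# From the release after 1.2.1 the csv builder ships with the package, so the plain
# builder name works without any download:
# dataset = load_dataset("csv", data_files=data_files)
```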
https://api.github.com/repos/huggingface/datasets/issues/1770
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1770/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1770/comments
https://api.github.com/repos/huggingface/datasets/issues/1770/events
https://github.com/huggingface/datasets/issues/1770
792,698,148
MDU6SXNzdWU3OTI2OTgxNDg=
1,770
how can I combine 2 dataset with different/same features?
{ "avatar_url": "https://avatars.githubusercontent.com/u/7607120?v=4", "events_url": "https://api.github.com/users/world2vec/events{/privacy}", "followers_url": "https://api.github.com/users/world2vec/followers", "following_url": "https://api.github.com/users/world2vec/following{/other_user}", "gists_url": "https://api.github.com/users/world2vec/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/world2vec", "id": 7607120, "login": "world2vec", "node_id": "MDQ6VXNlcjc2MDcxMjA=", "organizations_url": "https://api.github.com/users/world2vec/orgs", "received_events_url": "https://api.github.com/users/world2vec/received_events", "repos_url": "https://api.github.com/users/world2vec/repos", "site_admin": false, "starred_url": "https://api.github.com/users/world2vec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/world2vec/subscriptions", "type": "User", "url": "https://api.github.com/users/world2vec", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi ! Currently we don't have a way to `zip` datasets but we plan to add this soon :)\r\nFor now you'll need to use `map` to add the fields from one dataset to the other. See the comment here for more info : https://github.com/huggingface/datasets/issues/853#issuecomment-727872188", "Good to hear.\r\nCurrently I did not use map , just fetch src and tgt from the 2 dataset and merge them.\r\nIt will be a release if you can deal with it at the backend.\r\nThanks.", "Hi! You can rename the columns and concatenate the datasets along `axis=1` to get the desired result as follows:\r\n```python\r\nds1 = ds1.rename_column(\"text\", \"src\")\r\nds2 = ds2.rename_column(\"text\", \"tgt\")\r\nds = datasets.concatenate_datasets([\"ds1\", \"ds2\"], axis=1)\r\n```" ]
2021-01-24T01:26:06
2022-06-01T15:43:15
2022-06-01T15:43:15
NONE
null
null
null
null
to combine 2 dataset by one-one map like ds = zip(ds1, ds2): ds1: {'text'}, ds2: {'text'}, combine ds:{'src', 'tgt'} or different feature: ds1: {'src'}, ds2: {'tgt'}, combine ds:{'src', 'tgt'}
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1770/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1770/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
493 days, 14:17:09
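The last comment on issue 1770 above gives the rename-then-concatenate recipe, but as quoted it passes the string literals "ds1" and "ds2" where the `Dataset` objects themselves are needed, which looks like a typo. A corrected, self-contained sketch follows; the two tiny datasets are illustrative stand-ins.

```python
from datasets import Dataset, concatenate_datasets

# Illustrative stand-ins for the two single-column datasets in the issue.
ds1 = Dataset.from_dict({"text": ["a", "b", "c"]})
ds2 = Dataset.from_dict({"text": ["x", "y", "z"]})

# Rename the columns, then concatenate along axis=1 to pair rows one-to-one.
ds1 = ds1.rename_column("text", "src")
ds2 = ds2.rename_column("text", "tgt")
ds = concatenate_datasets([ds1, ds2], axis=1)  # pass the Dataset objects, not their names

print(ds.column_names)  # -> ['src', 'tgt']
```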
https://api.github.com/repos/huggingface/datasets/issues/1769
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1769/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1769/comments
https://api.github.com/repos/huggingface/datasets/issues/1769/events
https://github.com/huggingface/datasets/issues/1769
792,523,284
MDU6SXNzdWU3OTI1MjMyODQ=
1,769
_pickle.PicklingError: Can't pickle typing.Union[str, NoneType]: it's not the same object as typing.Union when calling datasets.map with num_proc=2
{ "avatar_url": "https://avatars.githubusercontent.com/u/14048129?v=4", "events_url": "https://api.github.com/users/shuaihuaiyi/events{/privacy}", "followers_url": "https://api.github.com/users/shuaihuaiyi/followers", "following_url": "https://api.github.com/users/shuaihuaiyi/following{/other_user}", "gists_url": "https://api.github.com/users/shuaihuaiyi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shuaihuaiyi", "id": 14048129, "login": "shuaihuaiyi", "node_id": "MDQ6VXNlcjE0MDQ4MTI5", "organizations_url": "https://api.github.com/users/shuaihuaiyi/orgs", "received_events_url": "https://api.github.com/users/shuaihuaiyi/received_events", "repos_url": "https://api.github.com/users/shuaihuaiyi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shuaihuaiyi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shuaihuaiyi/subscriptions", "type": "User", "url": "https://api.github.com/users/shuaihuaiyi", "user_view_type": "public" }
[]
closed
false
null
[]
[ "More information: `run_mlm.py` will raise same error when `data_args.line_by_line==True`\r\n\r\nhttps://github.com/huggingface/transformers/blob/9152f16023b59d262b51573714b40325c8e49370/examples/language-modeling/run_mlm.py#L300\r\n", "Hi ! What version of python and datasets do you have ? And also what version of dill and pickle ?", "> Hi ! What version of python and datasets do you have ? And also what version of dill and pickle ?\r\n\r\npython==3.6.10\r\ndatasets==1.2.1\r\ndill==0.3.2\r\npickle.format_version==4.0", "Multiprocessing in python require all the functions to be picklable. More specifically, functions need to be picklable with `dill`.\r\n\r\nHowever objects like `typing.Union[str, NoneType]` are not picklable in python <3.7.\r\nCan you try to update your python version to python>=3.7 ?\r\n" ]
2021-01-23T10:13:00
2022-10-05T12:38:51
2022-10-05T12:38:51
NONE
null
null
null
null
It may be a bug of multiprocessing with Datasets, when I disable the multiprocessing by set num_proc to None, everything works fine. The script I use is https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm_wwm.py Script args: ``` --model_name_or_path ../../../model/chinese-roberta-wwm-ext --train_file /nfs/volume-377-2/bert/data/test/train.txt --output_dir test --do_train --per_device_train_batch_size 2 --gradient_accumulation_steps 2 --learning_rate 1e-4 --max_steps 1000 --warmup_steps 10 --save_steps 1000 --save_total_limit 1 --seed 23333 --max_seq_length 512 --preprocessing_num_workers 2 --cache_dir /nfs/volume-377-2/bert/data/test/cache ``` Where the `/nfs/volume-377-2/bert/data/test/train.txt` is just a toy example with 10000 lines of random string, you should be able to reproduce this error esaily. Full Traceback: ``` Traceback (most recent call last): File "/nfs/volume-377-2/bert/transformers/examples/language-modeling/run_mlm_wwm.py", line 398, in <module> main() File "/nfs/volume-377-2/bert/transformers/examples/language-modeling/run_mlm_wwm.py", line 325, in main load_from_cache_file=not data_args.overwrite_cache, File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/dataset_dict.py", line 303, in map for k, dataset in self.items() File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/dataset_dict.py", line 303, in <dictcomp> for k, dataset in self.items() File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/arrow_dataset.py", line 1318, in map transformed_shards = [r.get() for r in results] File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/arrow_dataset.py", line 1318, in <listcomp> transformed_shards = [r.get() for r in results] File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/pool.py", line 644, in get raise self._value File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/pool.py", line 424, in _handle_tasks put(task) File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/connection.py", line 209, in send self._send_bytes(_ForkingPickler.dumps(obj)) File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/reduction.py", line 54, in dumps cls(buf, protocol, *args, **kwds).dump(obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 446, in dump StockPickler.dump(self, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 409, in dump self.save(obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 751, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 933, in save_module_dict StockPickler.save_dict(pickler, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 821, in save_dict self._batch_setitems(obj.items()) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 847, in _batch_setitems save(v) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File 
"/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 1438, in save_function obj.__dict__, fkwdefaults), obj=obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 610, in save_reduce save(args) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 751, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 736, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 1170, in save_cell pickler.save_reduce(_create_cell, (f,), obj=obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 610, in save_reduce save(args) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 736, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 521, in save self.save_reduce(obj=obj, *rv) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 605, in save_reduce save(cls) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 1365, in save_type obj.__bases__, _dict), obj=obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 610, in save_reduce save(args) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 751, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 933, in save_module_dict StockPickler.save_dict(pickler, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 821, in save_dict self._batch_setitems(obj.items()) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 847, in _batch_setitems save(v) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 933, in save_module_dict StockPickler.save_dict(pickler, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 821, in save_dict self._batch_setitems(obj.items()) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 847, in _batch_setitems save(v) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 507, in save self.save_global(obj, rv) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 927, in save_global (obj, module_name, name)) _pickle.PicklingError: Can't pickle typing.Union[str, NoneType]: it's not the same object as typing.Union ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1769/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1769/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
620 days, 2:25:51
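The resolution of issue 1769 above is that `typing.Union[str, NoneType]` cannot be pickled by dill on Python < 3.7, so the fix is upgrading the interpreter rather than changing the mapping code. For reference, here is a minimal multiprocess `map` that works on Python >= 3.7; the `add_length` function and toy dataset are stand-ins for the run_mlm_wwm.py preprocessing.

```python
from datasets import Dataset

def add_length(example):
    # Stand-in for the real preprocessing; defined at module level so it pickles cleanly.
    example["n_chars"] = len(example["text"])
    return example

if __name__ == "__main__":
    ds = Dataset.from_dict({"text": ["one", "two", "three", "four"]})

    # With Python >= 3.7 (where typing.Union[...] pickles under dill), num_proc > 1
    # shards the dataset and runs the function in parallel worker processes.
    ds = ds.map(add_length, num_proc=2)
    print(ds[0])
```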
https://api.github.com/repos/huggingface/datasets/issues/1766
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1766/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1766/comments
https://api.github.com/repos/huggingface/datasets/issues/1766/events
https://github.com/huggingface/datasets/issues/1766
792,044,105
MDU6SXNzdWU3OTIwNDQxMDU=
1,766
Issues when run two programs compute the same metrics
{ "avatar_url": "https://avatars.githubusercontent.com/u/8089862?v=4", "events_url": "https://api.github.com/users/lamthuy/events{/privacy}", "followers_url": "https://api.github.com/users/lamthuy/followers", "following_url": "https://api.github.com/users/lamthuy/following{/other_user}", "gists_url": "https://api.github.com/users/lamthuy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lamthuy", "id": 8089862, "login": "lamthuy", "node_id": "MDQ6VXNlcjgwODk4NjI=", "organizations_url": "https://api.github.com/users/lamthuy/orgs", "received_events_url": "https://api.github.com/users/lamthuy/received_events", "repos_url": "https://api.github.com/users/lamthuy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lamthuy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lamthuy/subscriptions", "type": "User", "url": "https://api.github.com/users/lamthuy", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi ! To avoid collisions you can specify a `experiment_id` when instantiating your metric using `load_metric`. It will replace \"default_experiment\" with the experiment id that you provide in the arrow filename. \r\n\r\nAlso when two `experiment_id` collide we're supposed to detect it using our locking mechanism. Not sure why it didn't work in your case. Could you share some code that reproduces the issue ? This would help us investigate.", "Thank you for your response. I fixed the issue by set \"keep_in_memory=True\" when load_metric. \r\nI cannot share the entire source code but below is the wrapper I wrote:\r\n\r\n```python\r\nclass Evaluation:\r\n def __init__(self, metric='sacrebleu'):\r\n # self.metric = load_metric(metric, keep_in_memory=True)\r\n self.metric = load_metric(metric)\r\n\r\n def add(self, predictions, references):\r\n self.metric.add_batch(predictions=predictions, references=references)\r\n\r\n def compute(self):\r\n return self.metric.compute()['score']\r\n```\r\n\r\nThen call the given wrapper as follows:\r\n\r\n```python\r\neval = Evaluation(metric='sacrebleu')\r\nfor query, candidates, labels in tqdm(dataset):\r\n predictions = net.generate(query)\r\n references = [[s] for s in labels]\r\n eval.add(predictions, references)\r\n if n % 100 == 0:\r\n bleu += eval.compute()\r\n eval = Evaluation(metric='sacrebleu')" ]
2021-01-22T14:22:55
2021-02-02T10:38:06
2021-02-02T10:38:06
NONE
null
null
null
null
I got the following error when running two different programs that both compute sacreblue metrics. It seems that both read/and/write to the same location (.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow) where it caches the batches: ``` File "train_matching_min.py", line 160, in <module>ch_9_label avg_loss = valid(epoch, args.batch, args.validation, args.with_label) File "train_matching_min.py", line 93, in valid bleu += eval.compute() File "/u/tlhoang/projects/seal/match/models/eval.py", line 23, in compute return self.metric.compute()['score'] File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/metric.py", line 387, in compute self._finalize() File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/metric.py", line 355, in _finalize self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths])) File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/arrow_reader.py", line 231, in read_files pa_table = self._read_files(files) File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/arrow_reader.py", line 170, in _read_files pa_table: pa.Table = self._get_dataset_from_filename(f_dict) File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/arrow_reader.py", line 299, in _get_dataset_from_filename pa_table = f.read_all() File "pyarrow/ipc.pxi", line 481, in pyarrow.lib.RecordBatchReader.read_all File "pyarrow/error.pxi", line 84, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Expected to read 1819307375 metadata bytes, but only read 454396 ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1766/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1766/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
10 days, 20:15:11
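Issue 1766 above shows two processes corrupting the shared default metric cache file. The maintainer suggests a per-run `experiment_id`, and the reporter fixed it with `keep_in_memory=True`. A sketch of both follows, assuming a `datasets` version that still provides `load_metric` and that the sacrebleu package is installed.

```python
from datasets import load_metric

# Two concurrent jobs sharing the default cache file
# (~/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow) can
# corrupt each other's reads; a per-run experiment_id gives each run its own file.
metric = load_metric("sacrebleu", experiment_id="run_a")
# metric = load_metric("sacrebleu", keep_in_memory=True)  # alternative: skip the arrow cache

metric.add_batch(predictions=["the cat sat"], references=[["the cat sat on the mat"]])
print(metric.compute()["score"])
```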
https://api.github.com/repos/huggingface/datasets/issues/1765
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1765/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1765/comments
https://api.github.com/repos/huggingface/datasets/issues/1765/events
https://github.com/huggingface/datasets/issues/1765
791,553,065
MDU6SXNzdWU3OTE1NTMwNjU=
1,765
Error iterating over Dataset with DataLoader
{ "avatar_url": "https://avatars.githubusercontent.com/u/1295082?v=4", "events_url": "https://api.github.com/users/EvanZ/events{/privacy}", "followers_url": "https://api.github.com/users/EvanZ/followers", "following_url": "https://api.github.com/users/EvanZ/following{/other_user}", "gists_url": "https://api.github.com/users/EvanZ/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/EvanZ", "id": 1295082, "login": "EvanZ", "node_id": "MDQ6VXNlcjEyOTUwODI=", "organizations_url": "https://api.github.com/users/EvanZ/orgs", "received_events_url": "https://api.github.com/users/EvanZ/received_events", "repos_url": "https://api.github.com/users/EvanZ/repos", "site_admin": false, "starred_url": "https://api.github.com/users/EvanZ/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/EvanZ/subscriptions", "type": "User", "url": "https://api.github.com/users/EvanZ", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Instead of:\r\n```python\r\ndataloader = torch.utils.data.DataLoader(encoded_dataset, batch_sampler=32)\r\n```\r\nIt should be:\r\n```python\r\ndataloader = torch.utils.data.DataLoader(encoded_dataset, batch_size=32)\r\n```\r\n\r\n`batch_sampler` accepts a Sampler object or an Iterable, so you get an error.", "@mariosasko I thought that would fix it, but now I'm getting a different error:\r\n\r\n```\r\n/usr/local/lib/python3.6/dist-packages/datasets/arrow_dataset.py:851: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /pytorch/torch/csrc/utils/tensor_numpy.cpp:141.)\r\n return torch.tensor(x, **format_kwargs)\r\n---------------------------------------------------------------------------\r\nRuntimeError Traceback (most recent call last)\r\n<ipython-input-20-3af1d82bf93a> in <module>()\r\n 1 dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_size=32)\r\n----> 2 next(iter(dataloader))\r\n\r\n5 frames\r\n/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/collate.py in default_collate(batch)\r\n 53 storage = elem.storage()._new_shared(numel)\r\n 54 out = elem.new(storage)\r\n---> 55 return torch.stack(batch, 0, out=out)\r\n 56 elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\r\n 57 and elem_type.__name__ != 'string_':\r\n\r\nRuntimeError: stack expects each tensor to be equal size, but got [7] at entry 0 and [10] at entry 1\r\n```\r\n\r\nAny thoughts what this means?I Do I need padding?", "Yes, padding is an answer. \r\n\r\nThis can be solved easily by passing a callable to the collate_fn arg of DataLoader that adds padding. ", "Padding was the fix, thanks!", "dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_size=4)\r\nbatch = next(iter(dataloader))\r\n\r\ngetting \r\nValueError: cannot reshape array of size 8192 into shape (1,512,4)\r\n\r\nI had put padding as 2048 for encoded_dataset\r\nkindly help", "data_loader_val = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=True, drop_last=False, num_workers=0)\r\ndataiter = iter(data_loader_val)\r\nimages, _ = next(dataiter)\r\n\r\ngetting -> TypeError: 'list' object is not callable\r\n\r\nCannot iterate through the data. Kindly suggest." ]
2021-01-21T22:56:45
2022-10-28T02:16:38
2021-01-23T03:44:14
NONE
null
null
null
null
I have a Dataset that I've mapped a tokenizer over: ``` encoded_dataset.set_format(type='torch',columns=['attention_mask','input_ids','token_type_ids']) encoded_dataset[:1] ``` ``` {'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), 'input_ids': tensor([[ 101, 178, 1198, 1400, 1714, 22233, 21365, 4515, 8618, 1113, 102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` When I try to iterate as in the docs, I get errors: ``` dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_sampler=32) next(iter(dataloader)) ``` ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-45-05180ba8aa35> in <module>() 1 dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_sampler=32) ----> 2 next(iter(dataloader)) 3 frames /usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __init__(self, loader) 411 self._timeout = loader.timeout 412 self._collate_fn = loader.collate_fn --> 413 self._sampler_iter = iter(self._index_sampler) 414 self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item() 415 self._persistent_workers = loader.persistent_workers TypeError: 'int' object is not iterable ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/1295082?v=4", "events_url": "https://api.github.com/users/EvanZ/events{/privacy}", "followers_url": "https://api.github.com/users/EvanZ/followers", "following_url": "https://api.github.com/users/EvanZ/following{/other_user}", "gists_url": "https://api.github.com/users/EvanZ/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/EvanZ", "id": 1295082, "login": "EvanZ", "node_id": "MDQ6VXNlcjEyOTUwODI=", "organizations_url": "https://api.github.com/users/EvanZ/orgs", "received_events_url": "https://api.github.com/users/EvanZ/received_events", "repos_url": "https://api.github.com/users/EvanZ/repos", "site_admin": false, "starred_url": "https://api.github.com/users/EvanZ/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/EvanZ/subscriptions", "type": "User", "url": "https://api.github.com/users/EvanZ", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1765/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1765/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 4:47:29
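The thread in issue 1765 above resolves two errors: an int was passed to `batch_sampler` instead of `batch_size`, and the default collate could not stack unequal-length rows, which padding in a `collate_fn` fixes. Below is a self-contained sketch under those fixes; the two-row dataset is a stand-in for the reporter's tokenized data.

```python
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from datasets import Dataset

# Stand-in for the tokenized dataset in the issue: rows have different lengths.
encoded_dataset = Dataset.from_dict({
    "input_ids": [[101, 178, 1198, 102], [101, 1400, 1714, 22233, 21365, 102]],
    "attention_mask": [[1, 1, 1, 1], [1, 1, 1, 1, 1, 1]],
})
encoded_dataset.set_format(type="torch", columns=["input_ids", "attention_mask"])

def collate_with_padding(batch):
    # Pad each field to the longest sequence in the batch so the tensors can be stacked.
    return {key: pad_sequence([ex[key] for ex in batch], batch_first=True) for key in batch[0]}

# batch_size (not batch_sampler) takes an int; the collate_fn supplies the padding
# that the default stacking collate cannot do for unequal-length rows.
dataloader = DataLoader(encoded_dataset, batch_size=2, collate_fn=collate_with_padding)
print(next(iter(dataloader))["input_ids"].shape)  # -> torch.Size([2, 6])
```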
https://api.github.com/repos/huggingface/datasets/issues/1764
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1764/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1764/comments
https://api.github.com/repos/huggingface/datasets/issues/1764/events
https://github.com/huggingface/datasets/issues/1764
791,486,860
MDU6SXNzdWU3OTE0ODY4NjA=
1,764
Connection Issues
{ "avatar_url": "https://avatars.githubusercontent.com/u/12455298?v=4", "events_url": "https://api.github.com/users/SaeedNajafi/events{/privacy}", "followers_url": "https://api.github.com/users/SaeedNajafi/followers", "following_url": "https://api.github.com/users/SaeedNajafi/following{/other_user}", "gists_url": "https://api.github.com/users/SaeedNajafi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SaeedNajafi", "id": 12455298, "login": "SaeedNajafi", "node_id": "MDQ6VXNlcjEyNDU1Mjk4", "organizations_url": "https://api.github.com/users/SaeedNajafi/orgs", "received_events_url": "https://api.github.com/users/SaeedNajafi/received_events", "repos_url": "https://api.github.com/users/SaeedNajafi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SaeedNajafi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SaeedNajafi/subscriptions", "type": "User", "url": "https://api.github.com/users/SaeedNajafi", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Academic WIFI was blocking." ]
2021-01-21T20:56:09
2021-01-21T21:00:19
2021-01-21T21:00:02
NONE
null
null
null
null
Today, I am getting connection issues while loading a dataset and the metric. ``` Traceback (most recent call last): File "src/train.py", line 180, in <module> train_dataset, dev_dataset, test_dataset = create_race_dataset() File "src/train.py", line 130, in create_race_dataset train_dataset = load_dataset("race", "all", split="train") File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 591, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 343, in cached_path max_retries=download_config.max_retries, File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 617, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/datasets/race/race.py ``` Or ``` Traceback (most recent call last): File "src/train.py", line 105, in <module> rouge = datasets.load_metric("rouge") File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 500, in load_metric dataset=False, File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 343, in cached_path max_retries=download_config.max_retries, File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 617, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/metrics/rouge/rouge.py ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/12455298?v=4", "events_url": "https://api.github.com/users/SaeedNajafi/events{/privacy}", "followers_url": "https://api.github.com/users/SaeedNajafi/followers", "following_url": "https://api.github.com/users/SaeedNajafi/following{/other_user}", "gists_url": "https://api.github.com/users/SaeedNajafi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SaeedNajafi", "id": 12455298, "login": "SaeedNajafi", "node_id": "MDQ6VXNlcjEyNDU1Mjk4", "organizations_url": "https://api.github.com/users/SaeedNajafi/orgs", "received_events_url": "https://api.github.com/users/SaeedNajafi/received_events", "repos_url": "https://api.github.com/users/SaeedNajafi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SaeedNajafi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SaeedNajafi/subscriptions", "type": "User", "url": "https://api.github.com/users/SaeedNajafi", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1764/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1764/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:03:53
https://api.github.com/repos/huggingface/datasets/issues/1762
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1762/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1762/comments
https://api.github.com/repos/huggingface/datasets/issues/1762/events
https://github.com/huggingface/datasets/issues/1762
791,226,007
MDU6SXNzdWU3OTEyMjYwMDc=
1,762
Unable to format dataset to CUDA Tensors
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi ! You can get CUDA tensors with\r\n\r\n```python\r\ndataset.set_format(\"torch\", columns=columns, device=\"cuda\")\r\n```\r\n\r\nIndeed `set_format` passes the `**kwargs` to `torch.tensor`", "Hi @lhoestq,\r\n\r\nThanks a lot. Is this true for all format types?\r\n\r\nAs in, for 'torch', I can have `**kwargs` to `torch.tensor` and for 'tf' those args are passed to `tf.Tensor`, and the same for 'numpy' and 'pandas'?", "Yes the keywords arguments are passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.\r\nWe don't support the kwargs for pandas on the other hand.", "Thanks @lhoestq,\r\nWould it be okay if I added this to the docs and made a PR?", "Sure ! Feel free to open a PR to improve the documentation :) ", "Closing this issue as it has been resolved." ]
2021-01-21T15:31:23
2021-02-02T07:13:22
2021-02-02T07:13:22
CONTRIBUTOR
null
null
null
null
Hi, I came across this [link](https://huggingface.co/docs/datasets/torch_tensorflow.html) where the docs show how to convert a dataset to a particular format. I see that there is an option to convert it to tensors, but I don't see any option to convert it to CUDA tensors. I tried this, but Dataset doesn't support assignment: ``` columns=['input_ids', 'token_type_ids', 'attention_mask', 'start_positions','end_positions'] samples.set_format(type='torch', columns = columns) for column in columns: samples[column].to(torch.device(self.config.device)) ``` There should be an option to do so, or if there is already a way to do this, please let me know. Thanks, Gunjan
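A minimal sketch of the approach given in the comments above, assuming `samples` is the tokenized `Dataset` from the snippet: `set_format` forwards its extra keyword arguments to `torch.tensor`, so the device can be set once instead of assigning column by column.

```python
import torch

columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions']
# every column listed here comes back as a CUDA tensor when indexing the dataset
samples.set_format(type='torch', columns=columns,
                   device='cuda' if torch.cuda.is_available() else 'cpu')
```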
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1762/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1762/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
11 days, 15:41:59
https://api.github.com/repos/huggingface/datasets/issues/1759
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1759/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1759/comments
https://api.github.com/repos/huggingface/datasets/issues/1759/events
https://github.com/huggingface/datasets/issues/1759
790,992,226
MDU6SXNzdWU3OTA5OTIyMjY=
1,759
wikipedia dataset incomplete
{ "avatar_url": "https://avatars.githubusercontent.com/u/19912393?v=4", "events_url": "https://api.github.com/users/ChrisDelClea/events{/privacy}", "followers_url": "https://api.github.com/users/ChrisDelClea/followers", "following_url": "https://api.github.com/users/ChrisDelClea/following{/other_user}", "gists_url": "https://api.github.com/users/ChrisDelClea/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ChrisDelClea", "id": 19912393, "login": "ChrisDelClea", "node_id": "MDQ6VXNlcjE5OTEyMzkz", "organizations_url": "https://api.github.com/users/ChrisDelClea/orgs", "received_events_url": "https://api.github.com/users/ChrisDelClea/received_events", "repos_url": "https://api.github.com/users/ChrisDelClea/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ChrisDelClea/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ChrisDelClea/subscriptions", "type": "User", "url": "https://api.github.com/users/ChrisDelClea", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi !\r\nFrom what pickle file fo you get this ?\r\nI guess you mean the dataset loaded using `load_dataset` ?", "yes sorry, I used the `load_dataset`function and saved the data to a pickle file so I don't always have to reload it and are able to work offline. ", "The wikipedia articles are processed using the `mwparserfromhell` library. Even if it works well in most cases, such issues can happen unfortunately. You can find the repo here: https://github.com/earwig/mwparserfromhell\r\n\r\nThere also exist other datasets based on wikipedia that were processed differently (and are often cleaner) such as `wiki40b`.\r\n\r\n", "ok great. Thank you, @lhoestq. " ]
2021-01-21T11:47:15
2021-01-21T17:22:11
2021-01-21T17:21:06
NONE
null
null
null
null
Hey guys, I am using the https://github.com/huggingface/datasets/tree/master/datasets/wikipedia dataset. Unfortunately, I found out that the German dataset is incomplete. For reasons unknown to me, the number of inhabitants has been removed from many pages: Thorey-sur-Ouche has 128 inhabitants according to the webpage (https://de.wikipedia.org/wiki/Thorey-sur-Ouche). The pickle file, however, shows: "französische Gemeinde mit Einwohnern (Stand)" ("French commune with inhabitants (as of)"); the number itself is missing. Is it possible to fix this? Best regards Chris
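A minimal sketch of the alternative mentioned in the comments above, assuming the German `wiki40b` config is available in the installed version (depending on the release, building it may additionally require an Apache Beam runner):

```python
from datasets import load_dataset

# wiki40b is a differently-processed (often cleaner) Wikipedia dataset
wiki_de = load_dataset("wiki40b", "de", split="train")
print(wiki_de[0]["text"][:200])
```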
{ "avatar_url": "https://avatars.githubusercontent.com/u/19912393?v=4", "events_url": "https://api.github.com/users/ChrisDelClea/events{/privacy}", "followers_url": "https://api.github.com/users/ChrisDelClea/followers", "following_url": "https://api.github.com/users/ChrisDelClea/following{/other_user}", "gists_url": "https://api.github.com/users/ChrisDelClea/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ChrisDelClea", "id": 19912393, "login": "ChrisDelClea", "node_id": "MDQ6VXNlcjE5OTEyMzkz", "organizations_url": "https://api.github.com/users/ChrisDelClea/orgs", "received_events_url": "https://api.github.com/users/ChrisDelClea/received_events", "repos_url": "https://api.github.com/users/ChrisDelClea/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ChrisDelClea/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ChrisDelClea/subscriptions", "type": "User", "url": "https://api.github.com/users/ChrisDelClea", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1759/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1759/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
5:33:51
https://api.github.com/repos/huggingface/datasets/issues/1758
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1758/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1758/comments
https://api.github.com/repos/huggingface/datasets/issues/1758/events
https://github.com/huggingface/datasets/issues/1758
790,626,116
MDU6SXNzdWU3OTA2MjYxMTY=
1,758
dataset.search() (elastic) cannot reliably retrieve search results
{ "avatar_url": "https://avatars.githubusercontent.com/u/49048309?v=4", "events_url": "https://api.github.com/users/afogarty85/events{/privacy}", "followers_url": "https://api.github.com/users/afogarty85/followers", "following_url": "https://api.github.com/users/afogarty85/following{/other_user}", "gists_url": "https://api.github.com/users/afogarty85/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/afogarty85", "id": 49048309, "login": "afogarty85", "node_id": "MDQ6VXNlcjQ5MDQ4MzA5", "organizations_url": "https://api.github.com/users/afogarty85/orgs", "received_events_url": "https://api.github.com/users/afogarty85/received_events", "repos_url": "https://api.github.com/users/afogarty85/repos", "site_admin": false, "starred_url": "https://api.github.com/users/afogarty85/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/afogarty85/subscriptions", "type": "User", "url": "https://api.github.com/users/afogarty85", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi !\r\nI tried your code on my side and I was able to workaround this issue by waiting a few seconds before querying the index.\r\nMaybe this is because the index is not updated yet on the ElasticSearch side ?", "Thanks for the feedback! I added a 30 second \"sleep\" and that seemed to work well!" ]
2021-01-21T02:26:37
2021-01-22T00:25:50
2021-01-22T00:25:50
NONE
null
null
null
null
I am trying to use elastic search to retrieve the indices of items in the dataset in their precise order, given shuffled training indices. The problem I have is that I cannot retrieve reliable results with my data on my first search. I have to run the search **twice** to get the right answer. I am indexing data that looks like the following from the HF SQuAD 2.0 data set: ``` ['57318658e6313a140071d02b', '56f7165e3d8e2e1400e3733a', '570e2f6e0b85d914000d7d21', '5727e58aff5b5019007d97d0', '5a3b5a503ff257001ab8441f', '57262fab271a42140099d725'] ``` To reproduce the issue, try: ``` from datasets import load_dataset, load_metric from transformers import BertTokenizerFast, BertForQuestionAnswering from elasticsearch import Elasticsearch import numpy as np import collections from tqdm.auto import tqdm import torch # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') max_length = 384 # The maximum length of a feature (question and context) doc_stride = 128 # The authorized overlap between two part of the context when splitting it is needed. pad_on_right = tokenizer.padding_side == "right" squad_v2 = True # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # We keep the example_id that gave us this feature and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (list(o) if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples # build base examples, features set of training data shuffled_idx = pd.read_csv('https://raw.githubusercontent.com/afogarty85/temp/main/idx.csv')['idx'].to_list() examples = load_dataset("squad_v2").shuffle(seed=1)['train'] features = load_dataset("squad_v2").shuffle(seed=1)['train'].map( prepare_validation_features, batched=True, remove_columns=['answers', 'context', 'id', 'question', 'title']) # reorder features by the training process features = features.select(indices=shuffled_idx) # get the example ids to match with the "example" data; get unique entries id_list = list(dict.fromkeys(features['example_id'])) # now search for their index positions in the examples data set; load elastic search es = Elasticsearch([{'host': 'localhost'}]).ping() # add an index to the id column for the examples examples.add_elasticsearch_index(column='id') # retrieve the example index example_idx_k1 = [examples.search(index_name='id', query=i, k=1).indices for i in id_list] example_idx_k1 = [item for sublist in example_idx_k1 for item in sublist] example_idx_k2 = [examples.search(index_name='id', query=i, k=3).indices for i in id_list] example_idx_k2 = [item for sublist in example_idx_k2 for item in sublist] len(example_idx_k1) # should be 130319 len(example_idx_k2) # should be 130319 #trial 1 lengths: # k=1: 130314 # k=3: 130319 # trial 2: # just run k=3 first: 130310 # try k=1 after k=3: 130319 ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/49048309?v=4", "events_url": "https://api.github.com/users/afogarty85/events{/privacy}", "followers_url": "https://api.github.com/users/afogarty85/followers", "following_url": "https://api.github.com/users/afogarty85/following{/other_user}", "gists_url": "https://api.github.com/users/afogarty85/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/afogarty85", "id": 49048309, "login": "afogarty85", "node_id": "MDQ6VXNlcjQ5MDQ4MzA5", "organizations_url": "https://api.github.com/users/afogarty85/orgs", "received_events_url": "https://api.github.com/users/afogarty85/received_events", "repos_url": "https://api.github.com/users/afogarty85/repos", "site_admin": false, "starred_url": "https://api.github.com/users/afogarty85/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/afogarty85/subscriptions", "type": "User", "url": "https://api.github.com/users/afogarty85", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1758/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1758/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
21:59:13
https://api.github.com/repos/huggingface/datasets/issues/1757
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1757/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1757/comments
https://api.github.com/repos/huggingface/datasets/issues/1757/events
https://github.com/huggingface/datasets/issues/1757
790,466,509
MDU6SXNzdWU3OTA0NjY1MDk=
1,757
FewRel
{ "avatar_url": "https://avatars.githubusercontent.com/u/6183050?v=4", "events_url": "https://api.github.com/users/dspoka/events{/privacy}", "followers_url": "https://api.github.com/users/dspoka/followers", "following_url": "https://api.github.com/users/dspoka/following{/other_user}", "gists_url": "https://api.github.com/users/dspoka/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dspoka", "id": 6183050, "login": "dspoka", "node_id": "MDQ6VXNlcjYxODMwNTA=", "organizations_url": "https://api.github.com/users/dspoka/orgs", "received_events_url": "https://api.github.com/users/dspoka/received_events", "repos_url": "https://api.github.com/users/dspoka/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dspoka/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dspoka/subscriptions", "type": "User", "url": "https://api.github.com/users/dspoka", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
[ "+1", "@dspoka Please check the following link : https://github.com/thunlp/FewRel\r\nThis link mentions two versions of the datasets. Also, this one seems to be the official link.\r\n\r\nI am assuming this is the correct link and implementing based on the same.", "Hi @lhoestq,\r\n\r\nThis issue can be closed, I guess.", "Yes :) closing\r\nThanks again for adding FewRel !", "Thanks for adding this @gchhablani ! Sorry didn't see the email notifications sooner!" ]
2021-01-20T23:56:03
2021-03-09T02:52:05
2021-03-08T14:34:52
NONE
null
null
null
null
## Adding a Dataset - **Name:** FewRel - **Description:** Large-Scale Supervised Few-Shot Relation Classification Dataset - **Paper:** @inproceedings{han2018fewrel, title={FewRel:A Large-Scale Supervised Few-Shot Relation Classification Dataset with State-of-the-Art Evaluation}, author={Han, Xu and Zhu, Hao and Yu, Pengfei and Wang, Ziyun and Yao, Yuan and Liu, Zhiyuan and Sun, Maosong}, booktitle={EMNLP}, year={2018}} - **Data:** https://github.com/ProKil/FewRel - **Motivation:** a relation extraction dataset that has been used by some state-of-the-art systems and should be incorporated. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1757/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1757/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
46 days, 14:38:49
https://api.github.com/repos/huggingface/datasets/issues/1756
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1756/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1756/comments
https://api.github.com/repos/huggingface/datasets/issues/1756/events
https://github.com/huggingface/datasets/issues/1756
790,380,028
MDU6SXNzdWU3OTAzODAwMjg=
1,756
Ccaligned multilingual translation dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/47894090?v=4", "events_url": "https://api.github.com/users/flozi00/events{/privacy}", "followers_url": "https://api.github.com/users/flozi00/followers", "following_url": "https://api.github.com/users/flozi00/following{/other_user}", "gists_url": "https://api.github.com/users/flozi00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/flozi00", "id": 47894090, "login": "flozi00", "node_id": "MDQ6VXNlcjQ3ODk0MDkw", "organizations_url": "https://api.github.com/users/flozi00/orgs", "received_events_url": "https://api.github.com/users/flozi00/received_events", "repos_url": "https://api.github.com/users/flozi00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/flozi00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/flozi00/subscriptions", "type": "User", "url": "https://api.github.com/users/flozi00", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
[]
2021-01-20T22:18:44
2021-03-01T10:36:21
2021-03-01T10:36:21
CONTRIBUTOR
null
null
null
null
## Adding a Dataset - **Name:** *name of the dataset* - **Description:** *short description of the dataset (or link to social media or blog post)* - CCAligned consists of parallel or comparable web-document pairs in 137 languages aligned with English. These web-document pairs were constructed by performing language identification on raw web-documents, and ensuring that the corresponding language codes appeared in the URLs of web documents. This pattern matching approach yielded more than 100 million aligned documents paired with English. Recognizing that each English document was often aligned to multiple documents in different target languages, we can join on English documents to obtain aligned documents that directly pair two non-English documents (e.g., Arabic-French). - **Paper:** *link to the dataset paper if available* - https://www.aclweb.org/anthology/2020.emnlp-main.480.pdf - **Data:** *link to the Github repository or current dataset location* - http://www.statmt.org/cc-aligned/ - **Motivation:** *what are some good reasons to have this dataset* - The authors say it's a high-quality dataset. - It's pretty large and includes many language pairs. It could be interesting to train mT5 on this data. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1756/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1756/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
39 days, 12:17:37
https://api.github.com/repos/huggingface/datasets/issues/1755
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1755/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1755/comments
https://api.github.com/repos/huggingface/datasets/issues/1755/events
https://github.com/huggingface/datasets/issues/1755
790,324,734
MDU6SXNzdWU3OTAzMjQ3MzQ=
1,755
Using select/reordering datasets slows operations down immensely
{ "avatar_url": "https://avatars.githubusercontent.com/u/49048309?v=4", "events_url": "https://api.github.com/users/afogarty85/events{/privacy}", "followers_url": "https://api.github.com/users/afogarty85/followers", "following_url": "https://api.github.com/users/afogarty85/following{/other_user}", "gists_url": "https://api.github.com/users/afogarty85/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/afogarty85", "id": 49048309, "login": "afogarty85", "node_id": "MDQ6VXNlcjQ5MDQ4MzA5", "organizations_url": "https://api.github.com/users/afogarty85/orgs", "received_events_url": "https://api.github.com/users/afogarty85/received_events", "repos_url": "https://api.github.com/users/afogarty85/repos", "site_admin": false, "starred_url": "https://api.github.com/users/afogarty85/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/afogarty85/subscriptions", "type": "User", "url": "https://api.github.com/users/afogarty85", "user_view_type": "public" }
[]
closed
false
null
[]
[ "You can use `Dataset.flatten_indices()` to make it fast after a select or shuffle.", "Thanks for the input! I gave that a try by adding this after my selection / reordering operations, but before the big computation task of `score_squad`\r\n\r\n```\r\nexamples = examples.flatten_indices()\r\nfeatures = features.flatten_indices()\r\n```\r\n\r\nThat helped quite a bit!" ]
2021-01-20T21:12:12
2021-01-20T22:03:39
2021-01-20T22:03:39
NONE
null
null
null
null
I am using portions of HF's helpful work in preparing / scoring the SQuAD 2.0 data. The problem I have is that after using `select` to re-ordering the dataset, computations slow down immensely where the total scoring process on 131k training examples would take maybe 3 minutes, now take over an hour. The below example should be reproducible and I have ran myself down this path because I want to use HF's scoring functions and helpful data preparation, but use my own trainer. The training process uses shuffle and therefore the order I trained on no longer matches the original data set order. So, to score my results correctly, the original data set needs to match the order of the training. This requires that I: (1) collect the index for each row of data emitted during training, and (2) use this index information to re-order the datasets correctly so the orders match when I go to score. The problem is, the dataset class starts performing very poorly as soon as you start manipulating its order by immense magnitudes. ``` from datasets import load_dataset, load_metric from transformers import BertTokenizerFast, BertForQuestionAnswering from elasticsearch import Elasticsearch import numpy as np import collections from tqdm.auto import tqdm import torch # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') max_length = 384 # The maximum length of a feature (question and context) doc_stride = 128 # The authorized overlap between two part of the context when splitting it is needed. pad_on_right = tokenizer.padding_side == "right" squad_v2 = True # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # We keep the example_id that gave us this feature and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (list(o) if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- def postprocess_qa_predictions(examples, features, starting_logits, ending_logits, n_best_size = 20, max_answer_length = 30): all_start_logits, all_end_logits = starting_logits, ending_logits # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i) # The dictionaries we have to fill. predictions = collections.OrderedDict() # Logging. print(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_score = None # Only used if squad_v2 is True. valid_answers = [] context = example["context"] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index]["offset_mapping"] # Update minimum null prediction. cls_index = features[feature_index]["input_ids"].index(tokenizer.cls_token_id) feature_null_score = start_logits[cls_index] + end_logits[cls_index] if min_null_score is None or min_null_score < feature_null_score: min_null_score = feature_null_score # Go through all possibilities for the `n_best_size` greater start and end logits. start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue start_char = offset_mapping[start_index][0] end_char = offset_mapping[end_index][1] valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": context[start_char: end_char] } ) if len(valid_answers) > 0: best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[0] else: # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. 
best_answer = {"text": "", "score": 0.0} # Let's pick our final answer: the best one or the null answer (only for squad_v2) if not squad_v2: predictions[example["id"]] = best_answer["text"] else: answer = best_answer["text"] if best_answer["score"] > min_null_score else "" predictions[example["id"]] = answer return predictions # build base examples, features from training data examples = load_dataset("squad_v2").shuffle(seed=5)['train'] features = load_dataset("squad_v2").shuffle(seed=5)['train'].map( prepare_validation_features, batched=True, remove_columns=['answers', 'context', 'id', 'question', 'title']) # sim some shuffled training indices that we want to use to re-order the data to compare how we did shuffle_idx = np.arange(0, 131754) np.random.shuffle(shuffle_idx) # create a new dataset with rows selected following the training shuffle features = features.select(indices=shuffle_idx) # get unique example ids to match with the "example" data id_list = list(dict.fromkeys(features['example_id'])) # now search for their index positions; load elastic search es = Elasticsearch([{'host': 'localhost'}]).ping() # add an index to the id column for the examples examples.add_elasticsearch_index(column='id') # search the examples for their index position example_idx = [examples.search(index_name='id', query=i, k=1).indices for i in id_list] # drop the elastic search examples.drop_index(index_name='id') # put examples in the right order examples = examples.select(indices=example_idx) # generate some fake data logits = {'starting_logits': torch.randn(131754, 384), 'ending_logits': torch.randn(131754, 384)} def score_squad(logits, n_best_size, max_answer): # proceed with QA calculation final_predictions = postprocess_qa_predictions(examples=examples, features=features, starting_logits=logits['starting_logits'], ending_logits=logits['ending_logits'], n_best_size=20, max_answer_length=30) metric = load_metric("squad_v2") formatted_predictions = [{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in final_predictions.items()] references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples] metrics = metric.compute(predictions=formatted_predictions, references=references) return metrics metrics = score_squad(logits, n_best_size=20, max_answer=30) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/49048309?v=4", "events_url": "https://api.github.com/users/afogarty85/events{/privacy}", "followers_url": "https://api.github.com/users/afogarty85/followers", "following_url": "https://api.github.com/users/afogarty85/following{/other_user}", "gists_url": "https://api.github.com/users/afogarty85/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/afogarty85", "id": 49048309, "login": "afogarty85", "node_id": "MDQ6VXNlcjQ5MDQ4MzA5", "organizations_url": "https://api.github.com/users/afogarty85/orgs", "received_events_url": "https://api.github.com/users/afogarty85/received_events", "repos_url": "https://api.github.com/users/afogarty85/repos", "site_admin": false, "starred_url": "https://api.github.com/users/afogarty85/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/afogarty85/subscriptions", "type": "User", "url": "https://api.github.com/users/afogarty85", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1755/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1755/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:51:27
https://api.github.com/repos/huggingface/datasets/issues/1747
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1747/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1747/comments
https://api.github.com/repos/huggingface/datasets/issues/1747/events
https://github.com/huggingface/datasets/issues/1747
788,299,775
MDU6SXNzdWU3ODgyOTk3NzU=
1,747
datasets slicing with seed
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi :) \r\nThe slicing API from https://huggingface.co/docs/datasets/splits.html doesn't shuffle the data.\r\nYou can shuffle and then take a subset of your dataset with\r\n```python\r\n# shuffle and take the first 100 examples\r\ndataset = dataset.shuffle(seed=42).select(range(100))\r\n```\r\n\r\nYou can find more information about shuffling and selecting rows in the documentation: https://huggingface.co/docs/datasets/processing.html#selecting-sorting-shuffling-splitting-rows", "thank you so much\n\nOn Mon, Jan 18, 2021 at 3:17 PM Quentin Lhoest <notifications@github.com>\nwrote:\n\n> Hi :)\n> The slicing API doesn't shuffle the data.\n> You can shuffle and then take a subset of your dataset with\n>\n> # shuffle and take the first 100 examplesdataset = dataset.shuffle(seed=42).select(range(100))\n>\n> You can find more information about shuffling and selecting rows in the\n> documentation:\n> https://huggingface.co/docs/datasets/processing.html#selecting-sorting-shuffling-splitting-rows\n>\n> —\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub\n> <https://github.com/huggingface/datasets/issues/1747#issuecomment-762278134>,\n> or unsubscribe\n> <https://github.com/notifications/unsubscribe-auth/AM3GZM5D5MDPLJGI4IG3UADS2Q7GPANCNFSM4WHLOZJQ>\n> .\n>\n" ]
2021-01-18T14:08:55
2022-10-05T12:37:27
2022-10-05T12:37:27
NONE
null
null
null
null
Hi, I need to slice a dataset with a random seed. I looked into the documentation here: https://huggingface.co/docs/datasets/splits.html but could not find a seed option. Could you tell me how I can get a slice for different seeds? Thank you. @lhoestq
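A minimal sketch of the answer given in the comments above, using SQuAD v2 purely as a placeholder dataset: shuffle with a seed, then `select` a range to get a reproducible random slice.

```python
from datasets import load_dataset

dataset = load_dataset("squad_v2", split="train")
# different seeds give different (but each individually reproducible) 100-example slices
slice_a = dataset.shuffle(seed=42).select(range(100))
slice_b = dataset.shuffle(seed=7).select(range(100))
```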
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1747/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1747/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
624 days, 22:28:32
https://api.github.com/repos/huggingface/datasets/issues/1745
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1745/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1745/comments
https://api.github.com/repos/huggingface/datasets/issues/1745/events
https://github.com/huggingface/datasets/issues/1745
787,838,256
MDU6SXNzdWU3ODc4MzgyNTY=
1,745
difference between wsc and wsc.fixed for superglue
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "From the description given in the dataset script for `wsc.fixed`:\r\n```\r\nThis version fixes issues where the spans are not actually substrings of the text.\r\n```" ]
2021-01-18T00:50:19
2021-01-18T11:02:43
2021-01-18T00:59:34
NONE
null
null
null
null
Hi, I see two versions of wsc in SuperGLUE, and I am not sure what the differences are and which one is the original. Could you help clarify the differences? Thanks @lhoestq
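A minimal sketch for comparing the two configs side by side; per the dataset script description quoted in the comment above, `wsc.fixed` only repairs spans that are not exact substrings of the text.

```python
from datasets import load_dataset

wsc = load_dataset("super_glue", "wsc", split="train")
wsc_fixed = load_dataset("super_glue", "wsc.fixed", split="train")
# same examples, but the *.fixed spans are guaranteed to be substrings of the text field
print(wsc[0])
print(wsc_fixed[0])
```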
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1745/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1745/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:09:15
https://api.github.com/repos/huggingface/datasets/issues/1743
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1743/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1743/comments
https://api.github.com/repos/huggingface/datasets/issues/1743/events
https://github.com/huggingface/datasets/issues/1743
787,631,412
MDU6SXNzdWU3ODc2MzE0MTI=
1,743
Issue while Creating Custom Metric
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Currently it's only possible to define the features for the two columns `references` and `predictions`.\r\nThe data for these columns can then be passed to `metric.add_batch` and `metric.compute`.\r\nInstead of defining more columns `text`, `offset_mapping` and `ground` you must include them in either references and predictions.\r\n\r\nFor example \r\n```python\r\nfeatures = datasets.Features({\r\n 'predictions':datasets.Sequence(datasets.Value(\"int32\")),\r\n \"references\": datasets.Sequence({\r\n \"references_ids\": datasets.Value(\"int32\"),\r\n \"offset_mapping\": datasets.Value(\"int32\"),\r\n 'text': datasets.Value('string'),\r\n \"ground\": datasets.Value(\"int32\")\r\n }),\r\n})\r\n```\r\n\r\nAnother option would be to simply have the two features like \r\n```python\r\nfeatures = datasets.Features({\r\n 'predictions':datasets.Sequence(datasets.Value(\"int32\")),\r\n \"references\": datasets.Sequence(datasets.Value(\"int32\")),\r\n})\r\n```\r\nand keep `offset_mapping`, `text` and `ground` as as parameters for the computation (i.e. kwargs when calling `metric.compute`).\r\n\r\n\r\nWhat is the metric you would like to implement ?\r\n\r\nI'm asking since we consider allowing additional fields as requested in the `Comet` metric (see PR and discussion [here](https://github.com/huggingface/datasets/pull/1577)) and I'd like to know if it's something that can be interesting for users.\r\n\r\nWhat do you think ?", "Hi @lhoestq,\r\n\r\nI am doing text segmentation and the metric is effectively dice score on character offsets. So I need to pass the actual spans and I want to be able to get the spans based on predictions using offset_mapping.\r\n\r\nIncluding them in references seems like a good idea. I'll try it out and get back to you. If there's a better way to write a metric function for the same, please let me know.", "Resolved via https://github.com/huggingface/datasets/pull/3824." ]
2021-01-17T07:01:14
2022-06-01T15:49:34
2022-06-01T15:49:34
CONTRIBUTOR
null
null
null
null
Hi Team, I am trying to create a custom metric for my training as follows, where f1 is my own metric: ```python def _info(self): # TODO: Specifies the datasets.MetricInfo object return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, # This defines the format of each prediction and reference features = datasets.Features({'predictions':datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")),"offset_mapping":datasets.Sequence(datasets.Value("int32")),'text':datasets.Sequence(datasets.Value('string')),"ground":datasets.Sequence(datasets.Value("int32")),}), # Homepage of the metric for documentation homepage="http://metric.homepage", # Additional links to the codebase or references codebase_urls=["http://github.com/path/to/codebase/of/new_metric"], reference_urls=["http://path.to.reference.url/new_metric"] ) def _compute(self,predictions,references,text,offset_mapping,spans): pred_spans = [] for i,preds in enumerate(predictions): current_preds = [] for j,token_preds in enumerate(preds): if (preds>0.5): current_preds+=list(range(offset_mapping[i][j][0],offset_mapping[i][j][1])) pred_spans.append(current_spans) return { "Token Wise F1": f1_score(references,predictions,labels=[0,1]), "Offset Wise F1": np.mean([f1(preds,gold) for preds,fold in zip(pred_spans,ground)]) } ``` I believe this is not correct. But that's not the issue I am facing right now. I get this error : ```python --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-144-ed7349b50821> in <module>() ----> 1 new_metric.compute(predictions=inputs["labels"],references=inputs["labels"], text=inputs["text"], offset_mapping=inputs["offset_mapping"],ground=inputs["ground"] ) 2 frames /usr/local/lib/python3.6/dist-packages/datasets/features.py in encode_batch(self, batch) 802 encoded_batch = {} 803 if set(batch) != set(self): --> 804 print(batch) 805 print(self) 806 raise ValueError("Column mismatch between batch {} and features {}".format(set(batch), set(self))) ValueError: Column mismatch between batch {'references', 'predictions'} and features {'ground', 'predictions', 'offset_mapping', 'text', 'references'} ``` On checking the features.py file, I see the call is made from add_batch() in metrics.py which only takes in predictions and references. How do I make my custom metric work? Will it work with a trainer even if I am able to make this metric work? Thanks, Gunjan
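A minimal sketch of the kwargs-based option described in the reply above: keep only `predictions` and `references` as metric features, and pass `offset_mapping`, `text`, and `ground` as extra keyword arguments to `compute`, which forwards them to `_compute`. The class name and the placeholder return value are illustrative assumptions, not the actual metric.

```python
import datasets


class SegmentationMetric(datasets.Metric):  # hypothetical name for the custom metric
    def _info(self):
        return datasets.MetricInfo(
            description="character-offset F1 for text segmentation",
            citation="",
            inputs_description="",
            features=datasets.Features({
                "predictions": datasets.Sequence(datasets.Value("int32")),
                "references": datasets.Sequence(datasets.Value("int32")),
            }),
        )

    def _compute(self, predictions, references, offset_mapping=None, text=None, ground=None):
        # offset_mapping / text / ground arrive here as the extra kwargs passed to compute()
        return {"num_examples": len(predictions)}  # placeholder score, not the real metric
```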
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1743/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1743/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
500 days, 8:48:20
https://api.github.com/repos/huggingface/datasets/issues/1741
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1741/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1741/comments
https://api.github.com/repos/huggingface/datasets/issues/1741/events
https://github.com/huggingface/datasets/issues/1741
787,327,060
MDU6SXNzdWU3ODczMjcwNjA=
1,741
error when run fine_tuning on text_classification
{ "avatar_url": "https://avatars.githubusercontent.com/u/43234824?v=4", "events_url": "https://api.github.com/users/XiaoYang66/events{/privacy}", "followers_url": "https://api.github.com/users/XiaoYang66/followers", "following_url": "https://api.github.com/users/XiaoYang66/following{/other_user}", "gists_url": "https://api.github.com/users/XiaoYang66/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/XiaoYang66", "id": 43234824, "login": "XiaoYang66", "node_id": "MDQ6VXNlcjQzMjM0ODI0", "organizations_url": "https://api.github.com/users/XiaoYang66/orgs", "received_events_url": "https://api.github.com/users/XiaoYang66/received_events", "repos_url": "https://api.github.com/users/XiaoYang66/repos", "site_admin": false, "starred_url": "https://api.github.com/users/XiaoYang66/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/XiaoYang66/subscriptions", "type": "User", "url": "https://api.github.com/users/XiaoYang66", "user_view_type": "public" }
[]
closed
false
null
[]
[ "none" ]
2021-01-16T02:23:19
2021-01-16T02:39:28
2021-01-16T02:39:18
NONE
null
null
null
null
dataset:sem_eval_2014_task_1 pretrained_model:bert-base-uncased error description: when i use these resoruce to train fine_tuning a text_classification on sem_eval_2014_task_1,there always be some problem(when i use other dataset ,there exist the error too). And i followed the colab code (url:https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb#scrollTo=TlqNaB8jIrJW). the error is like this : `File "train.py", line 69, in <module> trainer.train() File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/transformers/trainer.py", line 784, in train for step, inputs in enumerate(epoch_iterator): File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 435, in __next__ data = self._next_data() File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 475, in _next_data data = self._dataset_fetcher.fetch(index) # may raise StopIteration File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch data = [self.dataset[idx] for idx in possibly_batched_index] File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp> data = [self.dataset[idx] for idx in possibly_batched_index] KeyError: 2` this is my code : ```dataset_name = 'sem_eval_2014_task_1' num_labels_size = 3 batch_size = 4 model_checkpoint = 'bert-base-uncased' number_train_epoch = 5 def tokenize(batch): return tokenizer(batch['premise'], batch['hypothesis'], truncation=True, ) def compute_metrics(pred): labels = pred.label_ids preds = pred.predictions.argmax(-1) precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='micro') acc = accuracy_score(labels, preds) return { 'accuracy': acc, 'f1': f1, 'precision': precision, 'recall': recall } model = BertForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels_size) tokenizer = BertTokenizerFast.from_pretrained(model_checkpoint, use_fast=True) train_dataset = load_dataset(dataset_name, split='train') test_dataset = load_dataset(dataset_name, split='test') train_encoded_dataset = train_dataset.map(tokenize, batched=True) test_encoded_dataset = test_dataset.map(tokenize, batched=True) args = TrainingArguments( output_dir='./results', evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, num_train_epochs=number_train_epoch, weight_decay=0.01, do_predict=True, ) trainer = Trainer( model=model, args=args, compute_metrics=compute_metrics, train_dataset=train_encoded_dataset, eval_dataset=test_encoded_dataset, tokenizer=tokenizer ) trainer.train() trainer.evaluate()
{ "avatar_url": "https://avatars.githubusercontent.com/u/43234824?v=4", "events_url": "https://api.github.com/users/XiaoYang66/events{/privacy}", "followers_url": "https://api.github.com/users/XiaoYang66/followers", "following_url": "https://api.github.com/users/XiaoYang66/following{/other_user}", "gists_url": "https://api.github.com/users/XiaoYang66/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/XiaoYang66", "id": 43234824, "login": "XiaoYang66", "node_id": "MDQ6VXNlcjQzMjM0ODI0", "organizations_url": "https://api.github.com/users/XiaoYang66/orgs", "received_events_url": "https://api.github.com/users/XiaoYang66/received_events", "repos_url": "https://api.github.com/users/XiaoYang66/repos", "site_admin": false, "starred_url": "https://api.github.com/users/XiaoYang66/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/XiaoYang66/subscriptions", "type": "User", "url": "https://api.github.com/users/XiaoYang66", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1741/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1741/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:15:59
https://api.github.com/repos/huggingface/datasets/issues/1733
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1733/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1733/comments
https://api.github.com/repos/huggingface/datasets/issues/1733/events
https://github.com/huggingface/datasets/issues/1733
784,903,002
MDU6SXNzdWU3ODQ5MDMwMDI=
1,733
connection issue with glue, what is the data url for glue?
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hello @juliahane, which config of GLUE causes you trouble?\r\nThe URLs are defined in the dataset script source code: https://github.com/huggingface/datasets/blob/master/datasets/glue/glue.py" ]
2021-01-13T08:37:40
2021-08-04T18:13:55
2021-08-04T18:13:55
NONE
null
null
null
null
Hi, my code sometimes fails due to a connection issue with GLUE. Could you tell me which URL the datasets library is trying to read GLUE from, so I can test from the machines I am working on whether the issue is on my side or not? Thanks
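A minimal connectivity check, assuming the library still fetches dataset scripts from raw.githubusercontent.com (as the error messages for other scripts elsewhere in this dump suggest); the exact GLUE data URLs are defined as string constants inside `glue.py`, so fetching that script both tests reachability and surfaces the URLs. The script path below is an assumption based on the repository layout linked in the comment.

```python
# Sketch: check whether this machine can reach the host that serves the GLUE script,
# then print the data URLs defined inside it. The raw.githubusercontent.com path is
# an assumption; adjust the branch/version segment to match the installed datasets version.
import requests

script_url = "https://raw.githubusercontent.com/huggingface/datasets/master/datasets/glue/glue.py"
resp = requests.get(script_url, timeout=10)
print(resp.status_code)  # 200 means the script host is reachable from this machine

# The per-task download URLs are plain string constants in the script:
for line in resp.text.splitlines():
    if "http" in line:
        print(line.strip())
```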
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1733/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1733/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
203 days, 9:36:15
https://api.github.com/repos/huggingface/datasets/issues/1731
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1731/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1731/comments
https://api.github.com/repos/huggingface/datasets/issues/1731/events
https://github.com/huggingface/datasets/issues/1731
784,744,674
MDU6SXNzdWU3ODQ3NDQ2NzQ=
1,731
Couldn't reach swda.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/13365326?v=4", "events_url": "https://api.github.com/users/yangp725/events{/privacy}", "followers_url": "https://api.github.com/users/yangp725/followers", "following_url": "https://api.github.com/users/yangp725/following{/other_user}", "gists_url": "https://api.github.com/users/yangp725/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yangp725", "id": 13365326, "login": "yangp725", "node_id": "MDQ6VXNlcjEzMzY1MzI2", "organizations_url": "https://api.github.com/users/yangp725/orgs", "received_events_url": "https://api.github.com/users/yangp725/received_events", "repos_url": "https://api.github.com/users/yangp725/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yangp725/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yangp725/subscriptions", "type": "User", "url": "https://api.github.com/users/yangp725", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @yangp725,\r\nThe SWDA has been added very recently and has not been released yet, thus it is not available in the `1.2.0` version of 🤗`datasets`.\r\nYou can still access it by installing the latest version of the library (master branch), by following instructions in [this issue](https://github.com/huggingface/datasets/issues/1641#issuecomment-751571471).\r\nLet me know if this helps !", "Thanks @SBrandeis ,\r\nProblem solved by downloading and installing the latest version datasets." ]
2021-01-13T02:57:40
2021-01-13T11:17:40
2021-01-13T11:17:40
NONE
null
null
null
null
ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.0/datasets/swda/swda.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/13365326?v=4", "events_url": "https://api.github.com/users/yangp725/events{/privacy}", "followers_url": "https://api.github.com/users/yangp725/followers", "following_url": "https://api.github.com/users/yangp725/following{/other_user}", "gists_url": "https://api.github.com/users/yangp725/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yangp725", "id": 13365326, "login": "yangp725", "node_id": "MDQ6VXNlcjEzMzY1MzI2", "organizations_url": "https://api.github.com/users/yangp725/orgs", "received_events_url": "https://api.github.com/users/yangp725/received_events", "repos_url": "https://api.github.com/users/yangp725/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yangp725/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yangp725/subscriptions", "type": "User", "url": "https://api.github.com/users/yangp725", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1731/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1731/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
8:20:00
https://api.github.com/repos/huggingface/datasets/issues/1729
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1729/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1729/comments
https://api.github.com/repos/huggingface/datasets/issues/1729/events
https://github.com/huggingface/datasets/issues/1729
784,565,898
MDU6SXNzdWU3ODQ1NjU4OTg=
1,729
Is there support for Deep learning datasets?
{ "avatar_url": "https://avatars.githubusercontent.com/u/28235457?v=4", "events_url": "https://api.github.com/users/pablodz/events{/privacy}", "followers_url": "https://api.github.com/users/pablodz/followers", "following_url": "https://api.github.com/users/pablodz/following{/other_user}", "gists_url": "https://api.github.com/users/pablodz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pablodz", "id": 28235457, "login": "pablodz", "node_id": "MDQ6VXNlcjI4MjM1NDU3", "organizations_url": "https://api.github.com/users/pablodz/orgs", "received_events_url": "https://api.github.com/users/pablodz/received_events", "repos_url": "https://api.github.com/users/pablodz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pablodz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pablodz/subscriptions", "type": "User", "url": "https://api.github.com/users/pablodz", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @ZurMaD!\r\nThanks for your interest in 🤗 `datasets`. Support for image datasets is at an early stage, with CIFAR-10 added in #1617 \r\nMNIST is also on the way: #1730 \r\n\r\nIf you feel like adding another image dataset, I would advise starting by reading the [ADD_NEW_DATASET.md](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md) guide. New datasets are always very much appreciated 🚀\r\n" ]
2021-01-12T20:22:41
2021-03-31T04:24:07
2021-03-31T04:24:07
NONE
null
null
null
null
I looked around this repository and, looking at the datasets, I think there is no support for image datasets. Or am I missing something? For example, would it be possible to add a repo like this: https://github.com/DZPeru/fish-datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/28235457?v=4", "events_url": "https://api.github.com/users/pablodz/events{/privacy}", "followers_url": "https://api.github.com/users/pablodz/followers", "following_url": "https://api.github.com/users/pablodz/following{/other_user}", "gists_url": "https://api.github.com/users/pablodz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pablodz", "id": 28235457, "login": "pablodz", "node_id": "MDQ6VXNlcjI4MjM1NDU3", "organizations_url": "https://api.github.com/users/pablodz/orgs", "received_events_url": "https://api.github.com/users/pablodz/received_events", "repos_url": "https://api.github.com/users/pablodz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pablodz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pablodz/subscriptions", "type": "User", "url": "https://api.github.com/users/pablodz", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1729/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1729/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
77 days, 8:01:26
https://api.github.com/repos/huggingface/datasets/issues/1728
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1728/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1728/comments
https://api.github.com/repos/huggingface/datasets/issues/1728/events
https://github.com/huggingface/datasets/issues/1728
784,458,342
MDU6SXNzdWU3ODQ0NTgzNDI=
1,728
Add an entry to an arrow dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/18645407?v=4", "events_url": "https://api.github.com/users/ameet-1997/events{/privacy}", "followers_url": "https://api.github.com/users/ameet-1997/followers", "following_url": "https://api.github.com/users/ameet-1997/following{/other_user}", "gists_url": "https://api.github.com/users/ameet-1997/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ameet-1997", "id": 18645407, "login": "ameet-1997", "node_id": "MDQ6VXNlcjE4NjQ1NDA3", "organizations_url": "https://api.github.com/users/ameet-1997/orgs", "received_events_url": "https://api.github.com/users/ameet-1997/received_events", "repos_url": "https://api.github.com/users/ameet-1997/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ameet-1997/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ameet-1997/subscriptions", "type": "User", "url": "https://api.github.com/users/ameet-1997", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @ameet-1997,\r\nI think what you are looking for is the `concatenate_datasets` function: https://huggingface.co/docs/datasets/processing.html?highlight=concatenate#concatenate-several-datasets\r\n\r\nFor your use case, I would use the [`map` method](https://huggingface.co/docs/datasets/processing.html?highlight=concatenate#processing-data-with-map) to transform the SQuAD sentences and the `concatenate` the original and mapped dataset.\r\n\r\nLet me know If this helps!", "That's a great idea! Thank you so much!\r\n\r\nWhen I try that solution, I get the following error when I try to concatenate `datasets` and `modified_dataset`. I have also attached the output I get when I print out those two variables. Am I missing something?\r\n\r\nCode:\r\n``` python\r\ncombined_dataset = concatenate_datasets([datasets, modified_dataset])\r\n```\r\n\r\nError:\r\n```\r\nAttributeError: 'DatasetDict' object has no attribute 'features'\r\n```\r\n\r\nOutput:\r\n```\r\n(Pdb) datasets\r\nDatasetDict({\r\n train: Dataset({\r\n features: ['attention_mask', 'input_ids', 'special_tokens_mask'],\r\n num_rows: 493\r\n })\r\n})\r\n(Pdb) modified_dataset\r\nDatasetDict({\r\n train: Dataset({\r\n features: ['attention_mask', 'input_ids', 'special_tokens_mask'],\r\n num_rows: 493\r\n })\r\n})\r\n```\r\n\r\nThe error is stemming from the fact that the attribute `datasets.features` does not exist. Would it not be possible to use `concatenate_datasets` in such a case? Is there an alternate solution?", "You should do `combined_dataset = concatenate_datasets([datasets['train'], modified_dataset['train']])`\r\n\r\nDidn't we talk about returning a Dataset instead of a DatasetDict with load_dataset and no split provided @lhoestq? Not sure it's the way to go but I'm wondering if it's not simpler for some use-cases.", "> Didn't we talk about returning a Dataset instead of a DatasetDict with load_dataset and no split provided @lhoestq? Not sure it's the way to go but I'm wondering if it's not simpler for some use-cases.\r\n\r\nMy opinion is that users should always know in advance what type of objects they're going to get. Otherwise the development workflow on their side is going to be pretty chaotic with sometimes unexpected behaviors.\r\nFor instance is `split=` is not specified it's currently always returning a DatasetDict. And if `split=\"train\"` is given for example it's always returning a Dataset.", "Thanks @thomwolf. Your solution worked!" ]
2021-01-12T18:01:47
2021-01-18T19:15:32
2021-01-18T19:15:32
NONE
null
null
null
null
Is it possible to add an entry to a dataset object? **Motivation: I want to transform the sentences in the dataset and add them to the original dataset** For example, say we have the following code: ``` python from datasets import load_dataset # Load a dataset and print the first examples in the training set squad_dataset = load_dataset('squad') print(squad_dataset['train'][0]) ``` Is it possible to add an entry to `squad_dataset`? Something like the following? ``` python squad_dataset.append({'text': "This is a new sentence"}) ``` The motivation for doing this is that I want to transform the sentences in the squad dataset and add them to the original dataset. If the above doesn't work, is there any other way of achieving the motivation mentioned above? Perhaps by creating a new arrow dataset using the older one and the transformed sentences?
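A minimal sketch of the map-then-concatenate approach suggested in the comments above, assuming the goal is to append transformed copies of existing examples; the transformation below is only a placeholder.

```python
# Sketch: transform a split with `map`, then concatenate the original and transformed
# Dataset objects. concatenate_datasets works on Dataset objects (not DatasetDict),
# so index into the "train" split first, as pointed out in the comments.
from datasets import load_dataset, concatenate_datasets

squad = load_dataset("squad")

def transform(example):
    # placeholder transformation; replace with the real sentence rewriting
    example["context"] = example["context"].lower()
    return example

modified_train = squad["train"].map(transform)
combined_train = concatenate_datasets([squad["train"], modified_train])
print(len(squad["train"]), len(combined_train))
```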
{ "avatar_url": "https://avatars.githubusercontent.com/u/18645407?v=4", "events_url": "https://api.github.com/users/ameet-1997/events{/privacy}", "followers_url": "https://api.github.com/users/ameet-1997/followers", "following_url": "https://api.github.com/users/ameet-1997/following{/other_user}", "gists_url": "https://api.github.com/users/ameet-1997/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ameet-1997", "id": 18645407, "login": "ameet-1997", "node_id": "MDQ6VXNlcjE4NjQ1NDA3", "organizations_url": "https://api.github.com/users/ameet-1997/orgs", "received_events_url": "https://api.github.com/users/ameet-1997/received_events", "repos_url": "https://api.github.com/users/ameet-1997/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ameet-1997/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ameet-1997/subscriptions", "type": "User", "url": "https://api.github.com/users/ameet-1997", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1728/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1728/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
6 days, 1:13:45
https://api.github.com/repos/huggingface/datasets/issues/1727
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1727/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1727/comments
https://api.github.com/repos/huggingface/datasets/issues/1727/events
https://github.com/huggingface/datasets/issues/1727
784,435,131
MDU6SXNzdWU3ODQ0MzUxMzE=
1,727
BLEURT score calculation raises UnrecognizedFlagError
{ "avatar_url": "https://avatars.githubusercontent.com/u/6603920?v=4", "events_url": "https://api.github.com/users/nadavo/events{/privacy}", "followers_url": "https://api.github.com/users/nadavo/followers", "following_url": "https://api.github.com/users/nadavo/following{/other_user}", "gists_url": "https://api.github.com/users/nadavo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nadavo", "id": 6603920, "login": "nadavo", "node_id": "MDQ6VXNlcjY2MDM5MjA=", "organizations_url": "https://api.github.com/users/nadavo/orgs", "received_events_url": "https://api.github.com/users/nadavo/received_events", "repos_url": "https://api.github.com/users/nadavo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nadavo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nadavo/subscriptions", "type": "User", "url": "https://api.github.com/users/nadavo", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Upgrading tensorflow to version 2.4.0 solved the issue.", "I still have the same error even with TF 2.4.0.", "And I have the same error with TF 2.4.1. I believe this issue should be reopened. Any ideas?!", "I'm seeing the same issue with TF 2.4.1 when running the following in https://colab.research.google.com/github/huggingface/datasets/blob/master/notebooks/Overview.ipynb:\r\n```\r\n!pip install git+https://github.com/google-research/bleurt.git\r\nreferences = [\"foo bar baz\", \"one two three\"]\r\nbleurt_metric = load_metric('bleurt')\r\npredictions = [\"foo bar\", \"four five six\"]\r\nbleurt_metric.compute(predictions=predictions, references=references)\r\n```", "@aleSuglia @oscartackstrom - Are you getting the error when running your code in a Jupyter notebook ?\r\n\r\nI tried reproducing this error again, and was unable to do so from the python command line console in a virtual environment similar to the one I originally used (and unfortunately no longer have access to) when I first got the error. \r\nHowever, I've managed to reproduce the error by running the same code in a Jupyter notebook running a kernel from the same virtual environment.\r\nThis made me suspect that the problem is somehow related to the Jupyter notebook.\r\n\r\nMore environment details:\r\n```\r\nOS: Ubuntu Linux 18.04\r\nconda==4.8.3\r\npython==3.8.5\r\ndatasets==1.3.0\r\ntensorflow==2.4.0\r\nBLEURT==0.0.1\r\nnotebook==6.2.0\r\n```", "This happens when running the notebook on colab. The issue seems to be that colab populates sys.argv with arguments not handled by bleurt.\r\n\r\nRunning this before calling bleurt fixes it:\r\n```\r\nimport sys\r\nsys.argv = sys.argv[:1]\r\n```\r\n\r\nNot the most elegant solution. Perhaps it needs to be fixed in the bleurt code itself rather than huggingface?\r\n\r\nThis is the output of `print(sys.argv)` when running on colab:\r\n```\r\n['/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py', '-f', '/root/.local/share/jupyter/runtime/kernel-a857a78c-44d6-4b9d-b18a-030b858ee327.json']\r\n```", "I got the error when running it from the command line. It looks more like an error that should be fixed in the BLEURT codebase.", "Seems to be a known issue in the bleurt codebase: https://github.com/google-research/bleurt/issues/24.", "Hi, the problem should be solved now.", "Hi @tsellam! I can verify that the issue is indeed fixed now. Thanks!" ]
2021-01-12T17:27:02
2022-06-01T16:06:02
2022-06-01T16:06:02
NONE
null
null
null
null
Calling the `compute` method for **bleurt** metric fails with an `UnrecognizedFlagError` for `FLAGS.bleurt_batch_size`. My environment: ``` python==3.8.5 datasets==1.2.0 tensorflow==2.3.1 cudatoolkit==11.0.221 ``` Test code for reproducing the error: ``` from datasets import load_metric bleurt = load_metric('bleurt') gen_text = "I am walking on the promenade today" ref_text = "I am walking along the promenade on this sunny day" bleurt.compute(predictions=[gen_text], references=[ref_text]) ``` Error Output: ``` Using default BLEURT-Base checkpoint for sequence maximum length 128. You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512'). INFO:tensorflow:Reading checkpoint /home/ubuntu/.cache/huggingface/metrics/bleurt/default/downloads/extracted/9aee35580225730ac5422599f35c4986e4c49cafd08082123342b1019720dac4/bleurt-base-128. INFO:tensorflow:Config file found, reading. INFO:tensorflow:Will load checkpoint bert_custom INFO:tensorflow:Performs basic checks... INFO:tensorflow:... name:bert_custom INFO:tensorflow:... vocab_file:vocab.txt INFO:tensorflow:... bert_config_file:bert_config.json INFO:tensorflow:... do_lower_case:True INFO:tensorflow:... max_seq_length:128 INFO:tensorflow:Creating BLEURT scorer. INFO:tensorflow:Loading model... INFO:tensorflow:BLEURT initialized. --------------------------------------------------------------------------- UnrecognizedFlagError Traceback (most recent call last) <ipython-input-12-8b3f4322318a> in <module> 2 gen_text = "I am walking on the promenade today" 3 ref_text = "I am walking along the promenade on this sunny day" ----> 4 bleurt.compute(predictions=[gen_text], references=[ref_text]) ~/anaconda3/envs/noved/lib/python3.8/site-packages/datasets/metric.py in compute(self, *args, **kwargs) 396 references = self.data["references"] 397 with temp_seed(self.seed): --> 398 output = self._compute(predictions=predictions, references=references, **kwargs) 399 400 if self.buf_writer is not None: ~/.cache/huggingface/modules/datasets_modules/metrics/bleurt/b1de33e1cbbcb1dbe276c887efa1fad68c6aff913885108078fa1ad408908778/bleurt.py in _compute(self, predictions, references) 103 104 def _compute(self, predictions, references): --> 105 scores = self.scorer.score(references=references, candidates=predictions) 106 return {"scores": scores} ~/anaconda3/envs/noved/lib/python3.8/site-packages/bleurt/score.py in score(self, references, candidates, batch_size) 164 """ 165 if not batch_size: --> 166 batch_size = FLAGS.bleurt_batch_size 167 168 candidates, references = list(candidates), list(references) ~/anaconda3/envs/noved/lib/python3.8/site-packages/tensorflow/python/platform/flags.py in __getattr__(self, name) 83 # a flag.
84 if not wrapped.is_parsed(): ---> 85 wrapped(_sys.argv) 86 return wrapped.__getattr__(name) 87 ~/anaconda3/envs/noved/lib/python3.8/site-packages/absl/flags/_flagvalues.py in __call__(self, argv, known_only) 643 for name, value in unknown_flags: 644 suggestions = _helpers.get_flag_suggestions(name, list(self)) --> 645 raise _exceptions.UnrecognizedFlagError( 646 name, value, suggestions=suggestions) 647 UnrecognizedFlagError: Unknown command line flag 'f' ``` Possible Fix: Modify `_compute` method https://github.com/huggingface/datasets/blob/7e64851a12263dc74d41c668167918484c8000ab/metrics/bleurt/bleurt.py#L104 to receive a `batch_size` argument, for example: ``` def _compute(self, predictions, references, batch_size=1): scores = self.scorer.score(references=references, candidates=predictions, batch_size=batch_size) return {"scores": scores} ```
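A short sketch of the workaround discussed in the comments for running this inside Jupyter/Colab, where the kernel injects extra command-line arguments that absl then fails to parse; trimming `sys.argv` before the metric is used avoids the unknown-flag error. This is only a workaround, not the upstream fix later applied in the bleurt repository.

```python
# Sketch: absl parses sys.argv lazily when FLAGS.bleurt_batch_size is first read,
# so strip the notebook kernel's extra arguments before computing the metric.
import sys
sys.argv = sys.argv[:1]

from datasets import load_metric

bleurt = load_metric("bleurt")
gen_text = "I am walking on the promenade today"
ref_text = "I am walking along the promenade on this sunny day"
print(bleurt.compute(predictions=[gen_text], references=[ref_text]))
```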
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1727/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1727/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
504 days, 22:39:00
https://api.github.com/repos/huggingface/datasets/issues/1725
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1725/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1725/comments
https://api.github.com/repos/huggingface/datasets/issues/1725/events
https://github.com/huggingface/datasets/issues/1725
784,182,273
MDU6SXNzdWU3ODQxODIyNzM=
1,725
load the local dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/41193842?v=4", "events_url": "https://api.github.com/users/xinjicong/events{/privacy}", "followers_url": "https://api.github.com/users/xinjicong/followers", "following_url": "https://api.github.com/users/xinjicong/following{/other_user}", "gists_url": "https://api.github.com/users/xinjicong/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/xinjicong", "id": 41193842, "login": "xinjicong", "node_id": "MDQ6VXNlcjQxMTkzODQy", "organizations_url": "https://api.github.com/users/xinjicong/orgs", "received_events_url": "https://api.github.com/users/xinjicong/received_events", "repos_url": "https://api.github.com/users/xinjicong/repos", "site_admin": false, "starred_url": "https://api.github.com/users/xinjicong/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xinjicong/subscriptions", "type": "User", "url": "https://api.github.com/users/xinjicong", "user_view_type": "public" }
[]
closed
false
null
[]
[ "You should rephrase your question or give more examples and details on what you want to do.\r\n\r\nit’s not possible to understand it and help you with only this information.", "sorry for that.\r\ni want to know how could i load the train set and the test set from the local ,which api or function should i use .\r\n", "Did you try to follow the instructions in the documentation?\r\nHere: https://huggingface.co/docs/datasets/loading_datasets.html#from-local-files", "thanks a lot \r\ni find that the problem is i dont use vpn...\r\nso i have to keep my net work even if i want to load the local data ?", "We will solve this soon (cf #1724)", "thanks a lot", "Hi! `json` is a packaged dataset now, which means its script comes with the library and doesn't require an internet connection." ]
2021-01-12T12:12:55
2022-06-01T16:00:59
2022-06-01T16:00:59
NONE
null
null
null
null
Your guidebook's example is like >>> from datasets import load_dataset >>> dataset = load_dataset('json', data_files='my_file.json') but the first arg is a path... so what should I do if I want to load a local dataset for model training? I will be grateful if you can help me handle this problem! Thanks a lot!
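For reference, a short sketch of loading local files with the generic builders, following the documentation linked in the comments; the file paths are placeholders.

```python
# Sketch: the first argument selects a generic builder ('json', 'csv', 'text'),
# and data_files points at local files. Paths below are placeholders.
from datasets import load_dataset

# a single local JSON file becomes the "train" split by default
dataset = load_dataset("json", data_files="my_file.json")

# separate files can be mapped to named splits
dataset = load_dataset(
    "json",
    data_files={"train": "train.json", "test": "test.json"},
)
print(dataset)
```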
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1725/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1725/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
505 days, 3:48:04
https://api.github.com/repos/huggingface/datasets/issues/1724
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1724/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1724/comments
https://api.github.com/repos/huggingface/datasets/issues/1724/events
https://github.com/huggingface/datasets/issues/1724
784,023,338
MDU6SXNzdWU3ODQwMjMzMzg=
1,724
could not run models on an offline server successfully
{ "avatar_url": "https://avatars.githubusercontent.com/u/49967236?v=4", "events_url": "https://api.github.com/users/lkcao/events{/privacy}", "followers_url": "https://api.github.com/users/lkcao/followers", "following_url": "https://api.github.com/users/lkcao/following{/other_user}", "gists_url": "https://api.github.com/users/lkcao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lkcao", "id": 49967236, "login": "lkcao", "node_id": "MDQ6VXNlcjQ5OTY3MjM2", "organizations_url": "https://api.github.com/users/lkcao/orgs", "received_events_url": "https://api.github.com/users/lkcao/received_events", "repos_url": "https://api.github.com/users/lkcao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lkcao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lkcao/subscriptions", "type": "User", "url": "https://api.github.com/users/lkcao", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Transferred to `datasets` based on the stack trace.", "Hi @lkcao !\r\nYour issue is indeed related to `datasets`. In addition to installing the package manually, you will need to download the `text.py` script on your server. You'll find it (under `datasets/datasets/text`: https://github.com/huggingface/datasets/blob/master/datasets/text/text.py.\r\nThen you can change the line 221 of `run_mlm_new.py` into:\r\n```python\r\n datasets = load_dataset('/path/to/text.py', data_files=data_files)\r\n```\r\nWhere `/path/to/text.py` is the path on the server where you saved the `text.py` script.", "We're working on including the local dataset builders (csv, text, json etc.) directly in the `datasets` package so that they can be used offline", "The local dataset builders (csv, text , json and pandas) are now part of the `datasets` package since #1726 :)\r\nYou can now use them offline\r\n```python\r\ndatasets = load_dataset('text', data_files=data_files)\r\n```\r\n\r\nWe'll do a new release soon", "> The local dataset builders (csv, text , json and pandas) are now part of the `datasets` package since #1726 :)\r\n> You can now use them offline\r\n> \r\n> ```python\r\n> datasets = load_dataset('text', data_files=data_files)\r\n> ```\r\n> \r\n> We'll do a new release soon\r\n\r\nso the new version release now?", "Yes it's been available since datasets 1.3.0 !" ]
2021-01-12T06:08:06
2022-10-05T12:39:07
2022-10-05T12:39:07
NONE
null
null
null
null
Hi, I really need your help with this. I am trying to fine-tune a RoBERTa model on a remote server that strictly bans internet access. I installed all the packages by hand and tried to run run_mlm.py on the server. It works well on Colab, but when I try to run it on this offline server, it shows: ![image](https://user-images.githubusercontent.com/49967236/104276256-25a88600-546a-11eb-9776-8ec695dfa24e.png) Is there anything I can do? Is it possible to download everything into the cache and upload it to the server? Please help me out...
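A sketch of the offline workflow described in the comments, assuming a datasets version (1.3.0 or later) in which the csv/json/text builders ship with the package so no script has to be downloaded; the file paths and local model directory are placeholders, and the model weights must also be copied to the server beforehand.

```python
# Sketch: tokenize a local text corpus without any internet access, using the
# packaged 'text' builder. All paths are placeholders.
from datasets import load_dataset
from transformers import AutoTokenizer

data_files = {"train": "train.txt", "validation": "valid.txt"}
raw_datasets = load_dataset("text", data_files=data_files)

tokenizer = AutoTokenizer.from_pretrained("/path/to/local/roberta-base")

def tokenize(batch):
    return tokenizer(batch["text"], return_special_tokens_mask=True)

tokenized = raw_datasets.map(tokenize, batched=True, remove_columns=["text"])
print(tokenized)
```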
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1724/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1724/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
631 days, 6:31:01
https://api.github.com/repos/huggingface/datasets/issues/1718
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1718/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1718/comments
https://api.github.com/repos/huggingface/datasets/issues/1718/events
https://github.com/huggingface/datasets/issues/1718
783,474,753
MDU6SXNzdWU3ODM0NzQ3NTM=
1,718
Possible cache miss in datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/18296312?v=4", "events_url": "https://api.github.com/users/ofirzaf/events{/privacy}", "followers_url": "https://api.github.com/users/ofirzaf/followers", "following_url": "https://api.github.com/users/ofirzaf/following{/other_user}", "gists_url": "https://api.github.com/users/ofirzaf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ofirzaf", "id": 18296312, "login": "ofirzaf", "node_id": "MDQ6VXNlcjE4Mjk2MzEy", "organizations_url": "https://api.github.com/users/ofirzaf/orgs", "received_events_url": "https://api.github.com/users/ofirzaf/received_events", "repos_url": "https://api.github.com/users/ofirzaf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ofirzaf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ofirzaf/subscriptions", "type": "User", "url": "https://api.github.com/users/ofirzaf", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Thanks for reporting !\r\nI was able to reproduce thanks to your code and find the origin of the bug.\r\nThe cache was not reusing the same file because one object was not deterministic. It comes from a conversion from `set` to `list` in the `datasets.arrrow_dataset.transmit_format` function, where the resulting list would not always be in the same order and therefore the function that computes the hash used by the cache would not always return the same result.\r\nI'm opening a PR to fix this.\r\n\r\nAlso we plan to do a new release in the coming days so you can expect the fix to be available soon.\r\nNote that you can still specify `cache_file_name=` in the second `map()` call to name the cache file yourself if you want to.", "Thanks for the fast reply, waiting for the fix :)\r\n\r\nI tried to use `cache_file_names` and wasn't sure how, I tried to give it the following:\r\n```\r\ntokenized_datasets = tokenized_datasets.map(\r\n group_texts,\r\n batched=True,\r\n num_proc=60,\r\n load_from_cache_file=True,\r\n cache_file_names={k: f'.cache/{str(k)}' for k in tokenized_datasets}\r\n)\r\n```\r\n\r\nand got an error:\r\n```\r\nmultiprocess.pool.RemoteTraceback:\r\n\"\"\"\r\nTraceback (most recent call last):\r\n File \"/venv/lib/python3.6/site-packages/multiprocess/pool.py\", line 119, in worker\r\n result = (True, func(*args, **kwds))\r\n File \"/venv/lib/python3.6/site-packages/datasets/arrow_dataset.py\", line 157, in wrapper\r\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\r\n File \"/venv/lib/python3.6/site-packages/datasets/fingerprint.py\", line 163, in wrapper\r\n out = func(self, *args, **kwargs)\r\n File \"/venv/lib/python3.6/site-packages/datasets/arrow_dataset.py\", line 1491, in _map_single\r\n tmp_file = tempfile.NamedTemporaryFile(\"wb\", dir=os.path.dirname(cache_file_name), delete=False)\r\n File \"/usr/lib/python3.6/tempfile.py\", line 690, in NamedTemporaryFile\r\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)\r\n File \"/usr/lib/python3.6/tempfile.py\", line 401, in _mkstemp_inner\r\n fd = _os.open(file, flags, 0o600)\r\nFileNotFoundError: [Errno 2] No such file or directory: '_00000_of_00060.cache/tmpsvszxtop'\r\n\"\"\"\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"test.py\", line 48, in <module>\r\n cache_file_names={k: f'.cache/{str(k)}' for k in tokenized_datasets}\r\n File \"/venv/lib/python3.6/site-packages/datasets/dataset_dict.py\", line 303, in map\r\n for k, dataset in self.items()\r\n File \"/venv/lib/python3.6/site-packages/datasets/dataset_dict.py\", line 303, in <dictcomp>\r\n for k, dataset in self.items()\r\n File \"/venv/lib/python3.6/site-packages/datasets/arrow_dataset.py\", line 1317, in map\r\n transformed_shards = [r.get() for r in results]\r\n File \"/venv/lib/python3.6/site-packages/datasets/arrow_dataset.py\", line 1317, in <listcomp>\r\n transformed_shards = [r.get() for r in results]\r\n File \"/venv/lib/python3.6/site-packages/multiprocess/pool.py\", line 644, in get\r\n raise self._value\r\nFileNotFoundError: [Errno 2] No such file or directory: '_00000_of_00060.cache/tmpsvszxtop'\r\n```\r\n", "The documentation says\r\n```\r\ncache_file_names (`Optional[Dict[str, str]]`, defaults to `None`): Provide the name of a cache file to use to store the\r\n results of the computation instead of the automatically generated cache file name.\r\n You have to provide one :obj:`cache_file_name` per dataset in the dataset 
dictionary.\r\n```\r\nWhat is expected is simply the name of a file, not a path. The file will be located in the cache directory of the `wikitext` dataset. You can try again with something like\r\n```python\r\ncache_file_names = {k: f'tokenized_and_grouped_{str(k)}' for k in tokenized_datasets}\r\n```", "Managed to get `cache_file_names` working and caching works well with it\r\nHad to make a small modification for it to work:\r\n```\r\ncache_file_names = {k: f'tokenized_and_grouped_{str(k)}.arrow' for k in tokenized_datasets}\r\n```", "Another comment on `cache_file_names`, it doesn't save the produced cached files in the dataset's cache folder, it requires to give a path to an existing directory for it to work.\r\nI can confirm that this is how it works in `datasets==1.1.3`", "Oh yes indeed ! Maybe we need to update the docstring to mention that it is a path", "I fixed the docstring. Hopefully this is less confusing now: https://github.com/huggingface/datasets/commit/42ccc0012ba8864e6db1392430100f350236183a", "I upgraded to the latest version and I encountered some strange behaviour, the script I posted in the OP doesn't trigger recalculation, however, if I add the following change it does trigger partial recalculation, I am not sure if its something wrong on my machine or a bug:\r\n```\r\nfrom datasets import load_dataset\r\nfrom transformers import AutoTokenizer\r\n\r\ndatasets = load_dataset('wikitext', 'wikitext-103-raw-v1')\r\ntokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', use_fast=True)\r\n\r\ncolumn_names = datasets[\"train\"].column_names\r\ntext_column_name = \"text\" if \"text\" in column_names else column_names[0]\r\ndef tokenize_function(examples):\r\n return tokenizer(examples[text_column_name], return_special_tokens_mask=True)\r\n# CHANGE\r\nprint('hello')\r\n# CHANGE\r\n\r\ntokenized_datasets = datasets.map(\r\n tokenize_function,\r\n batched=True,\r\n...\r\n```\r\nI am using datasets in the `run_mlm.py` script in the transformers examples and I found that if I change the script without touching any of the preprocessing. it still triggers recalculation which is very weird\r\n\r\nEdit: accidently clicked the close issue button ", "This is because the `group_texts` line definition changes (it is defined 3 lines later than in the previous call). Currently if a function is moved elsewhere in a script we consider it to be different.\r\n\r\nNot sure this is actually a good idea to keep this behavior though. We had this as a security in the early development of the lib but now the recursive hashing of objects is robust so we can probably remove that.\r\nMoreover we're already ignoring the line definition for lambda functions.", "I opened a PR to change this, let me know what you think.", "Sounds great, thank you for your quick responses and help! Looking forward for the next release.", "I am having a similar issue where only the grouped files are loaded from cache while the tokenized ones aren't. I can confirm both datasets are being stored to file, but only the grouped version is loaded from cache. Not sure what might be going on. But I've tried to remove all kinds of non deterministic behaviour, but still no luck. 
Thanks for the help!\r\n\r\n\r\n```python\r\n # Datasets\r\n train = sorted(glob(args.data_dir + '*.{}'.format(args.ext)))\r\n if args.dev_split >= len(train):\r\n raise ValueError(\"Not enough dev files\")\r\n dev = []\r\n state = random.Random(1001)\r\n for _ in range(args.dev_split):\r\n dev.append(train.pop(state.randint(0, len(train) - 1)))\r\n\r\n max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)\r\n\r\n def tokenize_function(examples):\r\n return tokenizer(examples['text'], return_special_tokens_mask=True)\r\n\r\n def group_texts(examples):\r\n # Concatenate all texts from our dataset and generate chunks of max_seq_length\r\n concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\r\n total_length = len(concatenated_examples[list(examples.keys())[0]])\r\n # Truncate (not implementing padding)\r\n total_length = (total_length // max_seq_length) * max_seq_length\r\n # Split by chunks of max_seq_length\r\n result = {\r\n k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]\r\n for k, t in concatenated_examples.items()\r\n }\r\n return result\r\n\r\n datasets = load_dataset(\r\n 'text', name='DBNL', data_files={'train': train[:10], 'dev': dev[:5]}, \r\n cache_dir=args.data_cache_dir)\r\n datasets = datasets.map(tokenize_function, \r\n batched=True, remove_columns=['text'], \r\n cache_file_names={k: os.path.join(args.data_cache_dir, f'{k}-tokenized') for k in datasets},\r\n load_from_cache_file=not args.overwrite_cache)\r\n datasets = datasets.map(group_texts, \r\n batched=True,\r\n cache_file_names={k: os.path.join(args.data_cache_dir, f'{k}-grouped') for k in datasets},\r\n load_from_cache_file=not args.overwrite_cache)\r\n```\r\n\r\nAnd this is the log\r\n\r\n```\r\n04/26/2021 10:26:59 - WARNING - datasets.builder - Using custom data configuration DBNL-f8d988ad33ccf2c1\r\n04/26/2021 10:26:59 - WARNING - datasets.builder - Reusing dataset text (/home/manjavacasema/data/.cache/text/DBNL-f8d988ad33ccf2c1/0.0.0/e16f44aa1b321ece1f87b07977cc5d70be93d69b20486d6dacd62e12cf25c9a5)\r\n100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 13/13 [00:00<00:00, 21.07ba/s]\r\n100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 40/40 [00:01<00:00, 24.28ba/s]\r\n04/26/2021 10:27:01 - WARNING - datasets.arrow_dataset - Loading cached processed dataset at /home/manjavacasema/data/.cache/train-grouped\r\n04/26/2021 10:27:01 - WARNING - datasets.arrow_dataset - Loading cached processed dataset at /home/manjavacasema/data/.cache/dev-grouped\r\n```\r\n", "Hi ! What tokenizer are you using ?", "It's the ByteLevelBPETokenizer", "This error happened to me too, when I tried to supply my own fingerprint to `map()` via the `new_fingerprint` arg.\r\n\r\nEdit: realized it was because my path was weird and had colons and brackets and slashes in it, since one of the variable values I included in the fingerprint was a dataset split like \"train[:10%]\". I fixed it with [this solution](https://stackoverflow.com/a/13593932/2287177) from StackOverflow to just remove those invalid characters from the fingerprint.", "Good catch @jxmorris12, maybe we should do additional checks on the valid characters for fingerprints ! 
Would you like to contribute this ?\r\n\r\nI think this can be added here, when we set the fingerprint(s) that are passed `map`:\r\n\r\nhttps://github.com/huggingface/datasets/blob/25bb7c9cbf519fbbf9abf3898083b529e7762705/src/datasets/fingerprint.py#L449-L454\r\n\r\nmaybe something like\r\n```python\r\nif kwargs.get(fingerprint_name) is None:\r\n ...\r\nelse:\r\n # In this case, it's the user who specified the fingerprint manually:\r\n # we need to make sure it's a valid hash\r\n validate_fingerprint(kwargs[fingerprint_name])\r\n```\r\n\r\nOtherwise I can open a PR later", "I opened a PR here to add the fingerprint validation: https://github.com/huggingface/datasets/pull/4587\r\n\r\nEDIT: merged :)", "thank you!" ]
2021-01-11T15:37:31
2022-06-29T14:54:42
2021-01-26T02:47:59
NONE
null
null
null
null
Hi, I am using the datasets package and even though I run the same data processing functions, datasets always recomputes the function instead of using cache. I have attached an example script that for me reproduces the problem. In the attached example the second map function always recomputes instead of loading from cache. Is this a bug or am I doing something wrong? Is there a way to fix this and avoid all the recomputation? Thanks Edit: transformers==3.5.1 datasets==1.2.0 ``` from datasets import load_dataset from transformers import AutoTokenizer datasets = load_dataset('wikitext', 'wikitext-103-raw-v1') tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', use_fast=True) column_names = datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name], return_special_tokens_mask=True) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=60, remove_columns=[text_column_name], load_from_cache_file=True, ) max_seq_length = tokenizer.model_max_length def group_texts(examples): # Concatenate all texts. concatenated_examples = { k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // max_seq_length) * max_seq_length # Split by chunks of max_len. result = { k: [t[i: i + max_seq_length] for i in range(0, total_length, max_seq_length)] for k, t in concatenated_examples.items() } return result tokenized_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=60, load_from_cache_file=True, ) print(tokenized_datasets) print('finished') ```
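As a follow-up to the cache discussion in the comments, a sketch of pinning the cache files by name so the second `map` call is reused across runs regardless of fingerprinting; it continues the script above, and the file names are arbitrary.

```python
# Sketch: naming the cache files explicitly (one per split of the DatasetDict)
# makes the cache lookup independent of the automatically computed fingerprint.
# Per the comments, plain file names (not paths) end up in the dataset's cache directory.
tokenized_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    num_proc=60,
    load_from_cache_file=True,
    cache_file_names={k: f"tokenized_and_grouped_{k}.arrow" for k in tokenized_datasets},
)
print(tokenized_datasets)
```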
{ "avatar_url": "https://avatars.githubusercontent.com/u/18296312?v=4", "events_url": "https://api.github.com/users/ofirzaf/events{/privacy}", "followers_url": "https://api.github.com/users/ofirzaf/followers", "following_url": "https://api.github.com/users/ofirzaf/following{/other_user}", "gists_url": "https://api.github.com/users/ofirzaf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ofirzaf", "id": 18296312, "login": "ofirzaf", "node_id": "MDQ6VXNlcjE4Mjk2MzEy", "organizations_url": "https://api.github.com/users/ofirzaf/orgs", "received_events_url": "https://api.github.com/users/ofirzaf/received_events", "repos_url": "https://api.github.com/users/ofirzaf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ofirzaf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ofirzaf/subscriptions", "type": "User", "url": "https://api.github.com/users/ofirzaf", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1718/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1718/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
14 days, 11:10:28
https://api.github.com/repos/huggingface/datasets/issues/1717
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1717/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1717/comments
https://api.github.com/repos/huggingface/datasets/issues/1717/events
https://github.com/huggingface/datasets/issues/1717
783,074,255
MDU6SXNzdWU3ODMwNzQyNTU=
1,717
SciFact dataset - minor changes
{ "avatar_url": "https://avatars.githubusercontent.com/u/3091916?v=4", "events_url": "https://api.github.com/users/dwadden/events{/privacy}", "followers_url": "https://api.github.com/users/dwadden/followers", "following_url": "https://api.github.com/users/dwadden/following{/other_user}", "gists_url": "https://api.github.com/users/dwadden/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dwadden", "id": 3091916, "login": "dwadden", "node_id": "MDQ6VXNlcjMwOTE5MTY=", "organizations_url": "https://api.github.com/users/dwadden/orgs", "received_events_url": "https://api.github.com/users/dwadden/received_events", "repos_url": "https://api.github.com/users/dwadden/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dwadden/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dwadden/subscriptions", "type": "User", "url": "https://api.github.com/users/dwadden", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi Dave,\r\nYou are more than welcome to open a PR to make these changes! 🤗\r\nYou will find the relevant information about opening a PR in the [contributing guide](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md) and in the [dataset addition guide](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).\r\n\r\nPinging also @lhoestq for the Google cloud matter.", "> I'd like to make a few minor changes, including the citation information and the `_URL` from which to download the dataset. Can I submit a PR for this?\r\n\r\nSure ! Also feel free to ping us for reviews or if we can help :)\r\n\r\n> It also looks like the dataset is being downloaded directly from Huggingface's Google cloud account rather than via the `_URL` in [scifact.py](https://github.com/huggingface/datasets/blob/master/datasets/scifact/scifact.py). Can you help me update the version on gcloud?\r\n\r\nWhat makes you think that ?\r\nAfaik there's no scifact on our google storage\r\n", "\r\n\r\n> > I'd like to make a few minor changes, including the citation information and the `_URL` from which to download the dataset. Can I submit a PR for this?\r\n> \r\n> Sure ! Also feel free to ping us for reviews or if we can help :)\r\n> \r\nOK! We're organizing a [shared task](https://sdproc.org/2021/sharedtasks.html#sciver) based on the dataset, and I made some updates and changed the download URL - so the current code points to a dead URL. I'll update appropriately once the task is finalized and make a PR.\r\n\r\n> > It also looks like the dataset is being downloaded directly from Huggingface's Google cloud account rather than via the `_URL` in [scifact.py](https://github.com/huggingface/datasets/blob/master/datasets/scifact/scifact.py). Can you help me update the version on gcloud?\r\n> \r\n> What makes you think that ?\r\n> Afaik there's no scifact on our google storage\r\n\r\nYou're right, I had the data cached on my machine somewhere. \r\n\r\n", "I opened a PR about this: https://github.com/huggingface/datasets/pull/1780. Closing this issue, will continue there." ]
2021-01-11T05:26:40
2021-01-26T02:52:17
2021-01-26T02:52:17
CONTRIBUTOR
null
null
null
null
Hi, SciFact dataset creator here. First of all, thanks for adding the dataset to Huggingface, much appreciated! I'd like to make a few minor changes, including the citation information and the `_URL` from which to download the dataset. Can I submit a PR for this? It also looks like the dataset is being downloaded directly from Huggingface's Google cloud account rather than via the `_URL` in [scifact.py](https://github.com/huggingface/datasets/blob/master/datasets/scifact/scifact.py). Can you help me update the version on gcloud? Thanks, Dave
{ "avatar_url": "https://avatars.githubusercontent.com/u/3091916?v=4", "events_url": "https://api.github.com/users/dwadden/events{/privacy}", "followers_url": "https://api.github.com/users/dwadden/followers", "following_url": "https://api.github.com/users/dwadden/following{/other_user}", "gists_url": "https://api.github.com/users/dwadden/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dwadden", "id": 3091916, "login": "dwadden", "node_id": "MDQ6VXNlcjMwOTE5MTY=", "organizations_url": "https://api.github.com/users/dwadden/orgs", "received_events_url": "https://api.github.com/users/dwadden/received_events", "repos_url": "https://api.github.com/users/dwadden/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dwadden/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dwadden/subscriptions", "type": "User", "url": "https://api.github.com/users/dwadden", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1717/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1717/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
14 days, 21:25:37
https://api.github.com/repos/huggingface/datasets/issues/1713
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1713/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1713/comments
https://api.github.com/repos/huggingface/datasets/issues/1713/events
https://github.com/huggingface/datasets/issues/1713
782,337,723
MDU6SXNzdWU3ODIzMzc3MjM=
1,713
Installation using conda
{ "avatar_url": "https://avatars.githubusercontent.com/u/9393002?v=4", "events_url": "https://api.github.com/users/pranav-s/events{/privacy}", "followers_url": "https://api.github.com/users/pranav-s/followers", "following_url": "https://api.github.com/users/pranav-s/following{/other_user}", "gists_url": "https://api.github.com/users/pranav-s/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pranav-s", "id": 9393002, "login": "pranav-s", "node_id": "MDQ6VXNlcjkzOTMwMDI=", "organizations_url": "https://api.github.com/users/pranav-s/orgs", "received_events_url": "https://api.github.com/users/pranav-s/received_events", "repos_url": "https://api.github.com/users/pranav-s/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pranav-s/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pranav-s/subscriptions", "type": "User", "url": "https://api.github.com/users/pranav-s", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Yes indeed the idea is to have the next release on conda cc @LysandreJik ", "Great! Did you guys have a timeframe in mind for the next release?\r\n\r\nThank you for all the great work in developing this library.", "I think we can have `datasets` on conda by next week. Will see what I can do!", "Thank you. Looking forward to it.", "`datasets` has been added to the huggingface channel thanks to @LysandreJik :)\r\nIt depends on conda-forge though\r\n\r\n```\r\nconda install -c huggingface -c conda-forge datasets\r\n```" ]
2021-01-08T19:12:15
2021-09-17T12:47:40
2021-09-17T12:47:40
NONE
null
null
null
null
Will a conda package for installing datasets be added to the huggingface conda channel? I have installed transformers using conda and would like to use the datasets library to use some of the scripts in the transformers/examples folder but am unable to do so at the moment as datasets can only be installed using pip and using pip in a conda environment is generally a bad idea in my experience.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1713/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1713/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
251 days, 17:35:25
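A small, assumed sanity check (not part of the thread) to run after the command given in the last comment, confirming that `datasets` resolves from the conda environment rather than from a pip install:

```python
# After: conda install -c huggingface -c conda-forge datasets
# Verify the package comes from the conda environment's site-packages.
import datasets

print(datasets.__version__)
print(datasets.__file__)  # should point inside the active conda env
```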
https://api.github.com/repos/huggingface/datasets/issues/1710
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1710/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1710/comments
https://api.github.com/repos/huggingface/datasets/issues/1710/events
https://github.com/huggingface/datasets/issues/1710
781,914,951
MDU6SXNzdWU3ODE5MTQ5NTE=
1,710
IsADirectoryError when trying to download C4
{ "avatar_url": "https://avatars.githubusercontent.com/u/5771366?v=4", "events_url": "https://api.github.com/users/fredriko/events{/privacy}", "followers_url": "https://api.github.com/users/fredriko/followers", "following_url": "https://api.github.com/users/fredriko/following{/other_user}", "gists_url": "https://api.github.com/users/fredriko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fredriko", "id": 5771366, "login": "fredriko", "node_id": "MDQ6VXNlcjU3NzEzNjY=", "organizations_url": "https://api.github.com/users/fredriko/orgs", "received_events_url": "https://api.github.com/users/fredriko/received_events", "repos_url": "https://api.github.com/users/fredriko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fredriko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fredriko/subscriptions", "type": "User", "url": "https://api.github.com/users/fredriko", "user_view_type": "public" }
[]
closed
false
null
[]
[ "I haven't tested C4 on my side so there so there may be a few bugs in the code/adjustments to make.\r\nHere it looks like in c4.py, line 190 one of the `files_to_download` is `'/'` which is invalid.\r\nValid files are paths to local files or URLs to remote files.", "Fixed once processed data is used instead:\r\n- #2575" ]
2021-01-08T07:31:30
2022-08-04T11:56:10
2022-08-04T11:55:04
NONE
null
null
null
null
**TLDR**: I fail to download C4 and see a stacktrace originating in `IsADirectoryError` as an explanation for failure. How can the problem be fixed? **VERBOSE**: I use Python version 3.7 and have the following dependencies listed in my project: ``` datasets==1.2.0 apache-beam==2.26.0 ``` When running the following code, where `/data/huggingface/unpacked/` contains a single unzipped `wet.paths` file manually downloaded as per the instructions for C4: ``` from datasets import load_dataset load_dataset("c4", "en", data_dir="/data/huggingface/unpacked", beam_runner='DirectRunner') ``` I get the following stacktrace: ``` /Users/fredriko/venv/misc/bin/python /Users/fredriko/source/misc/main.py Downloading and preparing dataset c4/en (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /Users/fredriko/.cache/huggingface/datasets/c4/en/2.3.0/8304cf264cc42bdebcb13fca4b9cb36368a96f557d36f9dc969bebbe2568b283... Traceback (most recent call last): File "/Users/fredriko/source/misc/main.py", line 3, in <module> load_dataset("c4", "en", data_dir="/data/huggingface/unpacked", beam_runner='DirectRunner') File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/load.py", line 612, in load_dataset ignore_verifications=ignore_verifications, File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/builder.py", line 527, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/builder.py", line 1066, in _download_and_prepare pipeline=pipeline, File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/builder.py", line 582, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/Users/fredriko/.cache/huggingface/modules/datasets_modules/datasets/c4/8304cf264cc42bdebcb13fca4b9cb36368a96f557d36f9dc969bebbe2568b283/c4.py", line 190, in _split_generators file_paths = dl_manager.download_and_extract(files_to_download) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 258, in download_and_extract return self.extract(self.download(url_or_urls)) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 189, in download self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 117, in _record_sizes_checksums self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(path) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/info_utils.py", line 80, in get_size_checksum_dict with open(path, "rb") as f: IsADirectoryError: [Errno 21] Is a directory: '/' Process finished with exit code 1 ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1710/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1710/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
573 days, 4:23:34
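For reference, a minimal sketch (not from the thread) of loading C4 after the fix referenced in the comments; it assumes a datasets release where the processed C4 files are served directly, so the manual `wet.paths` download and the Beam runner that failed here are no longer needed, and it uses streaming to avoid materialising the full corpus.

```python
# Minimal sketch, assuming a datasets release that uses the processed C4 data
# (per the fix linked above) instead of the original Beam-based script.
from datasets import load_dataset

c4 = load_dataset("c4", "en", split="train", streaming=True)
print(next(iter(c4)))  # first document of the English split
```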
https://api.github.com/repos/huggingface/datasets/issues/1709
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1709/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1709/comments
https://api.github.com/repos/huggingface/datasets/issues/1709/events
https://github.com/huggingface/datasets/issues/1709
781,875,640
MDU6SXNzdWU3ODE4NzU2NDA=
1,709
Databases
{ "avatar_url": "https://avatars.githubusercontent.com/u/68724553?v=4", "events_url": "https://api.github.com/users/JimmyJim1/events{/privacy}", "followers_url": "https://api.github.com/users/JimmyJim1/followers", "following_url": "https://api.github.com/users/JimmyJim1/following{/other_user}", "gists_url": "https://api.github.com/users/JimmyJim1/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JimmyJim1", "id": 68724553, "login": "JimmyJim1", "node_id": "MDQ6VXNlcjY4NzI0NTUz", "organizations_url": "https://api.github.com/users/JimmyJim1/orgs", "received_events_url": "https://api.github.com/users/JimmyJim1/received_events", "repos_url": "https://api.github.com/users/JimmyJim1/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JimmyJim1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JimmyJim1/subscriptions", "type": "User", "url": "https://api.github.com/users/JimmyJim1", "user_view_type": "public" }
[]
closed
false
null
[]
[]
2021-01-08T06:14:03
2021-01-08T09:00:08
2021-01-08T09:00:08
NONE
null
null
null
null
## Adding a Dataset - **Name:** *name of the dataset* - **Description:** *short description of the dataset (or link to social media or blog post)* - **Paper:** *link to the dataset paper if available* - **Data:** *link to the Github repository or current dataset location* - **Motivation:** *what are some good reasons to have this dataset* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1709/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1709/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2:46:05
https://api.github.com/repos/huggingface/datasets/issues/1708
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1708/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1708/comments
https://api.github.com/repos/huggingface/datasets/issues/1708/events
https://github.com/huggingface/datasets/issues/1708
781,631,455
MDU6SXNzdWU3ODE2MzE0NTU=
1,708
<html dir="ltr" lang="en" class="focus-outline-visible"><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
{ "avatar_url": "https://avatars.githubusercontent.com/u/77126849?v=4", "events_url": "https://api.github.com/users/Louiejay54/events{/privacy}", "followers_url": "https://api.github.com/users/Louiejay54/followers", "following_url": "https://api.github.com/users/Louiejay54/following{/other_user}", "gists_url": "https://api.github.com/users/Louiejay54/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Louiejay54", "id": 77126849, "login": "Louiejay54", "node_id": "MDQ6VXNlcjc3MTI2ODQ5", "organizations_url": "https://api.github.com/users/Louiejay54/orgs", "received_events_url": "https://api.github.com/users/Louiejay54/received_events", "repos_url": "https://api.github.com/users/Louiejay54/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Louiejay54/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Louiejay54/subscriptions", "type": "User", "url": "https://api.github.com/users/Louiejay54", "user_view_type": "public" }
[]
closed
false
null
[]
[]
2021-01-07T21:45:24
2021-01-08T09:00:01
2021-01-08T09:00:01
NONE
null
null
null
null
## Adding a Dataset - **Name:** *name of the dataset* - **Description:** *short description of the dataset (or link to social media or blog post)* - **Paper:** *link to the dataset paper if available* - **Data:** *link to the Github repository or current dataset location* - **Motivation:** *what are some good reasons to have this dataset* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1708/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1708/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
11:14:37
https://api.github.com/repos/huggingface/datasets/issues/1706
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1706/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1706/comments
https://api.github.com/repos/huggingface/datasets/issues/1706/events
https://github.com/huggingface/datasets/issues/1706
781,494,476
MDU6SXNzdWU3ODE0OTQ0NzY=
1,706
Error when downloading a large dataset on slow connection.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23355969?v=4", "events_url": "https://api.github.com/users/lucadiliello/events{/privacy}", "followers_url": "https://api.github.com/users/lucadiliello/followers", "following_url": "https://api.github.com/users/lucadiliello/following{/other_user}", "gists_url": "https://api.github.com/users/lucadiliello/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lucadiliello", "id": 23355969, "login": "lucadiliello", "node_id": "MDQ6VXNlcjIzMzU1OTY5", "organizations_url": "https://api.github.com/users/lucadiliello/orgs", "received_events_url": "https://api.github.com/users/lucadiliello/received_events", "repos_url": "https://api.github.com/users/lucadiliello/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lucadiliello/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lucadiliello/subscriptions", "type": "User", "url": "https://api.github.com/users/lucadiliello", "user_view_type": "public" }
[]
open
false
null
[]
[ "Hi ! Is this an issue you have with `openwebtext` specifically or also with other datasets ?\r\n\r\nIt looks like the downloaded file is corrupted and can't be extracted using `tarfile`.\r\nCould you try loading it again with \r\n```python\r\nimport datasets\r\ndatasets.load_dataset(\"openwebtext\", download_mode=\"force_redownload\")\r\n```" ]
2021-01-07T17:48:15
2021-01-13T10:35:02
null
CONTRIBUTOR
null
null
null
null
I receive the following error after about an hour trying to download the `openwebtext` dataset. The code used is: ```python import datasets datasets.load_dataset("openwebtext") ``` > Traceback (most recent call last): [4/28] > File "<stdin>", line 1, in <module> > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/site-packages/datasets/load.py", line 610, in load_dataset > ignore_verifications=ignore_verifications, > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/site-packages/datasets/builder.py", line 515, in download_and_prepare > dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/site-packages/datasets/builder.py", line 570, in _download_and_prepare > split_generators = self._split_generators(dl_manager, **split_generators_kwargs) > File "/home/lucadiliello/.cache/huggingface/modules/datasets_modules/datasets/openwebtext/5c636399c7155da97c982d0d70ecdce30fbca66a4eb4fc768ad91f8331edac02/openwebtext.py", line 62, in _split_generators > dl_dir = dl_manager.download_and_extract(_URL) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 254, in download_and_extract > return self.extract(self.download(url_or_urls)) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 235, in extract > num_proc=num_proc, > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/site-packages/datasets/utils/py_utils.py", line 225, in map_nested > return function(data_struct) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 343, in cached_path > tar_file.extractall(output_path_extracted) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/tarfile.py", line 2000, in extractall > numeric_owner=numeric_owner) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/tarfile.py", line 2042, in extract > numeric_owner=numeric_owner) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/tarfile.py", line 2112, in _extract_member > self.makefile(tarinfo, targetpath) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/tarfile.py", line 2161, in makefile > copyfileobj(source, target, tarinfo.size, ReadError, bufsize) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/tarfile.py", line 253, in copyfileobj > buf = src.read(remainder) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/lzma.py", line 200, in read > return self._buffer.read(size) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/_compression.py", line 68, in readinto > data = self.read(len(byte_view)) > File "/home/lucadiliello/anaconda3/envs/nlp/lib/python3.7/_compression.py", line 99, in read > raise EOFError("Compressed file ended before the " > EOFError: Compressed file ended before the end-of-stream marker was reached
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1706/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1706/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
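The retry suggested in the comment above, spelled out as a runnable snippet; `download_mode="force_redownload"` discards the partially downloaded archive in the cache so extraction starts from a fresh file instead of the corrupted one.

```python
# Re-download openwebtext from scratch instead of reusing the corrupted
# archive left in the cache by the interrupted download.
import datasets

ds = datasets.load_dataset("openwebtext", download_mode="force_redownload")
print(ds)
```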
https://api.github.com/repos/huggingface/datasets/issues/1701
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1701/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1701/comments
https://api.github.com/repos/huggingface/datasets/issues/1701/events
https://github.com/huggingface/datasets/issues/1701
781,345,717
MDU6SXNzdWU3ODEzNDU3MTc=
1,701
Some datasets miss dataset_infos.json or dummy_data.zip
{ "avatar_url": "https://avatars.githubusercontent.com/u/272253?v=4", "events_url": "https://api.github.com/users/madlag/events{/privacy}", "followers_url": "https://api.github.com/users/madlag/followers", "following_url": "https://api.github.com/users/madlag/following{/other_user}", "gists_url": "https://api.github.com/users/madlag/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/madlag", "id": 272253, "login": "madlag", "node_id": "MDQ6VXNlcjI3MjI1Mw==", "organizations_url": "https://api.github.com/users/madlag/orgs", "received_events_url": "https://api.github.com/users/madlag/received_events", "repos_url": "https://api.github.com/users/madlag/repos", "site_admin": false, "starred_url": "https://api.github.com/users/madlag/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/madlag/subscriptions", "type": "User", "url": "https://api.github.com/users/madlag", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Thanks for reporting.\r\nWe should indeed add all the missing dummy_data.zip and also the dataset_infos.json at least for lm1b, reclor and wikihow.\r\n\r\nFor c4 I haven't tested the script and I think we'll require some optimizations regarding beam datasets before processing it.\r\n", "Closing since the dummy data generation is deprecated now (and the issue with missing metadata seems to be addressed)." ]
2021-01-07T14:17:13
2022-11-04T15:11:16
2022-11-04T15:06:00
CONTRIBUTOR
null
null
null
null
While working on the dataset README generation script at https://github.com/madlag/datasets_readme_generator, I noticed that some datasets miss a dataset_infos.json: ``` c4 lm1b reclor wikihow ``` And some do not have a dummy_data.zip: ``` kor_nli math_dataset mlqa ms_marco newsgroup qa4mre qangaroo reddit_tifu super_glue trivia_qa web_of_science wmt14 wmt15 wmt16 wmt17 wmt18 wmt19 xtreme ``` But it seems that some of the latter do have a "dummy" directory.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1701/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1701/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
666 days, 0:48:47
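A hypothetical helper (not part of the issue) that reproduces this kind of report against a local checkout of the repository; the `datasets/` path and the `dummy` folder name are assumptions based on the repo layout described above.

```python
# List which dataset folders in a local clone of huggingface/datasets are
# missing dataset_infos.json or any dummy data directory.
from pathlib import Path

root = Path("datasets")  # assumed: the datasets/ directory of a repo checkout
for d in sorted(p for p in root.iterdir() if p.is_dir()):
    missing = []
    if not (d / "dataset_infos.json").exists():
        missing.append("dataset_infos.json")
    if not (d / "dummy").exists():
        missing.append("dummy data")
    if missing:
        print(d.name, "->", ", ".join(missing))
```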
https://api.github.com/repos/huggingface/datasets/issues/1696
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1696/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1696/comments
https://api.github.com/repos/huggingface/datasets/issues/1696/events
https://github.com/huggingface/datasets/issues/1696
781,096,918
MDU6SXNzdWU3ODEwOTY5MTg=
1,696
Unable to install datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/12635475?v=4", "events_url": "https://api.github.com/users/glee2429/events{/privacy}", "followers_url": "https://api.github.com/users/glee2429/followers", "following_url": "https://api.github.com/users/glee2429/following{/other_user}", "gists_url": "https://api.github.com/users/glee2429/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/glee2429", "id": 12635475, "login": "glee2429", "node_id": "MDQ6VXNlcjEyNjM1NDc1", "organizations_url": "https://api.github.com/users/glee2429/orgs", "received_events_url": "https://api.github.com/users/glee2429/received_events", "repos_url": "https://api.github.com/users/glee2429/repos", "site_admin": false, "starred_url": "https://api.github.com/users/glee2429/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/glee2429/subscriptions", "type": "User", "url": "https://api.github.com/users/glee2429", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Maybe try to create a virtual env with python 3.8 or 3.7", "Thanks, @thomwolf! I fixed the issue by downgrading python to 3.7. ", "Damn sorry", "Damn sorry" ]
2021-01-07T07:24:37
2021-01-08T00:33:05
2021-01-07T22:06:05
NONE
null
null
null
null
** Edit ** I believe there's a bug with the package when you're installing it with Python 3.9. I recommend sticking with previous versions. Thanks, @thomwolf for the insight! **Short description** I followed the instructions for installing datasets (https://huggingface.co/docs/datasets/installation.html). However, while I tried to download datasets using `pip install datasets` I got a massive error message after getting stuck at "Installing build dependencies..." I was wondering if this problem can be fixed by creating a virtual environment, but it didn't help. Can anyone offer some advice on how to fix this issue? Here's an error message: `(env) Gas-MacBook-Pro:Downloads destiny$ pip install datasets Collecting datasets Using cached datasets-1.2.0-py3-none-any.whl (159 kB) Collecting numpy>=1.17 Using cached numpy-1.19.5-cp39-cp39-macosx_10_9_x86_64.whl (15.6 MB) Collecting pyarrow>=0.17.1 Using cached pyarrow-2.0.0.tar.gz (58.9 MB) .... _configtest.c:9:5: warning: incompatible redeclaration of library function 'ceilf' [-Wincompatible-library-redeclaration] int ceilf (void); ^ _configtest.c:9:5: note: 'ceilf' is a builtin with type 'float (float)' _configtest.c:10:5: warning: incompatible redeclaration of library function 'rintf' [-Wincompatible-library-redeclaration] int rintf (void); ^ _configtest.c:10:5: note: 'rintf' is a builtin with type 'float (float)' _configtest.c:11:5: warning: incompatible redeclaration of library function 'truncf' [-Wincompatible-library-redeclaration] int truncf (void); ^ _configtest.c:11:5: note: 'truncf' is a builtin with type 'float (float)' _configtest.c:12:5: warning: incompatible redeclaration of library function 'sqrtf' [-Wincompatible-library-redeclaration] int sqrtf (void); ^ _configtest.c:12:5: note: 'sqrtf' is a builtin with type 'float (float)' _configtest.c:13:5: warning: incompatible redeclaration of library function 'log10f' [-Wincompatible-library-redeclaration] int log10f (void); ^ _configtest.c:13:5: note: 'log10f' is a builtin with type 'float (float)' _configtest.c:14:5: warning: incompatible redeclaration of library function 'logf' [-Wincompatible-library-redeclaration] int logf (void); ^ _configtest.c:14:5: note: 'logf' is a builtin with type 'float (float)' _configtest.c:15:5: warning: incompatible redeclaration of library function 'log1pf' [-Wincompatible-library-redeclaration] int log1pf (void); ^ _configtest.c:15:5: note: 'log1pf' is a builtin with type 'float (float)' _configtest.c:16:5: warning: incompatible redeclaration of library function 'expf' [-Wincompatible-library-redeclaration] int expf (void); ^ _configtest.c:16:5: note: 'expf' is a builtin with type 'float (float)' _configtest.c:17:5: warning: incompatible redeclaration of library function 'expm1f' [-Wincompatible-library-redeclaration] int expm1f (void); ^ _configtest.c:17:5: note: 'expm1f' is a builtin with type 'float (float)' _configtest.c:18:5: warning: incompatible redeclaration of library function 'asinf' [-Wincompatible-library-redeclaration] int asinf (void); ^ _configtest.c:18:5: note: 'asinf' is a builtin with type 'float (float)' _configtest.c:19:5: warning: incompatible redeclaration of library function 'acosf' [-Wincompatible-library-redeclaration] int acosf (void); ^ _configtest.c:19:5: note: 'acosf' is a builtin with type 'float (float)' _configtest.c:20:5: warning: incompatible redeclaration of library function 'atanf' [-Wincompatible-library-redeclaration] int atanf (void); ^ _configtest.c:20:5: note: 'atanf' is a builtin with type 'float (float)' 
_configtest.c:21:5: warning: incompatible redeclaration of library function 'asinhf' [-Wincompatible-library-redeclaration] int asinhf (void); ^ _configtest.c:21:5: note: 'asinhf' is a builtin with type 'float (float)' _configtest.c:22:5: warning: incompatible redeclaration of library function 'acoshf' [-Wincompatible-library-redeclaration] int acoshf (void); ^ _configtest.c:22:5: note: 'acoshf' is a builtin with type 'float (float)' _configtest.c:23:5: warning: incompatible redeclaration of library function 'atanhf' [-Wincompatible-library-redeclaration] int atanhf (void); ^ _configtest.c:23:5: note: 'atanhf' is a builtin with type 'float (float)' _configtest.c:24:5: warning: incompatible redeclaration of library function 'hypotf' [-Wincompatible-library-redeclaration] int hypotf (void); ^ _configtest.c:24:5: note: 'hypotf' is a builtin with type 'float (float, float)' _configtest.c:25:5: warning: incompatible redeclaration of library function 'atan2f' [-Wincompatible-library-redeclaration] int atan2f (void); ^ _configtest.c:25:5: note: 'atan2f' is a builtin with type 'float (float, float)' _configtest.c:26:5: warning: incompatible redeclaration of library function 'powf' [-Wincompatible-library-redeclaration] int powf (void); ^ _configtest.c:26:5: note: 'powf' is a builtin with type 'float (float, float)' _configtest.c:27:5: warning: incompatible redeclaration of library function 'fmodf' [-Wincompatible-library-redeclaration] int fmodf (void); ^ _configtest.c:27:5: note: 'fmodf' is a builtin with type 'float (float, float)' _configtest.c:28:5: warning: incompatible redeclaration of library function 'modff' [-Wincompatible-library-redeclaration] int modff (void); ^ _configtest.c:28:5: note: 'modff' is a builtin with type 'float (float, float *)' _configtest.c:29:5: warning: incompatible redeclaration of library function 'frexpf' [-Wincompatible-library-redeclaration] int frexpf (void); ^ _configtest.c:29:5: note: 'frexpf' is a builtin with type 'float (float, int *)' _configtest.c:30:5: warning: incompatible redeclaration of library function 'ldexpf' [-Wincompatible-library-redeclaration] int ldexpf (void); ^ _configtest.c:30:5: note: 'ldexpf' is a builtin with type 'float (float, int)' _configtest.c:31:5: warning: incompatible redeclaration of library function 'exp2f' [-Wincompatible-library-redeclaration] int exp2f (void); ^ _configtest.c:31:5: note: 'exp2f' is a builtin with type 'float (float)' _configtest.c:32:5: warning: incompatible redeclaration of library function 'log2f' [-Wincompatible-library-redeclaration] int log2f (void); ^ _configtest.c:32:5: note: 'log2f' is a builtin with type 'float (float)' _configtest.c:33:5: warning: incompatible redeclaration of library function 'copysignf' [-Wincompatible-library-redeclaration] int copysignf (void); ^ _configtest.c:33:5: note: 'copysignf' is a builtin with type 'float (float, float)' _configtest.c:34:5: warning: incompatible redeclaration of library function 'nextafterf' [-Wincompatible-library-redeclaration] int nextafterf (void); ^ _configtest.c:34:5: note: 'nextafterf' is a builtin with type 'float (float, float)' _configtest.c:35:5: warning: incompatible redeclaration of library function 'cbrtf' [-Wincompatible-library-redeclaration] int cbrtf (void); ^ _configtest.c:35:5: note: 'cbrtf' is a builtin with type 'float (float)' 35 warnings generated. clang _configtest.o -o _configtest success! 
removing: _configtest.c _configtest.o _configtest.o.d _configtest C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:1:5: warning: incompatible redeclaration of library function 'sinl' [-Wincompatible-library-redeclaration] int sinl (void); ^ _configtest.c:1:5: note: 'sinl' is a builtin with type 'long double (long double)' _configtest.c:2:5: warning: incompatible redeclaration of library function 'cosl' [-Wincompatible-library-redeclaration] int cosl (void); ^ _configtest.c:2:5: note: 'cosl' is a builtin with type 'long double (long double)' _configtest.c:3:5: warning: incompatible redeclaration of library function 'tanl' [-Wincompatible-library-redeclaration] int tanl (void); ^ _configtest.c:3:5: note: 'tanl' is a builtin with type 'long double (long double)' _configtest.c:4:5: warning: incompatible redeclaration of library function 'sinhl' [-Wincompatible-library-redeclaration] int sinhl (void); ^ _configtest.c:4:5: note: 'sinhl' is a builtin with type 'long double (long double)' _configtest.c:5:5: warning: incompatible redeclaration of library function 'coshl' [-Wincompatible-library-redeclaration] int coshl (void); ^ _configtest.c:5:5: note: 'coshl' is a builtin with type 'long double (long double)' _configtest.c:6:5: warning: incompatible redeclaration of library function 'tanhl' [-Wincompatible-library-redeclaration] int tanhl (void); ^ _configtest.c:6:5: note: 'tanhl' is a builtin with type 'long double (long double)' _configtest.c:7:5: warning: incompatible redeclaration of library function 'fabsl' [-Wincompatible-library-redeclaration] int fabsl (void); ^ _configtest.c:7:5: note: 'fabsl' is a builtin with type 'long double (long double)' _configtest.c:8:5: warning: incompatible redeclaration of library function 'floorl' [-Wincompatible-library-redeclaration] int floorl (void); ^ _configtest.c:8:5: note: 'floorl' is a builtin with type 'long double (long double)' _configtest.c:9:5: warning: incompatible redeclaration of library function 'ceill' [-Wincompatible-library-redeclaration] int ceill (void); ^ _configtest.c:9:5: note: 'ceill' is a builtin with type 'long double (long double)' _configtest.c:10:5: warning: incompatible redeclaration of library function 'rintl' [-Wincompatible-library-redeclaration] int rintl (void); ^ _configtest.c:10:5: note: 'rintl' is a builtin with type 'long double (long double)' _configtest.c:11:5: warning: incompatible redeclaration of library function 'truncl' [-Wincompatible-library-redeclaration] int truncl (void); ^ _configtest.c:11:5: note: 'truncl' is a builtin with type 'long double (long double)' _configtest.c:12:5: warning: incompatible redeclaration of library function 'sqrtl' [-Wincompatible-library-redeclaration] int sqrtl (void); ^ _configtest.c:12:5: note: 'sqrtl' is a builtin with 
type 'long double (long double)' _configtest.c:13:5: warning: incompatible redeclaration of library function 'log10l' [-Wincompatible-library-redeclaration] int log10l (void); ^ _configtest.c:13:5: note: 'log10l' is a builtin with type 'long double (long double)' _configtest.c:14:5: warning: incompatible redeclaration of library function 'logl' [-Wincompatible-library-redeclaration] int logl (void); ^ _configtest.c:14:5: note: 'logl' is a builtin with type 'long double (long double)' _configtest.c:15:5: warning: incompatible redeclaration of library function 'log1pl' [-Wincompatible-library-redeclaration] int log1pl (void); ^ _configtest.c:15:5: note: 'log1pl' is a builtin with type 'long double (long double)' _configtest.c:16:5: warning: incompatible redeclaration of library function 'expl' [-Wincompatible-library-redeclaration] int expl (void); ^ _configtest.c:16:5: note: 'expl' is a builtin with type 'long double (long double)' _configtest.c:17:5: warning: incompatible redeclaration of library function 'expm1l' [-Wincompatible-library-redeclaration] int expm1l (void); ^ _configtest.c:17:5: note: 'expm1l' is a builtin with type 'long double (long double)' _configtest.c:18:5: warning: incompatible redeclaration of library function 'asinl' [-Wincompatible-library-redeclaration] int asinl (void); ^ _configtest.c:18:5: note: 'asinl' is a builtin with type 'long double (long double)' _configtest.c:19:5: warning: incompatible redeclaration of library function 'acosl' [-Wincompatible-library-redeclaration] int acosl (void); ^ _configtest.c:19:5: note: 'acosl' is a builtin with type 'long double (long double)' _configtest.c:20:5: warning: incompatible redeclaration of library function 'atanl' [-Wincompatible-library-redeclaration] int atanl (void); ^ _configtest.c:20:5: note: 'atanl' is a builtin with type 'long double (long double)' _configtest.c:21:5: warning: incompatible redeclaration of library function 'asinhl' [-Wincompatible-library-redeclaration] int asinhl (void); ^ _configtest.c:21:5: note: 'asinhl' is a builtin with type 'long double (long double)' _configtest.c:22:5: warning: incompatible redeclaration of library function 'acoshl' [-Wincompatible-library-redeclaration] int acoshl (void); ^ _configtest.c:22:5: note: 'acoshl' is a builtin with type 'long double (long double)' _configtest.c:23:5: warning: incompatible redeclaration of library function 'atanhl' [-Wincompatible-library-redeclaration] int atanhl (void); ^ _configtest.c:23:5: note: 'atanhl' is a builtin with type 'long double (long double)' _configtest.c:24:5: warning: incompatible redeclaration of library function 'hypotl' [-Wincompatible-library-redeclaration] int hypotl (void); ^ _configtest.c:24:5: note: 'hypotl' is a builtin with type 'long double (long double, long double)' _configtest.c:25:5: warning: incompatible redeclaration of library function 'atan2l' [-Wincompatible-library-redeclaration] int atan2l (void); ^ _configtest.c:25:5: note: 'atan2l' is a builtin with type 'long double (long double, long double)' _configtest.c:26:5: warning: incompatible redeclaration of library function 'powl' [-Wincompatible-library-redeclaration] int powl (void); ^ _configtest.c:26:5: note: 'powl' is a builtin with type 'long double (long double, long double)' _configtest.c:27:5: warning: incompatible redeclaration of library function 'fmodl' [-Wincompatible-library-redeclaration] int fmodl (void); ^ _configtest.c:27:5: note: 'fmodl' is a builtin with type 'long double (long double, long double)' _configtest.c:28:5: warning: 
incompatible redeclaration of library function 'modfl' [-Wincompatible-library-redeclaration] int modfl (void); ^ _configtest.c:28:5: note: 'modfl' is a builtin with type 'long double (long double, long double *)' _configtest.c:29:5: warning: incompatible redeclaration of library function 'frexpl' [-Wincompatible-library-redeclaration] int frexpl (void); ^ _configtest.c:29:5: note: 'frexpl' is a builtin with type 'long double (long double, int *)' _configtest.c:30:5: warning: incompatible redeclaration of library function 'ldexpl' [-Wincompatible-library-redeclaration] int ldexpl (void); ^ _configtest.c:30:5: note: 'ldexpl' is a builtin with type 'long double (long double, int)' _configtest.c:31:5: warning: incompatible redeclaration of library function 'exp2l' [-Wincompatible-library-redeclaration] int exp2l (void); ^ _configtest.c:31:5: note: 'exp2l' is a builtin with type 'long double (long double)' _configtest.c:32:5: warning: incompatible redeclaration of library function 'log2l' [-Wincompatible-library-redeclaration] int log2l (void); ^ _configtest.c:32:5: note: 'log2l' is a builtin with type 'long double (long double)' _configtest.c:33:5: warning: incompatible redeclaration of library function 'copysignl' [-Wincompatible-library-redeclaration] int copysignl (void); ^ _configtest.c:33:5: note: 'copysignl' is a builtin with type 'long double (long double, long double)' _configtest.c:34:5: warning: incompatible redeclaration of library function 'nextafterl' [-Wincompatible-library-redeclaration] int nextafterl (void); ^ _configtest.c:34:5: note: 'nextafterl' is a builtin with type 'long double (long double, long double)' _configtest.c:35:5: warning: incompatible redeclaration of library function 'cbrtl' [-Wincompatible-library-redeclaration] int cbrtl (void); ^ _configtest.c:35:5: note: 'cbrtl' is a builtin with type 'long double (long double)' 35 warnings generated. clang _configtest.o -o _configtest success! removing: _configtest.c _configtest.o _configtest.o.d _configtest C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! 
removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! 
removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:8:12: error: use of undeclared identifier 'HAVE_DECL_SIGNBIT' (void) HAVE_DECL_SIGNBIT; ^ 1 error generated. failure. removing: _configtest.c _configtest.o C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! 
removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result 
-Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:1:5: warning: incompatible redeclaration of library function 'cabs' [-Wincompatible-library-redeclaration] int cabs (void); ^ _configtest.c:1:5: note: 'cabs' is a builtin with type 'double (_Complex double)' _configtest.c:2:5: warning: incompatible redeclaration of library function 'cacos' [-Wincompatible-library-redeclaration] int cacos (void); ^ _configtest.c:2:5: note: 'cacos' is a builtin with type '_Complex double (_Complex double)' _configtest.c:3:5: warning: incompatible redeclaration of library function 'cacosh' [-Wincompatible-library-redeclaration] int cacosh (void); ^ _configtest.c:3:5: note: 'cacosh' is a builtin with type '_Complex double (_Complex double)' _configtest.c:4:5: warning: incompatible redeclaration of library function 'carg' [-Wincompatible-library-redeclaration] int carg (void); ^ _configtest.c:4:5: note: 'carg' is a builtin with type 'double (_Complex double)' _configtest.c:5:5: warning: incompatible redeclaration of library function 'casin' [-Wincompatible-library-redeclaration] int casin (void); ^ _configtest.c:5:5: note: 'casin' is a builtin with type '_Complex double (_Complex double)' _configtest.c:6:5: warning: incompatible redeclaration of library function 'casinh' [-Wincompatible-library-redeclaration] int casinh (void); ^ _configtest.c:6:5: note: 'casinh' is a builtin with type '_Complex double (_Complex double)' _configtest.c:7:5: warning: incompatible redeclaration of library function 'catan' [-Wincompatible-library-redeclaration] int catan (void); ^ _configtest.c:7:5: note: 'catan' is a builtin with type '_Complex double (_Complex double)' _configtest.c:8:5: warning: incompatible redeclaration of library function 'catanh' [-Wincompatible-library-redeclaration] int catanh (void); ^ _configtest.c:8:5: note: 'catanh' is a builtin with type '_Complex double (_Complex double)' _configtest.c:9:5: warning: incompatible redeclaration of library 
function 'ccos' [-Wincompatible-library-redeclaration] int ccos (void); ^ _configtest.c:9:5: note: 'ccos' is a builtin with type '_Complex double (_Complex double)' _configtest.c:10:5: warning: incompatible redeclaration of library function 'ccosh' [-Wincompatible-library-redeclaration] int ccosh (void); ^ _configtest.c:10:5: note: 'ccosh' is a builtin with type '_Complex double (_Complex double)' _configtest.c:11:5: warning: incompatible redeclaration of library function 'cexp' [-Wincompatible-library-redeclaration] int cexp (void); ^ _configtest.c:11:5: note: 'cexp' is a builtin with type '_Complex double (_Complex double)' _configtest.c:12:5: warning: incompatible redeclaration of library function 'cimag' [-Wincompatible-library-redeclaration] int cimag (void); ^ _configtest.c:12:5: note: 'cimag' is a builtin with type 'double (_Complex double)' _configtest.c:13:5: warning: incompatible redeclaration of library function 'clog' [-Wincompatible-library-redeclaration] int clog (void); ^ _configtest.c:13:5: note: 'clog' is a builtin with type '_Complex double (_Complex double)' _configtest.c:14:5: warning: incompatible redeclaration of library function 'conj' [-Wincompatible-library-redeclaration] int conj (void); ^ _configtest.c:14:5: note: 'conj' is a builtin with type '_Complex double (_Complex double)' _configtest.c:15:5: warning: incompatible redeclaration of library function 'cpow' [-Wincompatible-library-redeclaration] int cpow (void); ^ _configtest.c:15:5: note: 'cpow' is a builtin with type '_Complex double (_Complex double, _Complex double)' _configtest.c:16:5: warning: incompatible redeclaration of library function 'cproj' [-Wincompatible-library-redeclaration] int cproj (void); ^ _configtest.c:16:5: note: 'cproj' is a builtin with type '_Complex double (_Complex double)' _configtest.c:17:5: warning: incompatible redeclaration of library function 'creal' [-Wincompatible-library-redeclaration] int creal (void); ^ _configtest.c:17:5: note: 'creal' is a builtin with type 'double (_Complex double)' _configtest.c:18:5: warning: incompatible redeclaration of library function 'csin' [-Wincompatible-library-redeclaration] int csin (void); ^ _configtest.c:18:5: note: 'csin' is a builtin with type '_Complex double (_Complex double)' _configtest.c:19:5: warning: incompatible redeclaration of library function 'csinh' [-Wincompatible-library-redeclaration] int csinh (void); ^ _configtest.c:19:5: note: 'csinh' is a builtin with type '_Complex double (_Complex double)' _configtest.c:20:5: warning: incompatible redeclaration of library function 'csqrt' [-Wincompatible-library-redeclaration] int csqrt (void); ^ _configtest.c:20:5: note: 'csqrt' is a builtin with type '_Complex double (_Complex double)' _configtest.c:21:5: warning: incompatible redeclaration of library function 'ctan' [-Wincompatible-library-redeclaration] int ctan (void); ^ _configtest.c:21:5: note: 'ctan' is a builtin with type '_Complex double (_Complex double)' _configtest.c:22:5: warning: incompatible redeclaration of library function 'ctanh' [-Wincompatible-library-redeclaration] int ctanh (void); ^ _configtest.c:22:5: note: 'ctanh' is a builtin with type '_Complex double (_Complex double)' 22 warnings generated. clang _configtest.o -o _configtest success! 
removing: _configtest.c _configtest.o _configtest.o.d _configtest C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:1:5: warning: incompatible redeclaration of library function 'cabsf' [-Wincompatible-library-redeclaration] int cabsf (void); ^ _configtest.c:1:5: note: 'cabsf' is a builtin with type 'float (_Complex float)' _configtest.c:2:5: warning: incompatible redeclaration of library function 'cacosf' [-Wincompatible-library-redeclaration] int cacosf (void); ^ _configtest.c:2:5: note: 'cacosf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:3:5: warning: incompatible redeclaration of library function 'cacoshf' [-Wincompatible-library-redeclaration] int cacoshf (void); ^ _configtest.c:3:5: note: 'cacoshf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:4:5: warning: incompatible redeclaration of library function 'cargf' [-Wincompatible-library-redeclaration] int cargf (void); ^ _configtest.c:4:5: note: 'cargf' is a builtin with type 'float (_Complex float)' _configtest.c:5:5: warning: incompatible redeclaration of library function 'casinf' [-Wincompatible-library-redeclaration] int casinf (void); ^ _configtest.c:5:5: note: 'casinf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:6:5: warning: incompatible redeclaration of library function 'casinhf' [-Wincompatible-library-redeclaration] int casinhf (void); ^ _configtest.c:6:5: note: 'casinhf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:7:5: warning: incompatible redeclaration of library function 'catanf' [-Wincompatible-library-redeclaration] int catanf (void); ^ _configtest.c:7:5: note: 'catanf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:8:5: warning: incompatible redeclaration of library function 'catanhf' [-Wincompatible-library-redeclaration] int catanhf (void); ^ _configtest.c:8:5: note: 'catanhf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:9:5: warning: incompatible redeclaration of library function 'ccosf' [-Wincompatible-library-redeclaration] int ccosf (void); ^ _configtest.c:9:5: note: 'ccosf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:10:5: warning: incompatible redeclaration of library function 'ccoshf' [-Wincompatible-library-redeclaration] int ccoshf (void); ^ _configtest.c:10:5: note: 'ccoshf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:11:5: warning: incompatible redeclaration of library function 'cexpf' [-Wincompatible-library-redeclaration] int cexpf (void); ^ _configtest.c:11:5: note: 'cexpf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:12:5: warning: incompatible redeclaration of library function 'cimagf' 
[-Wincompatible-library-redeclaration] int cimagf (void); ^ _configtest.c:12:5: note: 'cimagf' is a builtin with type 'float (_Complex float)' _configtest.c:13:5: warning: incompatible redeclaration of library function 'clogf' [-Wincompatible-library-redeclaration] int clogf (void); ^ _configtest.c:13:5: note: 'clogf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:14:5: warning: incompatible redeclaration of library function 'conjf' [-Wincompatible-library-redeclaration] int conjf (void); ^ _configtest.c:14:5: note: 'conjf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:15:5: warning: incompatible redeclaration of library function 'cpowf' [-Wincompatible-library-redeclaration] int cpowf (void); ^ _configtest.c:15:5: note: 'cpowf' is a builtin with type '_Complex float (_Complex float, _Complex float)' _configtest.c:16:5: warning: incompatible redeclaration of library function 'cprojf' [-Wincompatible-library-redeclaration] int cprojf (void); ^ _configtest.c:16:5: note: 'cprojf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:17:5: warning: incompatible redeclaration of library function 'crealf' [-Wincompatible-library-redeclaration] int crealf (void); ^ _configtest.c:17:5: note: 'crealf' is a builtin with type 'float (_Complex float)' _configtest.c:18:5: warning: incompatible redeclaration of library function 'csinf' [-Wincompatible-library-redeclaration] int csinf (void); ^ _configtest.c:18:5: note: 'csinf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:19:5: warning: incompatible redeclaration of library function 'csinhf' [-Wincompatible-library-redeclaration] int csinhf (void); ^ _configtest.c:19:5: note: 'csinhf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:20:5: warning: incompatible redeclaration of library function 'csqrtf' [-Wincompatible-library-redeclaration] int csqrtf (void); ^ _configtest.c:20:5: note: 'csqrtf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:21:5: warning: incompatible redeclaration of library function 'ctanf' [-Wincompatible-library-redeclaration] int ctanf (void); ^ _configtest.c:21:5: note: 'ctanf' is a builtin with type '_Complex float (_Complex float)' _configtest.c:22:5: warning: incompatible redeclaration of library function 'ctanhf' [-Wincompatible-library-redeclaration] int ctanhf (void); ^ _configtest.c:22:5: note: 'ctanhf' is a builtin with type '_Complex float (_Complex float)' 22 warnings generated. clang _configtest.o -o _configtest success! 
removing: _configtest.c _configtest.o _configtest.o.d _configtest C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:1:5: warning: incompatible redeclaration of library function 'cabsl' [-Wincompatible-library-redeclaration] int cabsl (void); ^ _configtest.c:1:5: note: 'cabsl' is a builtin with type 'long double (_Complex long double)' _configtest.c:2:5: warning: incompatible redeclaration of library function 'cacosl' [-Wincompatible-library-redeclaration] int cacosl (void); ^ _configtest.c:2:5: note: 'cacosl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:3:5: warning: incompatible redeclaration of library function 'cacoshl' [-Wincompatible-library-redeclaration] int cacoshl (void); ^ _configtest.c:3:5: note: 'cacoshl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:4:5: warning: incompatible redeclaration of library function 'cargl' [-Wincompatible-library-redeclaration] int cargl (void); ^ _configtest.c:4:5: note: 'cargl' is a builtin with type 'long double (_Complex long double)' _configtest.c:5:5: warning: incompatible redeclaration of library function 'casinl' [-Wincompatible-library-redeclaration] int casinl (void); ^ _configtest.c:5:5: note: 'casinl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:6:5: warning: incompatible redeclaration of library function 'casinhl' [-Wincompatible-library-redeclaration] int casinhl (void); ^ _configtest.c:6:5: note: 'casinhl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:7:5: warning: incompatible redeclaration of library function 'catanl' [-Wincompatible-library-redeclaration] int catanl (void); ^ _configtest.c:7:5: note: 'catanl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:8:5: warning: incompatible redeclaration of library function 'catanhl' [-Wincompatible-library-redeclaration] int catanhl (void); ^ _configtest.c:8:5: note: 'catanhl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:9:5: warning: incompatible redeclaration of library function 'ccosl' [-Wincompatible-library-redeclaration] int ccosl (void); ^ _configtest.c:9:5: note: 'ccosl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:10:5: warning: incompatible redeclaration of library function 'ccoshl' [-Wincompatible-library-redeclaration] int ccoshl (void); ^ _configtest.c:10:5: note: 'ccoshl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:11:5: warning: incompatible redeclaration of library function 'cexpl' [-Wincompatible-library-redeclaration] int cexpl (void); ^ _configtest.c:11:5: note: 'cexpl' is a builtin with type '_Complex long double 
(_Complex long double)' _configtest.c:12:5: warning: incompatible redeclaration of library function 'cimagl' [-Wincompatible-library-redeclaration] int cimagl (void); ^ _configtest.c:12:5: note: 'cimagl' is a builtin with type 'long double (_Complex long double)' _configtest.c:13:5: warning: incompatible redeclaration of library function 'clogl' [-Wincompatible-library-redeclaration] int clogl (void); ^ _configtest.c:13:5: note: 'clogl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:14:5: warning: incompatible redeclaration of library function 'conjl' [-Wincompatible-library-redeclaration] int conjl (void); ^ _configtest.c:14:5: note: 'conjl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:15:5: warning: incompatible redeclaration of library function 'cpowl' [-Wincompatible-library-redeclaration] int cpowl (void); ^ _configtest.c:15:5: note: 'cpowl' is a builtin with type '_Complex long double (_Complex long double, _Complex long double)' _configtest.c:16:5: warning: incompatible redeclaration of library function 'cprojl' [-Wincompatible-library-redeclaration] int cprojl (void); ^ _configtest.c:16:5: note: 'cprojl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:17:5: warning: incompatible redeclaration of library function 'creall' [-Wincompatible-library-redeclaration] int creall (void); ^ _configtest.c:17:5: note: 'creall' is a builtin with type 'long double (_Complex long double)' _configtest.c:18:5: warning: incompatible redeclaration of library function 'csinl' [-Wincompatible-library-redeclaration] int csinl (void); ^ _configtest.c:18:5: note: 'csinl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:19:5: warning: incompatible redeclaration of library function 'csinhl' [-Wincompatible-library-redeclaration] int csinhl (void); ^ _configtest.c:19:5: note: 'csinhl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:20:5: warning: incompatible redeclaration of library function 'csqrtl' [-Wincompatible-library-redeclaration] int csqrtl (void); ^ _configtest.c:20:5: note: 'csqrtl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:21:5: warning: incompatible redeclaration of library function 'ctanl' [-Wincompatible-library-redeclaration] int ctanl (void); ^ _configtest.c:21:5: note: 'ctanl' is a builtin with type '_Complex long double (_Complex long double)' _configtest.c:22:5: warning: incompatible redeclaration of library function 'ctanhl' [-Wincompatible-library-redeclaration] int ctanhl (void); ^ _configtest.c:22:5: note: 'ctanhl' is a builtin with type '_Complex long double (_Complex long double)' 22 warnings generated. clang _configtest.o -o _configtest success! 
removing: _configtest.c _configtest.o _configtest.o.d _configtest C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:2:12: warning: unused function 'static_func' [-Wunused-function] static int static_func (char * restrict a) ^ 1 warning generated. success! removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:3:19: warning: unused function 'static_func' [-Wunused-function] static inline int static_func (void) ^ 1 warning generated. success! 
removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c removing: _configtest.c _configtest.o _configtest.o.d File: build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/config.h #define SIZEOF_PY_INTPTR_T 8 #define SIZEOF_OFF_T 8 #define SIZEOF_PY_LONG_LONG 8 #define MATHLIB #define HAVE_SIN 1 #define HAVE_COS 1 #define HAVE_TAN 1 #define HAVE_SINH 1 #define HAVE_COSH 1 #define HAVE_TANH 1 #define HAVE_FABS 1 #define HAVE_FLOOR 1 #define HAVE_CEIL 1 #define HAVE_SQRT 1 #define HAVE_LOG10 1 #define HAVE_LOG 1 #define HAVE_EXP 1 #define HAVE_ASIN 1 #define HAVE_ACOS 1 #define HAVE_ATAN 1 #define HAVE_FMOD 1 #define HAVE_MODF 1 #define HAVE_FREXP 1 #define HAVE_LDEXP 1 #define HAVE_RINT 1 #define HAVE_TRUNC 1 #define HAVE_EXP2 1 #define HAVE_LOG2 1 #define HAVE_ATAN2 1 #define HAVE_POW 1 #define HAVE_NEXTAFTER 1 #define HAVE_STRTOLL 1 #define HAVE_STRTOULL 1 #define HAVE_CBRT 1 #define HAVE_STRTOLD_L 1 #define HAVE_BACKTRACE 1 #define HAVE_MADVISE 1 #define HAVE_XMMINTRIN_H 1 #define HAVE_EMMINTRIN_H 1 #define HAVE_XLOCALE_H 1 #define HAVE_DLFCN_H 1 #define HAVE_SYS_MMAN_H 1 #define HAVE___BUILTIN_ISNAN 1 #define HAVE___BUILTIN_ISINF 1 #define HAVE___BUILTIN_ISFINITE 1 #define HAVE___BUILTIN_BSWAP32 1 #define HAVE___BUILTIN_BSWAP64 1 #define HAVE___BUILTIN_EXPECT 1 #define HAVE___BUILTIN_MUL_OVERFLOW 1 #define HAVE___BUILTIN_CPU_SUPPORTS 1 #define HAVE__M_FROM_INT64 1 #define HAVE__MM_LOAD_PS 1 #define HAVE__MM_PREFETCH 1 #define HAVE__MM_LOAD_PD 1 #define HAVE___BUILTIN_PREFETCH 1 #define HAVE_LINK_AVX 1 #define HAVE_LINK_AVX2 1 #define HAVE_XGETBV 1 #define HAVE_ATTRIBUTE_NONNULL 1 #define HAVE_ATTRIBUTE_TARGET_AVX 1 #define HAVE_ATTRIBUTE_TARGET_AVX2 1 #define HAVE___THREAD 1 #define HAVE_SINF 1 #define HAVE_COSF 1 #define HAVE_TANF 1 #define HAVE_SINHF 1 #define HAVE_COSHF 1 #define HAVE_TANHF 1 #define HAVE_FABSF 1 #define HAVE_FLOORF 1 #define HAVE_CEILF 1 #define HAVE_RINTF 1 #define HAVE_TRUNCF 1 #define HAVE_SQRTF 1 #define HAVE_LOG10F 1 #define HAVE_LOGF 1 #define HAVE_LOG1PF 1 #define HAVE_EXPF 1 #define HAVE_EXPM1F 1 #define HAVE_ASINF 1 #define HAVE_ACOSF 1 #define HAVE_ATANF 1 #define HAVE_ASINHF 1 #define HAVE_ACOSHF 1 #define HAVE_ATANHF 1 #define HAVE_HYPOTF 1 #define HAVE_ATAN2F 1 #define HAVE_POWF 1 #define HAVE_FMODF 1 #define HAVE_MODFF 1 #define HAVE_FREXPF 1 #define HAVE_LDEXPF 1 #define HAVE_EXP2F 1 #define HAVE_LOG2F 1 #define HAVE_COPYSIGNF 1 #define HAVE_NEXTAFTERF 1 #define HAVE_CBRTF 1 #define HAVE_SINL 1 #define HAVE_COSL 1 #define HAVE_TANL 1 #define HAVE_SINHL 1 #define HAVE_COSHL 1 #define HAVE_TANHL 1 #define HAVE_FABSL 1 #define HAVE_FLOORL 1 #define HAVE_CEILL 1 #define HAVE_RINTL 1 #define HAVE_TRUNCL 1 #define HAVE_SQRTL 1 #define HAVE_LOG10L 1 #define HAVE_LOGL 
1 #define HAVE_LOG1PL 1 #define HAVE_EXPL 1 #define HAVE_EXPM1L 1 #define HAVE_ASINL 1 #define HAVE_ACOSL 1 #define HAVE_ATANL 1 #define HAVE_ASINHL 1 #define HAVE_ACOSHL 1 #define HAVE_ATANHL 1 #define HAVE_HYPOTL 1 #define HAVE_ATAN2L 1 #define HAVE_POWL 1 #define HAVE_FMODL 1 #define HAVE_MODFL 1 #define HAVE_FREXPL 1 #define HAVE_LDEXPL 1 #define HAVE_EXP2L 1 #define HAVE_LOG2L 1 #define HAVE_COPYSIGNL 1 #define HAVE_NEXTAFTERL 1 #define HAVE_CBRTL 1 #define HAVE_DECL_SIGNBIT #define HAVE_COMPLEX_H 1 #define HAVE_CABS 1 #define HAVE_CACOS 1 #define HAVE_CACOSH 1 #define HAVE_CARG 1 #define HAVE_CASIN 1 #define HAVE_CASINH 1 #define HAVE_CATAN 1 #define HAVE_CATANH 1 #define HAVE_CCOS 1 #define HAVE_CCOSH 1 #define HAVE_CEXP 1 #define HAVE_CIMAG 1 #define HAVE_CLOG 1 #define HAVE_CONJ 1 #define HAVE_CPOW 1 #define HAVE_CPROJ 1 #define HAVE_CREAL 1 #define HAVE_CSIN 1 #define HAVE_CSINH 1 #define HAVE_CSQRT 1 #define HAVE_CTAN 1 #define HAVE_CTANH 1 #define HAVE_CABSF 1 #define HAVE_CACOSF 1 #define HAVE_CACOSHF 1 #define HAVE_CARGF 1 #define HAVE_CASINF 1 #define HAVE_CASINHF 1 #define HAVE_CATANF 1 #define HAVE_CATANHF 1 #define HAVE_CCOSF 1 #define HAVE_CCOSHF 1 #define HAVE_CEXPF 1 #define HAVE_CIMAGF 1 #define HAVE_CLOGF 1 #define HAVE_CONJF 1 #define HAVE_CPOWF 1 #define HAVE_CPROJF 1 #define HAVE_CREALF 1 #define HAVE_CSINF 1 #define HAVE_CSINHF 1 #define HAVE_CSQRTF 1 #define HAVE_CTANF 1 #define HAVE_CTANHF 1 #define HAVE_CABSL 1 #define HAVE_CACOSL 1 #define HAVE_CACOSHL 1 #define HAVE_CARGL 1 #define HAVE_CASINL 1 #define HAVE_CASINHL 1 #define HAVE_CATANL 1 #define HAVE_CATANHL 1 #define HAVE_CCOSL 1 #define HAVE_CCOSHL 1 #define HAVE_CEXPL 1 #define HAVE_CIMAGL 1 #define HAVE_CLOGL 1 #define HAVE_CONJL 1 #define HAVE_CPOWL 1 #define HAVE_CPROJL 1 #define HAVE_CREALL 1 #define HAVE_CSINL 1 #define HAVE_CSINHL 1 #define HAVE_CSQRTL 1 #define HAVE_CTANL 1 #define HAVE_CTANHL 1 #define NPY_RESTRICT restrict #define NPY_RELAXED_STRIDES_CHECKING 1 #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1 #define NPY_PY3K 1 #ifndef __cplusplus /* #undef inline */ #endif #ifndef _NPY_NPY_CONFIG_H_ #error config.h should never be included directly, include npy_config.h instead #endif EOF adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/config.h' to sources. Generating build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/_numpyconfig.h C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c _configtest.c:1:5: warning: incompatible redeclaration of library function 'exp' [-Wincompatible-library-redeclaration] int exp (void); ^ _configtest.c:1:5: note: 'exp' is a builtin with type 'double (double)' 1 warning generated. clang _configtest.o -o _configtest success! 
removing: _configtest.c _configtest.o _configtest.o.d _configtest C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! removing: _configtest.c _configtest.o _configtest.o.d C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -c' clang: _configtest.c success! removing: _configtest.c _configtest.o _configtest.o.d File: build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/_numpyconfig.h #define NPY_SIZEOF_SHORT SIZEOF_SHORT #define NPY_SIZEOF_INT SIZEOF_INT #define NPY_SIZEOF_LONG SIZEOF_LONG #define NPY_SIZEOF_FLOAT 4 #define NPY_SIZEOF_COMPLEX_FLOAT 8 #define NPY_SIZEOF_DOUBLE 8 #define NPY_SIZEOF_COMPLEX_DOUBLE 16 #define NPY_SIZEOF_LONGDOUBLE 16 #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 #define NPY_SIZEOF_PY_INTPTR_T 8 #define NPY_SIZEOF_OFF_T 8 #define NPY_SIZEOF_PY_LONG_LONG 8 #define NPY_SIZEOF_LONGLONG 8 #define NPY_NO_SMP 0 #define NPY_HAVE_DECL_ISNAN #define NPY_HAVE_DECL_ISINF #define NPY_HAVE_DECL_ISFINITE #define NPY_HAVE_DECL_SIGNBIT #define NPY_USE_C99_COMPLEX 1 #define NPY_HAVE_COMPLEX_DOUBLE 1 #define NPY_HAVE_COMPLEX_FLOAT 1 #define NPY_HAVE_COMPLEX_LONG_DOUBLE 1 #define NPY_RELAXED_STRIDES_CHECKING 1 #define NPY_USE_C99_FORMATS 1 #define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) #define NPY_ABI_VERSION 0x01000009 #define NPY_API_VERSION 0x0000000D #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS 1 #endif EOF adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/_numpyconfig.h' to sources. executing numpy/core/code_generators/generate_numpy_api.py adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/__multiarray_api.h' to sources. 
numpy.core - nothing done with h_files = ['build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/config.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/_numpyconfig.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/__multiarray_api.h'] building extension "numpy.core._multiarray_tests" sources creating build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/_multiarray_tests.c building extension "numpy.core._multiarray_umath" sources adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/config.h' to sources. adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/_numpyconfig.h' to sources. executing numpy/core/code_generators/generate_numpy_api.py adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/__multiarray_api.h' to sources. executing numpy/core/code_generators/generate_ufunc_api.py adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/__ufunc_api.h' to sources. conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/arraytypes.c conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/einsum.c conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/lowlevel_strided_loops.c conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/nditer_templ.c conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/scalartypes.c creating build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/funcs.inc adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath' to include_dirs. conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/simd.inc conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/loops.h conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/loops.c conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/matmul.h conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/matmul.c conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/scalarmath.c adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath' to include_dirs. conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/common/templ_common.h adding 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/common' to include_dirs. 
numpy.core - nothing done with h_files = ['build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/funcs.inc', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/simd.inc', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/loops.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/matmul.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/npy_math_internal.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/src/common/templ_common.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/config.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/_numpyconfig.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/__multiarray_api.h', 'build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/__ufunc_api.h'] building extension "numpy.core._umath_tests" sources conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_umath_tests.c building extension "numpy.core._rational_tests" sources conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_rational_tests.c building extension "numpy.core._struct_ufunc_tests" sources conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_struct_ufunc_tests.c building extension "numpy.core._operand_flag_tests" sources conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_operand_flag_tests.c building extension "numpy.fft.fftpack_lite" sources building extension "numpy.linalg.lapack_lite" sources creating build/src.macosx-10.15-x86_64-3.9/numpy/linalg adding 'numpy/linalg/lapack_lite/python_xerbla.c' to sources. building extension "numpy.linalg._umath_linalg" sources adding 'numpy/linalg/lapack_lite/python_xerbla.c' to sources. conv_template:> build/src.macosx-10.15-x86_64-3.9/numpy/linalg/umath_linalg.c building extension "numpy.random.mtrand" sources creating build/src.macosx-10.15-x86_64-3.9/numpy/random building data_files sources build_src: building npy-pkg config files running build_py creating build/lib.macosx-10.15-x86_64-3.9 creating build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/conftest.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/version.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/_globals.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/dual.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/_distributor_init.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/ctypeslib.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/matlib.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying numpy/_pytesttester.py -> build/lib.macosx-10.15-x86_64-3.9/numpy copying build/src.macosx-10.15-x86_64-3.9/numpy/__config__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy creating build/lib.macosx-10.15-x86_64-3.9/numpy/compat copying numpy/compat/py3k.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/compat copying numpy/compat/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/compat copying numpy/compat/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/compat copying numpy/compat/_inspect.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/compat creating build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/umath.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/fromnumeric.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/_dtype.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying 
numpy/core/_add_newdocs.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/_methods.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/_internal.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/_string_helpers.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/multiarray.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/records.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/setup_common.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/_aliased_types.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/memmap.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/overrides.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/getlimits.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/_dtype_ctypes.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/defchararray.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/shape_base.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/machar.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/numeric.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/function_base.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/einsumfunc.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/umath_tests.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/numerictypes.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/_type_aliases.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/cversions.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/arrayprint.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core copying numpy/core/code_generators/generate_numpy_api.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/core creating build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/unixccompiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/numpy_distribution.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/conv_template.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/cpuinfo.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/ccompiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/msvc9compiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/npy_pkg_config.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/compat.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/misc_util.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/log.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/line_endings.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/lib2def.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/pathccompiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/system_info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying 
numpy/distutils/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/core.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/__version__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/exec_command.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/from_template.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/mingw32ccompiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/extension.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/msvccompiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/intelccompiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying numpy/distutils/info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils copying build/src.macosx-10.15-x86_64-3.9/numpy/distutils/__config__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils creating build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/build.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/config_compiler.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/build_ext.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/config.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/install_headers.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/build_py.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/build_src.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/sdist.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/build_scripts.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/bdist_rpm.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/install_clib.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/build_clib.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/autodist.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/egg_info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/install.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/develop.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command copying numpy/distutils/command/install_data.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/command creating build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/gnu.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/compaq.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/intel.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/none.py -> 
build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/nag.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/pg.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/ibm.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/sun.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/lahey.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/g95.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/mips.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/hpux.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/environment.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/pathf95.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/absoft.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler copying numpy/distutils/fcompiler/vast.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/distutils/fcompiler creating build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/misc.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/internals.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/creation.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/constants.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/ufuncs.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/broadcasting.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/basics.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/subclassing.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/indexing.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/byteswapping.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/structured_arrays.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc copying numpy/doc/glossary.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/doc creating build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/cfuncs.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/common_rules.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/crackfortran.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/cb_rules.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/rules.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/f2py2e.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/func2subr.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/__version__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/diagnose.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/capi_maps.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/f90mod_rules.py -> 
build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/f2py_testing.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/use_rules.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/auxfuncs.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py copying numpy/f2py/__main__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/f2py creating build/lib.macosx-10.15-x86_64-3.9/numpy/fft copying numpy/fft/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/fft copying numpy/fft/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/fft copying numpy/fft/helper.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/fft copying numpy/fft/fftpack.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/fft copying numpy/fft/info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/fft creating build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/_iotools.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/mixins.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/nanfunctions.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/recfunctions.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/histograms.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/scimath.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/_version.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/user_array.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/format.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/twodim_base.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/financial.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/index_tricks.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/npyio.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/shape_base.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/stride_tricks.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/utils.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/arrayterator.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/function_base.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/arraysetops.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/arraypad.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/type_check.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/polynomial.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/_datasource.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib copying numpy/lib/ufunclike.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/lib creating build/lib.macosx-10.15-x86_64-3.9/numpy/linalg copying numpy/linalg/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/linalg copying numpy/linalg/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/linalg copying numpy/linalg/linalg.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/linalg copying numpy/linalg/info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/linalg creating build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/extras.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying 
numpy/ma/version.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/testutils.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/core.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/bench.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/timer_comparison.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma copying numpy/ma/mrecords.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/ma creating build/lib.macosx-10.15-x86_64-3.9/numpy/matrixlib copying numpy/matrixlib/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/matrixlib copying numpy/matrixlib/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/matrixlib copying numpy/matrixlib/defmatrix.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/matrixlib creating build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/laguerre.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/_polybase.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/polyutils.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/hermite_e.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/chebyshev.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/polynomial.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/legendre.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial copying numpy/polynomial/hermite.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/polynomial creating build/lib.macosx-10.15-x86_64-3.9/numpy/random copying numpy/random/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/random copying numpy/random/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/random copying numpy/random/info.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/random creating build/lib.macosx-10.15-x86_64-3.9/numpy/testing copying numpy/testing/nosetester.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing copying numpy/testing/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing copying numpy/testing/noseclasses.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing copying numpy/testing/setup.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing copying numpy/testing/utils.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing copying numpy/testing/print_coercion_tables.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing copying numpy/testing/decorators.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing creating build/lib.macosx-10.15-x86_64-3.9/numpy/testing/_private copying numpy/testing/_private/nosetester.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing/_private copying numpy/testing/_private/__init__.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing/_private copying numpy/testing/_private/noseclasses.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing/_private copying numpy/testing/_private/utils.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing/_private copying numpy/testing/_private/parameterized.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing/_private copying numpy/testing/_private/decorators.py -> build/lib.macosx-10.15-x86_64-3.9/numpy/testing/_private running 
build_clib customize UnixCCompiler customize UnixCCompiler using build_clib building 'npymath' library compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers creating build/temp.macosx-10.15-x86_64-3.9 creating build/temp.macosx-10.15-x86_64-3.9/numpy creating build/temp.macosx-10.15-x86_64-3.9/numpy/core creating build/temp.macosx-10.15-x86_64-3.9/numpy/core/src creating build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/npymath creating build/temp.macosx-10.15-x86_64-3.9/build creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9 creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath compile options: '-Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' clang: numpy/core/src/npymath/npy_math.c clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/npy_math_complex.c clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/ieee754.c clang: numpy/core/src/npymath/halffloat.c numpy/core/src/npymath/npy_math_complex.c.src:48:33: warning: unused variable 'tiny' [-Wunused-const-variable] static const volatile npy_float tiny = 3.9443045e-31f; ^ numpy/core/src/npymath/npy_math_complex.c.src:67:25: warning: unused variable 'c_halff' [-Wunused-const-variable] static const npy_cfloat c_halff = {0.5F, 0.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:68:25: warning: unused variable 'c_if' [-Wunused-const-variable] static const npy_cfloat c_if = {0.0, 1.0F}; ^ numpy/core/src/npymath/npy_math_complex.c.src:69:25: warning: unused variable 'c_ihalff' [-Wunused-const-variable] static const npy_cfloat c_ihalff = {0.0, 0.5F}; ^ numpy/core/src/npymath/npy_math_complex.c.src:79:1: warning: unused function 'caddf' [-Wunused-function] caddf(npy_cfloat a, npy_cfloat b) ^ numpy/core/src/npymath/npy_math_complex.c.src:87:1: warning: unused function 'csubf' [-Wunused-function] csubf(npy_cfloat a, npy_cfloat b) ^ numpy/core/src/npymath/npy_math_complex.c.src:137:1: warning: unused function 'cnegf' [-Wunused-function] cnegf(npy_cfloat a) ^ numpy/core/src/npymath/npy_math_complex.c.src:144:1: warning: unused function 'cmulif' [-Wunused-function] cmulif(npy_cfloat a) ^ numpy/core/src/npymath/npy_math_complex.c.src:67:26: warning: unused 
variable 'c_half' [-Wunused-const-variable] static const npy_cdouble c_half = {0.5, 0.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:68:26: warning: unused variable 'c_i' [-Wunused-const-variable] static const npy_cdouble c_i = {0.0, 1.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:69:26: warning: unused variable 'c_ihalf' [-Wunused-const-variable] static const npy_cdouble c_ihalf = {0.0, 0.5}; ^ numpy/core/src/npymath/npy_math_complex.c.src:79:1: warning: unused function 'cadd' [-Wunused-function] cadd(npy_cdouble a, npy_cdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:87:1: warning: unused function 'csub' [-Wunused-function] csub(npy_cdouble a, npy_cdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:137:1: warning: unused function 'cneg' [-Wunused-function] cneg(npy_cdouble a) ^ numpy/core/src/npymath/npy_math_complex.c.src:144:1: warning: unused function 'cmuli' [-Wunused-function] cmuli(npy_cdouble a) ^ numpy/core/src/npymath/npy_math_complex.c.src:67:30: warning: unused variable 'c_halfl' [-Wunused-const-variable] static const npy_clongdouble c_halfl = {0.5L, 0.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:68:30: warning: unused variable 'c_il' [-Wunused-const-variable] static const npy_clongdouble c_il = {0.0, 1.0L}; ^ numpy/core/src/npymath/npy_math_complex.c.src:69:30: warning: unused variable 'c_ihalfl' [-Wunused-const-variable] static const npy_clongdouble c_ihalfl = {0.0, 0.5L}; ^ numpy/core/src/npymath/npy_math_complex.c.src:79:1: warning: unused function 'caddl' [-Wunused-function] caddl(npy_clongdouble a, npy_clongdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:87:1: warning: unused function 'csubl' [-Wunused-function] csubl(npy_clongdouble a, npy_clongdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:137:1: warning: unused function 'cnegl' [-Wunused-function] cnegl(npy_clongdouble a) ^ numpy/core/src/npymath/npy_math_complex.c.src:144:1: warning: unused function 'cmulil' [-Wunused-function] cmulil(npy_clongdouble a) ^ 22 warnings generated. 
ar: adding 4 object files to build/temp.macosx-10.15-x86_64-3.9/libnpymath.a
ranlib:@ build/temp.macosx-10.15-x86_64-3.9/libnpymath.a
building 'npysort' library
compiling C sources
C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers
creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npysort
compile options: '-Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c'
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npysort/quicksort.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npysort/mergesort.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npysort/heapsort.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npysort/selection.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npysort/binsearch.c
numpy/core/src/npysort/selection.c.src:328:9: warning: code will never be executed [-Wunreachable-code]
    npy_intp k;
numpy/core/src/npysort/selection.c.src:326:14: note: silence by adding parentheses to mark code as explicitly dead
    else if (0 && kth == num - 1) {
             ^  /* DISABLES CODE */ ( )
(this identical warning/note pair repeats many times while compiling selection.c)
22 warnings generated.
ar: adding 5 object files to build/temp.macosx-10.15-x86_64-3.9/libnpysort.a
ranlib:@ build/temp.macosx-10.15-x86_64-3.9/libnpysort.a
running build_ext
customize UnixCCompiler
customize UnixCCompiler using build_ext
building 'numpy.core._dummy' extension
compiling C sources
C compiler: clang (same flags as above)
compile options: '-DNPY_INTERNAL_BUILD=1 -DHAVE_NPY_CONFIG_H=1 -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 (include paths as above) -c'
clang: numpy/core/src/dummymodule.c
clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/dummymodule.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -o build/lib.macosx-10.15-x86_64-3.9/numpy/core/_dummy.cpython-39-darwin.so
building 'numpy.core._multiarray_tests' extension
compiling C sources
C compiler: clang (same flags as above)
creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray
creating build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common
compile options: '-DNPY_INTERNAL_BUILD=1 -DHAVE_NPY_CONFIG_H=1 -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 (include paths as above) -c'
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/_multiarray_tests.c
clang: numpy/core/src/common/mem_overlap.c
clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/_multiarray_tests.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/mem_overlap.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -lnpymath -o build/lib.macosx-10.15-x86_64-3.9/numpy/core/_multiarray_tests.cpython-39-darwin.so
building 'numpy.core._multiarray_umath' extension
compiling C sources
C compiler: clang (same flags as above)
creating build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray
creating build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath
creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath
creating build/temp.macosx-10.15-x86_64-3.9/private/var/folders/fz/0j719tys48x7jlnjnwc69smr0000gn/T/pip-install-ufzck51l/numpy_b0e8a3953a1d4b46801f12bcea55536e/numpy/_build_utils/src
(the intermediate private/var/... directories created along the way are omitted here)
compile options: '-DNPY_INTERNAL_BUILD=1 -DHAVE_NPY_CONFIG_H=1 -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 -DNO_ATLAS_INFO=3 -DHAVE_CBLAS -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common (remaining include paths as above) -c'
extra options: '-msse3 -I/System/Library/Frameworks/vecLib.framework/Headers'
clang: numpy/core/src/multiarray/alloc.c
clang: numpy/core/src/multiarray/calculation.c
clang: numpy/core/src/multiarray/array_assign_scalar.c
clang: numpy/core/src/multiarray/convert.c
clang: numpy/core/src/multiarray/ctors.c
clang: numpy/core/src/multiarray/datetime_busday.c
clang: numpy/core/src/multiarray/dragon4.c
clang: numpy/core/src/multiarray/flagsobject.c
numpy/core/src/multiarray/ctors.c:2261:36: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    if (!(PyUString_Check(name) && PyUString_GET_SIZE(name) == 0)) {
numpy/core/include/numpy/npy_3kcompat.h:110:28: note: expanded from macro 'PyUString_GET_SIZE'
    #define PyUString_GET_SIZE PyUnicode_GET_SIZE
/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH'
    #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op)
/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here
    Py_DEPRECATED(3.3)
(the same source line also produces 'PyUnicode_AsUnicode' deprecation warnings; every -Wdeprecated-declarations warning from here on carries the same chain of macro-expansion notes into python3.9's unicodeobject.h and pyport.h, so only the warning line and the offending source line are kept below)
clang: numpy/core/src/multiarray/arrayobject.c
clang: numpy/core/src/multiarray/array_assign_array.c
clang: numpy/core/src/multiarray/convert_datatype.c
clang: numpy/core/src/multiarray/getset.c
clang: numpy/core/src/multiarray/datetime_busdaycal.c
clang: numpy/core/src/multiarray/buffer.c
clang: numpy/core/src/multiarray/compiled_base.c
clang: numpy/core/src/multiarray/hashdescr.c
clang: numpy/core/src/multiarray/descriptor.c
numpy/core/src/multiarray/descriptor.c:453:13: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    if (PyUString_GET_SIZE(name) == 0) {
numpy/core/src/multiarray/descriptor.c:460:48: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    else if (PyUString_Check(title) && PyUString_GET_SIZE(title) > 0) {
clang: numpy/core/src/multiarray/conversion_utils.c
clang: numpy/core/src/multiarray/item_selection.c
clang: numpy/core/src/multiarray/dtype_transfer.c
clang: numpy/core/src/multiarray/mapping.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/arraytypes.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/nditer_templ.c
3 warnings generated.
clang: numpy/core/src/multiarray/datetime.c
numpy/core/src/multiarray/arraytypes.c.src:477:11: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations]
    ptr = PyUnicode_AS_UNICODE(temp);
numpy/core/src/multiarray/arraytypes.c.src:482:15: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    datalen = PyUnicode_GET_DATA_SIZE(temp);
clang: numpy/core/src/multiarray/common.c
numpy/core/src/multiarray/common.c:187:28: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    itemsize = PyUnicode_GET_DATA_SIZE(temp);
numpy/core/src/multiarray/common.c:239:28: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    itemsize = PyUnicode_GET_DATA_SIZE(temp);
numpy/core/src/multiarray/common.c:282:24: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    int itemsize = PyUnicode_GET_DATA_SIZE(obj);
6 warnings generated.
clang: numpy/core/src/multiarray/nditer_pywrap.c
9 warnings generated.
clang: numpy/core/src/multiarray/sequence.c
clang: numpy/core/src/multiarray/shape.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/einsum.c
clang: numpy/core/src/multiarray/methods.c
clang: numpy/core/src/multiarray/iterators.c
clang: numpy/core/src/multiarray/datetime_strings.c
clang: numpy/core/src/multiarray/number.c
clang: numpy/core/src/multiarray/scalarapi.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/scalartypes.c
numpy/core/src/multiarray/scalarapi.c:74:28: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations]
    return (void *)PyUnicode_AS_DATA(scalar);
numpy/core/src/multiarray/scalarapi.c:135:28: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations]
    return (void *)PyUnicode_AS_DATA(scalar);
numpy/core/src/multiarray/scalarapi.c:568:29: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    descr->elsize = PyUnicode_GET_DATA_SIZE(sc);
numpy/core/src/multiarray/scalartypes.c.src:475:17: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations]
    ip = dptr = PyUnicode_AS_UNICODE(self);
numpy/core/src/multiarray/scalartypes.c.src:476:11: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    len = PyUnicode_GET_SIZE(self);
numpy/core/src/multiarray/scalartypes.c.src:481:11: warning: 'PyUnicode_FromUnicode' is deprecated [-Wdeprecated-declarations]
    new = PyUnicode_FromUnicode(ip, len);
(the scalartypes.c.src:475, 476 and 481 warnings appear a second time later in the log for the same file)
numpy/core/src/multiarray/scalartypes.c.src:1849:18: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations]
    buffer = PyUnicode_AS_DATA(self);
numpy/core/src/multiarray/scalartypes.c.src:1850:18: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations]
    buflen = PyUnicode_GET_DATA_SIZE(self);
5 warnings generated.
clang: numpy/core/src/multiarray/typeinfo.c
clang: numpy/core/src/multiarray/refcount.c
clang: numpy/core/src/multiarray/usertypes.c
clang: numpy/core/src/multiarray/multiarraymodule.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/lowlevel_strided_loops.c
clang: numpy/core/src/multiarray/vdot.c
clang: numpy/core/src/umath/umathmodule.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/matmul.c
clang: numpy/core/src/umath/reduction.c
clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/loops.c
clang: numpy/core/src/multiarray/nditer_api.c
14 warnings generated.
clang: numpy/core/src/multiarray/strfuncs.c numpy/core/src/umath/loops.c.src:655:18: warning: 'PyEval_CallObjectWithKeywords' is deprecated [-Wdeprecated-declarations] result = PyEval_CallObject(tocall, arglist); ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/ceval.h:24:5: note: expanded from macro 'PyEval_CallObject' PyEval_CallObjectWithKeywords(callable, arg, (PyObject *)NULL) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/ceval.h:17:1: note: 'PyEval_CallObjectWithKeywords' has been explicitly marked deprecated here Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallObjectWithKeywords( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/core/src/multiarray/strfuncs.c:178:13: warning: 'PyEval_CallObjectWithKeywords' is deprecated [-Wdeprecated-declarations] s = PyEval_CallObject(PyArray_ReprFunction, arglist); ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/ceval.h:24:5: note: expanded from macro 'PyEval_CallObject' PyEval_CallObjectWithKeywords(callable, arg, (PyObject *)NULL) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/ceval.h:17:1: note: 'PyEval_CallObjectWithKeywords' has been explicitly marked deprecated here Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallObjectWithKeywords( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/core/src/multiarray/strfuncs.c:195:13: warning: 'PyEval_CallObjectWithKeywords' is deprecated [-Wdeprecated-declarations] s = PyEval_CallObject(PyArray_StrFunction, arglist); ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/ceval.h:24:5: note: expanded from macro 'PyEval_CallObject' PyEval_CallObjectWithKeywords(callable, arg, (PyObject *)NULL) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/ceval.h:17:1: note: 'PyEval_CallObjectWithKeywords' has been explicitly marked deprecated here Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallObjectWithKeywords( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ 2 warnings generated. 
clang: numpy/core/src/multiarray/temp_elide.c clang: numpy/core/src/umath/cpuid.c clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/scalarmath.c clang: numpy/core/src/umath/ufunc_object.c numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'byte_long' [-Wunused-function] byte_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'ubyte_long' [-Wunused-function] ubyte_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'short_long' [-Wunused-function] short_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'ushort_long' [-Wunused-function] ushort_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'int_long' [-Wunused-function] int_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'uint_long' [-Wunused-function] uint_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'long_long' [-Wunused-function] long_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'ulong_long' [-Wunused-function] ulong_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'longlong_long' [-Wunused-function] longlong_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'ulonglong_long' [-Wunused-function] ulonglong_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'half_long' [-Wunused-function] half_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'float_long' [-Wunused-function] float_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'double_long' [-Wunused-function] double_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'longdouble_long' [-Wunused-function] longdouble_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'cfloat_long' [-Wunused-function] cfloat_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'cdouble_long' [-Wunused-function] cdouble_long(PyObject *obj) ^ numpy/core/src/umath/scalarmath.c.src:1449:1: warning: unused function 'clongdouble_long' [-Wunused-function] clongdouble_long(PyObject *obj) ^ clang: numpy/core/src/multiarray/nditer_constr.c numpy/core/src/umath/ufunc_object.c:657:19: warning: comparison of integers of different signs: 'int' and 'size_t' (aka 'unsigned long') [-Wsign-compare] for (i = 0; i < len; i++) { ~ ^ ~~~ clang: numpy/core/src/umath/override.c clang: numpy/core/src/npymath/npy_math.c clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/ieee754.c numpy/core/src/umath/loops.c.src:2527:22: warning: code will never be executed [-Wunreachable-code] npy_intp n = dimensions[0]; ^~~~~~~~~~ numpy/core/src/umath/loops.c.src:2526:29: note: silence by adding parentheses to mark code as explicitly dead if (IS_BINARY_REDUCE && 0) { ^ /* DISABLES CODE */ ( ) numpy/core/src/umath/loops.c.src:2527:22: warning: code will never be executed [-Wunreachable-code] npy_intp n = dimensions[0]; ^~~~~~~~~~ numpy/core/src/umath/loops.c.src:2526:29: note: silence by adding parentheses to mark code as explicitly dead if (IS_BINARY_REDUCE && 0) { ^ /* DISABLES CODE */ ( ) numpy/core/src/umath/loops.c.src:2527:22: warning: code will never be 
executed [-Wunreachable-code] npy_intp n = dimensions[0]; ^~~~~~~~~~ numpy/core/src/umath/loops.c.src:2526:29: note: silence by adding parentheses to mark code as explicitly dead if (IS_BINARY_REDUCE && 0) { ^ /* DISABLES CODE */ ( ) clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/npy_math_complex.c numpy/core/src/npymath/npy_math_complex.c.src:48:33: warning: unused variable 'tiny' [-Wunused-const-variable] static const volatile npy_float tiny = 3.9443045e-31f; ^ numpy/core/src/npymath/npy_math_complex.c.src:67:25: warning: unused variable 'c_halff' [-Wunused-const-variable] static const npy_cfloat c_halff = {0.5F, 0.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:68:25: warning: unused variable 'c_if' [-Wunused-const-variable] static const npy_cfloat c_if = {0.0, 1.0F}; ^ numpy/core/src/npymath/npy_math_complex.c.src:69:25: warning: unused variable 'c_ihalff' [-Wunused-const-variable] static const npy_cfloat c_ihalff = {0.0, 0.5F}; ^ numpy/core/src/npymath/npy_math_complex.c.src:79:1: warning: unused function 'caddf' [-Wunused-function] caddf(npy_cfloat a, npy_cfloat b) ^ numpy/core/src/npymath/npy_math_complex.c.src:87:1: warning: unused function 'csubf' [-Wunused-function] csubf(npy_cfloat a, npy_cfloat b) ^ numpy/core/src/npymath/npy_math_complex.c.src:137:1: warning: unused function 'cnegf' [-Wunused-function] cnegf(npy_cfloat a) ^ numpy/core/src/npymath/npy_math_complex.c.src:144:1: warning: unused function 'cmulif' [-Wunused-function] cmulif(npy_cfloat a) ^ numpy/core/src/npymath/npy_math_complex.c.src:67:26: warning: unused variable 'c_half' [-Wunused-const-variable] static const npy_cdouble c_half = {0.5, 0.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:68:26: warning: unused variable 'c_i' [-Wunused-const-variable] static const npy_cdouble c_i = {0.0, 1.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:69:26: warning: unused variable 'c_ihalf' [-Wunused-const-variable] static const npy_cdouble c_ihalf = {0.0, 0.5}; ^ numpy/core/src/npymath/npy_math_complex.c.src:79:1: warning: unused function 'cadd' [-Wunused-function] cadd(npy_cdouble a, npy_cdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:87:1: warning: unused function 'csub' [-Wunused-function] csub(npy_cdouble a, npy_cdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:137:1: warning: unused function 'cneg' [-Wunused-function] cneg(npy_cdouble a) ^ numpy/core/src/npymath/npy_math_complex.c.src:144:1: warning: unused function 'cmuli' [-Wunused-function] cmuli(npy_cdouble a) ^ numpy/core/src/npymath/npy_math_complex.c.src:67:30: warning: unused variable 'c_halfl' [-Wunused-const-variable] static const npy_clongdouble c_halfl = {0.5L, 0.0}; ^ numpy/core/src/npymath/npy_math_complex.c.src:68:30: warning: unused variable 'c_il' [-Wunused-const-variable] static const npy_clongdouble c_il = {0.0, 1.0L}; ^ numpy/core/src/npymath/npy_math_complex.c.src:69:30: warning: unused variable 'c_ihalfl' [-Wunused-const-variable] static const npy_clongdouble c_ihalfl = {0.0, 0.5L}; ^ numpy/core/src/npymath/npy_math_complex.c.src:79:1: warning: unused function 'caddl' [-Wunused-function] caddl(npy_clongdouble a, npy_clongdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:87:1: warning: unused function 'csubl' [-Wunused-function] csubl(npy_clongdouble a, npy_clongdouble b) ^ numpy/core/src/npymath/npy_math_complex.c.src:137:1: warning: unused function 'cnegl' [-Wunused-function] cnegl(npy_clongdouble a) ^ numpy/core/src/npymath/npy_math_complex.c.src:144:1: warning: unused function 'cmulil' 
[-Wunused-function] cmulil(npy_clongdouble a) ^ 22 warnings generated. clang: numpy/core/src/common/mem_overlap.c clang: numpy/core/src/npymath/halffloat.c clang: numpy/core/src/common/array_assign.c clang: numpy/core/src/common/ufunc_override.c clang: numpy/core/src/common/npy_longdouble.c clang: numpy/core/src/common/numpyos.c clang: numpy/core/src/common/ucsnarrow.c 1 warning generated. clang: numpy/core/src/umath/extobj.c numpy/core/src/common/ucsnarrow.c:139:34: warning: 'PyUnicode_FromUnicode' is deprecated [-Wdeprecated-declarations] ret = (PyUnicodeObject *)PyUnicode_FromUnicode((Py_UNICODE*)buf, ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:551:1: note: 'PyUnicode_FromUnicode' has been explicitly marked deprecated here Py_DEPRECATED(3.3) PyAPI_FUNC(PyObject*) PyUnicode_FromUnicode( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ 1 warning generated. clang: numpy/core/src/common/python_xerbla.c clang: numpy/core/src/common/cblasfuncs.c clang: /private/var/folders/fz/0j719tys48x7jlnjnwc69smr0000gn/T/pip-install-ufzck51l/numpy_b0e8a3953a1d4b46801f12bcea55536e/numpy/_build_utils/src/apple_sgemv_fix.c In file included from /private/var/folders/fz/0j719tys48x7jlnjnwc69smr0000gn/T/pip-install-ufzck51l/numpy_b0e8a3953a1d4b46801f12bcea55536e/numpy/_build_utils/src/apple_sgemv_fix.c:26: In file included from numpy/core/include/numpy/arrayobject.h:4: In file included from numpy/core/include/numpy/ndarrayobject.h:21: build/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy/__multiarray_api.h:1463:1: warning: unused function '_import_array' [-Wunused-function] _import_array(void) ^ 1 warning generated. 17 warnings generated. clang: numpy/core/src/umath/ufunc_type_resolution.c 4 warnings generated. 4 warnings generated. 
clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/alloc.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/arrayobject.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/arraytypes.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/array_assign_scalar.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/array_assign_array.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/buffer.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/calculation.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/compiled_base.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/common.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/convert.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/convert_datatype.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/conversion_utils.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/ctors.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/datetime.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/datetime_strings.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/datetime_busday.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/datetime_busdaycal.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/descriptor.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/dragon4.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/dtype_transfer.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/einsum.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/flagsobject.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/getset.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/hashdescr.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/item_selection.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/iterators.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/lowlevel_strided_loops.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/mapping.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/methods.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/multiarraymodule.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/nditer_templ.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/nditer_api.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/nditer_constr.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/nditer_pywrap.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/number.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/refcount.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/sequence.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/shape.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/scalarapi.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/scalartypes.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/strfuncs.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/temp_elide.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/typeinfo.o 
build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/usertypes.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/multiarray/vdot.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath/umathmodule.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath/reduction.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/loops.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/matmul.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath/ufunc_object.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath/extobj.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath/cpuid.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/scalarmath.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath/ufunc_type_resolution.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/umath/override.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/npy_math.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/ieee754.o build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/npy_math_complex.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/npymath/halffloat.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/array_assign.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/mem_overlap.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/npy_longdouble.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/ucsnarrow.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/ufunc_override.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/numpyos.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/cblasfuncs.o build/temp.macosx-10.15-x86_64-3.9/numpy/core/src/common/python_xerbla.o build/temp.macosx-10.15-x86_64-3.9/private/var/folders/fz/0j719tys48x7jlnjnwc69smr0000gn/T/pip-install-ufzck51l/numpy_b0e8a3953a1d4b46801f12bcea55536e/numpy/_build_utils/src/apple_sgemv_fix.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -lnpymath -lnpysort -o build/lib.macosx-10.15-x86_64-3.9/numpy/core/_multiarray_umath.cpython-39-darwin.so -Wl,-framework -Wl,Accelerate building 'numpy.core._umath_tests' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-DNPY_INTERNAL_BUILD=1 -DHAVE_NPY_CONFIG_H=1 -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common 
-Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_umath_tests.c clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_umath_tests.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -o build/lib.macosx-10.15-x86_64-3.9/numpy/core/_umath_tests.cpython-39-darwin.so building 'numpy.core._rational_tests' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-DNPY_INTERNAL_BUILD=1 -DHAVE_NPY_CONFIG_H=1 -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_rational_tests.c clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_rational_tests.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -o build/lib.macosx-10.15-x86_64-3.9/numpy/core/_rational_tests.cpython-39-darwin.so building 'numpy.core._struct_ufunc_tests' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-DNPY_INTERNAL_BUILD=1 -DHAVE_NPY_CONFIG_H=1 -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common 
-Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_struct_ufunc_tests.c clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_struct_ufunc_tests.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -o build/lib.macosx-10.15-x86_64-3.9/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so building 'numpy.core._operand_flag_tests' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers compile options: '-DNPY_INTERNAL_BUILD=1 -DHAVE_NPY_CONFIG_H=1 -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' clang: build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_operand_flag_tests.c clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/core/src/umath/_operand_flag_tests.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -o build/lib.macosx-10.15-x86_64-3.9/numpy/core/_operand_flag_tests.cpython-39-darwin.so building 'numpy.fft.fftpack_lite' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers creating build/temp.macosx-10.15-x86_64-3.9/numpy/fft compile options: '-Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common 
-Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' clang: numpy/fft/fftpack_litemodule.c clang: numpy/fft/fftpack.c clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/numpy/fft/fftpack_litemodule.o build/temp.macosx-10.15-x86_64-3.9/numpy/fft/fftpack.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -o build/lib.macosx-10.15-x86_64-3.9/numpy/fft/fftpack_lite.cpython-39-darwin.so building 'numpy.linalg.lapack_lite' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers creating build/temp.macosx-10.15-x86_64-3.9/numpy/linalg creating build/temp.macosx-10.15-x86_64-3.9/numpy/linalg/lapack_lite compile options: '-DNO_ATLAS_INFO=3 -DHAVE_CBLAS -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' extra options: '-msse3 -I/System/Library/Frameworks/vecLib.framework/Headers' clang: numpy/linalg/lapack_litemodule.c clang: numpy/linalg/lapack_lite/python_xerbla.c clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/numpy/linalg/lapack_litemodule.o build/temp.macosx-10.15-x86_64-3.9/numpy/linalg/lapack_lite/python_xerbla.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -o build/lib.macosx-10.15-x86_64-3.9/numpy/linalg/lapack_lite.cpython-39-darwin.so -Wl,-framework -Wl,Accelerate building 'numpy.linalg._umath_linalg' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers creating build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/linalg compile options: '-DNO_ATLAS_INFO=3 -DHAVE_CBLAS -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include 
-I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' extra options: '-msse3 -I/System/Library/Frameworks/vecLib.framework/Headers' clang: build/src.macosx-10.15-x86_64-3.9/numpy/linalg/umath_linalg.c numpy/linalg/umath_linalg.c.src:735:32: warning: unknown warning group '-Wmaybe-uninitialized', ignored [-Wunknown-warning-option] #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" ^ numpy/linalg/umath_linalg.c.src:541:1: warning: unused function 'dump_ufunc_object' [-Wunused-function] dump_ufunc_object(PyUFuncObject* ufunc) ^ numpy/linalg/umath_linalg.c.src:566:1: warning: unused function 'dump_linearize_data' [-Wunused-function] dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params) ^ numpy/linalg/umath_linalg.c.src:602:1: warning: unused function 'dump_FLOAT_matrix' [-Wunused-function] dump_FLOAT_matrix(const char* name, ^ numpy/linalg/umath_linalg.c.src:602:1: warning: unused function 'dump_DOUBLE_matrix' [-Wunused-function] dump_DOUBLE_matrix(const char* name, ^ numpy/linalg/umath_linalg.c.src:602:1: warning: unused function 'dump_CFLOAT_matrix' [-Wunused-function] dump_CFLOAT_matrix(const char* name, ^ numpy/linalg/umath_linalg.c.src:602:1: warning: unused function 'dump_CDOUBLE_matrix' [-Wunused-function] dump_CDOUBLE_matrix(const char* name, ^ numpy/linalg/umath_linalg.c.src:865:1: warning: unused function 'zero_FLOAT_matrix' [-Wunused-function] zero_FLOAT_matrix(void *dst_in, const LINEARIZE_DATA_t* data) ^ numpy/linalg/umath_linalg.c.src:865:1: warning: unused function 'zero_DOUBLE_matrix' [-Wunused-function] zero_DOUBLE_matrix(void *dst_in, const LINEARIZE_DATA_t* data) ^ numpy/linalg/umath_linalg.c.src:865:1: warning: unused function 'zero_CFLOAT_matrix' [-Wunused-function] zero_CFLOAT_matrix(void *dst_in, const LINEARIZE_DATA_t* data) ^ numpy/linalg/umath_linalg.c.src:865:1: warning: unused function 'zero_CDOUBLE_matrix' [-Wunused-function] zero_CDOUBLE_matrix(void *dst_in, const LINEARIZE_DATA_t* data) ^ numpy/linalg/umath_linalg.c.src:1862:1: warning: unused function 'dump_geev_params' [-Wunused-function] dump_geev_params(const char *name, GEEV_PARAMS_t* params) ^ numpy/linalg/umath_linalg.c.src:2132:1: warning: unused function 'init_cgeev' [-Wunused-function] init_cgeev(GEEV_PARAMS_t* params, ^ numpy/linalg/umath_linalg.c.src:2213:1: warning: unused function 'process_cgeev_results' [-Wunused-function] process_cgeev_results(GEEV_PARAMS_t *NPY_UNUSED(params)) ^ numpy/linalg/umath_linalg.c.src:2376:1: warning: unused function 'dump_gesdd_params' [-Wunused-function] dump_gesdd_params(const char *name, ^ numpy/linalg/umath_linalg.c.src:2864:1: warning: unused function 'dump_gelsd_params' [-Wunused-function] dump_gelsd_params(const char *name, ^ 16 warnings generated. 
clang -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk build/temp.macosx-10.15-x86_64-3.9/build/src.macosx-10.15-x86_64-3.9/numpy/linalg/umath_linalg.o build/temp.macosx-10.15-x86_64-3.9/numpy/linalg/lapack_lite/python_xerbla.o -L/usr/local/lib -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/sqlite/lib -Lbuild/temp.macosx-10.15-x86_64-3.9 -lnpymath -o build/lib.macosx-10.15-x86_64-3.9/numpy/linalg/_umath_linalg.cpython-39-darwin.so -Wl,-framework -Wl,Accelerate building 'numpy.random.mtrand' extension compiling C sources C compiler: clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers creating build/temp.macosx-10.15-x86_64-3.9/numpy/random creating build/temp.macosx-10.15-x86_64-3.9/numpy/random/mtrand compile options: '-D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c' clang: numpy/random/mtrand/mtrand.c clang: numpy/random/mtrand/initarray.cclang: numpy/random/mtrand/randomkit.c clang: numpy/random/mtrand/distributions.c numpy/random/mtrand/mtrand.c:40400:34: error: no member named 'tp_print' in 'struct _typeobject' __pyx_type_6mtrand_RandomState.tp_print = 0; ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ numpy/random/mtrand/mtrand.c:42673:22: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:261:7: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op) : \ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42673:22: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 
1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:262:14: note: expanded from macro 'PyUnicode_GET_SIZE' ((void)PyUnicode_AsUnicode(_PyObject_CAST(op)),\ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:580:1: note: 'PyUnicode_AsUnicode' has been explicitly marked deprecated here Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicode( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42673:22: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:264:8: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op))) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42673:52: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:261:7: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op) : \ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42673:52: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 
1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:262:14: note: expanded from macro 'PyUnicode_GET_SIZE' ((void)PyUnicode_AsUnicode(_PyObject_CAST(op)),\ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:580:1: note: 'PyUnicode_AsUnicode' has been explicitly marked deprecated here Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicode( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42673:52: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:264:8: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op))) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42689:26: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:261:7: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op) : \ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42689:26: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:262:14: note: expanded from macro 'PyUnicode_GET_SIZE' ((void)PyUnicode_AsUnicode(_PyObject_CAST(op)),\ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:580:1: note: 'PyUnicode_AsUnicode' has been explicitly marked deprecated here Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicode( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42689:26: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:264:8: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op))) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42689:59: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:261:7: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op) : \ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42689:59: warning: 'PyUnicode_AsUnicode' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:262:14: note: expanded from macro 'PyUnicode_GET_SIZE' ((void)PyUnicode_AsUnicode(_PyObject_CAST(op)),\ ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:580:1: note: 'PyUnicode_AsUnicode' has been explicitly marked deprecated here Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicode( ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ numpy/random/mtrand/mtrand.c:42689:59: warning: '_PyUnicode_get_wstr_length' is deprecated [-Wdeprecated-declarations] (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:264:8: note: expanded from macro 'PyUnicode_GET_SIZE' PyUnicode_WSTR_LENGTH(op))) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:451:35: note: expanded from macro 'PyUnicode_WSTR_LENGTH' #define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/cpython/unicodeobject.h:445:1: note: '_PyUnicode_get_wstr_length' has been explicitly marked deprecated here Py_DEPRECATED(3.3) ^ /usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9/pyport.h:508:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ 12 warnings and 1 error generated. error: Command "clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/usr/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX10.15.sdk/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1 -D_LARGEFILE64_SOURCE=1 -Inumpy/core/include -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/include/numpy -Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/usr/local/include -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/sqlite/include -I/Users/destiny/Downloads/env/include -I/usr/local/Cellar/python@3.9/3.9.0_1/Frameworks/Python.framework/Versions/3.9/include/python3.9 -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/common -Ibuild/src.macosx-10.15-x86_64-3.9/numpy/core/src/npymath -c numpy/random/mtrand/mtrand.c -o build/temp.macosx-10.15-x86_64-3.9/numpy/random/mtrand/mtrand.o -MMD -MF build/temp.macosx-10.15-x86_64-3.9/numpy/random/mtrand/mtrand.o.d" failed with exit status 1
{ "avatar_url": "https://avatars.githubusercontent.com/u/12635475?v=4", "events_url": "https://api.github.com/users/glee2429/events{/privacy}", "followers_url": "https://api.github.com/users/glee2429/followers", "following_url": "https://api.github.com/users/glee2429/following{/other_user}", "gists_url": "https://api.github.com/users/glee2429/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/glee2429", "id": 12635475, "login": "glee2429", "node_id": "MDQ6VXNlcjEyNjM1NDc1", "organizations_url": "https://api.github.com/users/glee2429/orgs", "received_events_url": "https://api.github.com/users/glee2429/received_events", "repos_url": "https://api.github.com/users/glee2429/repos", "site_admin": false, "starred_url": "https://api.github.com/users/glee2429/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/glee2429/subscriptions", "type": "User", "url": "https://api.github.com/users/glee2429", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1696/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1696/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
14:41:28
https://api.github.com/repos/huggingface/datasets/issues/1687
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1687/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1687/comments
https://api.github.com/repos/huggingface/datasets/issues/1687/events
https://github.com/huggingface/datasets/issues/1687
779,004,894
MDU6SXNzdWU3NzkwMDQ4OTQ=
1,687
Question: Shouldn't .info be a part of DatasetDict?
{ "avatar_url": "https://avatars.githubusercontent.com/u/23721977?v=4", "events_url": "https://api.github.com/users/KennethEnevoldsen/events{/privacy}", "followers_url": "https://api.github.com/users/KennethEnevoldsen/followers", "following_url": "https://api.github.com/users/KennethEnevoldsen/following{/other_user}", "gists_url": "https://api.github.com/users/KennethEnevoldsen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/KennethEnevoldsen", "id": 23721977, "login": "KennethEnevoldsen", "node_id": "MDQ6VXNlcjIzNzIxOTc3", "organizations_url": "https://api.github.com/users/KennethEnevoldsen/orgs", "received_events_url": "https://api.github.com/users/KennethEnevoldsen/received_events", "repos_url": "https://api.github.com/users/KennethEnevoldsen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/KennethEnevoldsen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/KennethEnevoldsen/subscriptions", "type": "User", "url": "https://api.github.com/users/KennethEnevoldsen", "user_view_type": "public" }
[]
open
false
null
[]
[ "We could do something. There is a part of `.info` which is split specific (cache files, split instructions) but maybe if could be made to work.", "Yes this was kinda the idea I was going for. DatasetDict.info would be the shared info amongs the datasets (maybe even some info on how they differ). " ]
2021-01-05T13:08:41
2021-01-07T10:18:06
null
CONTRIBUTOR
null
null
null
null
Currently, only `Dataset` exposes .info and .features, but many datasets consist of the standard splits (train, test), so the underlying information is the same (or at least should be) across the splits. For instance: ``` >>> ds = datasets.load_dataset("conll2002", "es") >>> ds.info Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: 'DatasetDict' object has no attribute 'info' ``` I could imagine that this wouldn't work for dataset dicts which hold entirely different datasets (multimodal datasets), but it seems odd that splits of the same dataset are treated the same way as what are essentially different datasets. Intuitively, it would also make sense that a dataset supplied via load_dataset have a common .info which covers the entire dataset. It is entirely possible that I am missing another perspective.
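A minimal sketch of the behaviour described above, assuming only the public `datasets` API (the `conll2002` config name is taken from the issue): the info and features live on each split today, so a shared view has to be assembled by hand.

```python
from datasets import load_dataset

# Per-split info/features are available today; a DatasetDict-level .info is not.
ds = load_dataset("conll2002", "es")

print(ds["train"].info.description)  # works: split-level DatasetInfo
print(ds["train"].features)          # works: split-level Features

# A shared .info would only be well-defined when the splits agree,
# which can be checked explicitly across the DatasetDict:
assert all(split.features == ds["train"].features for split in ds.values())
```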
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1687/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1687/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/1686
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1686/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1686/comments
https://api.github.com/repos/huggingface/datasets/issues/1686/events
https://github.com/huggingface/datasets/issues/1686
778,921,684
MDU6SXNzdWU3Nzg5MjE2ODQ=
1,686
Dataset Error: DaNE contains empty samples at the end
{ "avatar_url": "https://avatars.githubusercontent.com/u/23721977?v=4", "events_url": "https://api.github.com/users/KennethEnevoldsen/events{/privacy}", "followers_url": "https://api.github.com/users/KennethEnevoldsen/followers", "following_url": "https://api.github.com/users/KennethEnevoldsen/following{/other_user}", "gists_url": "https://api.github.com/users/KennethEnevoldsen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/KennethEnevoldsen", "id": 23721977, "login": "KennethEnevoldsen", "node_id": "MDQ6VXNlcjIzNzIxOTc3", "organizations_url": "https://api.github.com/users/KennethEnevoldsen/orgs", "received_events_url": "https://api.github.com/users/KennethEnevoldsen/received_events", "repos_url": "https://api.github.com/users/KennethEnevoldsen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/KennethEnevoldsen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/KennethEnevoldsen/subscriptions", "type": "User", "url": "https://api.github.com/users/KennethEnevoldsen", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Thanks for reporting, I opened a PR to fix that", "One the PR is merged the fix will be available in the next release of `datasets`.\r\n\r\nIf you don't want to wait the next release you can still load the script from the master branch with\r\n\r\n```python\r\nload_dataset(\"dane\", script_version=\"master\")\r\n```", "If you have other questions feel free to reopen :) " ]
2021-01-05T11:54:26
2021-01-05T14:01:09
2021-01-05T14:00:13
CONTRIBUTOR
null
null
null
null
The dataset DaNE contains empty samples at the end. They are naturally easy to remove using a filter, but should probably not be there to begin with, as they can cause errors. ```python >>> import datasets [...] >>> dataset = datasets.load_dataset("dane") [...] >>> dataset["test"][-1] {'dep_ids': [], 'dep_labels': [], 'lemmas': [], 'morph_tags': [], 'ner_tags': [], 'pos_tags': [], 'sent_id': '', 'text': '', 'tok_ids': [], 'tokens': []} >>> dataset["train"][-1] {'dep_ids': [], 'dep_labels': [], 'lemmas': [], 'morph_tags': [], 'ner_tags': [], 'pos_tags': [], 'sent_id': '', 'text': '', 'tok_ids': [], 'tokens': []} ``` Best, Kenneth
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1686/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1686/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2:05:47
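Until the upstream fix lands, the empty trailing samples reported above can be dropped with a filter; a short sketch, assuming the column names shown in the issue:

```python
from datasets import load_dataset

dataset = load_dataset("dane")

# Keep only examples that actually contain tokens; the offending rows have text == "" and tokens == [].
dataset = dataset.filter(lambda example: len(example["tokens"]) > 0)

print(dataset["test"][-1]["text"])  # no longer an empty string
```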
https://api.github.com/repos/huggingface/datasets/issues/1683
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1683/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1683/comments
https://api.github.com/repos/huggingface/datasets/issues/1683/events
https://github.com/huggingface/datasets/issues/1683
778,287,612
MDU6SXNzdWU3NzgyODc2MTI=
1,683
`ArrowInvalid` occurs while running `Dataset.map()` function for DPRContext
{ "avatar_url": "https://avatars.githubusercontent.com/u/6608232?v=4", "events_url": "https://api.github.com/users/abarbosa94/events{/privacy}", "followers_url": "https://api.github.com/users/abarbosa94/followers", "following_url": "https://api.github.com/users/abarbosa94/following{/other_user}", "gists_url": "https://api.github.com/users/abarbosa94/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/abarbosa94", "id": 6608232, "login": "abarbosa94", "node_id": "MDQ6VXNlcjY2MDgyMzI=", "organizations_url": "https://api.github.com/users/abarbosa94/orgs", "received_events_url": "https://api.github.com/users/abarbosa94/received_events", "repos_url": "https://api.github.com/users/abarbosa94/repos", "site_admin": false, "starred_url": "https://api.github.com/users/abarbosa94/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abarbosa94/subscriptions", "type": "User", "url": "https://api.github.com/users/abarbosa94", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Looks like the mapping function returns a dictionary with a 768-dim array in the `embeddings` field. Since the map is batched, we actually expect the `embeddings` field to be an array of shape (batch_size, 768) to have one embedding per example in the batch.\r\n\r\nTo fix that can you try to remove one of the `[0]` ? In my opinion you only need one of them, not two.", "It makes sense :D\r\n\r\nIt seems to work! Thanks a lot :))\r\n\r\nClosing the issue" ]
2021-01-04T18:47:53
2021-01-04T19:04:45
2021-01-04T19:04:45
CONTRIBUTOR
null
null
null
null
It seems to fail the final batch ): steps to reproduce: ``` from datasets import load_dataset from elasticsearch import Elasticsearch import torch from transformers import file_utils, set_seed from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast MAX_SEQ_LENGTH = 256 ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", cache_dir="../datasets/") ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained( "facebook/dpr-ctx_encoder-single-nq-base", cache_dir="..datasets/" ) dataset = load_dataset('text', data_files='data/raw/ARC_Corpus.txt', cache_dir='../datasets') torch.set_grad_enabled(False) ds_with_embeddings = dataset.map( lambda example: { 'embeddings': ctx_encoder( **ctx_tokenizer( example["text"], padding='max_length', truncation=True, max_length=MAX_SEQ_LENGTH, return_tensors="pt" ) )[0][0].numpy(), }, batched=True, load_from_cache_file=False, batch_size=1000 ) ``` ARC Corpus can be obtained from [here](https://ai2-datasets.s3-us-west-2.amazonaws.com/arc/ARC-V1-Feb2018.zip) And then the error: ``` --------------------------------------------------------------------------- ArrowInvalid Traceback (most recent call last) <ipython-input-13-67d139bb2ed3> in <module> 14 batched=True, 15 load_from_cache_file=False, ---> 16 batch_size=1000 17 ) ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/dataset_dict.py in map(self, function, with_indices, input_columns, batched, batch_size, remove_columns, keep_in_memory, load_from_cache_file, cache_file_names, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc) 301 num_proc=num_proc, 302 ) --> 303 for k, dataset in self.items() 304 } 305 ) ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/dataset_dict.py in <dictcomp>(.0) 301 num_proc=num_proc, 302 ) --> 303 for k, dataset in self.items() 304 } 305 ) ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint) 1257 fn_kwargs=fn_kwargs, 1258 new_fingerprint=new_fingerprint, -> 1259 update_data=update_data, 1260 ) 1261 else: ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/arrow_dataset.py in wrapper(*args, **kwargs) 155 } 156 # apply actual function --> 157 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 158 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 159 # re-apply format to the output ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/fingerprint.py in wrapper(*args, **kwargs) 161 # Call actual function 162 --> 163 out = func(self, *args, **kwargs) 164 165 # Update fingerprint of in-place transforms + update in-place history of transforms ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/arrow_dataset.py in _map_single(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, update_data) 1526 if update_data: 1527 batch = cast_to_python_objects(batch) -> 1528 writer.write_batch(batch) 1529 if update_data: 1530 writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/arrow_writer.py in write_batch(self, batch_examples, writer_batch_size) 276 typed_sequence = TypedSequence(batch_examples[col], type=col_type, try_type=col_try_type) 277 typed_sequence_examples[col] = typed_sequence --> 278 pa_table = pa.Table.from_pydict(typed_sequence_examples) 279 self.write_table(pa_table) 280 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/pyarrow/table.pxi in pyarrow.lib.Table.from_pydict() ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/pyarrow/table.pxi in pyarrow.lib.Table.from_arrays() ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/pyarrow/table.pxi in pyarrow.lib.Table.validate() ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status() ArrowInvalid: Column 1 named text expected length 768 but got length 1000 ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/6608232?v=4", "events_url": "https://api.github.com/users/abarbosa94/events{/privacy}", "followers_url": "https://api.github.com/users/abarbosa94/followers", "following_url": "https://api.github.com/users/abarbosa94/following{/other_user}", "gists_url": "https://api.github.com/users/abarbosa94/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/abarbosa94", "id": 6608232, "login": "abarbosa94", "node_id": "MDQ6VXNlcjY2MDgyMzI=", "organizations_url": "https://api.github.com/users/abarbosa94/orgs", "received_events_url": "https://api.github.com/users/abarbosa94/received_events", "repos_url": "https://api.github.com/users/abarbosa94/repos", "site_admin": false, "starred_url": "https://api.github.com/users/abarbosa94/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abarbosa94/subscriptions", "type": "User", "url": "https://api.github.com/users/abarbosa94", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1683/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1683/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:16:52
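The resolution in the comments above is to return one embedding per example when `batched=True`, i.e. an array of shape `(batch_size, 768)` instead of a single 768-dim vector. A hedged sketch of the corrected mapping call (model names and the data file path are copied from the issue and may not exist in your environment):

```python
import torch
from datasets import load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast

MAX_SEQ_LENGTH = 256
ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

dataset = load_dataset("text", data_files="data/raw/ARC_Corpus.txt")

torch.set_grad_enabled(False)
ds_with_embeddings = dataset.map(
    lambda batch: {
        # Keep the full (batch_size, 768) pooled output: only one [0], not [0][0].
        "embeddings": ctx_encoder(
            **ctx_tokenizer(
                batch["text"],
                padding="max_length",
                truncation=True,
                max_length=MAX_SEQ_LENGTH,
                return_tensors="pt",
            )
        )[0].numpy(),
    },
    batched=True,
    batch_size=1000,
)
```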
https://api.github.com/repos/huggingface/datasets/issues/1681
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1681/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1681/comments
https://api.github.com/repos/huggingface/datasets/issues/1681/events
https://github.com/huggingface/datasets/issues/1681
777,644,163
MDU6SXNzdWU3Nzc2NDQxNjM=
1,681
Dataset "dane" missing
{ "avatar_url": "https://avatars.githubusercontent.com/u/23721977?v=4", "events_url": "https://api.github.com/users/KennethEnevoldsen/events{/privacy}", "followers_url": "https://api.github.com/users/KennethEnevoldsen/followers", "following_url": "https://api.github.com/users/KennethEnevoldsen/following{/other_user}", "gists_url": "https://api.github.com/users/KennethEnevoldsen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/KennethEnevoldsen", "id": 23721977, "login": "KennethEnevoldsen", "node_id": "MDQ6VXNlcjIzNzIxOTc3", "organizations_url": "https://api.github.com/users/KennethEnevoldsen/orgs", "received_events_url": "https://api.github.com/users/KennethEnevoldsen/received_events", "repos_url": "https://api.github.com/users/KennethEnevoldsen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/KennethEnevoldsen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/KennethEnevoldsen/subscriptions", "type": "User", "url": "https://api.github.com/users/KennethEnevoldsen", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @KennethEnevoldsen ,\r\nI think the issue might be that this dataset was added during the community sprint and has not been released yet. It will be available with the v2 of datasets.\r\nFor now, you should be able to load the datasets after installing the latest (master) version of datasets using pip:\r\npip install git+https://github.com/huggingface/datasets.git@master", "The `dane` dataset was added recently, that's why it wasn't available yet. We did an intermediate release today just before the v2.0.\r\n\r\nTo load it you can just update `datasets`\r\n```\r\npip install --upgrade datasets\r\n```\r\n\r\nand then you can load `dane` with\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndataset = load_dataset(\"dane\")\r\n```", "Thanks. Solved the problem." ]
2021-01-03T14:03:03
2021-01-05T08:35:35
2021-01-05T08:35:13
CONTRIBUTOR
null
null
null
null
the `dane` dataset appears to be missing in the latest version (1.1.3). ```python >>> import datasets >>> datasets.__version__ '1.1.3' >>> "dane" in datasets.list_datasets() True ``` As we can see, it should be present, but it doesn't seem to be findable when using `load_dataset`. ```python >>> datasets.load_dataset("dane") Traceback (most recent call last): File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 300, in cached_path output_path = get_from_cache( File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/dane/dane.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/load.py", line 278, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 300, in cached_path output_path = get_from_cache( File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/dane/dane.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/load.py", line 588, in load_dataset module_path, hash = prepare_module( File "/home/kenneth/.Envs/EDP/lib/python3.8/site-packages/datasets/load.py", line 280, in prepare_module raise FileNotFoundError( FileNotFoundError: Couldn't find file locally at dane/dane.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/dane/dane.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/dane/dane.py ``` This issue might be relevant to @ophelielacroix from the Alexandra Institut who created the data.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23721977?v=4", "events_url": "https://api.github.com/users/KennethEnevoldsen/events{/privacy}", "followers_url": "https://api.github.com/users/KennethEnevoldsen/followers", "following_url": "https://api.github.com/users/KennethEnevoldsen/following{/other_user}", "gists_url": "https://api.github.com/users/KennethEnevoldsen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/KennethEnevoldsen", "id": 23721977, "login": "KennethEnevoldsen", "node_id": "MDQ6VXNlcjIzNzIxOTc3", "organizations_url": "https://api.github.com/users/KennethEnevoldsen/orgs", "received_events_url": "https://api.github.com/users/KennethEnevoldsen/received_events", "repos_url": "https://api.github.com/users/KennethEnevoldsen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/KennethEnevoldsen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/KennethEnevoldsen/subscriptions", "type": "User", "url": "https://api.github.com/users/KennethEnevoldsen", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1681/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1681/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 18:32:10
https://api.github.com/repos/huggingface/datasets/issues/1679
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1679/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1679/comments
https://api.github.com/repos/huggingface/datasets/issues/1679/events
https://github.com/huggingface/datasets/issues/1679
777,587,792
MDU6SXNzdWU3Nzc1ODc3OTI=
1,679
Can't import cc100 dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/14968123?v=4", "events_url": "https://api.github.com/users/alighofrani95/events{/privacy}", "followers_url": "https://api.github.com/users/alighofrani95/followers", "following_url": "https://api.github.com/users/alighofrani95/following{/other_user}", "gists_url": "https://api.github.com/users/alighofrani95/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alighofrani95", "id": 14968123, "login": "alighofrani95", "node_id": "MDQ6VXNlcjE0OTY4MTIz", "organizations_url": "https://api.github.com/users/alighofrani95/orgs", "received_events_url": "https://api.github.com/users/alighofrani95/received_events", "repos_url": "https://api.github.com/users/alighofrani95/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alighofrani95/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alighofrani95/subscriptions", "type": "User", "url": "https://api.github.com/users/alighofrani95", "user_view_type": "public" }
[]
closed
false
null
[]
[ "cc100 was added recently, that's why it wasn't available yet.\r\n\r\nTo load it you can just update `datasets`\r\n```\r\npip install --upgrade datasets\r\n```\r\n\r\nand then you can load `cc100` with\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nlang = \"en\"\r\ndataset = load_dataset(\"cc100\", lang=lang, split=\"train\")\r\n```" ]
2021-01-03T07:12:56
2022-10-05T12:42:25
2022-10-05T12:42:25
NONE
null
null
null
null
There is an issue when importing the cc100 dataset. ``` from datasets import load_dataset dataset = load_dataset("cc100") ``` FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/cc100/cc100.py During handling of the above exception, another exception occurred: FileNotFoundError Traceback (most recent call last) FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/cc100/cc100.py During handling of the above exception, another exception occurred: FileNotFoundError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/datasets/load.py in prepare_module(path, script_version, download_config, download_mode, dataset, force_local_path, **download_kwargs) 280 raise FileNotFoundError( 281 "Couldn't find file locally at {}, or remotely at {} or {}".format( --> 282 combined_path, github_file_path, file_path 283 ) 284 ) FileNotFoundError: Couldn't find file locally at cc100/cc100.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/cc100/cc100.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/cc100/cc100.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1679/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1679/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
640 days, 5:29:29
https://api.github.com/repos/huggingface/datasets/issues/1675
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1675/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1675/comments
https://api.github.com/repos/huggingface/datasets/issues/1675/events
https://github.com/huggingface/datasets/issues/1675
777,367,320
MDU6SXNzdWU3NzczNjczMjA=
1,675
Add the 800GB Pile dataset?
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
[ "The pile dataset would be very nice.\r\nBenchmarks show that pile trained models achieve better results than most of actually trained models", "The pile can very easily be added and adapted using this [tfds implementation](https://github.com/EleutherAI/The-Pile/blob/master/the_pile/tfds_pile.py) from the repo. \r\n\r\nHowever, the question is whether you'd be ok with 800GB+ cached in your local disk, since the tfds implementation was designed to offload the storage to Google Cloud Storage.", "With the dataset streaming feature (see #2375) it will be more convenient to play with such big datasets :)\r\nI'm currently adding C4 (see #2511 ) but I can probably start working on this afterwards", "Hi folks! Just wanted to follow up on this -- would be really nice to get the Pile on HF Datasets... unclear if it would be easy to also add partitions of the Pile subject to the original 22 datasets used, but that would be nice too!", "Hi folks, thanks to some awesome work by @lhoestq and @albertvillanova you can now stream the Pile as follows:\r\n\r\n```python\r\n# Install master branch of `datasets`\r\npip install git+https://github.com/huggingface/datasets.git#egg=datasets[streaming]\r\npip install zstandard\r\n\r\nfrom datasets import load_dataset\r\n\r\ndset = load_dataset(\"json\", data_files=\"https://the-eye.eu/public/AI/pile/train/00.jsonl.zst\", streaming=True, split=\"train\")\r\nnext(iter(dset))\r\n# {'meta': {'pile_set_name': 'Pile-CC'},\r\n# 'text': 'It is done, and submitted. You can play “Survival of the Tastiest” on Android, and on the web ... '}\r\n```\r\n\r\nNext step is to add the Pile as a \"canonical\" dataset that can be streamed without specifying the file names explicitly :)", "> Hi folks! Just wanted to follow up on this -- would be really nice to get the Pile on HF Datasets... unclear if it would be easy to also add partitions of the Pile subject to the original 22 datasets used, but that would be nice too!\r\n\r\nHi @siddk thanks to a tip from @richarddwang it seems we can access some of the partitions that EleutherAI created for the Pile [here](https://the-eye.eu/public/AI/pile_preliminary_components/). What's missing are links to the preprocessed versions of pre-existing datasets like DeepMind Mathematics and OpenSubtitles, but worst case we do the processing ourselves and host these components on the Hub.\r\n\r\nMy current idea is that we could provide 23 configs: one for each of the 22 datasets and an `all` config that links to the train / dev / test splits that EleutherAI released [here](https://the-eye.eu/public/AI/pile/), e.g.\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\n# Load a single component\r\nyoutube_subtitles = load_dataset(\"the_pile\", \"youtube_subtitles\")\r\n# Load the train / dev / test splits of the whole corpus\r\ndset = load_dataset(\"the_pile\", \"all\")\r\n```\r\n\r\nIdeally we'd like everything to be compatible with the streaming API and there's ongoing work by @albertvillanova to make this happen for the various compression algorithms.\r\n\r\ncc @lhoestq ", "Ah I just saw that @lhoestq is already thinking about the specifying of one or more subsets in [this PR](https://github.com/huggingface/datasets/pull/2817#issuecomment-901874049) :)" ]
2021-01-01T22:58:12
2021-12-01T15:29:07
2021-12-01T15:29:07
MEMBER
null
null
null
null
## Adding a Dataset - **Name:** The Pile - **Description:** The Pile is a 825 GiB diverse, open source language modelling data set that consists of 22 smaller, high-quality datasets combined together. See [here](https://twitter.com/nabla_theta/status/1345130408170541056?s=20) for the Twitter announcement - **Paper:** https://pile.eleuther.ai/paper.pdf - **Data:** https://pile.eleuther.ai/ - **Motivation:** Enables hardcore (GPT-3 scale!) language modelling ## Remarks Given the extreme size of this dataset, I'm not sure how feasible this will be to include in `datasets` 🤯 . I'm also unsure how many `datasets` users are pretraining LMs, so the usage of this dataset may not warrant the effort to integrate it.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 5, "-1": 0, "confused": 1, "eyes": 2, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 5, "total_count": 13, "url": "https://api.github.com/repos/huggingface/datasets/issues/1675/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1675/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
333 days, 16:30:55
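The comments above show how to stream the Pile rather than downloading 800+ GB locally. A small sketch of peeking at a few streamed records, reusing the shard URL quoted in the comment (the URL may have moved since) and assuming `zstandard` is installed:

```python
from itertools import islice
from datasets import load_dataset

# Stream one shard of the Pile as JSON lines without materialising it on disk.
pile = load_dataset(
    "json",
    data_files="https://the-eye.eu/public/AI/pile/train/00.jsonl.zst",
    streaming=True,
    split="train",
)

for example in islice(pile, 3):
    print(example["meta"], example["text"][:80])
```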
https://api.github.com/repos/huggingface/datasets/issues/1674
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1674/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1674/comments
https://api.github.com/repos/huggingface/datasets/issues/1674/events
https://github.com/huggingface/datasets/issues/1674
777,321,840
MDU6SXNzdWU3NzczMjE4NDA=
1,674
dutch_social can't be loaded
{ "avatar_url": "https://avatars.githubusercontent.com/u/10134844?v=4", "events_url": "https://api.github.com/users/koenvandenberge/events{/privacy}", "followers_url": "https://api.github.com/users/koenvandenberge/followers", "following_url": "https://api.github.com/users/koenvandenberge/following{/other_user}", "gists_url": "https://api.github.com/users/koenvandenberge/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/koenvandenberge", "id": 10134844, "login": "koenvandenberge", "node_id": "MDQ6VXNlcjEwMTM0ODQ0", "organizations_url": "https://api.github.com/users/koenvandenberge/orgs", "received_events_url": "https://api.github.com/users/koenvandenberge/received_events", "repos_url": "https://api.github.com/users/koenvandenberge/repos", "site_admin": false, "starred_url": "https://api.github.com/users/koenvandenberge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/koenvandenberge/subscriptions", "type": "User", "url": "https://api.github.com/users/koenvandenberge", "user_view_type": "public" }
[]
closed
false
null
[]
[ "exactly the same issue in some other datasets.\r\nDid you find any solution??\r\n", "Hi @koenvandenberge and @alighofrani95!\r\nThe datasets you're experiencing issues with were most likely added recently to the `datasets` library, meaning they have not been released yet. They will be released with the v2 of the library.\r\nMeanwhile, you can still load the datasets using one of the techniques described in this issue: #1641 \r\nLet me know if this helps!", "Maybe we should do a small release on Monday in the meantime @lhoestq ?", "Yes sure !", "I just did the release :)\r\n\r\nTo load it you can just update `datasets`\r\n```\r\npip install --upgrade datasets\r\n```\r\n\r\nand then you can load `dutch_social` with\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndataset = load_dataset(\"dutch_social\")\r\n```", "@lhoestq could you also shed light on the Hindi Wikipedia Dataset for issue number #1673. Will this also be available in the new release that you committed recently?", "The issue is different for this one, let me give more details in the issue", "Okay. Could you comment on the #1673 thread? Actually @thomwolf had commented that if i use datasets library from source, it would allow me to download the Hindi Wikipedia Dataset but even the version 1.1.3 gave me the same issue. The details are there in the issue #1673 thread." ]
2021-01-01T17:37:08
2022-10-05T13:03:26
2022-10-05T13:03:26
NONE
null
null
null
null
Hi all, I'm trying to import the `dutch_social` dataset described [here](https://huggingface.co/datasets/dutch_social). However, the code that should load the data doesn't seem to be working, in particular because the corresponding files can't be found at the provided links. ``` (base) Koens-MacBook-Pro:~ koenvandenberge$ python Python 3.7.4 (default, Aug 13 2019, 15:17:50) [Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from datasets import load_dataset dataset = load_dataset( 'dutch_social') >>> dataset = load_dataset( ... 'dutch_social') Traceback (most recent call last): File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/dutch_social/dutch_social.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 278, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/dutch_social/dutch_social.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 589, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/Users/koenvandenberge/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 282, in prepare_module combined_path, github_file_path, file_path FileNotFoundError: Couldn't find file locally at dutch_social/dutch_social.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/dutch_social/dutch_social.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/dutch_social/dutch_social.py ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1674/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1674/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
641 days, 19:26:18
https://api.github.com/repos/huggingface/datasets/issues/1673
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1673/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1673/comments
https://api.github.com/repos/huggingface/datasets/issues/1673/events
https://github.com/huggingface/datasets/issues/1673
777,263,651
MDU6SXNzdWU3NzcyNjM2NTE=
1,673
Unable to Download Hindi Wikipedia Dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/30871963?v=4", "events_url": "https://api.github.com/users/aditya3498/events{/privacy}", "followers_url": "https://api.github.com/users/aditya3498/followers", "following_url": "https://api.github.com/users/aditya3498/following{/other_user}", "gists_url": "https://api.github.com/users/aditya3498/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/aditya3498", "id": 30871963, "login": "aditya3498", "node_id": "MDQ6VXNlcjMwODcxOTYz", "organizations_url": "https://api.github.com/users/aditya3498/orgs", "received_events_url": "https://api.github.com/users/aditya3498/received_events", "repos_url": "https://api.github.com/users/aditya3498/repos", "site_admin": false, "starred_url": "https://api.github.com/users/aditya3498/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aditya3498/subscriptions", "type": "User", "url": "https://api.github.com/users/aditya3498", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Currently this dataset is only available when the library is installed from source since it was added after the last release.\r\n\r\nWe pin the dataset version with the library version so that people can have a reproducible dataset and processing when pinning the library.\r\n\r\nWe'll see if we can provide access to newer datasets with a warning that they are newer than your library version, that would help in cases like yours.", "So for now, should i try and install the library from source and then try out the same piece of code? Will it work then, considering both the versions will match then?", "Yes", "Hey, so i tried installing the library from source using the commands : **git clone https://github.com/huggingface/datasets**, **cd datasets** and then **pip3 install -e .**. But i still am facing the same error that file is not found. Please advise.\r\n\r\nThe Datasets library version now is 1.1.3 by installing from source as compared to the earlier 1.0.3 that i had loaded using pip command but I am still getting same error\r\n\r\n![Error](https://user-images.githubusercontent.com/30871963/103479005-69f3b080-4df0-11eb-83ae-58d7bb56a90e.png)\r\n", "Looks like the wikipedia dump for hindi at the date of 05/05/2020 is not available anymore.\r\nYou can try to load a more recent version of wikipedia\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nd = load_dataset(\"wikipedia\", language=\"hi\", date=\"20210101\", split=\"train\", beam_runner=\"DirectRunner\")\r\n```", "Okay, thank you so much" ]
2021-01-01T10:52:53
2021-01-05T10:22:12
2021-01-05T10:22:12
NONE
null
null
null
null
I used the datasets library in Python to load the wikipedia dataset with the Hindi config 20200501.hi, along with beam_runner='DirectRunner', and it keeps giving me an error that the file is not found. I have attached screenshots of both the code and the error. Please help me understand how to resolve this issue. ![Code](https://user-images.githubusercontent.com/30871963/103437466-1f3a3300-4c4e-11eb-9d54-fc9601abfeec.png) ![Error](https://user-images.githubusercontent.com/30871963/103437407-7ee40e80-4c4d-11eb-8151-a86eb664e6be.png)
{ "avatar_url": "https://avatars.githubusercontent.com/u/30871963?v=4", "events_url": "https://api.github.com/users/aditya3498/events{/privacy}", "followers_url": "https://api.github.com/users/aditya3498/followers", "following_url": "https://api.github.com/users/aditya3498/following{/other_user}", "gists_url": "https://api.github.com/users/aditya3498/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/aditya3498", "id": 30871963, "login": "aditya3498", "node_id": "MDQ6VXNlcjMwODcxOTYz", "organizations_url": "https://api.github.com/users/aditya3498/orgs", "received_events_url": "https://api.github.com/users/aditya3498/received_events", "repos_url": "https://api.github.com/users/aditya3498/repos", "site_admin": false, "starred_url": "https://api.github.com/users/aditya3498/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aditya3498/subscriptions", "type": "User", "url": "https://api.github.com/users/aditya3498", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1673/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1673/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3 days, 23:29:19
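Following the suggestion in the last comment above, a hedged sketch of loading a Wikipedia dump date that is still hosted, with the Beam dependency made explicit (the chosen date and the continued availability of that dump are assumptions):

```python
# pip install apache-beam  # assumption: needed for Beam-based builders such as wikipedia
from datasets import load_dataset

# Load a more recent Hindi dump than the removed 20200501 snapshot.
wiki_hi = load_dataset(
    "wikipedia",
    language="hi",
    date="20210101",
    split="train",
    beam_runner="DirectRunner",
)
print(wiki_hi[0]["title"])
```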
https://api.github.com/repos/huggingface/datasets/issues/1672
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1672/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1672/comments
https://api.github.com/repos/huggingface/datasets/issues/1672/events
https://github.com/huggingface/datasets/issues/1672
777,258,941
MDU6SXNzdWU3NzcyNTg5NDE=
1,672
load_dataset hang on file_lock
{ "avatar_url": "https://avatars.githubusercontent.com/u/69860107?v=4", "events_url": "https://api.github.com/users/tomacai/events{/privacy}", "followers_url": "https://api.github.com/users/tomacai/followers", "following_url": "https://api.github.com/users/tomacai/following{/other_user}", "gists_url": "https://api.github.com/users/tomacai/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tomacai", "id": 69860107, "login": "tomacai", "node_id": "MDQ6VXNlcjY5ODYwMTA3", "organizations_url": "https://api.github.com/users/tomacai/orgs", "received_events_url": "https://api.github.com/users/tomacai/received_events", "repos_url": "https://api.github.com/users/tomacai/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tomacai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tomacai/subscriptions", "type": "User", "url": "https://api.github.com/users/tomacai", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Can you try to upgrade to a more recent version of datasets?", "Thank, upgrading to 1.1.3 resolved the issue.", "Having the same issue with `datasets 1.1.3` of `1.5.0` (both tracebacks look the same) and `kilt_wikipedia`, Ubuntu 20.04\r\n\r\n```py\r\nIn [1]: from datasets import load_dataset \r\n\r\nIn [2]: wikipedia = load_dataset('kilt_wikipedia')['full'] \r\nDownloading: 7.37kB [00:00, 2.74MB/s] \r\nDownloading: 3.33kB [00:00, 1.44MB/s] \r\n^C---------------------------------------------------------------------------\r\nOSError Traceback (most recent call last)\r\n~/anaconda3/envs/transformers2/lib/python3.7/site-packages/datasets/utils/filelock.py in _acquire(self)\r\n 380 try:\r\n--> 381 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\r\n 382 except (IOError, OSError):\r\n\r\nOSError: [Errno 37] No locks available\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nKeyboardInterrupt Traceback (most recent call last)\r\n<ipython-input-2-f412d3d46ec9> in <module>\r\n----> 1 wikipedia = load_dataset('kilt_wikipedia')['full']\r\n\r\n~/anaconda3/envs/transformers2/lib/python3.7/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, sav\r\ne_infos, script_version, **config_kwargs)\r\n 601 hash=hash,\r\n 602 features=features,\r\n--> 603 **config_kwargs,\r\n 604 )\r\n 605 \r\n\r\n~/anaconda3/envs/transformers2/lib/python3.7/site-packages/datasets/builder.py in __init__(self, *args, **kwargs)\r\n 841 def __init__(self, *args, **kwargs):\r\n 842 self._writer_batch_size = kwargs.pop(\"writer_batch_size\", self._writer_batch_size)\r\n--> 843 super(GeneratorBasedBuilder, self).__init__(*args, **kwargs)\r\n 844 \r\n 845 @abc.abstractmethod\r\n\r\n~/anaconda3/envs/transformers2/lib/python3.7/site-packages/datasets/builder.py in __init__(self, cache_dir, name, hash, features, **config_kwargs)\r\n 174 os.makedirs(self._cache_dir_root, exist_ok=True)\r\n 175 lock_path = os.path.join(self._cache_dir_root, self._cache_dir.replace(os.sep, \"_\") + \".lock\")\r\n--> 176 with FileLock(lock_path):\r\n 177 if os.path.exists(self._cache_dir): # check if data exist\r\n 178 if len(os.listdir(self._cache_dir)) > 0:\r\n\r\n~/anaconda3/envs/transformers2/lib/python3.7/site-packages/datasets/utils/filelock.py in __enter__(self)\r\n 312 \r\n 313 def __enter__(self):\r\n--> 314 self.acquire()\r\n 315 return self\r\n 316 \r\n\r\n~/anaconda3/envs/transformers2/lib/python3.7/site-packages/datasets/utils/filelock.py in acquire(self, timeout, poll_intervall)\r\n 261 if not self.is_locked:\r\n 262 logger().debug(\"Attempting to acquire lock %s on %s\", lock_id, lock_filename)\r\n--> 263 self._acquire()\r\n 264 \r\n 265 if self.is_locked:\r\n\r\n~/anaconda3/envs/transformers2/lib/python3.7/site-packages/datasets/utils/filelock.py in _acquire(self)\r\n 379 \r\n 380 try:\r\n--> 381 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\r\n 382 except (IOError, OSError):\r\n 383 os.close(fd)\r\n\r\nKeyboardInterrupt: \r\n\r\n```" ]
2021-01-01T10:25:07
2021-03-31T16:24:13
2021-01-01T11:47:36
NONE
null
null
null
null
I am trying to load the squad dataset. Fails on Windows 10 but succeeds in Colab. Transformers: 3.3.1 Datasets: 1.0.2 Windows 10 (also tested in WSL) ``` datasets.logging.set_verbosity_debug() datasets. train_dataset = load_dataset('squad', split='train') valid_dataset = load_dataset('squad', split='validation') train_dataset.features ``` ``` https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad/squad.py not found in cache or force_download set to True, downloading to C:\Users\simpl\.cache\huggingface\datasets\tmpzj_o_6u7 Downloading: 5.24k/? [00:00<00:00, 134kB/s] storing https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad/squad.py in cache at C:\Users\simpl\.cache\huggingface\datasets\f6877c8d2e01e8fcb60dc101be28b54a7522feac756deb9ac5c39c6d8ebef1ce.85f43de978b9b25921cb78d7a2f2b350c04acdbaedb9ecb5f7101cd7c0950e68.py creating metadata file for C:\Users\simpl\.cache\huggingface\datasets\f6877c8d2e01e8fcb60dc101be28b54a7522feac756deb9ac5c39c6d8ebef1ce.85f43de978b9b25921cb78d7a2f2b350c04acdbaedb9ecb5f7101cd7c0950e68.py Checking C:\Users\simpl\.cache\huggingface\datasets\f6877c8d2e01e8fcb60dc101be28b54a7522feac756deb9ac5c39c6d8ebef1ce.85f43de978b9b25921cb78d7a2f2b350c04acdbaedb9ecb5f7101cd7c0950e68.py for additional imports. Found main folder for dataset https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad/squad.py at C:\Users\simpl\.cache\huggingface\modules\datasets_modules\datasets\squad Found specific version folder for dataset https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad/squad.py at C:\Users\simpl\.cache\huggingface\modules\datasets_modules\datasets\squad\1244d044b266a5e4dbd4174d23cb995eead372fbca31a03edc3f8a132787af41 Found script file from https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad/squad.py to C:\Users\simpl\.cache\huggingface\modules\datasets_modules\datasets\squad\1244d044b266a5e4dbd4174d23cb995eead372fbca31a03edc3f8a132787af41\squad.py Couldn't find dataset infos file at https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad\dataset_infos.json Found metadata file for dataset https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad/squad.py at C:\Users\simpl\.cache\huggingface\modules\datasets_modules\datasets\squad\1244d044b266a5e4dbd4174d23cb995eead372fbca31a03edc3f8a132787af41\squad.json No config specified, defaulting to first: squad/plain_text ``` Interrupting the jupyter kernel we are in a file lock. In Google Colab the download is ok. In contrast to a local run in colab dataset_infos.json is downloaded ``` https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/squad/dataset_infos.json not found in cache or force_download set to True, downloading to /root/.cache/huggingface/datasets/tmptl9ha_ad Downloading: 2.19k/? [00:00<00:00, 26.2kB/s] ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/69860107?v=4", "events_url": "https://api.github.com/users/tomacai/events{/privacy}", "followers_url": "https://api.github.com/users/tomacai/followers", "following_url": "https://api.github.com/users/tomacai/following{/other_user}", "gists_url": "https://api.github.com/users/tomacai/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tomacai", "id": 69860107, "login": "tomacai", "node_id": "MDQ6VXNlcjY5ODYwMTA3", "organizations_url": "https://api.github.com/users/tomacai/orgs", "received_events_url": "https://api.github.com/users/tomacai/received_events", "repos_url": "https://api.github.com/users/tomacai/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tomacai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tomacai/subscriptions", "type": "User", "url": "https://api.github.com/users/tomacai", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1672/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1672/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1:22:29
https://api.github.com/repos/huggingface/datasets/issues/1671
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1671/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1671/comments
https://api.github.com/repos/huggingface/datasets/issues/1671/events
https://github.com/huggingface/datasets/issues/1671
776,652,193
MDU6SXNzdWU3NzY2NTIxOTM=
1,671
connection issue
{ "avatar_url": "https://avatars.githubusercontent.com/u/73364383?v=4", "events_url": "https://api.github.com/users/rabeehkarimimahabadi/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehkarimimahabadi/followers", "following_url": "https://api.github.com/users/rabeehkarimimahabadi/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehkarimimahabadi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehkarimimahabadi", "id": 73364383, "login": "rabeehkarimimahabadi", "node_id": "MDQ6VXNlcjczMzY0Mzgz", "organizations_url": "https://api.github.com/users/rabeehkarimimahabadi/orgs", "received_events_url": "https://api.github.com/users/rabeehkarimimahabadi/received_events", "repos_url": "https://api.github.com/users/rabeehkarimimahabadi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehkarimimahabadi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehkarimimahabadi/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehkarimimahabadi", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Also, mayjor issue for me is the format issue, even if I go through changing the whole code to use load_from_disk, then if I do \r\n\r\nd = datasets.load_from_disk(\"imdb\")\r\nd = d[\"train\"][:10] => the format of this is no more in datasets format\r\nthis is different from you call load_datasets(\"train[10]\")\r\n\r\ncould you tell me how I can make the two datastes the same format @lhoestq \r\n\r\n", "> `\r\nrequests.exceptions.ConnectTimeout: HTTPSConnectionPool(host='s3.amazonaws.com', port=443): Max retries exceeded with url: /datasets.huggingface.co/datasets/datasets/glue/glue.py (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7ff6d6c60a20>, 'Connection to s3.amazonaws.com timed out. (connect timeout=10)'))`\r\n\r\nDo you have an internet connection on the machine ? Is there a proxy that might block requests to aws ?\r\n\r\n> I tried to do read the data, save it to a path and then set HF_HOME, which does not work and this is still not reading from the old set path, could you assist me how to save the datasets in a path, and let dataset library read from this path to avoid connection issue. thanks\r\n\r\nHF_HOME is used to specify the directory for the cache files of this library.\r\nYou can use save_to_disk and load_from_disk without changing the HF_HOME:\r\n```python\r\nimdb = datasets.load_dataset(\"imdb\")\r\nimdb.save_to_disk(\"/idiap/temp/rkarimi/hf_datasets/imdb\")\r\nimdb = datasets.load_from_disk(\"/idiap/temp/rkarimi/hf_datasets/imdb\")\r\n```\r\n\r\n> could you tell me how I can make the two datastes the same format\r\n\r\nIndeed they returns different things:\r\n- `load_dataset` returns a `Dataset` object if the split is specified, or a `DatasetDict` if no split is given. Therefore `load_datasets(\"imdb\", split=\"train[10]\")` returns a `Dataset` object containing 10 elements.\r\n- doing `d[\"train\"][:10]` on a DatasetDict \"d\" gets the train split `d[\"train\"]` as a `Dataset` object and then gets the first 10 elements as a dictionary" ]
2020-12-30T21:56:20
2022-10-05T12:42:12
2022-10-05T12:42:12
NONE
null
null
null
null
Hi I am getting this connection issue, resulting in large failure on cloud, @lhoestq I appreciate your help on this. If I want to keep the codes the same, so not using save_to_disk, load_from_disk, but save the datastes in the way load_dataset reads from and copy the files in the same folder the datasets library reads from, could you assist me how this can be done, thanks I tried to do read the data, save it to a path and then set HF_HOME, which does not work and this is still not reading from the old set path, could you assist me how to save the datasets in a path, and let dataset library read from this path to avoid connection issue. thanks ``` imdb = datasets.load_dataset("imdb") imdb.save_to_disk("/idiap/temp/rkarimi/hf_datasets/imdb") >>> os.environ["HF_HOME"]="/idiap/temp/rkarimi/hf_datasets/" >>> imdb = datasets.load_dataset("imdb") Reusing dataset imdb (/idiap/temp/rkarimi/cache_home_2/datasets/imdb/plain_text/1.0.0/90099cb476936b753383ba2ae6ab2eae419b2e87f71cd5189cb9c8e5814d12a3) ``` I tried afterwards to set HF_HOME in bash, this makes it read from it, but it cannot let dataset library load from the saved path and still downloading data. could you tell me how to fix this issue @lhoestq thanks Also this is on cloud, so I save them in a path, copy it to "another machine" to load the data ### Error stack ``` Traceback (most recent call last): File "./finetune_t5_trainer.py", line 344, in <module> main() File "./finetune_t5_trainer.py", line 232, in main for task in data_args.eval_tasks} if training_args.do_test else None File "./finetune_t5_trainer.py", line 232, in <dictcomp> for task in data_args.eval_tasks} if training_args.do_test else None File "/workdir/seq2seq/data/tasks.py", line 136, in get_dataset split = self.get_sampled_split(split, n_obs) File "/workdir/seq2seq/data/tasks.py", line 64, in get_sampled_split dataset = self.load_dataset(split) File "/workdir/seq2seq/data/tasks.py", line 454, in load_dataset split=split, script_version="master") File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 589, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 263, in prepare_module head_hf_s3(path, filename=name, dataset=dataset) File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 200, in head_hf_s3 return http_head(hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset)) File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 403, in http_head url, proxies=proxies, headers=headers, cookies=cookies, allow_redirects=allow_redirects, timeout=timeout File "/usr/local/lib/python3.6/dist-packages/requests/api.py", line 104, in head return request('head', url, **kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/sessions.py", line 542, in request resp = self.send(prep, **send_kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/sessions.py", line 655, in send r = adapter.send(request, **kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/adapters.py", line 504, in send raise ConnectTimeout(e, request=request) requests.exceptions.ConnectTimeout: HTTPSConnectionPool(host='s3.amazonaws.com', port=443): Max retries exceeded with url: 
/datasets.huggingface.co/datasets/datasets/glue/glue.py (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7ff6d6c60a20>, 'Connection to s3.amazonaws.com timed out. (connect timeout=10)')) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1671/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1671/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
643 days, 14:45:52
https://api.github.com/repos/huggingface/datasets/issues/1670
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1670/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1670/comments
https://api.github.com/repos/huggingface/datasets/issues/1670/events
https://github.com/huggingface/datasets/issues/1670
776,608,579
MDU6SXNzdWU3NzY2MDg1Nzk=
1,670
wiki_dpr pre-processing performance
{ "avatar_url": "https://avatars.githubusercontent.com/u/753898?v=4", "events_url": "https://api.github.com/users/dbarnhart/events{/privacy}", "followers_url": "https://api.github.com/users/dbarnhart/followers", "following_url": "https://api.github.com/users/dbarnhart/following{/other_user}", "gists_url": "https://api.github.com/users/dbarnhart/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dbarnhart", "id": 753898, "login": "dbarnhart", "node_id": "MDQ6VXNlcjc1Mzg5OA==", "organizations_url": "https://api.github.com/users/dbarnhart/orgs", "received_events_url": "https://api.github.com/users/dbarnhart/received_events", "repos_url": "https://api.github.com/users/dbarnhart/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dbarnhart/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dbarnhart/subscriptions", "type": "User", "url": "https://api.github.com/users/dbarnhart", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "72f99f", "default": false, "description": "Discussions on the datasets", "id": 2067401494, "name": "Dataset discussion", "node_id": "MDU6TGFiZWwyMDY3NDAxNDk0", "url": "https://api.github.com/repos/huggingface/datasets/labels/Dataset%20discussion" } ]
open
false
null
[]
[ "Hi ! And thanks for the tips :) \r\n\r\nIndeed currently `wiki_dpr` takes some time to be processed.\r\nMultiprocessing for dataset generation is definitely going to speed up things.\r\n\r\nRegarding the index note that for the default configurations, the index is downloaded instead of being built, which avoid spending time on constructing the index. However in other cases it would be awesome to make the construction faster.\r\n\r\nAny contribution that can help things faster are welcome. In particular in you have some code that can build a wiki_dpr IVF PQ index in a sharded GPU setup and would like to share it, we can add it to an `examples` folder. In particular since faiss is becoming the library of reference for dataset indexing for tasks like Open Domain Question Answering.\r\n\r\n", "I'd be happy to contribute something when I get the time, probably adding multiprocessing and / or cython support to wiki_dpr. I've written cythonized apache beam code before as well.\r\n\r\nFor sharded index building, I used the FAISS example code for indexing 1 billion vectors as a start. I'm sure you're aware that the documentation isn't great, but the source code is fairly easy to follow.", "Nice thanks ! That would be awesome to make its construction faster :) " ]
2020-12-30T19:41:43
2021-01-28T09:41:36
null
NONE
null
null
null
null
I've been working with wiki_dpr and noticed that the dataset processing is seriously impaired in performance [1]. It takes about 12h to process the entire dataset. Most of this time is simply loading and processing the data, but the actual indexing is also quite slow (3h). I won't repeat the concerns around multiprocessing as they are addressed in other issues (#786), but this is the first obvious thing to do. Using cython to speed up the text manipulation may also help. Loading and processing a dataset of this size in under 15 minutes does not seem unreasonable on a modern multi-core machine. I have hit such targets myself on similar tasks. Would love to see this improve. The other issue is that it takes 3h to construct the FAISS index. If only we could use GPUs with HNSW, but we can't. My sharded GPU indexing code can build an IVF + PQ index in 10 minutes on 20 million vectors. Still, 3h seems slow even for the CPU. It looks like HF is adding only 1000 vectors at a time by default [2], whereas the faiss benchmark adds 1 million vectors at a time (effectively) [3]. It's possible the runtime could be reduced with a larger batch. Also, it looks like project dependencies ultimately use OpenBLAS, which is known to have issues when combined with OpenMP, which HNSW uses [4]. A workaround is to set the environment variable `OMP_WAIT_POLICY=PASSIVE` via `os.environ` or similar. References: [1] https://github.com/huggingface/datasets/blob/master/datasets/wiki_dpr/wiki_dpr.py [2] https://github.com/huggingface/datasets/blob/master/src/datasets/search.py [3] https://github.com/facebookresearch/faiss/blob/master/benchs/bench_hnsw.py [4] https://github.com/facebookresearch/faiss/issues/422
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1670/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1670/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/1669
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1669/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1669/comments
https://api.github.com/repos/huggingface/datasets/issues/1669/events
https://github.com/huggingface/datasets/issues/1669
776,608,386
MDU6SXNzdWU3NzY2MDgzODY=
1,669
wiki_dpr dataset pre-processing performance
{ "avatar_url": "https://avatars.githubusercontent.com/u/753898?v=4", "events_url": "https://api.github.com/users/dbarnhart/events{/privacy}", "followers_url": "https://api.github.com/users/dbarnhart/followers", "following_url": "https://api.github.com/users/dbarnhart/following{/other_user}", "gists_url": "https://api.github.com/users/dbarnhart/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dbarnhart", "id": 753898, "login": "dbarnhart", "node_id": "MDQ6VXNlcjc1Mzg5OA==", "organizations_url": "https://api.github.com/users/dbarnhart/orgs", "received_events_url": "https://api.github.com/users/dbarnhart/received_events", "repos_url": "https://api.github.com/users/dbarnhart/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dbarnhart/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dbarnhart/subscriptions", "type": "User", "url": "https://api.github.com/users/dbarnhart", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Sorry, double posted." ]
2020-12-30T19:41:09
2020-12-30T19:42:25
2020-12-30T19:42:25
NONE
null
null
null
null
I've been working with wiki_dpr and noticed that the dataset processing is seriously impaired in performance [1]. It takes about 12h to process the entire dataset. Most of this time is simply loading and processing the data, but the actual indexing is also quite slow (3h). I won't repeat the concerns around multiprocessing as they are addressed in other issues (#786), but this is the first obvious thing to do. Using cython to speed up the text manipulation may also help. Loading and processing a dataset of this size in under 15 minutes does not seem unreasonable on a modern multi-core machine. I have hit such targets myself on similar tasks. Would love to see this improve. The other issue is that it takes 3h to construct the FAISS index. If only we could use GPUs with HNSW, but we can't. My sharded GPU indexing code can build an IVF + PQ index in 10 minutes on 20 million vectors. Still, 3h seems slow even for the CPU. It looks like HF is adding only 1000 vectors at a time by default [2], whereas the faiss benchmark adds 1 million vectors at a time (effectively) [3]. It's possible the runtime could be reduced with a larger batch. Also, it looks like project dependencies ultimately use OpenBLAS, which is known to have issues when combined with OpenMP, which HNSW uses [4]. A workaround is to set the environment variable `OMP_WAIT_POLICY=PASSIVE` via `os.environ` or similar. References: [1] https://github.com/huggingface/datasets/blob/master/datasets/wiki_dpr/wiki_dpr.py [2] https://github.com/huggingface/datasets/blob/master/src/datasets/search.py [3] https://github.com/facebookresearch/faiss/blob/master/benchs/bench_hnsw.py [4] https://github.com/facebookresearch/faiss/issues/422
{ "avatar_url": "https://avatars.githubusercontent.com/u/753898?v=4", "events_url": "https://api.github.com/users/dbarnhart/events{/privacy}", "followers_url": "https://api.github.com/users/dbarnhart/followers", "following_url": "https://api.github.com/users/dbarnhart/following{/other_user}", "gists_url": "https://api.github.com/users/dbarnhart/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dbarnhart", "id": 753898, "login": "dbarnhart", "node_id": "MDQ6VXNlcjc1Mzg5OA==", "organizations_url": "https://api.github.com/users/dbarnhart/orgs", "received_events_url": "https://api.github.com/users/dbarnhart/received_events", "repos_url": "https://api.github.com/users/dbarnhart/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dbarnhart/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dbarnhart/subscriptions", "type": "User", "url": "https://api.github.com/users/dbarnhart", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1669/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1669/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:01:16
https://api.github.com/repos/huggingface/datasets/issues/1662
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1662/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1662/comments
https://api.github.com/repos/huggingface/datasets/issues/1662/events
https://github.com/huggingface/datasets/issues/1662
775,890,154
MDU6SXNzdWU3NzU4OTAxNTQ=
1,662
Arrow file is too large when saving vector data
{ "avatar_url": "https://avatars.githubusercontent.com/u/22360336?v=4", "events_url": "https://api.github.com/users/weiwangorg/events{/privacy}", "followers_url": "https://api.github.com/users/weiwangorg/followers", "following_url": "https://api.github.com/users/weiwangorg/following{/other_user}", "gists_url": "https://api.github.com/users/weiwangorg/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/weiwangorg", "id": 22360336, "login": "weiwangorg", "node_id": "MDQ6VXNlcjIyMzYwMzM2", "organizations_url": "https://api.github.com/users/weiwangorg/orgs", "received_events_url": "https://api.github.com/users/weiwangorg/received_events", "repos_url": "https://api.github.com/users/weiwangorg/repos", "site_admin": false, "starred_url": "https://api.github.com/users/weiwangorg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/weiwangorg/subscriptions", "type": "User", "url": "https://api.github.com/users/weiwangorg", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi !\r\nThe arrow file size is due to the embeddings. Indeed if they're stored as float32 then the total size of the embeddings is\r\n\r\n20 000 000 vectors * 768 dimensions * 4 bytes per dimension ~= 60GB\r\n\r\nIf you want to reduce the size you can consider using quantization for example, or maybe using dimension reduction techniques.\r\n", "Thanks for your reply @lhoestq.\r\nI want to save original embedding for these sentences for subsequent calculations. So does arrow have a way to save in a compressed format to reduce the size of the file?", "Arrow doesn't have compression since it is designed to have no serialization overhead", "I see. Thank you." ]
2020-12-29T13:23:12
2021-01-21T14:12:39
2021-01-21T14:12:39
NONE
null
null
null
null
I computed the sentence embedding of each sentence of the BookCorpus data using BERT base and saved them to disk. I used 20M sentences and the resulting Arrow file is about 59GB, while the original text file is only about 1.3GB. Are there any ways to reduce the size of the Arrow file?
{ "avatar_url": "https://avatars.githubusercontent.com/u/22360336?v=4", "events_url": "https://api.github.com/users/weiwangorg/events{/privacy}", "followers_url": "https://api.github.com/users/weiwangorg/followers", "following_url": "https://api.github.com/users/weiwangorg/following{/other_user}", "gists_url": "https://api.github.com/users/weiwangorg/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/weiwangorg", "id": 22360336, "login": "weiwangorg", "node_id": "MDQ6VXNlcjIyMzYwMzM2", "organizations_url": "https://api.github.com/users/weiwangorg/orgs", "received_events_url": "https://api.github.com/users/weiwangorg/received_events", "repos_url": "https://api.github.com/users/weiwangorg/repos", "site_admin": false, "starred_url": "https://api.github.com/users/weiwangorg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/weiwangorg/subscriptions", "type": "User", "url": "https://api.github.com/users/weiwangorg", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1662/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1662/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
23 days, 0:49:27
https://api.github.com/repos/huggingface/datasets/issues/1647
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1647/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1647/comments
https://api.github.com/repos/huggingface/datasets/issues/1647/events
https://github.com/huggingface/datasets/issues/1647
775,525,799
MDU6SXNzdWU3NzU1MjU3OTk=
1,647
NarrativeQA fails to load with `load_dataset`
{ "avatar_url": "https://avatars.githubusercontent.com/u/56408839?v=4", "events_url": "https://api.github.com/users/eric-mitchell/events{/privacy}", "followers_url": "https://api.github.com/users/eric-mitchell/followers", "following_url": "https://api.github.com/users/eric-mitchell/following{/other_user}", "gists_url": "https://api.github.com/users/eric-mitchell/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eric-mitchell", "id": 56408839, "login": "eric-mitchell", "node_id": "MDQ6VXNlcjU2NDA4ODM5", "organizations_url": "https://api.github.com/users/eric-mitchell/orgs", "received_events_url": "https://api.github.com/users/eric-mitchell/received_events", "repos_url": "https://api.github.com/users/eric-mitchell/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eric-mitchell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eric-mitchell/subscriptions", "type": "User", "url": "https://api.github.com/users/eric-mitchell", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @eric-mitchell,\r\nI think the issue might be that this dataset was added during the community sprint and has not been released yet. It will be available with the v2 of `datasets`.\r\nFor now, you should be able to load the datasets after installing the latest (master) version of `datasets` using pip:\r\n`pip install git+https://github.com/huggingface/datasets.git@master`", "@bhavitvyamalik Great, thanks for this! Confirmed that the problem is resolved on master at [cbbda53](https://github.com/huggingface/datasets/commit/cbbda53ac1520b01f0f67ed6017003936c41ec59).", "Update: HuggingFace did an intermediate release yesterday just before the v2.0.\r\n\r\nTo load it you can just update `datasets`\r\n\r\n`pip install --upgrade datasets`" ]
2020-12-28T18:16:09
2021-01-05T12:05:08
2021-01-03T17:58:05
NONE
null
null
null
null
When loading the NarrativeQA dataset with `load_dataset('narrativeqa')` as given in the documentation [here](https://huggingface.co/datasets/narrativeqa), I receive a cascade of exceptions, ending with FileNotFoundError: Couldn't find file locally at narrativeqa/narrativeqa.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/narrativeqa/narrativeqa.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/narrativeqa/narrativeqa.py Workaround: if I manually copy the `narrativeqa.py` builder into my local directory with curl https://raw.githubusercontent.com/huggingface/datasets/master/datasets/narrativeqa/narrativeqa.py -o narrativeqa.py and load the dataset as `load_dataset('narrativeqa.py')`, everything works fine. I'm on datasets v1.1.3 using Python 3.6.10.
{ "avatar_url": "https://avatars.githubusercontent.com/u/56408839?v=4", "events_url": "https://api.github.com/users/eric-mitchell/events{/privacy}", "followers_url": "https://api.github.com/users/eric-mitchell/followers", "following_url": "https://api.github.com/users/eric-mitchell/following{/other_user}", "gists_url": "https://api.github.com/users/eric-mitchell/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eric-mitchell", "id": 56408839, "login": "eric-mitchell", "node_id": "MDQ6VXNlcjU2NDA4ODM5", "organizations_url": "https://api.github.com/users/eric-mitchell/orgs", "received_events_url": "https://api.github.com/users/eric-mitchell/received_events", "repos_url": "https://api.github.com/users/eric-mitchell/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eric-mitchell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eric-mitchell/subscriptions", "type": "User", "url": "https://api.github.com/users/eric-mitchell", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1647/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1647/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
5 days, 23:41:56
https://api.github.com/repos/huggingface/datasets/issues/1644
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1644/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1644/comments
https://api.github.com/repos/huggingface/datasets/issues/1644/events
https://github.com/huggingface/datasets/issues/1644
775,375,880
MDU6SXNzdWU3NzUzNzU4ODA=
1,644
HoVeR dataset fails to load
{ "avatar_url": "https://avatars.githubusercontent.com/u/1473778?v=4", "events_url": "https://api.github.com/users/urikz/events{/privacy}", "followers_url": "https://api.github.com/users/urikz/followers", "following_url": "https://api.github.com/users/urikz/following{/other_user}", "gists_url": "https://api.github.com/users/urikz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/urikz", "id": 1473778, "login": "urikz", "node_id": "MDQ6VXNlcjE0NzM3Nzg=", "organizations_url": "https://api.github.com/users/urikz/orgs", "received_events_url": "https://api.github.com/users/urikz/received_events", "repos_url": "https://api.github.com/users/urikz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/urikz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/urikz/subscriptions", "type": "User", "url": "https://api.github.com/users/urikz", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hover was added recently, that's why it wasn't available yet.\r\n\r\nTo load it you can just update `datasets`\r\n```\r\npip install --upgrade datasets\r\n```\r\n\r\nand then you can load `hover` with\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndataset = load_dataset(\"hover\")\r\n```" ]
2020-12-28T12:27:07
2022-10-05T12:40:34
2022-10-05T12:40:34
NONE
null
null
null
null
Hi! I'm getting an error when trying to load **HoVeR** dataset. Another one (**SQuAD**) does work for me. I'm using the latest (1.1.3) version of the library. Steps to reproduce the error: ```python >>> from datasets import load_dataset >>> dataset = load_dataset("hover") Traceback (most recent call last): File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/hover/hover.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/load.py", line 278, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/hover/hover.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/load.py", line 589, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/Users/urikz/anaconda/envs/mentionmemory/lib/python3.7/site-packages/datasets/load.py", line 282, in prepare_module combined_path, github_file_path, file_path FileNotFoundError: Couldn't find file locally at hover/hover.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/hover/hover.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/hover/hover.py ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1644/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1644/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
646 days, 0:13:27
https://api.github.com/repos/huggingface/datasets/issues/1643
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1643/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1643/comments
https://api.github.com/repos/huggingface/datasets/issues/1643/events
https://github.com/huggingface/datasets/issues/1643
775,280,046
MDU6SXNzdWU3NzUyODAwNDY=
1,643
Dataset social_bias_frames 404
{ "avatar_url": "https://avatars.githubusercontent.com/u/7501517?v=4", "events_url": "https://api.github.com/users/atemate/events{/privacy}", "followers_url": "https://api.github.com/users/atemate/followers", "following_url": "https://api.github.com/users/atemate/following{/other_user}", "gists_url": "https://api.github.com/users/atemate/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/atemate", "id": 7501517, "login": "atemate", "node_id": "MDQ6VXNlcjc1MDE1MTc=", "organizations_url": "https://api.github.com/users/atemate/orgs", "received_events_url": "https://api.github.com/users/atemate/received_events", "repos_url": "https://api.github.com/users/atemate/repos", "site_admin": false, "starred_url": "https://api.github.com/users/atemate/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/atemate/subscriptions", "type": "User", "url": "https://api.github.com/users/atemate", "user_view_type": "public" }
[]
closed
false
null
[]
[ "I see, master is already fixed in https://github.com/huggingface/datasets/commit/9e058f098a0919efd03a136b9b9c3dec5076f626" ]
2020-12-28T08:35:34
2020-12-28T08:38:07
2020-12-28T08:38:07
NONE
null
null
null
null
``` >>> from datasets import load_dataset >>> dataset = load_dataset("social_bias_frames") ... Downloading and preparing dataset social_bias_frames/default ... ~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/datasets/utils/file_utils.py in get_from_cache(url, cache_dir, force_download, proxies, etag_timeout, resume_download, user_agent, local_files_only, use_etag) 484 ) 485 elif response is not None and response.status_code == 404: --> 486 raise FileNotFoundError("Couldn't find file at {}".format(url)) 487 raise ConnectionError("Couldn't reach {}".format(url)) 488 FileNotFoundError: Couldn't find file at https://homes.cs.washington.edu/~msap/social-bias-frames/SocialBiasFrames_v2.tgz ``` [Here](https://homes.cs.washington.edu/~msap/social-bias-frames/) we find button `Download data` with the correct URL for the data: https://homes.cs.washington.edu/~msap/social-bias-frames/SBIC.v2.tgz
{ "avatar_url": "https://avatars.githubusercontent.com/u/7501517?v=4", "events_url": "https://api.github.com/users/atemate/events{/privacy}", "followers_url": "https://api.github.com/users/atemate/followers", "following_url": "https://api.github.com/users/atemate/following{/other_user}", "gists_url": "https://api.github.com/users/atemate/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/atemate", "id": 7501517, "login": "atemate", "node_id": "MDQ6VXNlcjc1MDE1MTc=", "organizations_url": "https://api.github.com/users/atemate/orgs", "received_events_url": "https://api.github.com/users/atemate/received_events", "repos_url": "https://api.github.com/users/atemate/repos", "site_admin": false, "starred_url": "https://api.github.com/users/atemate/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/atemate/subscriptions", "type": "User", "url": "https://api.github.com/users/atemate", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1643/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1643/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:02:33
https://api.github.com/repos/huggingface/datasets/issues/1641
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1641/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1641/comments
https://api.github.com/repos/huggingface/datasets/issues/1641/events
https://github.com/huggingface/datasets/issues/1641
775,110,872
MDU6SXNzdWU3NzUxMTA4NzI=
1,641
muchocine dataset cannot be downloaded
{ "avatar_url": "https://avatars.githubusercontent.com/u/3653789?v=4", "events_url": "https://api.github.com/users/mrm8488/events{/privacy}", "followers_url": "https://api.github.com/users/mrm8488/followers", "following_url": "https://api.github.com/users/mrm8488/following{/other_user}", "gists_url": "https://api.github.com/users/mrm8488/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mrm8488", "id": 3653789, "login": "mrm8488", "node_id": "MDQ6VXNlcjM2NTM3ODk=", "organizations_url": "https://api.github.com/users/mrm8488/orgs", "received_events_url": "https://api.github.com/users/mrm8488/received_events", "repos_url": "https://api.github.com/users/mrm8488/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mrm8488/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mrm8488/subscriptions", "type": "User", "url": "https://api.github.com/users/mrm8488", "user_view_type": "public" }
[ { "color": "ffffff", "default": true, "description": "This will not be worked on", "id": 1935892913, "name": "wontfix", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEz", "url": "https://api.github.com/repos/huggingface/datasets/labels/wontfix" }, { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
null
[]
[ "I have encountered the same error with `v1.0.1` and `v1.0.2` on both Windows and Linux environments. However, cloning the repo and using the path to the dataset's root directory worked for me. Even after having the dataset cached - passing the path is the only way (for now) to load the dataset.\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndataset = load_dataset(\"squad\") # Works\r\ndataset = load_dataset(\"code_search_net\", \"python\") # Error\r\ndataset = load_dataset(\"covid_qa_deepset\") # Error\r\n\r\npath = \"/huggingface/datasets/datasets/{}/\"\r\ndataset = load_dataset(path.format(\"code_search_net\"), \"python\") # Works\r\ndataset = load_dataset(path.format(\"covid_qa_deepset\")) # Works\r\n```\r\n\r\n", "Hi @mrm8488 and @amoux!\r\n The datasets you are trying to load have been added to the library during the community sprint for v2 last month. They will be available with the v2 release!\r\nFor now, there are still a couple of solutions to load the datasets:\r\n1. As suggested by @amoux, you can clone the git repo and pass the local path to the script\r\n2. You can also install the latest (master) version of `datasets` using pip: `pip install git+https://github.com/huggingface/datasets.git@master`", "If you don't want to clone entire `datasets` repo, just download the `muchocine` directory and pass the local path to the directory. Cheers!", "Muchocine was added recently, that's why it wasn't available yet.\r\n\r\nTo load it you can just update `datasets`\r\n```\r\npip install --upgrade datasets\r\n```\r\n\r\nand then you can load `muchocine` with\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndataset = load_dataset(\"muchocine\", split=\"train\")\r\n```", "Thanks @lhoestq " ]
2020-12-27T21:26:28
2021-08-03T05:07:29
2021-08-03T05:07:29
CONTRIBUTOR
null
null
null
null
```python --------------------------------------------------------------------------- FileNotFoundError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/datasets/load.py in prepare_module(path, script_version, download_config, download_mode, dataset, force_local_path, **download_kwargs) 267 try: --> 268 local_path = cached_path(file_path, download_config=download_config) 269 except FileNotFoundError: 7 frames FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/muchocine/muchocine.py During handling of the above exception, another exception occurred: FileNotFoundError Traceback (most recent call last) FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/muchocine/muchocine.py During handling of the above exception, another exception occurred: FileNotFoundError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/datasets/load.py in prepare_module(path, script_version, download_config, download_mode, dataset, force_local_path, **download_kwargs) 281 raise FileNotFoundError( 282 "Couldn't find file locally at {}, or remotely at {} or {}".format( --> 283 combined_path, github_file_path, file_path 284 ) 285 ) FileNotFoundError: Couldn't find file locally at muchocine/muchocine.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/muchocine/muchocine.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/muchocine/muchocine.py ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1641/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1641/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
218 days, 7:41:01
https://api.github.com/repos/huggingface/datasets/issues/1639
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1639/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1639/comments
https://api.github.com/repos/huggingface/datasets/issues/1639/events
https://github.com/huggingface/datasets/issues/1639
774,903,472
MDU6SXNzdWU3NzQ5MDM0NzI=
1,639
bug with sst2 in glue
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Maybe you can use nltk's treebank detokenizer ?\r\n```python\r\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\r\n\r\nTreebankWordDetokenizer().detokenize(\"it 's a charming and often affecting journey . \".split())\r\n# \"it's a charming and often affecting journey.\"\r\n```", "I am looking for alternative file URL here instead of adding extra processing code: https://github.com/huggingface/datasets/blob/171f2bba9dd8b92006b13cf076a5bf31d67d3e69/datasets/glue/glue.py#L174", "I don't know if there exists a detokenized version somewhere. Even the version on kaggle is tokenized" ]
2020-12-26T16:57:23
2022-10-05T12:40:16
2022-10-05T12:40:16
NONE
null
null
null
null
Hi I am getting very low accuracy on SST2 I investigate this and observe that for this dataset sentences are tokenized, while this is correct for the other datasets in GLUE, please see below. Is there any alternatives I could get untokenized sentences? I am unfortunately under time pressure to report some results on this dataset. thank you for your help. @lhoestq ``` >>> a = datasets.load_dataset('glue', 'sst2', split="validation", script_version="master") Reusing dataset glue (/julia/datasets/glue/sst2/1.0.0/7c99657241149a24692c402a5c3f34d4c9f1df5ac2e4c3759fadea38f6cb29c4) >>> a[:10] {'idx': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'label': [1, 0, 1, 1, 0, 1, 0, 0, 1, 0], 'sentence': ["it 's a charming and often affecting journey . ", 'unflinchingly bleak and desperate ', 'allows us to hope that nolan is poised to embark a major career as a commercial yet inventive filmmaker . ', "the acting , costumes , music , cinematography and sound are all astounding given the production 's austere locales . ", "it 's slow -- very , very slow . ", 'although laced with humor and a few fanciful touches , the film is a refreshingly serious look at young women . ', 'a sometimes tedious film . ', "or doing last year 's taxes with your ex-wife . ", "you do n't have to know about music to appreciate the film 's easygoing blend of comedy and romance . ", "in exactly 89 minutes , most of which passed as slowly as if i 'd been sitting naked on an igloo , formula 51 sank from quirky to jerky to utter turkey . "]} ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1639/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1639/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
647 days, 19:42:53
https://api.github.com/repos/huggingface/datasets/issues/1636
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1636/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1636/comments
https://api.github.com/repos/huggingface/datasets/issues/1636/events
https://github.com/huggingface/datasets/issues/1636
774,574,378
MDU6SXNzdWU3NzQ1NzQzNzg=
1,636
winogrande cannot be downloaded
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "I have same issue for other datasets (`myanmar_news` in my case).\r\n\r\nA version of `datasets` runs correctly on my local machine (**without GPU**) which looking for the dataset at \r\n```\r\nhttps://raw.githubusercontent.com/huggingface/datasets/master/datasets/myanmar_news/myanmar_news.py\r\n```\r\n\r\nMeanwhile, other version runs on Colab (**with GPU**) failed to download the dataset. It try to find the dataset at `1.1.3` instead of `master` . If I disable GPU on my Colab, the code can load the dataset without any problem.\r\n\r\nMaybe there is some version missmatch with the GPU and CPU version of code for these datasets?", "It looks like they're two different issues\r\n\r\n----------\r\n\r\nFirst for `myanmar_news`: \r\n\r\nIt must come from the way you installed `datasets`.\r\nIf you install `datasets` from source, then the `myanmar_news` script will be loaded from `master`.\r\nHowever if you install from `pip` it will get it using the version of the lib (here `1.1.3`) and `myanmar_news` is not available in `1.1.3`.\r\n\r\nThe difference between your GPU and CPU executions must be the environment, one seems to have installed `datasets` from source and not the other.\r\n\r\n----------\r\n\r\nThen for `winogrande`:\r\n\r\nThe errors says that the url https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/winogrande/winogrande.py is not reachable.\r\nHowever it works fine on my side.\r\n\r\nDoes your machine have an internet connection ? Are connections to github blocked by some sort of proxy ?\r\nCan you also try again in case github had issues when you tried the first time ?\r\n" ]
2020-12-24T22:28:22
2022-10-05T12:35:44
2022-10-05T12:35:44
NONE
null
null
null
null
Hi, I am getting this error when trying to run the codes on the cloud. Thank you for any suggestion and help on this @lhoestq ``` File "./finetune_trainer.py", line 318, in <module> main() File "./finetune_trainer.py", line 148, in main for task in data_args.tasks] File "./finetune_trainer.py", line 148, in <listcomp> for task in data_args.tasks] File "/workdir/seq2seq/data/tasks.py", line 65, in get_dataset dataset = self.load_dataset(split=split) File "/workdir/seq2seq/data/tasks.py", line 466, in load_dataset return datasets.load_dataset('winogrande', 'winogrande_l', split=split) File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 589, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 487, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/winogrande/winogrande.py yo/0 I1224 14:17:46.419031 31226 main shadow.py:122 > Traceback (most recent call last): File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/lib/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/usr/local/lib/python3.6/dist-packages/torch/distributed/launch.py", line 260, in <module> main() File "/usr/local/lib/python3.6/dist-packages/torch/distributed/launch.py", line 256, in main cmd=cmd) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1636/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1636/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
649 days, 14:07:22
https://api.github.com/repos/huggingface/datasets/issues/1635
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1635/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1635/comments
https://api.github.com/repos/huggingface/datasets/issues/1635/events
https://github.com/huggingface/datasets/issues/1635
774,524,492
MDU6SXNzdWU3NzQ1MjQ0OTI=
1,635
Persian Abstractive/Extractive Text Summarization
{ "avatar_url": "https://avatars.githubusercontent.com/u/2601833?v=4", "events_url": "https://api.github.com/users/m3hrdadfi/events{/privacy}", "followers_url": "https://api.github.com/users/m3hrdadfi/followers", "following_url": "https://api.github.com/users/m3hrdadfi/following{/other_user}", "gists_url": "https://api.github.com/users/m3hrdadfi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/m3hrdadfi", "id": 2601833, "login": "m3hrdadfi", "node_id": "MDQ6VXNlcjI2MDE4MzM=", "organizations_url": "https://api.github.com/users/m3hrdadfi/orgs", "received_events_url": "https://api.github.com/users/m3hrdadfi/received_events", "repos_url": "https://api.github.com/users/m3hrdadfi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/m3hrdadfi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/m3hrdadfi/subscriptions", "type": "User", "url": "https://api.github.com/users/m3hrdadfi", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
[]
2020-12-24T17:47:12
2021-01-04T15:11:04
2021-01-04T15:11:04
CONTRIBUTOR
null
null
null
null
Assembling datasets tailored to different tasks and languages is a valuable goal. It would be great to have this dataset included. ## Adding a Dataset - **Name:** *pn-summary* - **Description:** *A well-structured summarization dataset for the Persian language consisting of 93,207 records. It is prepared for abstractive/extractive tasks (like cnn_dailymail for English). It can also be used for other tasks like text generation, title generation, and news category classification.* - **Paper:** *https://arxiv.org/abs/2012.11204* - **Data:** *https://github.com/hooshvare/pn-summary/#download* - **Motivation:** *It is the first Persian abstractive/extractive text summarization dataset (like cnn_dailymail for English)!* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/2601833?v=4", "events_url": "https://api.github.com/users/m3hrdadfi/events{/privacy}", "followers_url": "https://api.github.com/users/m3hrdadfi/followers", "following_url": "https://api.github.com/users/m3hrdadfi/following{/other_user}", "gists_url": "https://api.github.com/users/m3hrdadfi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/m3hrdadfi", "id": 2601833, "login": "m3hrdadfi", "node_id": "MDQ6VXNlcjI2MDE4MzM=", "organizations_url": "https://api.github.com/users/m3hrdadfi/orgs", "received_events_url": "https://api.github.com/users/m3hrdadfi/received_events", "repos_url": "https://api.github.com/users/m3hrdadfi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/m3hrdadfi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/m3hrdadfi/subscriptions", "type": "User", "url": "https://api.github.com/users/m3hrdadfi", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1635/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1635/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
10 days, 21:23:52
https://api.github.com/repos/huggingface/datasets/issues/1634
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1634/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1634/comments
https://api.github.com/repos/huggingface/datasets/issues/1634/events
https://github.com/huggingface/datasets/issues/1634
774,487,934
MDU6SXNzdWU3NzQ0ODc5MzQ=
1,634
Inspecting datasets per category
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "That's interesting, can you tell me what you think would be useful to access to inspect a dataset?\r\n\r\nYou can filter them in the hub with the search by the way: https://huggingface.co/datasets have you seen it?", "Hi @thomwolf \r\nthank you, I was not aware of this, I was looking into the data viewer linked into readme page. \r\n\r\nThis is exactly what I was looking for, but this does not work currently, please see the attached \r\nI am selecting to see all nli datasets in english and it retrieves none. thanks\r\n\r\n![5tarDHn9CP6ngeM](https://user-images.githubusercontent.com/53898419/103107612-1509aa80-4638-11eb-85b5-0c995a189969.png)\r\n\r\n\r\n\r\n", "I see 4 results for NLI in English but indeed some are not tagged yet and missing (GLUE), we will focus on that in January (cc @yjernite): https://huggingface.co/datasets?filter=task_ids:natural-language-inference,languages:en", "Hi! You can use `huggingface_hub`'s `list_datasets` for that now:\r\n```python\r\nimport huggingface_hub # pip install huggingface_hub\r\nhuggingface_hub.list_datasets(filter=\"task_categories:question-answering\")\r\n# or\r\nhuggingface_hub.list_datasets(filter=(\"task_categories:natural-language-inference\", \"languages:\"en\"))\r\n```" ]
2020-12-24T15:26:34
2022-10-04T14:57:33
2022-10-04T14:57:33
NONE
null
null
null
null
Hi, is there a way I could get all NLI datasets/all QA datasets to get some understanding of the available datasets per category? It is hard for me to inspect the datasets one by one on the webpage. Thanks for the suggestions @lhoestq
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1634/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1634/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
648 days, 23:30:59
https://api.github.com/repos/huggingface/datasets/issues/1633
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1633/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1633/comments
https://api.github.com/repos/huggingface/datasets/issues/1633/events
https://github.com/huggingface/datasets/issues/1633
774,422,603
MDU6SXNzdWU3NzQ0MjI2MDM=
1,633
social_i_qa wrong format of labels
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[ "@lhoestq, should I raise a PR for this? Just a minor change while reading labels text file", "Sure feel free to open a PR thanks !" ]
2020-12-24T13:11:54
2020-12-30T17:18:49
2020-12-30T17:18:49
NONE
null
null
null
null
Hi, there is extra "\n" in labels of social_i_qa datasets, no big deal, but I was wondering if you could remove it to make it consistent. so label is 'label': '1\n', not '1' thanks ``` >>> import datasets >>> from datasets import load_dataset >>> dataset = load_dataset( ... 'social_i_qa') cahce dir /julia/cache/datasets Downloading: 4.72kB [00:00, 3.52MB/s] cahce dir /julia/cache/datasets Downloading: 2.19kB [00:00, 1.81MB/s] Using custom data configuration default Reusing dataset social_i_qa (/julia/datasets/social_i_qa/default/0.1.0/4a4190cc2d2482d43416c2167c0c5dccdd769d4482e84893614bd069e5c3ba06) >>> dataset['train'][0] {'answerA': 'like attending', 'answerB': 'like staying home', 'answerC': 'a good friend to have', 'context': 'Cameron decided to have a barbecue and gathered her friends together.', 'label': '1\n', 'question': 'How would Others feel as a result?'} ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1633/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1633/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
6 days, 4:06:55
https://api.github.com/repos/huggingface/datasets/issues/1632
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1632/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1632/comments
https://api.github.com/repos/huggingface/datasets/issues/1632/events
https://github.com/huggingface/datasets/issues/1632
774,388,625
MDU6SXNzdWU3NzQzODg2MjU=
1,632
SICK dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
[]
2020-12-24T12:40:14
2021-02-05T15:49:25
2021-02-05T15:49:25
CONTRIBUTOR
null
null
null
null
Hi, it would be great to have this dataset included. I might be missing something, but I could not find it in the list of already included datasets. Thank you. ## Adding a Dataset - **Name:** SICK - **Description:** SICK consists of about 10,000 English sentence pairs that include many examples of lexical, syntactic, and semantic phenomena. - **Paper:** https://www.aclweb.org/anthology/L14-1314/ - **Data:** http://marcobaroni.org/composes/sick.html - **Motivation:** This dataset is well known in the NLP community and is used for recognizing entailment between sentences. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1632/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1632/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
43 days, 3:09:11
https://api.github.com/repos/huggingface/datasets/issues/1630
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1630/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1630/comments
https://api.github.com/repos/huggingface/datasets/issues/1630/events
https://github.com/huggingface/datasets/issues/1630
774,332,129
MDU6SXNzdWU3NzQzMzIxMjk=
1,630
Adding UKP Argument Aspect Similarity Corpus
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
[ "Adding a link to the guide on adding a dataset if someone want to give it a try: https://github.com/huggingface/datasets#add-a-new-dataset-to-the-hub\r\n\r\nwe should add this guide to the issue template @lhoestq ", "thanks @thomwolf , this is added now. The template is correct, sorry my mistake not to include it. ", "Available here: https://huggingface.co/datasets/UKPLab/UKP_ASPECT" ]
2020-12-24T11:01:31
2022-10-05T12:36:12
2022-10-05T12:36:12
CONTRIBUTOR
null
null
null
null
Hi, it would be great to have this dataset included. ## Adding a Dataset - **Name:** UKP Argument Aspect Similarity Corpus - **Description:** The UKP Argument Aspect Similarity Corpus (UKP ASPECT) includes 3,595 sentence pairs over 28 controversial topics. Each sentence pair was annotated via crowdsourcing as either “high similarity”, “some similarity”, “no similarity” or “not related” with respect to the topic. - **Paper:** https://www.aclweb.org/anthology/P19-1054/ - **Data:** https://tudatalib.ulb.tu-darmstadt.de/handle/tudatalib/1998 - **Motivation:** This is one of the datasets frequently used in recent adapter papers such as https://arxiv.org/pdf/2005.00247.pdf Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md). Thank you
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1630/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1630/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
650 days, 1:34:41
https://api.github.com/repos/huggingface/datasets/issues/1627
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1627/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1627/comments
https://api.github.com/repos/huggingface/datasets/issues/1627/events
https://github.com/huggingface/datasets/issues/1627
773,960,255
MDU6SXNzdWU3NzM5NjAyNTU=
1,627
`Dataset.map` disable progress bar
{ "avatar_url": "https://avatars.githubusercontent.com/u/8767964?v=4", "events_url": "https://api.github.com/users/Nickil21/events{/privacy}", "followers_url": "https://api.github.com/users/Nickil21/followers", "following_url": "https://api.github.com/users/Nickil21/following{/other_user}", "gists_url": "https://api.github.com/users/Nickil21/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Nickil21", "id": 8767964, "login": "Nickil21", "node_id": "MDQ6VXNlcjg3Njc5NjQ=", "organizations_url": "https://api.github.com/users/Nickil21/orgs", "received_events_url": "https://api.github.com/users/Nickil21/received_events", "repos_url": "https://api.github.com/users/Nickil21/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Nickil21/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Nickil21/subscriptions", "type": "User", "url": "https://api.github.com/users/Nickil21", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Progress bar can be disabled like this:\r\n```python\r\nfrom datasets.utils.logging import set_verbosity_error\r\nset_verbosity_error()\r\n```\r\n\r\nThere is this line in `Dataset.map`:\r\n```python\r\nnot_verbose = bool(logger.getEffectiveLevel() > WARNING)\r\n```\r\n\r\nSo any logging level higher than `WARNING` turns off the progress bar.", "From the linked issues above, an up-to-date solution is:\r\n\r\n```python\r\nfrom datasets.utils.logging import disable_progress_bar\r\ndisable_progress_bar()\r\n```\r\n\r\nhttps://github.com/huggingface/datasets/blob/c6e08fcfc3a04e53430c26fa7c07da4cb18d977d/src/datasets/utils/logging.py#L233-L236", "Why not have a parameter in the function such as `progress: bool = True`?", "+1. We shouldn't need to play with logging levels for a simple thing like this. For instance, trainers have an option `show_progress` that does exactly this.", "Bump on this, such a simple QOL issue why can't we fix this?" ]
2020-12-23T17:53:42
2025-05-16T16:36:24
2020-12-26T19:57:17
NONE
null
null
null
null
I can't find anything to turn off the `tqdm` progress bars while running a preprocessing function using `Dataset.map`. I want something akin to `disable_tqdm=True` in `transformers`. Is there something like that?
{ "avatar_url": "https://avatars.githubusercontent.com/u/8767964?v=4", "events_url": "https://api.github.com/users/Nickil21/events{/privacy}", "followers_url": "https://api.github.com/users/Nickil21/followers", "following_url": "https://api.github.com/users/Nickil21/following{/other_user}", "gists_url": "https://api.github.com/users/Nickil21/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Nickil21", "id": 8767964, "login": "Nickil21", "node_id": "MDQ6VXNlcjg3Njc5NjQ=", "organizations_url": "https://api.github.com/users/Nickil21/orgs", "received_events_url": "https://api.github.com/users/Nickil21/received_events", "repos_url": "https://api.github.com/users/Nickil21/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Nickil21/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Nickil21/subscriptions", "type": "User", "url": "https://api.github.com/users/Nickil21", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1627/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1627/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3 days, 2:03:35
https://api.github.com/repos/huggingface/datasets/issues/1624
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1624/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1624/comments
https://api.github.com/repos/huggingface/datasets/issues/1624/events
https://github.com/huggingface/datasets/issues/1624
773,669,700
MDU6SXNzdWU3NzM2Njk3MDA=
1,624
Cannot download ade_corpus_v2
{ "avatar_url": "https://avatars.githubusercontent.com/u/20259310?v=4", "events_url": "https://api.github.com/users/him1411/events{/privacy}", "followers_url": "https://api.github.com/users/him1411/followers", "following_url": "https://api.github.com/users/him1411/following{/other_user}", "gists_url": "https://api.github.com/users/him1411/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/him1411", "id": 20259310, "login": "him1411", "node_id": "MDQ6VXNlcjIwMjU5MzEw", "organizations_url": "https://api.github.com/users/him1411/orgs", "received_events_url": "https://api.github.com/users/him1411/received_events", "repos_url": "https://api.github.com/users/him1411/repos", "site_admin": false, "starred_url": "https://api.github.com/users/him1411/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/him1411/subscriptions", "type": "User", "url": "https://api.github.com/users/him1411", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @him1411, the dataset you are trying to load has been added during the community sprint and has not been released yet. It will be available with the v2 of `datasets`.\r\nFor now, you should be able to load the datasets after installing the latest (master) version of `datasets` using pip:\r\n`pip install git+https://github.com/huggingface/datasets.git@master`", "`ade_corpus_v2` was added recently, that's why it wasn't available yet.\r\n\r\nTo load it you can just update `datasets`\r\n```\r\npip install --upgrade datasets\r\n```\r\n\r\nand then you can load `ade_corpus_v2` with\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndataset = load_dataset(\"ade_corpus_v2\", \"Ade_corpos_v2_drug_ade_relation\")\r\n```\r\n\r\n(looks like there is a typo in the configuration name, we'll fix it for the v2.0 release of `datasets` soon)" ]
2020-12-23T10:58:14
2021-08-03T05:08:54
2021-08-03T05:08:54
NONE
null
null
null
null
I tried this to get the dataset following this url : https://huggingface.co/datasets/ade_corpus_v2 but received this error : `Traceback (most recent call last): File "/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/ade_corpus_v2/ade_corpus_v2.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 278, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/opt/anaconda3/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 486, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/ade_corpus_v2/ade_corpus_v2.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 589, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/opt/anaconda3/lib/python3.7/site-packages/datasets/load.py", line 282, in prepare_module combined_path, github_file_path, file_path FileNotFoundError: Couldn't find file locally at ade_corpus_v2/ade_corpus_v2.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/ade_corpus_v2/ade_corpus_v2.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/ade_corpus_v2/ade_corpus_v2.py`
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1624/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1624/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
222 days, 18:10:40
https://api.github.com/repos/huggingface/datasets/issues/1622
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1622/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1622/comments
https://api.github.com/repos/huggingface/datasets/issues/1622/events
https://github.com/huggingface/datasets/issues/1622
772,940,768
MDU6SXNzdWU3NzI5NDA3Njg=
1,622
Can't call shape on the output of select()
{ "avatar_url": "https://avatars.githubusercontent.com/u/47183162?v=4", "events_url": "https://api.github.com/users/noaonoszko/events{/privacy}", "followers_url": "https://api.github.com/users/noaonoszko/followers", "following_url": "https://api.github.com/users/noaonoszko/following{/other_user}", "gists_url": "https://api.github.com/users/noaonoszko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/noaonoszko", "id": 47183162, "login": "noaonoszko", "node_id": "MDQ6VXNlcjQ3MTgzMTYy", "organizations_url": "https://api.github.com/users/noaonoszko/orgs", "received_events_url": "https://api.github.com/users/noaonoszko/received_events", "repos_url": "https://api.github.com/users/noaonoszko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/noaonoszko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/noaonoszko/subscriptions", "type": "User", "url": "https://api.github.com/users/noaonoszko", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Indeed that's a typo, do you want to open a PR to fix it?", "Yes, created a PR" ]
2020-12-22T13:18:40
2020-12-23T13:37:13
2020-12-23T13:37:12
CONTRIBUTOR
null
null
null
null
I get the error `TypeError: tuple expected at most 1 argument, got 2` when calling `shape` on the output of `select()`. It's line 531 in `shape` in arrow_dataset.py that causes the problem: ``return tuple(self._indices.num_rows, self._data.num_columns)`` This makes sense, since `tuple(num1, num2)` is not a valid call. Full code to reproduce: ```python from datasets import load_dataset dataset = load_dataset("cnn_dailymail", "3.0.0") train_set = dataset["train"] t = train_set.select(range(10)) print(t.shape) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/47183162?v=4", "events_url": "https://api.github.com/users/noaonoszko/events{/privacy}", "followers_url": "https://api.github.com/users/noaonoszko/followers", "following_url": "https://api.github.com/users/noaonoszko/following{/other_user}", "gists_url": "https://api.github.com/users/noaonoszko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/noaonoszko", "id": 47183162, "login": "noaonoszko", "node_id": "MDQ6VXNlcjQ3MTgzMTYy", "organizations_url": "https://api.github.com/users/noaonoszko/orgs", "received_events_url": "https://api.github.com/users/noaonoszko/received_events", "repos_url": "https://api.github.com/users/noaonoszko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/noaonoszko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/noaonoszko/subscriptions", "type": "User", "url": "https://api.github.com/users/noaonoszko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1622/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1622/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 0:18:32
https://api.github.com/repos/huggingface/datasets/issues/1618
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1618/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1618/comments
https://api.github.com/repos/huggingface/datasets/issues/1618/events
https://github.com/huggingface/datasets/issues/1618
772,248,730
MDU6SXNzdWU3NzIyNDg3MzA=
1,618
Can't filter language:EN on https://huggingface.co/datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/4547987?v=4", "events_url": "https://api.github.com/users/davidefiocco/events{/privacy}", "followers_url": "https://api.github.com/users/davidefiocco/followers", "following_url": "https://api.github.com/users/davidefiocco/following{/other_user}", "gists_url": "https://api.github.com/users/davidefiocco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/davidefiocco", "id": 4547987, "login": "davidefiocco", "node_id": "MDQ6VXNlcjQ1NDc5ODc=", "organizations_url": "https://api.github.com/users/davidefiocco/orgs", "received_events_url": "https://api.github.com/users/davidefiocco/received_events", "repos_url": "https://api.github.com/users/davidefiocco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/davidefiocco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/davidefiocco/subscriptions", "type": "User", "url": "https://api.github.com/users/davidefiocco", "user_view_type": "public" }
[]
closed
false
null
[]
[ "cc'ing @mapmeld ", "Full language list is now deployed to https://huggingface.co/datasets ! Recommend close", "Cool @mapmeld ! My 2 cents (for a next iteration), it would be cool to have a small search widget in the filter dropdown as you have a ton of languages now here! Closing this in the meantime." ]
2020-12-21T15:23:23
2020-12-22T17:17:00
2020-12-22T17:16:09
NONE
null
null
null
null
When visiting https://huggingface.co/datasets, I don't see an obvious way to filter only English datasets. This is unexpected for me, am I missing something? I'd expect English to be selectable in the language widget. This problem is reproducible on both Mozilla Firefox and MS Edge: ![screenshot](https://user-images.githubusercontent.com/4547987/102792244-892e1f00-43a8-11eb-9e89-4826ca201a87.png)
{ "avatar_url": "https://avatars.githubusercontent.com/u/4547987?v=4", "events_url": "https://api.github.com/users/davidefiocco/events{/privacy}", "followers_url": "https://api.github.com/users/davidefiocco/followers", "following_url": "https://api.github.com/users/davidefiocco/following{/other_user}", "gists_url": "https://api.github.com/users/davidefiocco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/davidefiocco", "id": 4547987, "login": "davidefiocco", "node_id": "MDQ6VXNlcjQ1NDc5ODc=", "organizations_url": "https://api.github.com/users/davidefiocco/orgs", "received_events_url": "https://api.github.com/users/davidefiocco/received_events", "repos_url": "https://api.github.com/users/davidefiocco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/davidefiocco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/davidefiocco/subscriptions", "type": "User", "url": "https://api.github.com/users/davidefiocco", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1618/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1618/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 1:52:46
https://api.github.com/repos/huggingface/datasets/issues/1615
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1615/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1615/comments
https://api.github.com/repos/huggingface/datasets/issues/1615/events
https://github.com/huggingface/datasets/issues/1615
771,641,088
MDU6SXNzdWU3NzE2NDEwODg=
1,615
Bug: Can't download TriviaQA with `load_dataset` - custom `cache_dir`
{ "avatar_url": "https://avatars.githubusercontent.com/u/44585792?v=4", "events_url": "https://api.github.com/users/SapirWeissbuch/events{/privacy}", "followers_url": "https://api.github.com/users/SapirWeissbuch/followers", "following_url": "https://api.github.com/users/SapirWeissbuch/following{/other_user}", "gists_url": "https://api.github.com/users/SapirWeissbuch/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SapirWeissbuch", "id": 44585792, "login": "SapirWeissbuch", "node_id": "MDQ6VXNlcjQ0NTg1Nzky", "organizations_url": "https://api.github.com/users/SapirWeissbuch/orgs", "received_events_url": "https://api.github.com/users/SapirWeissbuch/received_events", "repos_url": "https://api.github.com/users/SapirWeissbuch/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SapirWeissbuch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SapirWeissbuch/subscriptions", "type": "User", "url": "https://api.github.com/users/SapirWeissbuch", "user_view_type": "public" }
[]
open
false
null
[]
[ "Hi @SapirWeissbuch,\r\nWhen you are saying it freezes, at that time it is unzipping the file from the zip file it downloaded. Since it's a very heavy file it'll take some time. It was taking ~11GB after unzipping when it started reading examples for me. Hope that helps!\r\n![Screenshot 2020-12-21 at 23 40 52](https://user-images.githubusercontent.com/19718818/102808355-3b380c00-43e6-11eb-81ab-c31019ae6322.png)\r\n", "Hi @bhavitvyamalik \r\nThanks for the reply!\r\nActually I let it run for 30 minutes before I killed the process. In this time, 30GB were extracted (much more than 11GB), I checked the size of the destination directory.\r\n\r\nWhat version of Datasets are you using?\r\n", "I'm using datasets version: 1.1.3. I think you should drop `cache_dir` and use only\r\n`dataset = datasets.load_dataset(\"trivia_qa\", \"rc\")`\r\n\r\nTried that on colab and it's working there too\r\n![image](https://user-images.githubusercontent.com/19718818/102814269-4db74300-43f0-11eb-8f26-ecfcf4632002.png)\r\n", "Train, Validation, and Test splits contain 138384, 18669, and 17210 samples respectively. It takes some time to read the samples. Even in your colab notebook it was reading the samples before you killed the process. Let me know if it works now!", "Hi, it works on colab but it still doesn't work on my computer, same problem as before - overly large and long extraction process.\r\nI have to use a custom 'cache_dir' because I don't have any space left in my home directory where it is defaulted, maybe this could be the issue?", "I tried running this again - More details of the problem:\r\nCode:\r\n```\r\ndatasets.load_dataset(\"trivia_qa\", \"rc\", cache_dir=\"/path/to/cache\")\r\n```\r\n\r\nThe output:\r\n```\r\nDownloading and preparing dataset trivia_qa/rc (download: 2.48 GiB, generated: 14.92 GiB, post-processed: Unknown size, total: 17.40 GiB) to path/to/cache/trivia_qa/rc/1.1.0/e734e28133f4d9a353af322aa52b9f266f6f27cbf2f072690a1694e577546b0d... \r\nDownloading: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2.67G/2.67G [03:38<00:00, 12.2MB/s]\r\n\r\n```\r\nThe process continues (no progress bar is visible).\r\nI tried `du -sh .` in `path/to/cache`, and the size keeps increasing, reached 35G before I killed the process.\r\n\r\nGoogle Colab with custom `cache_dir` has same issue.\r\nhttps://colab.research.google.com/drive/1nn1Lw02GhfGFylzbS2j6yksGjPo7kkN-?usp=sharing#scrollTo=2G2O0AeNIXan", "1) You can clear the huggingface folder in your `.cache` directory to use default directory for datasets. Speed of extraction and loading of samples depends a lot on your machine's configurations too.\r\n\r\n2) I tried on colab `dataset = datasets.load_dataset(\"trivia_qa\", \"rc\", cache_dir = \"./datasets\")`. After memory usage reached around 42GB (starting from 32GB used already), the dataset was loaded in the memory. Even Your colab notebook shows \r\n![image](https://user-images.githubusercontent.com/19718818/102852229-c7c4e780-4443-11eb-91d6-bf21024358a3.png)\r\nwhich means it's loaded now.", "Facing the same issue.\r\nI am able to download datasets without `cache_dir`, however, when I specify the `cache_dir`, the process hangs indefinitely after partial download. \r\nTried for `data = load_dataset(\"cnn_dailymail\", \"3.0.0\")`", "Hi @ashutoshml,\r\nI tried this and it worked for me:\r\n`data = load_dataset(\"cnn_dailymail\", \"3.0.0\", cache_dir=\"./dummy\")`\r\n\r\nI'm using datasets==1.8.0. 
It took around 3-4 mins for dataset to unpack and start loading examples.", "Ok. I waited for 20-30 mins, and it still is stuck.\r\nI am using datasets==1.8.0.\r\n\r\nIs there anyway to check what is happening? like a` --verbose` flag?\r\n\r\n![Screenshot 2021-06-25 at 6 37 43 PM](https://user-images.githubusercontent.com/2375919/123429653-cdfb7280-d5e4-11eb-9fa7-ff295800cc86.png)\r\n" ]
2020-12-20T17:27:38
2021-06-25T13:11:33
null
NONE
null
null
null
null
Hello, I'm having issue downloading TriviaQA dataset with `load_dataset`. ## Environment info - `datasets` version: 1.1.3 - Platform: Linux-4.19.129-aufs-1-x86_64-with-debian-10.1 - Python version: 3.7.3 ## The code I'm running: ```python import datasets dataset = datasets.load_dataset("trivia_qa", "rc", cache_dir = "./datasets") ``` ## The output: 1. Download begins: ``` Downloading and preparing dataset trivia_qa/rc (download: 2.48 GiB, generated: 14.92 GiB, post-processed: Unknown size, total: 17.40 GiB) to /cs/labs/gabis/sapirweissbuch/tr ivia_qa/rc/1.1.0/e734e28133f4d9a353af322aa52b9f266f6f27cbf2f072690a1694e577546b0d... Downloading: 17%|███████████████████▉ | 446M/2.67G [00:37<04:45, 7.77MB/s] ``` 2. 100% is reached 3. It got stuck here for about an hour, and added additional 30G of data to "./datasets" directory. I killed the process eventually. A similar issue can be observed in Google Colab: https://colab.research.google.com/drive/1nn1Lw02GhfGFylzbS2j6yksGjPo7kkN-?usp=sharing ## Expected behaviour: The dataset "TriviaQA" should be successfully downloaded.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1615/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1615/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/1611
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1611/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1611/comments
https://api.github.com/repos/huggingface/datasets/issues/1611/events
https://github.com/huggingface/datasets/issues/1611
771,486,456
MDU6SXNzdWU3NzE0ODY0NTY=
1,611
shuffle with torch generator
{ "avatar_url": "https://avatars.githubusercontent.com/u/73364383?v=4", "events_url": "https://api.github.com/users/rabeehkarimimahabadi/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehkarimimahabadi/followers", "following_url": "https://api.github.com/users/rabeehkarimimahabadi/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehkarimimahabadi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehkarimimahabadi", "id": 73364383, "login": "rabeehkarimimahabadi", "node_id": "MDQ6VXNlcjczMzY0Mzgz", "organizations_url": "https://api.github.com/users/rabeehkarimimahabadi/orgs", "received_events_url": "https://api.github.com/users/rabeehkarimimahabadi/received_events", "repos_url": "https://api.github.com/users/rabeehkarimimahabadi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehkarimimahabadi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehkarimimahabadi/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehkarimimahabadi", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "Is there a way one can convert the two generator? not sure overall what alternatives I could have to shuffle the datasets with a torch generator, thanks ", "@lhoestq let me please expalin in more details, maybe you could help me suggesting an alternative to solve the issue for now, I have multiple large datasets using huggingface library, then I need to define a distributed sampler on top of it, for this I need to shard the datasets and give each shard to each core, but before sharding I need to shuffle the dataset, if you are familiar with distributed sampler in pytorch, this needs to be done based on seed+epoch generator to make it consistent across the cores they do it through defining a torch generator, I was wondering if you could tell me how I can shuffle the data for now, I am unfortunately blocked by this and have a limited time left, and I greatly appreciate your help on this. thanks ", "@lhoestq Is there a way I could shuffle the datasets from this library with a custom defined shuffle function? thanks for your help on this. ", "Right now the shuffle method only accepts the `seed` (optional int) or `generator` (optional `np.random.Generator`) parameters.\r\n\r\nHere is a suggestion to shuffle the data using your own shuffle method using `select`.\r\n`select` can be used to re-order the dataset samples or simply pick a few ones if you want.\r\nIt's what is used under the hood when you call `dataset.shuffle`.\r\n\r\nTo use `select` you must have the list of re-ordered indices of your samples.\r\n\r\nLet's say you have a `shuffle` methods that you want to use. Then you can first build your shuffled list of indices:\r\n```python\r\nshuffled_indices = shuffle(range(len(dataset)))\r\n```\r\n\r\nThen you can shuffle your dataset using the shuffled indices with \r\n```python\r\nshuffled_dataset = dataset.select(shuffled_indices)\r\n```\r\n\r\nHope that helps", "thank you @lhoestq thank you very much for responding to my question, this greatly helped me and remove the blocking for continuing my work, thanks. ", "@lhoestq could you confirm the method proposed does not bring the whole data into memory? thanks ", "Yes the dataset is not loaded into memory", "great. thanks a lot." ]
2020-12-20T00:57:14
2022-06-01T15:30:13
2022-06-01T15:30:13
NONE
null
null
null
null
Hi, I need to shuffle multiple large datasets with `generator = torch.Generator()` for a distributed sampler, which needs to make sure the datasets are consistent across different cores. For this, it is really necessary for me to use the torch generator, but based on the documentation this generator is not supported with datasets. I really need to make shuffle work with this generator and I was wondering what I can do about this issue, thanks for your help @lhoestq
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1611/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1611/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
528 days, 14:32:59
https://api.github.com/repos/huggingface/datasets/issues/1610
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1610/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1610/comments
https://api.github.com/repos/huggingface/datasets/issues/1610/events
https://github.com/huggingface/datasets/issues/1610
771,453,599
MDU6SXNzdWU3NzE0NTM1OTk=
1,610
shuffle does not accept seed
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
[ "Hi, did you check the doc on `shuffle`?\r\nhttps://huggingface.co/docs/datasets/package_reference/main_classes.html?datasets.Dataset.shuffle#datasets.Dataset.shuffle", "Hi Thomas\r\nthanks for reponse, yes, I did checked it, but this does not work for me please see \r\n\r\n```\r\n(internship) rkarimi@italix17:/idiap/user/rkarimi/dev$ python \r\nPython 3.7.9 (default, Aug 31 2020, 12:42:55) \r\n[GCC 7.3.0] :: Anaconda, Inc. on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import datasets \r\n2020-12-20 01:48:50.766004: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\r\n2020-12-20 01:48:50.766029: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\r\n>>> data = datasets.load_dataset(\"scitail\", \"snli_format\")\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\nReusing dataset scitail (/idiap/temp/rkarimi/cache_home_1/datasets/scitail/snli_format/1.1.0/fd8ccdfc3134ce86eb4ef10ba7f21ee2a125c946e26bb1dd3625fe74f48d3b90)\r\n>>> data.shuffle(seed=2)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nTypeError: shuffle() got an unexpected keyword argument 'seed'\r\n\r\n```\r\n\r\ndatasets version\r\n`datasets 1.1.2 <pip>\r\n`\r\n", "Thanks for reporting ! \r\n\r\nIndeed it looks like an issue with `suffle` on `DatasetDict`. We're going to fix that.\r\nIn the meantime you can shuffle each split (train, validation, test) separately:\r\n```python\r\nshuffled_train_dataset = data[\"train\"].shuffle(seed=42)\r\n```\r\n" ]
2020-12-19T20:59:39
2021-01-04T10:00:03
2021-01-04T10:00:03
CONTRIBUTOR
null
null
null
null
Hi, I need to shuffle the dataset, but this needs to be based on epoch+seed to be consistent across the cores. When I pass a seed to shuffle, it does not accept the seed. Could you assist me with this? thanks @lhoestq
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1610/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1610/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
15 days, 13:00:24
https://api.github.com/repos/huggingface/datasets/issues/1609
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1609/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1609/comments
https://api.github.com/repos/huggingface/datasets/issues/1609/events
https://github.com/huggingface/datasets/issues/1609
771,421,881
MDU6SXNzdWU3NzE0MjE4ODE=
1,609
Not able to use 'jigsaw_toxicity_pred' dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/7424133?v=4", "events_url": "https://api.github.com/users/jassimran/events{/privacy}", "followers_url": "https://api.github.com/users/jassimran/followers", "following_url": "https://api.github.com/users/jassimran/following{/other_user}", "gists_url": "https://api.github.com/users/jassimran/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jassimran", "id": 7424133, "login": "jassimran", "node_id": "MDQ6VXNlcjc0MjQxMzM=", "organizations_url": "https://api.github.com/users/jassimran/orgs", "received_events_url": "https://api.github.com/users/jassimran/received_events", "repos_url": "https://api.github.com/users/jassimran/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jassimran/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jassimran/subscriptions", "type": "User", "url": "https://api.github.com/users/jassimran", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @jassimran,\r\nThe `jigsaw_toxicity_pred` dataset has not been released yet, it will be available with version 2 of `datasets`, coming soon.\r\nYou can still access it by installing the master (unreleased) version of datasets directly :\r\n`pip install git+https://github.com/huggingface/datasets.git@master`\r\nPlease let me know if this helps", "Thanks.That works for now." ]
2020-12-19T17:35:48
2020-12-22T16:42:24
2020-12-22T16:42:23
NONE
null
null
null
null
When trying to use jigsaw_toxicity_pred dataset, like this in a [colab](https://colab.research.google.com/drive/1LwO2A5M2X5dvhkAFYE4D2CUT3WUdWnkn?usp=sharing): ``` from datasets import list_datasets, list_metrics, load_dataset, load_metric ds = load_dataset("jigsaw_toxicity_pred") ``` I see below error: > FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/jigsaw_toxicity_pred/jigsaw_toxicity_pred.py During handling of the above exception, another exception occurred: FileNotFoundError Traceback (most recent call last) FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/jigsaw_toxicity_pred/jigsaw_toxicity_pred.py During handling of the above exception, another exception occurred: FileNotFoundError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/datasets/load.py in prepare_module(path, script_version, download_config, download_mode, dataset, force_local_path, **download_kwargs) 280 raise FileNotFoundError( 281 "Couldn't find file locally at {}, or remotely at {} or {}".format( --> 282 combined_path, github_file_path, file_path 283 ) 284 ) FileNotFoundError: Couldn't find file locally at jigsaw_toxicity_pred/jigsaw_toxicity_pred.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/jigsaw_toxicity_pred/jigsaw_toxicity_pred.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/jigsaw_toxicity_pred/jigsaw_toxicity_pred.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/7424133?v=4", "events_url": "https://api.github.com/users/jassimran/events{/privacy}", "followers_url": "https://api.github.com/users/jassimran/followers", "following_url": "https://api.github.com/users/jassimran/following{/other_user}", "gists_url": "https://api.github.com/users/jassimran/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jassimran", "id": 7424133, "login": "jassimran", "node_id": "MDQ6VXNlcjc0MjQxMzM=", "organizations_url": "https://api.github.com/users/jassimran/orgs", "received_events_url": "https://api.github.com/users/jassimran/received_events", "repos_url": "https://api.github.com/users/jassimran/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jassimran/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jassimran/subscriptions", "type": "User", "url": "https://api.github.com/users/jassimran", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1609/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1609/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2 days, 23:06:35
https://api.github.com/repos/huggingface/datasets/issues/1605
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1605/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1605/comments
https://api.github.com/repos/huggingface/datasets/issues/1605/events
https://github.com/huggingface/datasets/issues/1605
770,979,620
MDU6SXNzdWU3NzA5Nzk2MjA=
1,605
Navigation version breaking
{ "avatar_url": "https://avatars.githubusercontent.com/u/3007947?v=4", "events_url": "https://api.github.com/users/mttk/events{/privacy}", "followers_url": "https://api.github.com/users/mttk/followers", "following_url": "https://api.github.com/users/mttk/following{/other_user}", "gists_url": "https://api.github.com/users/mttk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mttk", "id": 3007947, "login": "mttk", "node_id": "MDQ6VXNlcjMwMDc5NDc=", "organizations_url": "https://api.github.com/users/mttk/orgs", "received_events_url": "https://api.github.com/users/mttk/received_events", "repos_url": "https://api.github.com/users/mttk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mttk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mttk/subscriptions", "type": "User", "url": "https://api.github.com/users/mttk", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Not relevant for our current docs :)." ]
2020-12-18T15:36:24
2022-10-05T12:35:11
2022-10-05T12:35:11
NONE
null
null
null
null
Hi, when navigating docs (Chrome, Ubuntu) (e.g. on this page: https://huggingface.co/docs/datasets/loading_metrics.html#using-a-custom-metric-script) the version control dropdown has the wrong string displayed as the current version: ![image](https://user-images.githubusercontent.com/3007947/102632187-02cad080-414f-11eb-813b-28f3c8d80def.png) **Edit:** this actually happens _only_ if you open a link to a concrete subsection. IMO, the best way to fix this without getting too deep into the intricacies of retrieving version numbers from the URL would be to change [this](https://github.com/huggingface/datasets/blob/master/docs/source/_static/js/custom.js#L112) line to: ``` let label = (version in versionMapping) ? version : stableVersion ``` which delegates the check to the (already maintained) keys of the version mapping dictionary & should be more robust. There's a similar ternary expression [here](https://github.com/huggingface/datasets/blob/master/docs/source/_static/js/custom.js#L97) which should also fail in this case. I'd also suggest swapping this [block](https://github.com/huggingface/datasets/blob/master/docs/source/_static/js/custom.js#L80-L90) to `string.contains(version) for version in versionMapping` which might be more robust. I'd add a PR myself but I'm by no means competent in JS :) I also have a side question wrt. docs versioning: I'm trying to make docs for a project which are versioned alike to your dropdown versioning. I was wondering how do you handle storage of multiple doc versions on your server? Do you update what `https://huggingface.co/docs/datasets` points to for every stable release & manually create new folders for each released version? So far I'm building & publishing (scping) the docs to the server with a github action which works well for a single version, but would ideally need to reorder the public files triggered on a new release.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 1, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1605/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1605/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
655 days, 20:58:47
https://api.github.com/repos/huggingface/datasets/issues/1604
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1604/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1604/comments
https://api.github.com/repos/huggingface/datasets/issues/1604/events
https://github.com/huggingface/datasets/issues/1604
770,862,112
MDU6SXNzdWU3NzA4NjIxMTI=
1,604
Add tests for the download functions ?
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "We have some tests now for it under `tests/test_download_manager.py`." ]
2020-12-18T12:49:25
2022-10-05T13:04:24
2022-10-05T13:04:24
CONTRIBUTOR
null
null
null
null
AFAIK the download functions in `DownloadManager` are not tested yet. It could be good to add some to ensure behavior is as expected.
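A minimal pytest sketch of what such a test could look like, assuming `DownloadManager.download` hands back existing local paths unchanged (the test name and file layout are illustrative, not the project's actual tests):

```python
from datasets import DownloadManager


def test_download_returns_existing_local_path(tmp_path):
    # A small local file standing in for a remote resource.
    data_file = tmp_path / "data.txt"
    data_file.write_text("hello")

    dl_manager = DownloadManager()
    resolved = dl_manager.download(str(data_file))

    # For local inputs the manager is expected to return a usable path.
    assert resolved == str(data_file)
```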
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1604/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1604/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
656 days, 0:14:59
https://api.github.com/repos/huggingface/datasets/issues/1600
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1600/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1600/comments
https://api.github.com/repos/huggingface/datasets/issues/1600/events
https://github.com/huggingface/datasets/issues/1600
770,582,960
MDU6SXNzdWU3NzA1ODI5NjA=
1,600
AttributeError: 'DatasetDict' object has no attribute 'train_test_split'
{ "avatar_url": "https://avatars.githubusercontent.com/u/5028974?v=4", "events_url": "https://api.github.com/users/david-waterworth/events{/privacy}", "followers_url": "https://api.github.com/users/david-waterworth/followers", "following_url": "https://api.github.com/users/david-waterworth/following{/other_user}", "gists_url": "https://api.github.com/users/david-waterworth/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/david-waterworth", "id": 5028974, "login": "david-waterworth", "node_id": "MDQ6VXNlcjUwMjg5NzQ=", "organizations_url": "https://api.github.com/users/david-waterworth/orgs", "received_events_url": "https://api.github.com/users/david-waterworth/received_events", "repos_url": "https://api.github.com/users/david-waterworth/repos", "site_admin": false, "starred_url": "https://api.github.com/users/david-waterworth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david-waterworth/subscriptions", "type": "User", "url": "https://api.github.com/users/david-waterworth", "user_view_type": "public" }
[ { "color": "d876e3", "default": true, "description": "Further information is requested", "id": 1935892912, "name": "question", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question" } ]
closed
false
null
[]
[ "Hi @david-waterworth!\r\n\r\nAs indicated in the error message, `load_dataset(\"csv\")` returns a `DatasetDict` object, which is mapping of `str` to `Dataset` objects. I believe in this case the behavior is to return a `train` split with all the data.\r\n`train_test_split` is a method of the `Dataset` object, so you will need to do something like this:\r\n```python\r\ndataset_dict = load_dataset(`'csv', data_files='data.txt')\r\ndataset = dataset_dict['split name, eg train']\r\ndataset.train_test_split(test_size=0.1)\r\n```\r\n\r\nPlease let me know if this helps. 🙂 ", "Thanks, that's working - the same issue also tripped me up with training. \r\n\r\nI also agree https://github.com/huggingface/datasets/issues/767 would be a useful addition. ", "Closing this now", "> ```python\r\n> dataset_dict = load_dataset(`'csv', data_files='data.txt')\r\n> dataset = dataset_dict['split name, eg train']\r\n> dataset.train_test_split(test_size=0.1)\r\n> ```\r\n\r\nI am getting error like\r\nKeyError: 'split name, eg train'\r\nCould you please tell me how to solve this?", "dataset = load_dataset('csv', data_files=['files/datasets/dataset.csv'])\r\ndataset = dataset['train']\r\ndataset = dataset.train_test_split(test_size=0.1)", "!curl -L \"https://app.roboflow.com/ds/YQYgzFyKns?key=f0IwaEetrr\" > roboflow.zip; unzip roboflow.zip; rm roboflow.zip\r\n\r\nfrom datasets import load_dataset\r\n\r\ndataset = load_dataset(\"imagefolder\", data_dir=\"/content/\")\r\ndataset[\"train\"][0]\r\n\r\ndataset[\"train\"][-1]\r\n\r\ntrain_ds = load_dataset(\"imagefolder\", data_dir=\"/content/train/\")\r\ntest_ds = load_dataset(\"imagefolder\", data_dir=\"/content/test/\")\r\nval_ds = load_dataset(\"imagefolder\", data_dir=\"/content/valid/\")\r\n\r\ntrain_ds.features\r\n\r\nand i got error \r\nAttributeError Traceback (most recent call last)\r\n[<ipython-input-6-289222110c33>](https://localhost:8080/#) in <cell line: 1>()\r\n----> 1 train_ds.features\r\n\r\nAttributeError: 'DatasetDict' object has no attribute 'features'", "This has been closed, you should open a new issue describing what your problem is." ]
2020-12-18T05:37:10
2023-05-03T04:22:55
2020-12-21T07:38:58
NONE
null
null
null
null
The following code fails with "'DatasetDict' object has no attribute 'train_test_split'" - am I doing something wrong? ``` from datasets import load_dataset dataset = load_dataset('csv', data_files='data.txt') dataset = dataset.train_test_split(test_size=0.1) ``` > AttributeError: 'DatasetDict' object has no attribute 'train_test_split'
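The workaround from the replies below, as a runnable sketch (`data.txt` is the file from the report):

```python
from datasets import load_dataset

dataset_dict = load_dataset("csv", data_files="data.txt")  # returns a DatasetDict
train_only = dataset_dict["train"]                         # a Dataset
splits = train_only.train_test_split(test_size=0.1)
print(splits["train"].num_rows, splits["test"].num_rows)
```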
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1600/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1600/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3 days, 2:01:48
https://api.github.com/repos/huggingface/datasets/issues/1594
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1594/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1594/comments
https://api.github.com/repos/huggingface/datasets/issues/1594/events
https://github.com/huggingface/datasets/issues/1594
769,747,767
MDU6SXNzdWU3Njk3NDc3Njc=
1,594
connection error
{ "avatar_url": "https://avatars.githubusercontent.com/u/73364383?v=4", "events_url": "https://api.github.com/users/rabeehkarimimahabadi/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehkarimimahabadi/followers", "following_url": "https://api.github.com/users/rabeehkarimimahabadi/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehkarimimahabadi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehkarimimahabadi", "id": 73364383, "login": "rabeehkarimimahabadi", "node_id": "MDQ6VXNlcjczMzY0Mzgz", "organizations_url": "https://api.github.com/users/rabeehkarimimahabadi/orgs", "received_events_url": "https://api.github.com/users/rabeehkarimimahabadi/received_events", "repos_url": "https://api.github.com/users/rabeehkarimimahabadi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehkarimimahabadi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehkarimimahabadi/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehkarimimahabadi", "user_view_type": "public" }
[]
closed
false
null
[]
[ "This happen quite often when they are too many concurrent requests to github.\r\n\r\ni can understand it’s a bit cumbersome to handle on the user side. Maybe we should try a few times in the lib (eg with timeout) before failing, what do you think @lhoestq ?", "Yes currently there's no retry afaik. We should add retries", "Retries were added in #1603 :) \r\nIt will be available in the next release", "Hi @lhoestq thank you for the modification, I will use`script_version=\"master\"` for now :), to my experience, also setting timeout to a larger number like 3*60 which I normally use helps a lot on this.\r\n" ]
2020-12-17T09:18:34
2022-06-01T15:33:42
2022-06-01T15:33:41
NONE
null
null
null
null
Hi I am hitting to this error, thanks ``` > Traceback (most recent call last): File "finetune_t5_trainer.py", line 379, in <module> main() File "finetune_t5_trainer.py", line 208, in main if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO File "finetune_t5_trainer.py", line 207, in <dictcomp> for task in data_args.eval_tasks} File "/workdir/seq2seq/data/tasks.py", line 70, in get_dataset dataset = self.load_dataset(split=split) File "/workdir/seq2seq/data/tasks.py", line 66, in load_dataset return datasets.load_dataset(self.task.name, split=split, script_version="master") File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 589, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 487, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/master/datasets/boolq/boolq.py el/0 I1217 01:11:33.898849 354161 main shadow.py:210 Current job status: FINISHED ```
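A hedged sketch of the retry setting discussed below; the import location and the `max_retries` attribute vary by release and are assumptions here:

```python
from datasets import load_dataset, DownloadConfig

# Allow several attempts instead of failing on the first connection error.
config = DownloadConfig(max_retries=5)
dataset = load_dataset("boolq", split="train", download_config=config)
```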
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1594/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1594/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
531 days, 6:15:07
https://api.github.com/repos/huggingface/datasets/issues/1593
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1593/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1593/comments
https://api.github.com/repos/huggingface/datasets/issues/1593/events
https://github.com/huggingface/datasets/issues/1593
769,611,386
MDU6SXNzdWU3Njk2MTEzODY=
1,593
Access to key in DatasetDict map
{ "avatar_url": "https://avatars.githubusercontent.com/u/11954789?v=4", "events_url": "https://api.github.com/users/ZhaofengWu/events{/privacy}", "followers_url": "https://api.github.com/users/ZhaofengWu/followers", "following_url": "https://api.github.com/users/ZhaofengWu/following{/other_user}", "gists_url": "https://api.github.com/users/ZhaofengWu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ZhaofengWu", "id": 11954789, "login": "ZhaofengWu", "node_id": "MDQ6VXNlcjExOTU0Nzg5", "organizations_url": "https://api.github.com/users/ZhaofengWu/orgs", "received_events_url": "https://api.github.com/users/ZhaofengWu/received_events", "repos_url": "https://api.github.com/users/ZhaofengWu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ZhaofengWu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ZhaofengWu/subscriptions", "type": "User", "url": "https://api.github.com/users/ZhaofengWu", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "Indeed that would be cool\r\n\r\nAlso FYI right now the easiest way to do this is\r\n```python\r\ndataset_dict[\"train\"] = dataset_dict[\"train\"].map(my_transform_for_the_train_set)\r\ndataset_dict[\"test\"] = dataset_dict[\"test\"].map(my_transform_for_the_test_set)\r\n```", "I don't feel like adding an extra param for this simple usage makes sense, considering how many args `map` already has. \r\n\r\n(Feel free to re-open this issue if you don't agree with me)", "I still think this is useful, since it's common that the data processing is different for training/dev/testing. And I don't know if the fact that `map` currently takes many arguments is a good reason not to support a useful feature." ]
2020-12-17T07:02:20
2022-10-05T13:47:28
2022-10-05T12:33:06
NONE
null
null
null
null
It is possible that we want to do different things in the `map` function (and possibly other functions too) of a `DatasetDict`, depending on the key. I understand that `DatasetDict.map` is a really thin wrapper of `Dataset.map`, so it is easy to directly implement this functionality in the client code. Still, it'd be nice if there can be a flag, similar to `with_indices`, that allows the callable to know the key inside `DatasetDict`.
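A sketch of the per-split workaround suggested in the replies, which makes the key available without any new `map` argument (the dataset name and the added column are illustrative):

```python
from datasets import load_dataset

dataset_dict = load_dataset("glue", "mrpc")  # any DatasetDict works here


def make_transform(split_name):
    def add_split_column(example):
        example["split"] = split_name  # the key is captured per split
        return example

    return add_split_column


for split_name, split_dataset in dataset_dict.items():
    dataset_dict[split_name] = split_dataset.map(make_transform(split_name))
```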
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1593/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1593/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
657 days, 5:30:46
https://api.github.com/repos/huggingface/datasets/issues/1591
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1591/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1591/comments
https://api.github.com/repos/huggingface/datasets/issues/1591/events
https://github.com/huggingface/datasets/issues/1591
769,383,714
MDU6SXNzdWU3NjkzODM3MTQ=
1,591
IWSLT-17 Link Broken
{ "avatar_url": "https://avatars.githubusercontent.com/u/11954789?v=4", "events_url": "https://api.github.com/users/ZhaofengWu/events{/privacy}", "followers_url": "https://api.github.com/users/ZhaofengWu/followers", "following_url": "https://api.github.com/users/ZhaofengWu/following{/other_user}", "gists_url": "https://api.github.com/users/ZhaofengWu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ZhaofengWu", "id": 11954789, "login": "ZhaofengWu", "node_id": "MDQ6VXNlcjExOTU0Nzg5", "organizations_url": "https://api.github.com/users/ZhaofengWu/orgs", "received_events_url": "https://api.github.com/users/ZhaofengWu/received_events", "repos_url": "https://api.github.com/users/ZhaofengWu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ZhaofengWu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ZhaofengWu/subscriptions", "type": "User", "url": "https://api.github.com/users/ZhaofengWu", "user_view_type": "public" }
[ { "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists", "id": 1935892865, "name": "duplicate", "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate" }, { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
null
[]
[ "Sorry, this is a duplicate of #1287. Not sure why it didn't come up when I searched `iwslt` in the issues list.", "Closing this since its a duplicate" ]
2020-12-17T00:46:42
2020-12-18T08:06:36
2020-12-18T08:05:28
NONE
null
null
null
null
``` FileNotFoundError: Couldn't find file at https://wit3.fbk.eu/archive/2017-01-trnmted//texts/DeEnItNlRo/DeEnItNlRo/DeEnItNlRo-DeEnItNlRo.tgz ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1591/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1591/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
1 day, 7:18:46
https://api.github.com/repos/huggingface/datasets/issues/1590
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1590/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1590/comments
https://api.github.com/repos/huggingface/datasets/issues/1590/events
https://github.com/huggingface/datasets/issues/1590
769,242,858
MDU6SXNzdWU3NjkyNDI4NTg=
1,590
Add helper to resolve namespace collision
{ "avatar_url": "https://avatars.githubusercontent.com/u/8204807?v=4", "events_url": "https://api.github.com/users/jramapuram/events{/privacy}", "followers_url": "https://api.github.com/users/jramapuram/followers", "following_url": "https://api.github.com/users/jramapuram/following{/other_user}", "gists_url": "https://api.github.com/users/jramapuram/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jramapuram", "id": 8204807, "login": "jramapuram", "node_id": "MDQ6VXNlcjgyMDQ4MDc=", "organizations_url": "https://api.github.com/users/jramapuram/orgs", "received_events_url": "https://api.github.com/users/jramapuram/received_events", "repos_url": "https://api.github.com/users/jramapuram/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jramapuram/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jramapuram/subscriptions", "type": "User", "url": "https://api.github.com/users/jramapuram", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Do you have an example?", "I was thinking about using something like [importlib](https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly) to over-ride the collision. \r\n\r\n**Reason requested**: I use the [following template](https://github.com/jramapuram/ml_base/) repo where I house all my datasets as a submodule.", "Alternatively huggingface could consider some submodule type structure like:\r\n\r\n`import huggingface.datasets`\r\n`import huggingface.transformers`\r\n\r\n`datasets` is a very common module in ML and should be an end-user decision and not scope all of python ¯\\_(ツ)_/¯ \r\n", "That's a interesting option indeed. We'll think about it.", "It also wasn't initially obvious to me that the samples which contain `import datasets` were in fact importing a huggingface library (in fact all the huggingface imports are very generic - transformers, tokenizers, datasets...)" ]
2020-12-16T20:17:24
2022-06-01T15:32:04
2022-06-01T15:32:04
NONE
null
null
null
null
Many projects use a module called `datasets`; however, this is incompatible with huggingface datasets. It would be great if there was some helper or similar function to resolve such a common conflict.
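A hedged sketch of the `importlib` idea mentioned in the replies: load the local `datasets` package under a different name so it no longer shadows the Hugging Face library (the file path is a placeholder):

```python
import importlib.util

spec = importlib.util.spec_from_file_location(
    "local_datasets", "my_project/datasets/__init__.py"  # placeholder path
)
local_datasets = importlib.util.module_from_spec(spec)
spec.loader.exec_module(local_datasets)

import datasets  # now unambiguously the Hugging Face library
```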
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1590/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1590/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
531 days, 19:14:40
https://api.github.com/repos/huggingface/datasets/issues/1585
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1585/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1585/comments
https://api.github.com/repos/huggingface/datasets/issues/1585/events
https://github.com/huggingface/datasets/issues/1585
768,831,171
MDU6SXNzdWU3Njg4MzExNzE=
1,585
FileNotFoundError for `amazon_polarity`
{ "avatar_url": "https://avatars.githubusercontent.com/u/24647404?v=4", "events_url": "https://api.github.com/users/phtephanx/events{/privacy}", "followers_url": "https://api.github.com/users/phtephanx/followers", "following_url": "https://api.github.com/users/phtephanx/following{/other_user}", "gists_url": "https://api.github.com/users/phtephanx/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/phtephanx", "id": 24647404, "login": "phtephanx", "node_id": "MDQ6VXNlcjI0NjQ3NDA0", "organizations_url": "https://api.github.com/users/phtephanx/orgs", "received_events_url": "https://api.github.com/users/phtephanx/received_events", "repos_url": "https://api.github.com/users/phtephanx/repos", "site_admin": false, "starred_url": "https://api.github.com/users/phtephanx/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/phtephanx/subscriptions", "type": "User", "url": "https://api.github.com/users/phtephanx", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @phtephanx , the `amazon_polarity` dataset has not been released yet. It will be available in the coming soon v2of `datasets` :) \r\n\r\nYou can still access it now if you want, but you will need to install datasets via the master branch:\r\n`pip install git+https://github.com/huggingface/datasets.git@master`" ]
2020-12-16T12:51:05
2020-12-16T16:02:56
2020-12-16T16:02:56
NONE
null
null
null
null
Version: `datasets==v1.1.3` ### Reproduction ```python from datasets import load_dataset data = load_dataset("amazon_polarity") ``` crashes with ```bash FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/amazon_polarity/amazon_polarity.py ``` and ```bash FileNotFoundError: Couldn't find file at https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/amazon_polarity/amazon_polarity.py ``` and ```bash FileNotFoundError: Couldn't find file locally at amazon_polarity/amazon_polarity.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/amazon_polarity/amazon_polarity.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/amazon_polarity/amazon_polarity.py ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1585/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1585/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
3:11:51
https://api.github.com/repos/huggingface/datasets/issues/1581
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1581/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1581/comments
https://api.github.com/repos/huggingface/datasets/issues/1581/events
https://github.com/huggingface/datasets/issues/1581
768,320,594
MDU6SXNzdWU3NjgzMjA1OTQ=
1,581
Installing datasets and transformers in a tensorflow docker image throws Permission Error on 'import transformers'
{ "avatar_url": "https://avatars.githubusercontent.com/u/702586?v=4", "events_url": "https://api.github.com/users/eduardofv/events{/privacy}", "followers_url": "https://api.github.com/users/eduardofv/followers", "following_url": "https://api.github.com/users/eduardofv/following{/other_user}", "gists_url": "https://api.github.com/users/eduardofv/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eduardofv", "id": 702586, "login": "eduardofv", "node_id": "MDQ6VXNlcjcwMjU4Ng==", "organizations_url": "https://api.github.com/users/eduardofv/orgs", "received_events_url": "https://api.github.com/users/eduardofv/received_events", "repos_url": "https://api.github.com/users/eduardofv/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eduardofv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eduardofv/subscriptions", "type": "User", "url": "https://api.github.com/users/eduardofv", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Thanks for reporting !\r\nYou can override the directory in which cache file are stored using for example\r\n```\r\nENV HF_HOME=\"/root/cache/hf_cache_home\"\r\n```\r\n\r\nThis way both `transformers` and `datasets` will use this directory instead of the default `.cache`", "Great, thanks. I didn't see documentation about than ENV variable, looks like an obvious solution. ", "> Thanks for reporting !\r\n> You can override the directory in which cache file are stored using for example\r\n> \r\n> ```\r\n> ENV HF_HOME=\"/root/cache/hf_cache_home\"\r\n> ```\r\n> \r\n> This way both `transformers` and `datasets` will use this directory instead of the default `.cache`\r\n\r\ncan we disable caching directly?", "Hi ! Unfortunately no since we need this directory to load datasets.\r\nWhen you load a dataset, it downloads the raw data files in the cache directory inside <cache_dir>/downloads. Then it builds the dataset and saves it as arrow data inside <cache_dir>/<dataset_name>.\r\n\r\nHowever you can specify the directory of your choice, and it can be a temporary directory if you want to clean everything up at one point.", "I'm closing this to keep issues a bit cleaner" ]
2020-12-16T00:02:21
2021-06-17T15:40:45
2021-06-17T15:40:45
NONE
null
null
null
null
I am using a docker container, based on latest tensorflow-gpu image, to run transformers and datasets (4.0.1 and 1.1.3 respectively - Dockerfile attached below). Importing transformers throws a Permission Error to access `/.cache`: ``` $ docker run --gpus=all --rm -it -u $(id -u):$(id -g) -v $(pwd)/data:/root/data -v $(pwd):/root -v $(pwd)/models/:/root/models -v $(pwd)/saved_models/:/root/saved_models -e "HOST_HOSTNAME=$(hostname)" hf-error:latest /bin/bash ________ _______________ ___ __/__________________________________ ____/__ /________ __ __ / _ _ \_ __ \_ ___/ __ \_ ___/_ /_ __ /_ __ \_ | /| / / _ / / __/ / / /(__ )/ /_/ / / _ __/ _ / / /_/ /_ |/ |/ / /_/ \___//_/ /_//____/ \____//_/ /_/ /_/ \____/____/|__/ You are running this container as user with ID 1000 and group 1000, which should map to the ID and group for your user on the Docker host. Great! tf-docker /root > python Python 3.6.9 (default, Oct 8 2020, 12:12:24) [GCC 8.4.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import transformers 2020-12-15 23:53:21.165827: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0 Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.6/dist-packages/transformers/__init__.py", line 22, in <module> from .integrations import ( # isort:skip File "/usr/local/lib/python3.6/dist-packages/transformers/integrations.py", line 5, in <module> from .trainer_utils import EvaluationStrategy File "/usr/local/lib/python3.6/dist-packages/transformers/trainer_utils.py", line 25, in <module> from .file_utils import is_tf_available, is_torch_available, is_torch_tpu_available File "/usr/local/lib/python3.6/dist-packages/transformers/file_utils.py", line 88, in <module> import datasets # noqa: F401 File "/usr/local/lib/python3.6/dist-packages/datasets/__init__.py", line 26, in <module> from .arrow_dataset import Dataset, concatenate_datasets File "/usr/local/lib/python3.6/dist-packages/datasets/arrow_dataset.py", line 40, in <module> from .arrow_reader import ArrowReader File "/usr/local/lib/python3.6/dist-packages/datasets/arrow_reader.py", line 31, in <module> from .utils import cached_path, logging File "/usr/local/lib/python3.6/dist-packages/datasets/utils/__init__.py", line 20, in <module> from .download_manager import DownloadManager, GenerateMode File "/usr/local/lib/python3.6/dist-packages/datasets/utils/download_manager.py", line 25, in <module> from .file_utils import HF_DATASETS_CACHE, cached_path, get_from_cache, hash_url_to_filename File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 118, in <module> os.makedirs(HF_MODULES_CACHE, exist_ok=True) File "/usr/lib/python3.6/os.py", line 210, in makedirs makedirs(head, mode, exist_ok) File "/usr/lib/python3.6/os.py", line 210, in makedirs makedirs(head, mode, exist_ok) File "/usr/lib/python3.6/os.py", line 220, in makedirs mkdir(name, mode) PermissionError: [Errno 13] Permission denied: '/.cache' ``` I've pinned the problem to `RUN pip install datasets`, and by commenting it you can actually import transformers correctly. Another workaround I've found is creating the directory and giving permissions to it directly on the Dockerfile. 
``` FROM tensorflow/tensorflow:latest-gpu-jupyter WORKDIR /root EXPOSE 80 EXPOSE 8888 EXPOSE 6006 ENV SHELL /bin/bash ENV PATH="/root/.local/bin:${PATH}" ENV CUDA_CACHE_PATH="/root/cache/cuda" ENV CUDA_CACHE_MAXSIZE="4294967296" ENV TFHUB_CACHE_DIR="/root/cache/tfhub" RUN pip install --upgrade pip RUN apt update -y && apt upgrade -y RUN pip install transformers #Installing datasets will throw the error, try commenting and rebuilding RUN pip install datasets #Another workaround is creating the directory and give permissions explicitly #RUN mkdir /.cache #RUN chmod 777 /.cache ```
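A sketch of the cache override suggested in the replies, done from Python instead of the Dockerfile; the directory is a placeholder and only needs to be writable by the container user:

```python
import os

# Point the Hugging Face cache at a writable location *before* the imports.
os.environ["HF_HOME"] = "/root/cache/hf_cache_home"

import datasets      # noqa: E402
import transformers  # noqa: E402
```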
{ "avatar_url": "https://avatars.githubusercontent.com/u/702586?v=4", "events_url": "https://api.github.com/users/eduardofv/events{/privacy}", "followers_url": "https://api.github.com/users/eduardofv/followers", "following_url": "https://api.github.com/users/eduardofv/following{/other_user}", "gists_url": "https://api.github.com/users/eduardofv/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eduardofv", "id": 702586, "login": "eduardofv", "node_id": "MDQ6VXNlcjcwMjU4Ng==", "organizations_url": "https://api.github.com/users/eduardofv/orgs", "received_events_url": "https://api.github.com/users/eduardofv/received_events", "repos_url": "https://api.github.com/users/eduardofv/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eduardofv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eduardofv/subscriptions", "type": "User", "url": "https://api.github.com/users/eduardofv", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1581/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1581/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
183 days, 15:38:24
https://api.github.com/repos/huggingface/datasets/issues/1541
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1541/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1541/comments
https://api.github.com/repos/huggingface/datasets/issues/1541/events
https://github.com/huggingface/datasets/issues/1541
765,430,586
MDU6SXNzdWU3NjU0MzA1ODY=
1,541
connection issue while downloading data
{ "avatar_url": "https://avatars.githubusercontent.com/u/73364383?v=4", "events_url": "https://api.github.com/users/rabeehkarimimahabadi/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehkarimimahabadi/followers", "following_url": "https://api.github.com/users/rabeehkarimimahabadi/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehkarimimahabadi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehkarimimahabadi", "id": 73364383, "login": "rabeehkarimimahabadi", "node_id": "MDQ6VXNlcjczMzY0Mzgz", "organizations_url": "https://api.github.com/users/rabeehkarimimahabadi/orgs", "received_events_url": "https://api.github.com/users/rabeehkarimimahabadi/received_events", "repos_url": "https://api.github.com/users/rabeehkarimimahabadi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehkarimimahabadi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehkarimimahabadi/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehkarimimahabadi", "user_view_type": "public" }
[]
closed
false
null
[]
[ "could you tell me how I can avoid download, by pre-downloading the data first, put them in a folder so the code does not try to redownload? could you tell me the path to put the downloaded data, and how to do it? thanks\r\n@lhoestq ", "Does your instance have an internet connection ?\r\n\r\nIf you don't have an internet connection you'll need to have the dataset on the instance disk.\r\nTo do so first download the dataset on another machine using `load_dataset` and then you can save it in a folder using `my_dataset.save_to_disk(\"path/to/folder\")`. Once the folder is copied on your instance you can reload the dataset with `datasets.load_from_disk(\"path/to/folder\")`" ]
2020-12-13T14:27:00
2022-10-05T12:33:29
2022-10-05T12:33:29
NONE
null
null
null
null
Hi I am running my codes on google cloud, and I am getting this error resulting in the failure of the codes when trying to download the data, could you assist me to solve this? also as a temporary solution, could you tell me how I can increase the number of retries and timeout to at least let the models run for now. thanks ``` Traceback (most recent call last): File "finetune_t5_trainer.py", line 361, in <module> main() File "finetune_t5_trainer.py", line 269, in main add_prefix=False if training_args.train_adapters else True) File "/workdir/seq2seq/data/tasks.py", line 70, in get_dataset dataset = self.load_dataset(split=split) File "/workdir/seq2seq/data/tasks.py", line 306, in load_dataset return datasets.load_dataset('glue', 'cola', split=split) File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 589, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/usr/local/lib/python3.6/dist-packages/datasets/load.py", line 263, in prepare_module head_hf_s3(path, filename=name, dataset=dataset) File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 200, in head_hf_s3 return http_head(hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset)) File "/usr/local/lib/python3.6/dist-packages/datasets/utils/file_utils.py", line 403, in http_head url, proxies=proxies, headers=headers, cookies=cookies, allow_redirects=allow_redirects, timeout=timeout File "/usr/local/lib/python3.6/dist-packages/requests/api.py", line 104, in head return request('head', url, **kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/sessions.py", line 542, in request resp = self.send(prep, **send_kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/sessions.py", line 655, in send r = adapter.send(request, **kwargs) File "/usr/local/lib/python3.6/dist-packages/requests/adapters.py", line 504, in send raise ConnectTimeout(e, request=request) requests.exceptions.ConnectTimeout: HTTPSConnectionPool(host='s3.amazonaws.com', port=443): Max retries exceeded with url: /datasets.huggingface.co/datasets/datasets/glue/glue.py (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f47db511e80>, 'Connection to s3.amazonaws.com timed out. (connect timeout=10)')) ```
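A sketch of the offline workflow described in the replies: download once on a machine with internet access, copy the folder to the instance, then reload without any network calls:

```python
from datasets import load_dataset, load_from_disk

# On a machine with internet access:
cola = load_dataset("glue", "cola")
cola.save_to_disk("glue_cola")   # copy this folder to the offline instance

# On the offline instance:
cola = load_from_disk("glue_cola")
print(cola["train"].num_rows)
```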
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1541/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1541/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
660 days, 22:06:29
https://api.github.com/repos/huggingface/datasets/issues/1514
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1514/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1514/comments
https://api.github.com/repos/huggingface/datasets/issues/1514/events
https://github.com/huggingface/datasets/issues/1514
764,017,148
MDU6SXNzdWU3NjQwMTcxNDg=
1,514
how to get all the options of a property in datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[ { "color": "d876e3", "default": true, "description": "Further information is requested", "id": 1935892912, "name": "question", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question" } ]
closed
false
null
[]
[ "In a dataset, labels correspond to the `ClassLabel` feature that has the `names` property that returns string represenation of the integer classes (or `num_classes` to get the number of different classes).", "I think the `features` attribute of the dataset object is what you are looking for:\r\n```\r\n>>> dataset.features\r\n{'sentence1': Value(dtype='string', id=None),\r\n 'sentence2': Value(dtype='string', id=None),\r\n 'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),\r\n 'idx': Value(dtype='int32', id=None)\r\n}\r\n>>> dataset.features[\"label\"].names\r\n['not_equivalent', 'equivalent']\r\n```\r\n\r\nFor reference: https://huggingface.co/docs/datasets/exploring.html" ]
2020-12-12T16:24:08
2022-05-25T16:27:29
2022-05-25T16:27:29
CONTRIBUTOR
null
null
null
null
Hi, could you tell me how I can get all the unique options of a property of a dataset? For instance, in the case of boolq, if the user wants to know which unique labels it has, is there a way to access the unique labels without fetching all the training data labels and building a set from them? Thanks
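A sketch of the answer given below: the label names are stored in the dataset's features, so no pass over the training examples is needed (GLUE/MRPC is used here because it has a `ClassLabel` column):

```python
from datasets import load_dataset

dataset = load_dataset("glue", "mrpc", split="train")
print(dataset.features)
print(dataset.features["label"].names)  # ['not_equivalent', 'equivalent']
```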
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1514/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1514/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
529 days, 0:03:21
https://api.github.com/repos/huggingface/datasets/issues/1478
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1478/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1478/comments
https://api.github.com/repos/huggingface/datasets/issues/1478/events
https://github.com/huggingface/datasets/issues/1478
762,293,076
MDU6SXNzdWU3NjIyOTMwNzY=
1,478
Inconsistent argument names.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8402500?v=4", "events_url": "https://api.github.com/users/Fraser-Greenlee/events{/privacy}", "followers_url": "https://api.github.com/users/Fraser-Greenlee/followers", "following_url": "https://api.github.com/users/Fraser-Greenlee/following{/other_user}", "gists_url": "https://api.github.com/users/Fraser-Greenlee/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Fraser-Greenlee", "id": 8402500, "login": "Fraser-Greenlee", "node_id": "MDQ6VXNlcjg0MDI1MDA=", "organizations_url": "https://api.github.com/users/Fraser-Greenlee/orgs", "received_events_url": "https://api.github.com/users/Fraser-Greenlee/received_events", "repos_url": "https://api.github.com/users/Fraser-Greenlee/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Fraser-Greenlee/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Fraser-Greenlee/subscriptions", "type": "User", "url": "https://api.github.com/users/Fraser-Greenlee", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Also for the `Accuracy` metric the `accuracy_score` method should have its args in the opposite order so `accuracy_score(predictions, references,,,)`.", "Thanks for pointing this out ! 🕵🏻 \r\nPredictions and references should indeed be swapped in the docstring.\r\nHowever, the call to `accuracy_score` should not be changed, it [signature](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html#sklearn.metrics.accuracy_score) being:\r\n```\r\nsklearn.metrics.accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None)\r\n```\r\n\r\nFeel free to open a PR if you want to fix this :)" ]
2020-12-11T12:19:38
2020-12-19T15:03:39
2020-12-19T15:03:39
CONTRIBUTOR
null
null
null
null
Just find it a wee bit odd that in the transformers library `predictions` are those made by the model: https://github.com/huggingface/transformers/blob/master/src/transformers/trainer_utils.py#L51-L61 While in many datasets metrics they are the ground truth labels: https://github.com/huggingface/datasets/blob/c3f53792a744ede18d748a1133b6597fdd2d8d18/metrics/accuracy/accuracy.py#L31-L40 Do you think predictions & references should be swapped? I'd be willing to do some refactoring here if you agree.
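A small usage sketch showing why the order matters less in practice when keyword arguments are used; the call follows the metric's `compute` interface:

```python
from datasets import load_metric

metric = load_metric("accuracy")
result = metric.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 0])
print(result)  # {'accuracy': 0.75}
```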
{ "avatar_url": "https://avatars.githubusercontent.com/u/8402500?v=4", "events_url": "https://api.github.com/users/Fraser-Greenlee/events{/privacy}", "followers_url": "https://api.github.com/users/Fraser-Greenlee/followers", "following_url": "https://api.github.com/users/Fraser-Greenlee/following{/other_user}", "gists_url": "https://api.github.com/users/Fraser-Greenlee/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Fraser-Greenlee", "id": 8402500, "login": "Fraser-Greenlee", "node_id": "MDQ6VXNlcjg0MDI1MDA=", "organizations_url": "https://api.github.com/users/Fraser-Greenlee/orgs", "received_events_url": "https://api.github.com/users/Fraser-Greenlee/received_events", "repos_url": "https://api.github.com/users/Fraser-Greenlee/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Fraser-Greenlee/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Fraser-Greenlee/subscriptions", "type": "User", "url": "https://api.github.com/users/Fraser-Greenlee", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1478/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1478/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
8 days, 2:44:01
https://api.github.com/repos/huggingface/datasets/issues/1452
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1452/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1452/comments
https://api.github.com/repos/huggingface/datasets/issues/1452/events
https://github.com/huggingface/datasets/issues/1452
761,104,924
MDU6SXNzdWU3NjExMDQ5MjQ=
1,452
SNLI dataset contains labels with value -1
{ "avatar_url": "https://avatars.githubusercontent.com/u/11405654?v=4", "events_url": "https://api.github.com/users/aarnetalman/events{/privacy}", "followers_url": "https://api.github.com/users/aarnetalman/followers", "following_url": "https://api.github.com/users/aarnetalman/following{/other_user}", "gists_url": "https://api.github.com/users/aarnetalman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/aarnetalman", "id": 11405654, "login": "aarnetalman", "node_id": "MDQ6VXNlcjExNDA1NjU0", "organizations_url": "https://api.github.com/users/aarnetalman/orgs", "received_events_url": "https://api.github.com/users/aarnetalman/received_events", "repos_url": "https://api.github.com/users/aarnetalman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/aarnetalman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aarnetalman/subscriptions", "type": "User", "url": "https://api.github.com/users/aarnetalman", "user_view_type": "public" }
[]
closed
false
null
[]
[ "I believe the `-1` label is used for missing/NULL data as per HuggingFace Dataset conventions. If I recall correctly SNLI has some entries with no (gold) labels in the dataset.", "Ah, you're right. The dataset has some pairs with missing labels. Thanks for reminding me." ]
2020-12-10T10:16:55
2020-12-10T17:49:55
2020-12-10T17:49:55
NONE
null
null
null
null
``` import datasets nli_data = datasets.load_dataset("snli") train_data = nli_data['train'] train_labels = train_data['label'] label_set = set(train_labels) print(label_set) ``` **Output:** `{0, 1, 2, -1}`
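As the comments above note, `-1` marks pairs without a gold label. A minimal sketch (an assumption about the intended use, not part of the original issue) for dropping those examples before training:

```python
import datasets

nli_data = datasets.load_dataset("snli")
# Keep only examples that have a gold label (label != -1).
train_data = nli_data["train"].filter(lambda example: example["label"] != -1)
print(set(train_data["label"]))  # expected: {0, 1, 2}
```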
{ "avatar_url": "https://avatars.githubusercontent.com/u/11405654?v=4", "events_url": "https://api.github.com/users/aarnetalman/events{/privacy}", "followers_url": "https://api.github.com/users/aarnetalman/followers", "following_url": "https://api.github.com/users/aarnetalman/following{/other_user}", "gists_url": "https://api.github.com/users/aarnetalman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/aarnetalman", "id": 11405654, "login": "aarnetalman", "node_id": "MDQ6VXNlcjExNDA1NjU0", "organizations_url": "https://api.github.com/users/aarnetalman/orgs", "received_events_url": "https://api.github.com/users/aarnetalman/received_events", "repos_url": "https://api.github.com/users/aarnetalman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/aarnetalman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aarnetalman/subscriptions", "type": "User", "url": "https://api.github.com/users/aarnetalman", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1452/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1452/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
7:33:00
https://api.github.com/repos/huggingface/datasets/issues/1444
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1444/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1444/comments
https://api.github.com/repos/huggingface/datasets/issues/1444/events
https://github.com/huggingface/datasets/issues/1444
761,055,651
MDU6SXNzdWU3NjEwNTU2NTE=
1,444
FileNotFound remotly, can't load a dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/18331629?v=4", "events_url": "https://api.github.com/users/sadakmed/events{/privacy}", "followers_url": "https://api.github.com/users/sadakmed/followers", "following_url": "https://api.github.com/users/sadakmed/following{/other_user}", "gists_url": "https://api.github.com/users/sadakmed/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sadakmed", "id": 18331629, "login": "sadakmed", "node_id": "MDQ6VXNlcjE4MzMxNjI5", "organizations_url": "https://api.github.com/users/sadakmed/orgs", "received_events_url": "https://api.github.com/users/sadakmed/received_events", "repos_url": "https://api.github.com/users/sadakmed/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sadakmed/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sadakmed/subscriptions", "type": "User", "url": "https://api.github.com/users/sadakmed", "user_view_type": "public" }
[]
closed
false
null
[]
[ "This dataset will be available in version-2 of the library. If you want to use this dataset now, install datasets from `master` branch rather.\r\n\r\nCommand to install datasets from `master` branch:\r\n`!pip install git+https://github.com/huggingface/datasets.git@master`", "Closing this, thanks @VasudevGupta7 " ]
2020-12-10T09:14:47
2020-12-15T17:41:14
2020-12-15T17:41:14
NONE
null
null
null
null
```py !pip install datasets import datasets as ds corpus = ds.load_dataset('large_spanish_corpus') ``` gives the error > FileNotFoundError: Couldn't find file locally at large_spanish_corpus/large_spanish_corpus.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/large_spanish_corpus/large_spanish_corpus.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/large_spanish_corpus/large_spanish_corpus.py This is not just `large_spanish_corpus`; `zest` fails too, although `squad` is available. This happened both on Colab and locally.
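As pointed out in the comments, this dataset only existed on the `master` branch at the time. A minimal sketch of the two usual workarounds (the `script_version` argument is a datasets 1.x option; the second line assumes the install command from the comments has been run):

```python
# Option 1: install datasets from the master branch first, then load normally:
#   pip install git+https://github.com/huggingface/datasets.git@master
import datasets as ds

corpus = ds.load_dataset("large_spanish_corpus")

# Option 2 (datasets 1.x): keep the released package but pull the loading script
# from the master branch of the repository.
corpus = ds.load_dataset("large_spanish_corpus", script_version="master")
```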
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1444/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1444/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
5 days, 8:26:27
https://api.github.com/repos/huggingface/datasets/issues/1422
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1422/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1422/comments
https://api.github.com/repos/huggingface/datasets/issues/1422/events
https://github.com/huggingface/datasets/issues/1422
760,707,113
MDU6SXNzdWU3NjA3MDcxMTM=
1,422
Can't map dataset (loaded from csv)
{ "avatar_url": "https://avatars.githubusercontent.com/u/28161779?v=4", "events_url": "https://api.github.com/users/SolomidHero/events{/privacy}", "followers_url": "https://api.github.com/users/SolomidHero/followers", "following_url": "https://api.github.com/users/SolomidHero/following{/other_user}", "gists_url": "https://api.github.com/users/SolomidHero/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SolomidHero", "id": 28161779, "login": "SolomidHero", "node_id": "MDQ6VXNlcjI4MTYxNzc5", "organizations_url": "https://api.github.com/users/SolomidHero/orgs", "received_events_url": "https://api.github.com/users/SolomidHero/received_events", "repos_url": "https://api.github.com/users/SolomidHero/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SolomidHero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SolomidHero/subscriptions", "type": "User", "url": "https://api.github.com/users/SolomidHero", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Please could you post the whole script? I can't reproduce your issue. After updating the feature names/labels to match with the data, everything works fine for me. Try to update datasets/transformers to the newest version.", "Actually, the problem was how `tokenize` function was defined. This was completely my side mistake, so there are really no needs in this issue anymore" ]
2020-12-09T22:05:42
2020-12-17T18:13:40
2020-12-17T18:13:40
NONE
null
null
null
null
Hello! I am trying to load a single csv file with two columns: ('label': str, 'text': str), where label is a str with two possible classes. The steps below are similar to [this notebook](https://colab.research.google.com/drive/1-JIJlao4dI-Ilww_NnTc0rxtp-ymgDgM?usp=sharing), where a BERT model and tokenizer are used to classify the loaded IMDb dataset. The only difference is that the dataset is loaded from a .csv file. Here is how I load it: ```python data_path = 'data.csv' data = pd.read_csv(data_path) # process class name to indices classes = ['neg', 'pos'] class_to_idx = { cl: i for i, cl in enumerate(classes) } # now data is like {'label': int, 'text' str} data['label'] = data['label'].apply(lambda x: class_to_idx[x]) # load dataset and map it with defined `tokenize` function features = Features({ target: ClassLabel(num_classes=2, names=['neg', 'pos'], names_file=None, id=None), feature: Value(dtype='string', id=None), }) dataset = Dataset.from_pandas(data, features=features) dataset.map(tokenize, batched=True, batch_size=len(dataset)) ``` It fails on the last line with the following error: ``` --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-112-32b6275ce418> in <module>() 9 }) 10 dataset = Dataset.from_pandas(data, features=features) ---> 11 dataset.map(tokenizer, batched=True, batch_size=len(dataset)) 2 frames /usr/local/lib/python3.6/dist-packages/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint) 1237 test_inputs = self[:2] if batched else self[0] 1238 test_indices = [0, 1] if batched else 0 -> 1239 update_data = does_function_return_dict(test_inputs, test_indices) 1240 logger.info("Testing finished, running the mapping function on the dataset") 1241 /usr/local/lib/python3.6/dist-packages/datasets/arrow_dataset.py in does_function_return_dict(inputs, indices) 1208 fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns] 1209 processed_inputs = ( -> 1210 function(*fn_args, indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs) 1211 ) 1212 does_return_dict = isinstance(processed_inputs, Mapping) /usr/local/lib/python3.6/dist-packages/transformers/tokenization_utils_base.py in __call__(self, text, text_pair, add_special_tokens, padding, truncation, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, **kwargs) 2281 ) 2282 ), ( -> 2283 "text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) " 2284 "or `List[List[str]]` (batch of pretokenized examples)." 2285 ) AssertionError: text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples). ``` which I think is not expected. I also tried the same steps using `Dataset.from_csv`, which resulted in the same error. For reproducing this, I used [this dataset from kaggle](https://www.kaggle.com/team-ai/spam-text-message-classification)
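Per the follow-up comments, the root cause was how the `tokenize` function was defined (the traceback shows the tokenizer object itself being passed to `map`). A minimal sketch (assuming a `tokenizer` from the notebook above and that the text column is called `text`) of a mapping function that works with `batched=True`:

```python
# With batched=True the function receives a dict of lists, so the tokenizer has to
# be applied to the list of strings in the text column, not to the whole dict.
def tokenize(batch):
    return tokenizer(batch["text"], padding=True, truncation=True)

dataset = dataset.map(tokenize, batched=True, batch_size=len(dataset))
```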
{ "avatar_url": "https://avatars.githubusercontent.com/u/28161779?v=4", "events_url": "https://api.github.com/users/SolomidHero/events{/privacy}", "followers_url": "https://api.github.com/users/SolomidHero/followers", "following_url": "https://api.github.com/users/SolomidHero/following{/other_user}", "gists_url": "https://api.github.com/users/SolomidHero/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SolomidHero", "id": 28161779, "login": "SolomidHero", "node_id": "MDQ6VXNlcjI4MTYxNzc5", "organizations_url": "https://api.github.com/users/SolomidHero/orgs", "received_events_url": "https://api.github.com/users/SolomidHero/received_events", "repos_url": "https://api.github.com/users/SolomidHero/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SolomidHero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SolomidHero/subscriptions", "type": "User", "url": "https://api.github.com/users/SolomidHero", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1422/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1422/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
7 days, 20:07:58
https://api.github.com/repos/huggingface/datasets/issues/1324
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1324/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1324/comments
https://api.github.com/repos/huggingface/datasets/issues/1324/events
https://github.com/huggingface/datasets/issues/1324
759,587,864
MDU6SXNzdWU3NTk1ODc4NjQ=
1,324
❓ Sharing ElasticSearch indexed dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/61748653?v=4", "events_url": "https://api.github.com/users/pietrolesci/events{/privacy}", "followers_url": "https://api.github.com/users/pietrolesci/followers", "following_url": "https://api.github.com/users/pietrolesci/following{/other_user}", "gists_url": "https://api.github.com/users/pietrolesci/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pietrolesci", "id": 61748653, "login": "pietrolesci", "node_id": "MDQ6VXNlcjYxNzQ4NjUz", "organizations_url": "https://api.github.com/users/pietrolesci/orgs", "received_events_url": "https://api.github.com/users/pietrolesci/received_events", "repos_url": "https://api.github.com/users/pietrolesci/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pietrolesci/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pietrolesci/subscriptions", "type": "User", "url": "https://api.github.com/users/pietrolesci", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
open
false
null
[]
[ "Hello @pietrolesci , I am not sure to understand what you are trying to do here.\r\n\r\nIf you're looking for ways to save a dataset on disk, you can you the `save_to_disk` method:\r\n```python\r\n>>> import datasets\r\n>>> loaded_dataset = datasets.load(\"dataset_name\")\r\n>>> loaded_dataset.save_to_disk(\"/path/on/your/disk\")\r\n```\r\n\r\nThe saved dataset can later be retrieved using:\r\n```python\r\n>>> loaded_dataset = datasets.Dataset.load_from_disk(\"/path/on/your/disk\")\r\n```\r\n\r\nAlso, I'd recommend posting your question directly in the issue section of the [elasticsearch repo](https://github.com/elastic/elasticsearch)", "Hi @SBrandeis,\n\nThanks a lot for picking up my request. \n\nMaybe I can clarify my use-case with a bit of context. Say I have the IMDb dataset. I create an ES index on it. Now I can save and reload the dataset from disk normally. Once I reload the dataset, it is easy to retrieve the ES index on my machine. I was wondering: is there a way I can share the (now) indexed version of the IMDb dataset with my colleagues without requiring them to re-index it?\n\nThanks a lot in advance for your consideration.\n\nBest,\n\nPietro", "Thanks for the clarification.\r\n\r\nI am not familiar with ElasticSearch, but if I understand well you're trying to migrate your data along with the ES index.\r\nMy advice would be to check out ES documentation, for instance, this might help you: https://www.elastic.co/guide/en/cloud/current/ec-migrate-data.html\r\n\r\nLet me know if it helps" ]
2020-12-08T16:25:58
2020-12-22T07:50:56
null
NONE
null
null
null
null
Hi there, First of all, thank you very much for this amazing library. Datasets have become my preferred data structure for basically everything I am currently doing. **Question:** I'm working with a dataset and I have an elasticsearch container running at localhost:9200. I added an elasticsearch index and I was wondering - how can I know where it has been saved? - how can I share the indexed dataset with others? I tried to dig into the docs, but could not find anything about that. Thank you very much for your help. Best, Pietro Edit: apologies for the wrong label
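A minimal sketch of the workflow discussed in the comments (assumptions: an Elasticsearch instance reachable at localhost:9200 and a text column named `text`; the index itself lives in the Elasticsearch cluster, not in the saved Arrow files, so colleagues either need access to the same or a migrated cluster, or have to re-index on their side):

```python
import datasets

dataset = datasets.load_dataset("imdb", split="train")
# Build the index in the running Elasticsearch cluster.
dataset.add_elasticsearch_index("text", host="localhost", port="9200", es_index_name="hf_imdb_text")
dataset.save_to_disk("/path/on/your/disk")  # saves the data only, not the ES index

# On a machine that can reach the same (or a migrated) Elasticsearch cluster:
reloaded = datasets.Dataset.load_from_disk("/path/on/your/disk")
reloaded.load_elasticsearch_index("text", es_index_name="hf_imdb_text", host="localhost", port="9200")
scores, examples = reloaded.get_nearest_examples("text", "great movie", k=5)
```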
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1324/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1324/timeline
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
null
https://api.github.com/repos/huggingface/datasets/issues/1299
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1299/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1299/comments
https://api.github.com/repos/huggingface/datasets/issues/1299/events
https://github.com/huggingface/datasets/issues/1299
759,414,566
MDU6SXNzdWU3NTk0MTQ1NjY=
1,299
can't load "german_legal_entity_recognition" dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/59837137?v=4", "events_url": "https://api.github.com/users/nataly-obr/events{/privacy}", "followers_url": "https://api.github.com/users/nataly-obr/followers", "following_url": "https://api.github.com/users/nataly-obr/following{/other_user}", "gists_url": "https://api.github.com/users/nataly-obr/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nataly-obr", "id": 59837137, "login": "nataly-obr", "node_id": "MDQ6VXNlcjU5ODM3MTM3", "organizations_url": "https://api.github.com/users/nataly-obr/orgs", "received_events_url": "https://api.github.com/users/nataly-obr/received_events", "repos_url": "https://api.github.com/users/nataly-obr/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nataly-obr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nataly-obr/subscriptions", "type": "User", "url": "https://api.github.com/users/nataly-obr", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Please if you could tell me more about the error? \r\n\r\n1. Please check the directory you've been working on\r\n2. Check for any typos", "> Please if you could tell me more about the error?\r\n> \r\n> 1. Please check the directory you've been working on\r\n> 2. Check for any typos\r\n\r\nError happens during the execution of this line:\r\ndataset = load_dataset(\"german_legal_entity_recognition\")\r\n\r\nAlso, when I try to open mentioned links via Opera I have errors \"404: Not Found\" and \"This XML file does not appear to have any style information associated with it. The document tree is shown below.\" respectively.", "Hello @nataly-obr, the `german_legal_entity_recognition` dataset has not yet been released (it is part of the coming soon v2 release).\r\n\r\nYou can still access it now if you want, but you will need to install `datasets` via the master branch:\r\n`pip install git+https://github.com/huggingface/datasets.git@master`\r\n\r\nPlease let me know if it solves the issue :) " ]
2020-12-08T12:42:01
2020-12-16T16:03:13
2020-12-16T16:03:13
NONE
null
null
null
null
FileNotFoundError: Couldn't find file locally at german_legal_entity_recognition/german_legal_entity_recognition.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.3/datasets/german_legal_entity_recognition/german_legal_entity_recognition.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/german_legal_entity_recognition/german_legal_entity_recognition.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1299/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1299/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
8 days, 3:21:12
https://api.github.com/repos/huggingface/datasets/issues/1290
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1290/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1290/comments
https://api.github.com/repos/huggingface/datasets/issues/1290/events
https://github.com/huggingface/datasets/issues/1290
759,339,989
MDU6SXNzdWU3NTkzMzk5ODk=
1,290
imdb dataset cannot be downloaded
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @rabeehk , I am unable to reproduce your problem locally.\r\nCan you try emptying the cache (removing the content of `/idiap/temp/rkarimi/cache_home_1/datasets`) and retry ?", "Hi,\r\nthanks, I did remove the cache and still the same error here\r\n\r\n```\r\n>>> a = datasets.load_dataset(\"imdb\", split=\"train\")\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\nDownloading and preparing dataset imdb/plain_text (download: 80.23 MiB, generated: 127.06 MiB, post-processed: Unknown size, total: 207.28 MiB) to /idiap/temp/rkarimi/cache_home_1/datasets/imdb/plain_text/1.0.0/90099cb476936b753383ba2ae6ab2eae419b2e87f71cd5189cb9c8e5814d12a3...\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets/downloads\r\nTraceback (most recent call last): \r\n File \"<stdin>\", line 1, in <module>\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/load.py\", line 611, in load_dataset\r\n ignore_verifications=ignore_verifications,\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py\", line 476, in download_and_prepare\r\n dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py\", line 558, in _download_and_prepare\r\n verify_splits(self.info.splits, split_dict)\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/info_utils.py\", line 73, in verify_splits\r\n raise NonMatchingSplitsSizesError(str(bad_splits))\r\ndatasets.utils.info_utils.NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='unsupervised', num_bytes=67125548, num_examples=50000, dataset_name='imdb'), 'recorded': SplitInfo(name='unsupervised', num_bytes=4902716, num_examples=3680, dataset_name='imdb')}]\r\n```\r\n\r\ndatasets version\r\n```\r\ndatasets 1.1.2 <pip>\r\ntensorflow-datasets 4.1.0 <pip>\r\n\r\n```", "resolved with moving to version 1.1.3" ]
2020-12-08T10:47:36
2020-12-24T17:38:09
2020-12-24T17:38:09
CONTRIBUTOR
null
null
null
null
Hi, please find the error below when getting the IMDb train split. Thanks. `>>> datasets.load_dataset("imdb", split="train")` errors: ``` cahce dir /idiap/temp/rkarimi/cache_home_1/datasets cahce dir /idiap/temp/rkarimi/cache_home_1/datasets Downloading and preparing dataset imdb/plain_text (download: 80.23 MiB, generated: 127.06 MiB, post-processed: Unknown size, total: 207.28 MiB) to /idiap/temp/rkarimi/cache_home_1/datasets/imdb/plain_text/1.0.0/90099cb476936b753383ba2ae6ab2eae419b2e87f71cd5189cb9c8e5814d12a3... cahce dir /idiap/temp/rkarimi/cache_home_1/datasets cahce dir /idiap/temp/rkarimi/cache_home_1/datasets/downloads Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/load.py", line 611, in load_dataset ignore_verifications=ignore_verifications, File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py", line 476, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py", line 558, in _download_and_prepare verify_splits(self.info.splits, split_dict) File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/info_utils.py", line 73, in verify_splits raise NonMatchingSplitsSizesError(str(bad_splits)) datasets.utils.info_utils.NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='unsupervised', num_bytes=67125548, num_examples=50000, dataset_name='imdb'), 'recorded': SplitInfo(name='unsupervised', num_bytes=7486451, num_examples=5628, dataset_name='imdb')}] ```
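The comments above resolve this by clearing the cache and moving to datasets 1.1.3. A minimal sketch of the usual workarounds for a stale or partial cache (an illustration, not part of the original report; both options are datasets 1.x arguments visible in the traceback and library docs of that era):

```python
import datasets

# Force a fresh download so a partially downloaded archive in the cache is discarded.
imdb = datasets.load_dataset("imdb", split="train", download_mode="force_redownload")

# Or, if the recorded split sizes are simply out of date, skip the verification step.
# imdb = datasets.load_dataset("imdb", split="train", ignore_verifications=True)
```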
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1290/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1290/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
16 days, 6:50:33
https://api.github.com/repos/huggingface/datasets/issues/1287
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1287/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1287/comments
https://api.github.com/repos/huggingface/datasets/issues/1287/events
https://github.com/huggingface/datasets/issues/1287
759,300,992
MDU6SXNzdWU3NTkzMDA5OTI=
1,287
'iwslt2017-ro-nl', cannot be downloaded
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[ { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" } ]
[ "the same issue with datasets.load_dataset(\"iwslt2017\", 'iwslt2017-en-nl', split=split), ..... ", "even with setting master like the following command, still remains \r\n\r\ndatasets.load_dataset(\"iwslt2017\", 'iwslt2017-en-nl', split=\"train\", script_version=\"master\")\r\n", "Looks like the data has been moved from its original location to google drive\r\n\r\nNew url: https://drive.google.com/u/0/uc?id=12ycYSzLIG253AFN35Y6qoyf9wtkOjakp&export=download", "Fixed by #4481 " ]
2020-12-08T09:56:55
2022-06-13T10:41:33
2022-06-13T10:41:33
CONTRIBUTOR
null
null
null
null
Hi, I am trying `>>> datasets.load_dataset("iwslt2017", 'iwslt2017-ro-nl', split="train")` and getting the error below. Thank you for your help. ``` cahce dir /idiap/temp/rkarimi/cache_home_1/datasets cahce dir /idiap/temp/rkarimi/cache_home_1/datasets Downloading and preparing dataset iwsl_t217/iwslt2017-ro-nl (download: 314.07 MiB, generated: 39.92 MiB, post-processed: Unknown size, total: 354.00 MiB) to /idiap/temp/rkarimi/cache_home_1/datasets/iwsl_t217/iwslt2017-ro-nl/1.0.0/cca6935a0851a8ceac1202a62c958738bdfa23c57a51bc52ac1c5ebd2aa172cd... cahce dir /idiap/temp/rkarimi/cache_home_1/datasets cahce dir /idiap/temp/rkarimi/cache_home_1/datasets/downloads Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/load.py", line 611, in load_dataset ignore_verifications=ignore_verifications, File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py", line 476, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py", line 531, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File " /idiap/home/rkarimi/.cache/huggingface/modules/datasets_modules/datasets/iwslt2017/cca6935a0851a8ceac1202a62c958738bdfa23c57a51bc52ac1c5ebd2aa172cd/iwslt2017.py", line 118, in _split_generators dl_dir = dl_manager.download_and_extract(MULTI_URL) File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 254, in download_and_extract return self.extract(self.download(url_or_urls)) File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 179, in download num_proc=download_config.num_proc, File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/py_utils.py", line 216, in map_nested return function(data_struct) File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 308, in cached_path use_etag=download_config.use_etag, File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 477, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://wit3.fbk.eu/archive/2017-01-trnmted//texts/DeEnItNlRo/DeEnItNlRo/DeEnItNlRo-DeEnItNlRo.tgz ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1287/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1287/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
552 days, 0:44:38
https://api.github.com/repos/huggingface/datasets/issues/1286
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1286/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1286/comments
https://api.github.com/repos/huggingface/datasets/issues/1286/events
https://github.com/huggingface/datasets/issues/1286
759,291,509
MDU6SXNzdWU3NTkyOTE1MDk=
1,286
[libprotobuf FATAL /sentencepiece/src/../third_party/protobuf-lite/google/protobuf/repeated_field.h:1505] CHECK failed: (index) >= (0): terminate called after throwing an instance of 'google::protobuf::FatalException' what(): CHECK failed: (index) >= (0): Aborted
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[]
closed
false
null
[]
[ "I remember also getting the same issue for several other translation datasets like all the iwslt2017 group, this is blokcing me and I really need to fix it and I was wondering if you have an idea on this. @lhoestq thanks,. ", "maybe there is an empty line or something inside these datasets? could you tell me why this is happening? thanks ", "I just checked and the wmt16 en-ro doesn't have empty lines\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nd = load_dataset(\"wmt16\", \"ro-en\", split=\"train\")\r\nlen(d) # 610320\r\nlen(d.filter(lambda x: len(x[\"translation\"][\"en\"].strip()) > 0)) # 610320\r\nlen(d.filter(lambda x: len(x[\"translation\"][\"ro\"].strip()) > 0)) # 610320\r\n# also tested for split=\"validation\" and \"test\"\r\n```\r\n\r\nCan you open an issue on the `transformers` repo ? also cc @sgugger ", "Hi @lhoestq \r\nI am not really sure which part is causing this, to me this is more related to dataset library as this is happening for some of the datassets below please find the information to reprodcue the bug, this is really blocking me and I appreciate your help\r\n\r\n\r\n## Environment info\r\n- `transformers` version: 3.5.1\r\n- Platform: GPU\r\n- Python version: 3.7 \r\n- PyTorch version (GPU?): 1.0.4\r\n- Tensorflow version (GPU?): - \r\n- Using GPU in script?: - \r\n- Using distributed or parallel set-up in script?: - \r\n\r\n### Who can help\r\n tokenizers: @mfuntowicz\r\n Trainer: @sgugger\r\n TextGeneration: @TevenLeScao \r\n nlp datasets: [different repo](https://github.com/huggingface/nlp)\r\n rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)\r\n examples/seq2seq: @patil-suraj\r\n\r\n## Information\r\nHi\r\nI am testing seq2seq model with T5 on different datasets and this is always getting the following bug, this is really blocking me as this fails for many datasets. could you have a look please? 
thanks \r\n\r\n```\r\n[libprotobuf FATAL /sentencepiece/src/../third_party/protobuf-lite/google/protobuf/repeated_field.h:1505] CHECK failed: (index) >= (0): \r\nterminate called after throwing an instance of 'google::protobuf::FatalException'\r\n what(): CHECK failed: (index) >= (0): \r\nAborted\r\n\r\n```\r\n\r\nTo reproduce the error please run on 1 GPU:\r\n```\r\ngit clone git@github.com:rabeehk/debug-seq2seq.git\r\npython setup.py develop \r\ncd seq2seq \r\npython finetune_t5_trainer.py temp.json\r\n\r\n```\r\n\r\nFull output of the program:\r\n\r\n```\r\n(internship) rkarimi@vgnh008:/idiap/user/rkarimi/dev/debug-seq2seq/seq2seq$ python finetune_t5_trainer.py temp.json \r\n2020-12-12 15:38:16.234542: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\r\n2020-12-12 15:38:16.234598: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\r\n12/12/2020 15:38:32 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False\r\n12/12/2020 15:38:32 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(output_dir='outputs/test', overwrite_output_dir=True, do_train=True, do_eval=True, do_predict=False, evaluate_during_training=False, evaluation_strategy=<EvaluationStrategy.NO: 'no'>, prediction_loss_only=False, per_device_train_batch_size=64, per_device_eval_batch_size=64, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=1, eval_accumulation_steps=None, learning_rate=0.01, weight_decay=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=2, max_steps=-1, warmup_steps=500, logging_dir='runs/Dec12_15-38-32_vgnh008', logging_first_step=True, logging_steps=200, save_steps=200, save_total_limit=1, no_cuda=False, seed=42, fp16=False, fp16_opt_level='O1', local_rank=-1, tpu_num_cores=None, tpu_metrics_debug=False, debug=False, dataloader_drop_last=False, eval_steps=200, dataloader_num_workers=0, past_index=-1, run_name='outputs/test', disable_tqdm=False, remove_unused_columns=True, label_names=None, load_best_model_at_end=False, metric_for_best_model=None, greater_is_better=None, label_smoothing=0.1, sortish_sampler=False, predict_with_generate=True, adafactor=False, encoder_layerdrop=None, decoder_layerdrop=None, dropout=None, attention_dropout=None, lr_scheduler='linear', fixed_length_emb=None, encoder_projection=None, encoder_pooling=None, projection_length=None, only_projection_bottleneck=False, concat_projection_token=False, gcs_bucket='ruse-xcloud-bucket', temperature=10, train_adapters=True, do_finetune=True, parametric_task_embedding=False, eval_output_dir='outputs/finetune-adapter/test-n-1-lr-1e-02-e-20')\r\nSome weights of T5ForConditionalGeneration were not initialized from the model checkpoint at t5-small and are newly initialized: ['encoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 
'encoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.0.layer.0.adapter_controller.post_layer_norm.weight', 'encoder.block.0.layer.0.adapter_controller.post_layer_norm.bias', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.0.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.0.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.0.layer.1.adapter_controller.post_layer_norm.weight', 'encoder.block.0.layer.1.adapter_controller.post_layer_norm.bias', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 
'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.1.layer.0.adapter_controller.post_layer_norm.weight', 'encoder.block.1.layer.0.adapter_controller.post_layer_norm.bias', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.1.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.1.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.1.layer.1.adapter_controller.post_layer_norm.weight', 'encoder.block.1.layer.1.adapter_controller.post_layer_norm.bias', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 
'encoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.2.layer.0.adapter_controller.post_layer_norm.weight', 'encoder.block.2.layer.0.adapter_controller.post_layer_norm.bias', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.2.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.2.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.2.layer.1.adapter_controller.post_layer_norm.weight', 'encoder.block.2.layer.1.adapter_controller.post_layer_norm.bias', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.3.layer.0.adapter_controller.post_layer_norm.weight', 'encoder.block.3.layer.0.adapter_controller.post_layer_norm.bias', 'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.weight', 
'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.3.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.3.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.3.layer.1.adapter_controller.post_layer_norm.weight', 'encoder.block.3.layer.1.adapter_controller.post_layer_norm.bias', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.4.layer.0.adapter_controller.post_layer_norm.weight', 'encoder.block.4.layer.0.adapter_controller.post_layer_norm.bias', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.4.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.bias', 
'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.4.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.4.layer.1.adapter_controller.post_layer_norm.weight', 'encoder.block.4.layer.1.adapter_controller.post_layer_norm.bias', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.5.layer.0.adapter_controller.post_layer_norm.weight', 'encoder.block.5.layer.0.adapter_controller.post_layer_norm.bias', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'encoder.block.5.layer.1.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.weight', 
'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'encoder.block.5.layer.1.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'encoder.block.5.layer.1.adapter_controller.post_layer_norm.weight', 'encoder.block.5.layer.1.adapter_controller.post_layer_norm.bias', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.0.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.0.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.0.layer.0.adapter_controller.post_layer_norm.weight', 'decoder.block.0.layer.0.adapter_controller.post_layer_norm.bias', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.0.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.0.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.0.layer.2.adapter_controller.post_layer_norm.weight', 'decoder.block.0.layer.2.adapter_controller.post_layer_norm.bias', 'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 
'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.1.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.1.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.1.layer.0.adapter_controller.post_layer_norm.weight', 'decoder.block.1.layer.0.adapter_controller.post_layer_norm.bias', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.1.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.1.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.1.layer.2.adapter_controller.post_layer_norm.weight', 'decoder.block.1.layer.2.adapter_controller.post_layer_norm.bias', 'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 
'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.2.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.2.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.2.layer.0.adapter_controller.post_layer_norm.weight', 'decoder.block.2.layer.0.adapter_controller.post_layer_norm.bias', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.2.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.2.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.2.layer.2.adapter_controller.post_layer_norm.weight', 'decoder.block.2.layer.2.adapter_controller.post_layer_norm.bias', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.3.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 
'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.3.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.3.layer.0.adapter_controller.post_layer_norm.weight', 'decoder.block.3.layer.0.adapter_controller.post_layer_norm.bias', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.3.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.3.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.3.layer.2.adapter_controller.post_layer_norm.weight', 'decoder.block.3.layer.2.adapter_controller.post_layer_norm.bias', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.4.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.4.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 
'decoder.block.4.layer.0.adapter_controller.post_layer_norm.weight', 'decoder.block.4.layer.0.adapter_controller.post_layer_norm.bias', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.4.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.4.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.4.layer.2.adapter_controller.post_layer_norm.weight', 'decoder.block.4.layer.2.adapter_controller.post_layer_norm.bias', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.weight_generator.1.bias', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.5.layer.0.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.5.layer.0.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.5.layer.0.adapter_controller.post_layer_norm.weight', 'decoder.block.5.layer.0.adapter_controller.post_layer_norm.bias', 'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.weight', 'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.weight_generator.0.bias', 'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.weight', 'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.weight_generator.1.bias', 
'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.weight', 'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.bias_generator.0.bias', 'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.weight', 'decoder.block.5.layer.2.adapter_controller.meta_up_sampler.bias_generator.1.bias', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.weight', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.weight_generator.0.bias', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.weight', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.weight_generator.1.bias', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.weight', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.bias_generator.0.bias', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.weight', 'decoder.block.5.layer.2.adapter_controller.meta_down_sampler.bias_generator.1.bias', 'decoder.block.5.layer.2.adapter_controller.post_layer_norm.weight', 'decoder.block.5.layer.2.adapter_controller.post_layer_norm.bias']\r\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\n12/12/2020 15:38:44 - INFO - filelock - Lock 140079090376272 acquired on /idiap/home/rkarimi/.cache/huggingface/datasets/4c7b1146606607c193d1ef601d8d0c134521b2ac59f61ee98c09119be925ee16.7ad892de9d7f1b4f9dfc598ef31e4a398a7224176bc9a3110e0e2075ff943e8f.py.lock\r\n12/12/2020 15:38:44 - INFO - filelock - Lock 140079090376272 released on /idiap/home/rkarimi/.cache/huggingface/datasets/4c7b1146606607c193d1ef601d8d0c134521b2ac59f61ee98c09119be925ee16.7ad892de9d7f1b4f9dfc598ef31e4a398a7224176bc9a3110e0e2075ff943e8f.py.lock\r\nUsing custom data configuration default\r\n12/12/2020 15:38:44 - INFO - filelock - Lock 140082549312272 acquired on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\n12/12/2020 15:38:44 - INFO - filelock - Lock 140082549312272 released on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\n12/12/2020 15:38:44 - INFO - filelock - Lock 140082549365648 acquired on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\nReusing dataset boolq (/idiap/temp/rkarimi/cache_home_1/datasets/boolq/default/0.1.0/1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534)\r\n12/12/2020 15:38:44 - INFO - filelock - Lock 140082549365648 released on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\nLoading cached processed dataset at /idiap/temp/rkarimi/cache_home_1/datasets/boolq/default/0.1.0/1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534/cache-6810ece2a440c3be.arrow\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\n12/12/2020 15:38:45 - INFO - filelock - Lock 140082549560848 acquired on 
/idiap/home/rkarimi/.cache/huggingface/datasets/4c7b1146606607c193d1ef601d8d0c134521b2ac59f61ee98c09119be925ee16.7ad892de9d7f1b4f9dfc598ef31e4a398a7224176bc9a3110e0e2075ff943e8f.py.lock\r\n12/12/2020 15:38:45 - INFO - filelock - Lock 140082549560848 released on /idiap/home/rkarimi/.cache/huggingface/datasets/4c7b1146606607c193d1ef601d8d0c134521b2ac59f61ee98c09119be925ee16.7ad892de9d7f1b4f9dfc598ef31e4a398a7224176bc9a3110e0e2075ff943e8f.py.lock\r\nUsing custom data configuration default\r\n12/12/2020 15:38:45 - INFO - filelock - Lock 140082549560848 acquired on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\n12/12/2020 15:38:45 - INFO - filelock - Lock 140082549560848 released on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\n12/12/2020 15:38:45 - INFO - filelock - Lock 140082549365200 acquired on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\nReusing dataset boolq (/idiap/temp/rkarimi/cache_home_1/datasets/boolq/default/0.1.0/1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534)\r\n12/12/2020 15:38:45 - INFO - filelock - Lock 140082549365200 released on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\nLoading cached processed dataset at /idiap/temp/rkarimi/cache_home_1/datasets/boolq/default/0.1.0/1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534/cache-9a2822394a3a4e34.arrow\r\n12/12/2020 15:38:45 - INFO - seq2seq.metrics.metrics - selected metric <function build_compute_metrics_fn.<locals>.classification_metrics at 0x7f66b464cc20> for task boolq\r\n12/12/2020 15:38:45 - INFO - seq2seq.trainers.trainer - ***** Running training *****\r\n12/12/2020 15:38:45 - INFO - seq2seq.trainers.trainer - Num examples = 10\r\n12/12/2020 15:38:45 - INFO - seq2seq.trainers.trainer - Num Epochs = 2\r\n12/12/2020 15:38:45 - INFO - seq2seq.trainers.trainer - Instantaneous batch size per device = 64\r\n12/12/2020 15:38:45 - INFO - seq2seq.trainers.trainer - Total train batch size (w. parallel, distributed & accumulation) = 64\r\n12/12/2020 15:38:45 - INFO - seq2seq.trainers.trainer - Gradient Accumulation steps = 1\r\n12/12/2020 15:38:45 - INFO - seq2seq.trainers.trainer - Total optimization steps = 2\r\n{'loss': 529.79443359375, 'learning_rate': 2e-05, 'epoch': 1.0} \r\n100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.37it/s]12/12/2020 15:38:46 - INFO - seq2seq.trainers.trainer - \r\n\r\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\r\n\r\n\r\n{'epoch': 2.0} \r\n100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.43it/s]\r\n12/12/2020 15:38:46 - INFO - seq2seq.trainers.trainer - Saving model checkpoint to outputs/test\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\n12/12/2020 15:38:59 - INFO - filelock - Lock 140079084929680 acquired on /idiap/home/rkarimi/.cache/huggingface/datasets/4c7b1146606607c193d1ef601d8d0c134521b2ac59f61ee98c09119be925ee16.7ad892de9d7f1b4f9dfc598ef31e4a398a7224176bc9a3110e0e2075ff943e8f.py.lock\r\n12/12/2020 15:38:59 - INFO - filelock - Lock 140079084929680 released on /idiap/home/rkarimi/.cache/huggingface/datasets/4c7b1146606607c193d1ef601d8d0c134521b2ac59f61ee98c09119be925ee16.7ad892de9d7f1b4f9dfc598ef31e4a398a7224176bc9a3110e0e2075ff943e8f.py.lock\r\nUsing custom data configuration default\r\n12/12/2020 15:38:59 - INFO - filelock - Lock 140079084929360 acquired on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\n12/12/2020 15:38:59 - INFO - filelock - Lock 140079084929360 released on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\n12/12/2020 15:38:59 - INFO - filelock - Lock 140079085355216 acquired on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\nReusing dataset boolq (/idiap/temp/rkarimi/cache_home_1/datasets/boolq/default/0.1.0/1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534)\r\n12/12/2020 15:38:59 - INFO - filelock - Lock 140079085355216 released on /idiap/temp/rkarimi/cache_home_1/datasets/_idiap_temp_rkarimi_cache_home_1_datasets_boolq_default_0.1.0_1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534.lock\r\nLoading cached processed dataset at /idiap/temp/rkarimi/cache_home_1/datasets/boolq/default/0.1.0/1fcfdc6f36dc89a2245ffbbd5248ab33890594b50396731ebc78411bdd2ca534/cache-164dd1d57e9fa69a.arrow\r\n12/12/2020 15:38:59 - INFO - seq2seq.metrics.metrics - selected metric <function build_compute_metrics_fn.<locals>.classification_metrics at 0x7f66b40c67a0> for task boolq\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - ***** Running training *****\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Num examples = 1\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Num Epochs = 2\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Instantaneous batch size per device = 64\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Total train batch size (w. 
parallel, distributed & accumulation) = 64\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Gradient Accumulation steps = 1\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Total optimization steps = 2\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Continuing training from checkpoint, will skip to saved global_step\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Continuing training from epoch 2\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Continuing training from global step 2\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Will skip the first 0 steps in the first epoch\r\n 0%| | 0/2 [00:00<?, ?it/s]12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - \r\n\r\nTraining completed. Do not forget to share your model on huggingface.co/models =)\r\n\r\n\r\n{'epoch': 2.0} \r\n 0%| | 0/2 [00:00<?, ?it/s]\r\n12/12/2020 15:38:59 - INFO - seq2seq.trainers.trainer - Saving model checkpoint to outputs/finetune-adapter/test-n-1-lr-1e-02-e-20/boolq\r\n12/12/2020 15:39:07 - INFO - seq2seq.utils.utils - using task specific params for boolq: {'max_length': 3}\r\n12/12/2020 15:39:07 - INFO - seq2seq.trainers.trainer - ***** Running Evaluation *****\r\n12/12/2020 15:39:07 - INFO - seq2seq.trainers.trainer - Num examples = 3269\r\n12/12/2020 15:39:07 - INFO - seq2seq.trainers.trainer - Batch size = 64\r\n100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 52/52 [00:12<00:00, 4.86it/s][libprotobuf FATAL /sentencepiece/src/../third_party/protobuf-lite/google/protobuf/repeated_field.h:1505] CHECK failed: (index) >= (0): \r\nterminate called after throwing an instance of 'google::protobuf::FatalException'\r\n what(): CHECK failed: (index) >= (0): \r\nAborted\r\n```\r\n\r\n\r\n\r\n", "solved see https://github.com/huggingface/transformers/issues/9079?_pjax=%23js-repo-pjax-container ", "Hii please follow me" ]
2020-12-08T09:44:15
2020-12-12T19:36:22
2020-12-12T16:22:36
CONTRIBUTOR
null
null
null
null
Hi I am getting this error when evaluating on wmt16-ro-en using finetune_trainer.py of huggingface repo. thank for your help {'epoch': 20.0} 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20/20 [00:16<00:00, 1.22it/s] 12/08/2020 10:41:19 - INFO - seq2seq.trainers.trainer - Saving model checkpoint to outputs/experiment/joint/finetune/lr-2e-5 12/08/2020 10:41:24 - INFO - __main__ - {'wmt16-en-ro': Dataset(features: {'src_texts': Value(dtype='string', id=None), 'task': Value(dtype='string', id=None), 'tgt_texts': Value(dtype='string', id=None)}, num_rows: 1998), 'qnli': Dataset(features: {'src_texts': Value(dtype='string', id=None), 'task': Value(dtype='string', id=None), 'tgt_texts': Value(dtype='string', id=None)}, num_rows: 5462), 'scitail': Dataset(features: {'src_texts': Value(dtype='string', id=None), 'task': Value(dtype='string', id=None), 'tgt_texts': Value(dtype='string', id=None)}, num_rows: 1303)} 12/08/2020 10:41:24 - INFO - __main__ - *** Evaluate *** 12/08/2020 10:41:24 - INFO - seq2seq.utils.utils - using task specific params for wmt16-en-ro: {'max_length': 300, 'num_beams': 4} 12/08/2020 10:41:24 - INFO - seq2seq.trainers.trainer - ***** Running Evaluation ***** 12/08/2020 10:41:24 - INFO - seq2seq.trainers.trainer - Num examples = 1998 12/08/2020 10:41:24 - INFO - seq2seq.trainers.trainer - Batch size = 64 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 32/32 [00:37<00:00, 1.19s/it][libprotobuf FATAL /sentencepiece/src/../third_party/protobuf-lite/google/protobuf/repeated_field.h:1505] CHECK failed: (index) >= (0): terminate called after throwing an instance of 'google::protobuf::FatalException' what(): CHECK failed: (index) >= (0): Aborted
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1286/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1286/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
4 days, 6:38:21
https://api.github.com/repos/huggingface/datasets/issues/1285
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1285/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1285/comments
https://api.github.com/repos/huggingface/datasets/issues/1285/events
https://github.com/huggingface/datasets/issues/1285
759,278,758
MDU6SXNzdWU3NTkyNzg3NTg=
1,285
boolq does not work
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
[]
closed
false
null
[]
[ "here is the minimal code to reproduce\r\n\r\n`datasets>>> datasets.load_dataset(\"boolq\", \"train\")\r\n\r\nthe errors\r\n\r\n```\r\n`cahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\nUsing custom data configuration train\r\nDownloading and preparing dataset boolq/train (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /idiap/temp/rkarimi/cache_home_1/datasets/boolq/train/0.1.0/2987db1f15deaa19500ae24de560eabeaf1f8ef51df88c0470beeec72943bf11...\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets/downloads\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/load.py\", line 611, in load_dataset\r\n ignore_verifications=ignore_verifications,\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py\", line 476, in download_and_prepare\r\n dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py\", line 531, in _download_and_prepare\r\n split_generators = self._split_generators(dl_manager, **split_generators_kwargs)\r\n File \" /idiap/home/rkarimi/.cache/huggingface/modules/datasets_modules/datasets/boolq/2987db1f15deaa19500ae24de560eabeaf1f8ef51df88c0470beeec72943bf11/boolq.py\", line 74, in _split_generators\r\n downloaded_files = dl_manager.download_custom(urls_to_download, tf.io.gfile.copy)\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/download_manager.py\", line 149, in download_custom\r\n custom_download(url, path)\r\n File \"/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/tensorflow/python/lib/io/file_io.py\", line 516, in copy_v2\r\n compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)\r\n\r\n\r\n\r\n```", "This has been fixed by #881 \r\nthis fix will be available in the next release soon.\r\n\r\nIf you don't want to wait for the release you can actually load the latest version of boolq by specifying `script_version=\"master\"` in `load_dataset`", "thank you this solved this issue, for now seems to work, thanks " ]
2020-12-08T09:28:47
2020-12-08T09:47:10
2020-12-08T09:47:10
CONTRIBUTOR
null
null
null
null
Hi I am getting this error when trying to load boolq, thanks for your help ts_boolq_default_0.1.0_2987db1f15deaa19500ae24de560eabeaf1f8ef51df88c0470beeec72943bf11.lock Traceback (most recent call last): File "finetune_t5_trainer.py", line 274, in <module> main() File "finetune_t5_trainer.py", line 147, in main for task in data_args.tasks] File "finetune_t5_trainer.py", line 147, in <listcomp> for task in data_args.tasks] File "/remote/idiap.svm/user.active/rkarimi/dev/ruse/seq2seq/tasks/tasks.py", line 58, in get_dataset dataset = self.load_dataset(split=split) File "/remote/idiap.svm/user.active/rkarimi/dev/ruse/seq2seq/tasks/tasks.py", line 54, in load_dataset return datasets.load_dataset(self.task.name, split=split) File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/load.py", line 611, in load_dataset ignore_verifications=ignore_verifications, File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py", line 476, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/builder.py", line 531, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File " /idiap/home/rkarimi/.cache/huggingface/modules/datasets_modules/datasets/boolq/2987db1f15deaa19500ae24de560eabeaf1f8ef51df88c0470beeec72943bf11/boolq.py", line 74, in _split_generators downloaded_files = dl_manager.download_custom(urls_to_download, tf.io.gfile.copy) File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 149, in download_custom custom_download(url, path) File "/idiap/user/rkarimi/libs/anaconda3/envs/internship/lib/python3.7/site-packages/tensorflow/python/lib/io/file_io.py", line 516, in copy_v2 compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite) tensorflow.python.framework.errors_impl.AlreadyExistsError: file already exists
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1285/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1285/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
0:18:23
https://api.github.com/repos/huggingface/datasets/issues/1167
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1167/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1167/comments
https://api.github.com/repos/huggingface/datasets/issues/1167/events
https://github.com/huggingface/datasets/issues/1167
757,722,921
MDU6SXNzdWU3NTc3MjI5MjE=
1,167
❓ On-the-fly tokenization with datasets, tokenizers, and torch Datasets and Dataloaders
{ "avatar_url": "https://avatars.githubusercontent.com/u/61748653?v=4", "events_url": "https://api.github.com/users/pietrolesci/events{/privacy}", "followers_url": "https://api.github.com/users/pietrolesci/followers", "following_url": "https://api.github.com/users/pietrolesci/following{/other_user}", "gists_url": "https://api.github.com/users/pietrolesci/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pietrolesci", "id": 61748653, "login": "pietrolesci", "node_id": "MDQ6VXNlcjYxNzQ4NjUz", "organizations_url": "https://api.github.com/users/pietrolesci/orgs", "received_events_url": "https://api.github.com/users/pietrolesci/received_events", "repos_url": "https://api.github.com/users/pietrolesci/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pietrolesci/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pietrolesci/subscriptions", "type": "User", "url": "https://api.github.com/users/pietrolesci", "user_view_type": "public" }
[ { "color": "d876e3", "default": true, "description": "Further information is requested", "id": 1935892912, "name": "question", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question" }, { "color": "c5def5", "default": false, "description": "Generic discussion on the library", "id": 2067400324, "name": "generic discussion", "node_id": "MDU6TGFiZWwyMDY3NDAwMzI0", "url": "https://api.github.com/repos/huggingface/datasets/labels/generic%20discussion" } ]
closed
false
null
[]
[ "We're working on adding on-the-fly transforms in datasets.\r\nCurrently the only on-the-fly functions that can be applied are in `set_format` in which we transform the data in either numpy/torch/tf tensors or pandas.\r\nFor example\r\n```python\r\ndataset.set_format(\"torch\")\r\n```\r\napplies `torch.Tensor` to the dataset entries on-the-fly.\r\n\r\nWe plan to extend this to user-defined formatting transforms.\r\nFor example\r\n```python\r\ndataset.set_format(transform=tokenize)\r\n```\r\n\r\nWhat do you think ?", "You can now use `set_transform` to define custom formatting transforms. " ]
2020-12-05T17:02:56
2023-07-20T15:49:42
2023-07-20T15:49:42
NONE
null
null
null
null
Hi there, I have a question regarding "on-the-fly" tokenization. This question was elicited by reading the "How to train a new language model from scratch using Transformers and Tokenizers" [here](https://huggingface.co/blog/how-to-train). Towards the end there is this sentence: "If your dataset is very large, you can opt to load and tokenize examples on the fly, rather than as a preprocessing step". I've tried coming up with a solution that would combine both `datasets` and `tokenizers`, but did not manage to find a good pattern. I guess the solution would entail wrapping a dataset into a Pytorch dataset. As a concrete example from the [docs](https://huggingface.co/transformers/custom_datasets.html) ```python import torch class SquadDataset(torch.utils.data.Dataset): def __init__(self, encodings): # instead of doing this beforehand, I'd like to do tokenization on the fly self.encodings = encodings def __getitem__(self, idx): return {key: torch.tensor(val[idx]) for key, val in self.encodings.items()} def __len__(self): return len(self.encodings.input_ids) train_dataset = SquadDataset(train_encodings) ``` How would one implement this with "on-the-fly" tokenization exploiting the vectorized capabilities of tokenizers? ---- Edit: I have come up with this solution. It does what I want, but I feel it's not very elegant ```python class CustomPytorchDataset(Dataset): def __init__(self): self.dataset = some_hf_dataset(...) self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") def __getitem__(self, batch_idx): instance = self.dataset[text_col][batch_idx] tokenized_text = self.tokenizer(instance, truncation=True, padding=True) return tokenized_text def __len__(self): return len(self.dataset) @staticmethod def collate_fn(batch): # batch is a list, however it will always contain 1 item because we should not use the # batch_size argument as batch_size is controlled by the sampler return {k: torch.tensor(v) for k, v in batch[0].items()} torch_ds = CustomPytorchDataset() # NOTE: batch_sampler returns list of integers and since here we have SequentialSampler # it returns: [1, 2, 3], [4, 5, 6], etc. - check calling `list(batch_sampler)` batch_sampler = BatchSampler(SequentialSampler(torch_ds), batch_size=3, drop_last=True) # NOTE: no `batch_size` as now the it is controlled by the sampler! dl = DataLoader(dataset=torch_ds, sampler=batch_sampler, collate_fn=torch_ds.collate_fn) ```
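A minimal sketch of the on-the-fly approach described in the maintainer comments above, assuming a `datasets` version that ships `set_transform`, a fast tokenizer, and a placeholder dataset with a `"text"` column:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = load_dataset("imdb", split="train")  # placeholder; any dataset with a "text" column works

def tokenize(batch):
    # Runs lazily on each accessed slice, so nothing is pre-tokenized or cached on disk.
    return tokenizer(batch["text"], truncation=True, padding="longest", return_tensors="pt")

dataset.set_transform(tokenize)

batch = dataset[:8]              # tokenization happens here, on the fly
print(batch["input_ids"].shape)  # e.g. torch.Size([8, <longest sequence in the slice>])
```

Slices of the dataset are only tokenized when accessed, which is the on-the-fly behaviour asked about above; the formatted dataset can then be fed to a PyTorch `DataLoader` in the same way as the custom wrapper shown in the question.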
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 3, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/1167/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1167/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
956 days, 22:46:46
https://api.github.com/repos/huggingface/datasets/issues/1115
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1115/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1115/comments
https://api.github.com/repos/huggingface/datasets/issues/1115/events
https://github.com/huggingface/datasets/issues/1115
757,127,527
MDU6SXNzdWU3NTcxMjc1Mjc=
1,115
Incorrect URL for MRQA SQuAD train subset
{ "avatar_url": "https://avatars.githubusercontent.com/u/6259768?v=4", "events_url": "https://api.github.com/users/yuxiang-wu/events{/privacy}", "followers_url": "https://api.github.com/users/yuxiang-wu/followers", "following_url": "https://api.github.com/users/yuxiang-wu/following{/other_user}", "gists_url": "https://api.github.com/users/yuxiang-wu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yuxiang-wu", "id": 6259768, "login": "yuxiang-wu", "node_id": "MDQ6VXNlcjYyNTk3Njg=", "organizations_url": "https://api.github.com/users/yuxiang-wu/orgs", "received_events_url": "https://api.github.com/users/yuxiang-wu/received_events", "repos_url": "https://api.github.com/users/yuxiang-wu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yuxiang-wu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yuxiang-wu/subscriptions", "type": "User", "url": "https://api.github.com/users/yuxiang-wu", "user_view_type": "public" }
[]
closed
false
null
[]
[ "good catch !" ]
2020-12-04T14:05:24
2020-12-06T17:14:22
2020-12-06T17:14:22
CONTRIBUTOR
null
null
null
null
https://github.com/huggingface/datasets/blob/4ef4c8f8b7a60e35c6fa21115fca9faae91c9f74/datasets/mrqa/mrqa.py#L53 The URL for `train+SQuAD` subset of MRQA points to the dev set instead of train set. It should be `https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SQuAD.jsonl.gz`.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1115/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1115/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
2 days, 3:08:58
https://api.github.com/repos/huggingface/datasets/issues/1110
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1110/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1110/comments
https://api.github.com/repos/huggingface/datasets/issues/1110/events
https://github.com/huggingface/datasets/issues/1110
757,082,677
MDU6SXNzdWU3NTcwODI2Nzc=
1,110
Using a feature named "_type" fails with certain operations
{ "avatar_url": "https://avatars.githubusercontent.com/u/15979778?v=4", "events_url": "https://api.github.com/users/dcfidalgo/events{/privacy}", "followers_url": "https://api.github.com/users/dcfidalgo/followers", "following_url": "https://api.github.com/users/dcfidalgo/following{/other_user}", "gists_url": "https://api.github.com/users/dcfidalgo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dcfidalgo", "id": 15979778, "login": "dcfidalgo", "node_id": "MDQ6VXNlcjE1OTc5Nzc4", "organizations_url": "https://api.github.com/users/dcfidalgo/orgs", "received_events_url": "https://api.github.com/users/dcfidalgo/received_events", "repos_url": "https://api.github.com/users/dcfidalgo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dcfidalgo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dcfidalgo/subscriptions", "type": "User", "url": "https://api.github.com/users/dcfidalgo", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Thanks for reporting !\r\n\r\nIndeed this is a keyword in the library that is used to encode/decode features to a python dictionary that we can save/load to json.\r\nWe can probably change `_type` to something that is less likely to collide with user feature names.\r\nIn this case we would want something backward compatible though.\r\n\r\nFeel free to try a fix and open a PR, and to ping me if I can help :) " ]
2020-12-04T12:56:33
2022-01-14T18:07:00
2022-01-14T18:07:00
CONTRIBUTOR
null
null
null
null
A column named `_type` leads to a `TypeError: unhashable type: 'dict'` for certain operations: ```python from datasets import Dataset, concatenate_datasets ds = Dataset.from_dict({"_type": ["whatever"]}).map() concatenate_datasets([ds]) # or simply Dataset(ds._data) ``` Context: We are using datasets to persist data coming from elasticsearch to feed to our pipeline, and elasticsearch has a `_type` field, hence the strange name of the column. Not sure if you wish to support this specific column name, but if you do i would be happy to try a fix and provide a PR. I already had a look into it and i think the culprit is the `datasets.features.generate_from_dict` function. It uses the hard coded `_type` string to figure out if it reached the end of the nested feature object from a serialized dict. Best wishes and keep up the awesome work!
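Until the collision is handled inside the library, one hedged workaround is simply to rename the offending key before the `Dataset` is built, since only the column name clashes with the reserved `_type` marker; `es_type` is a placeholder name:

```python
from datasets import Dataset, concatenate_datasets

records = {"_type": ["whatever"]}
records["es_type"] = records.pop("_type")   # rename before building the Dataset

ds = Dataset.from_dict(records).map()
combined = concatenate_datasets([ds])       # no longer trips over the reserved "_type" key
print(combined.features)
```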
{ "avatar_url": "https://avatars.githubusercontent.com/u/15979778?v=4", "events_url": "https://api.github.com/users/dcfidalgo/events{/privacy}", "followers_url": "https://api.github.com/users/dcfidalgo/followers", "following_url": "https://api.github.com/users/dcfidalgo/following{/other_user}", "gists_url": "https://api.github.com/users/dcfidalgo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dcfidalgo", "id": 15979778, "login": "dcfidalgo", "node_id": "MDQ6VXNlcjE1OTc5Nzc4", "organizations_url": "https://api.github.com/users/dcfidalgo/orgs", "received_events_url": "https://api.github.com/users/dcfidalgo/received_events", "repos_url": "https://api.github.com/users/dcfidalgo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dcfidalgo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dcfidalgo/subscriptions", "type": "User", "url": "https://api.github.com/users/dcfidalgo", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1110/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1110/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
406 days, 5:10:27
https://api.github.com/repos/huggingface/datasets/issues/1103
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1103/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1103/comments
https://api.github.com/repos/huggingface/datasets/issues/1103/events
https://github.com/huggingface/datasets/issues/1103
757,016,820
MDU6SXNzdWU3NTcwMTY4MjA=
1,103
Add support to download kaggle datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/1183441?v=4", "events_url": "https://api.github.com/users/abhishekkrthakur/events{/privacy}", "followers_url": "https://api.github.com/users/abhishekkrthakur/followers", "following_url": "https://api.github.com/users/abhishekkrthakur/following{/other_user}", "gists_url": "https://api.github.com/users/abhishekkrthakur/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/abhishekkrthakur", "id": 1183441, "login": "abhishekkrthakur", "node_id": "MDQ6VXNlcjExODM0NDE=", "organizations_url": "https://api.github.com/users/abhishekkrthakur/orgs", "received_events_url": "https://api.github.com/users/abhishekkrthakur/received_events", "repos_url": "https://api.github.com/users/abhishekkrthakur/repos", "site_admin": false, "starred_url": "https://api.github.com/users/abhishekkrthakur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abhishekkrthakur/subscriptions", "type": "User", "url": "https://api.github.com/users/abhishekkrthakur", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "Hey, I think this is great idea. Any plan to integrate kaggle private datasets loading to `datasets`?", "The workflow for downloading a Kaggle dataset and turning it into an HF dataset is pretty simple:\r\n```python\r\n!kaggle datasets download -p path\r\nds = load_dataset(path)\r\n```\r\n\r\nNative support would make our download logic even more complex, and I don't think this is a good idea considering this particular feature is not requested often. \r\n\r\nPS: Kaggle should integrate their API with `fsspec` to allow us to use a common interface if they are interested in tighter integrations" ]
2020-12-04T11:08:37
2023-07-20T15:22:24
2023-07-20T15:22:23
CONTRIBUTOR
null
null
null
null
We can use the Kaggle API key for authentication.
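A hedged sketch of the workflow later suggested in the comments, assuming the `kaggle` CLI is installed, credentials are configured, and the downloaded archive unpacks into CSV files; the dataset slug `owner/some-dataset` is purely illustrative:

```python
import subprocess
from datasets import load_dataset

# Download and unzip a Kaggle dataset with the official CLI
# (requires KAGGLE_USERNAME / KAGGLE_KEY or ~/.kaggle/kaggle.json).
subprocess.run(
    ["kaggle", "datasets", "download", "-d", "owner/some-dataset", "-p", "data/", "--unzip"],
    check=True,
)

# Load the extracted files with the generic csv builder.
ds = load_dataset("csv", data_files="data/*.csv")
```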
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 3, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/1103/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1103/timeline
null
not_planned
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
958 days, 4:13:46
https://api.github.com/repos/huggingface/datasets/issues/1102
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1102/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1102/comments
https://api.github.com/repos/huggingface/datasets/issues/1102/events
https://github.com/huggingface/datasets/issues/1102
757,016,515
MDU6SXNzdWU3NTcwMTY1MTU=
1,102
Add retries to download manager
{ "avatar_url": "https://avatars.githubusercontent.com/u/1183441?v=4", "events_url": "https://api.github.com/users/abhishekkrthakur/events{/privacy}", "followers_url": "https://api.github.com/users/abhishekkrthakur/followers", "following_url": "https://api.github.com/users/abhishekkrthakur/following{/other_user}", "gists_url": "https://api.github.com/users/abhishekkrthakur/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/abhishekkrthakur", "id": 1183441, "login": "abhishekkrthakur", "node_id": "MDQ6VXNlcjExODM0NDE=", "organizations_url": "https://api.github.com/users/abhishekkrthakur/orgs", "received_events_url": "https://api.github.com/users/abhishekkrthakur/received_events", "repos_url": "https://api.github.com/users/abhishekkrthakur/repos", "site_admin": false, "starred_url": "https://api.github.com/users/abhishekkrthakur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abhishekkrthakur/subscriptions", "type": "User", "url": "https://api.github.com/users/abhishekkrthakur", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis", "user_view_type": "public" } ]
[]
2020-12-04T11:08:11
2020-12-22T15:34:06
2020-12-22T15:34:06
CONTRIBUTOR
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1102/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1102/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
18 days, 4:25:55
https://api.github.com/repos/huggingface/datasets/issues/1064
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1064/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1064/comments
https://api.github.com/repos/huggingface/datasets/issues/1064/events
https://github.com/huggingface/datasets/issues/1064
756,382,186
MDU6SXNzdWU3NTYzODIxODY=
1,064
Not support links with 302 redirect
{ "avatar_url": "https://avatars.githubusercontent.com/u/6429850?v=4", "events_url": "https://api.github.com/users/chameleonTK/events{/privacy}", "followers_url": "https://api.github.com/users/chameleonTK/followers", "following_url": "https://api.github.com/users/chameleonTK/following{/other_user}", "gists_url": "https://api.github.com/users/chameleonTK/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/chameleonTK", "id": 6429850, "login": "chameleonTK", "node_id": "MDQ6VXNlcjY0Mjk4NTA=", "organizations_url": "https://api.github.com/users/chameleonTK/orgs", "received_events_url": "https://api.github.com/users/chameleonTK/received_events", "repos_url": "https://api.github.com/users/chameleonTK/repos", "site_admin": false, "starred_url": "https://api.github.com/users/chameleonTK/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chameleonTK/subscriptions", "type": "User", "url": "https://api.github.com/users/chameleonTK", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
[ "Hi !\r\nThis kind of links is now supported by the library since #1316", "> Hi !\r\n> This kind of links is now supported by the library since #1316\r\n\r\nI updated links in TLC datasets to be the github links in this pull request \r\n https://github.com/huggingface/datasets/pull/1737\r\n\r\nEverything works now. Thank you." ]
2020-12-03T17:04:43
2021-01-14T02:51:25
2021-01-14T02:51:25
CONTRIBUTOR
null
null
null
null
I have an issue adding this download link: https://github.com/jitkapat/thailitcorpus/releases/download/v.2.0/tlc_v.2.0.tar.gz

It might be because it is not a direct link (it returns 302 and redirects to AWS, which returns 403 for HEAD requests).

```python
r.head("https://github.com/jitkapat/thailitcorpus/releases/download/v.2.0/tlc_v.2.0.tar.gz", allow_redirects=True)
# <Response [403]>
```
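A small illustration, not from the original report, of why the HEAD probe above fails while an ordinary GET succeeds: GitHub answers with a 302 and the signed S3 URL it redirects to rejects HEAD but serves GET. Only `requests` is assumed:

```python
import requests

url = "https://github.com/jitkapat/thailitcorpus/releases/download/v.2.0/tlc_v.2.0.tar.gz"

# The signed S3 URL behind GitHub's 302 redirect rejects HEAD requests...
print(requests.head(url, allow_redirects=True).status_code)  # 403 at the time of the report

# ...but a streaming GET that follows the redirect downloads the archive.
resp = requests.get(url, allow_redirects=True, stream=True)
print(resp.status_code)  # 200
resp.close()
```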
{ "avatar_url": "https://avatars.githubusercontent.com/u/6429850?v=4", "events_url": "https://api.github.com/users/chameleonTK/events{/privacy}", "followers_url": "https://api.github.com/users/chameleonTK/followers", "following_url": "https://api.github.com/users/chameleonTK/following{/other_user}", "gists_url": "https://api.github.com/users/chameleonTK/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/chameleonTK", "id": 6429850, "login": "chameleonTK", "node_id": "MDQ6VXNlcjY0Mjk4NTA=", "organizations_url": "https://api.github.com/users/chameleonTK/orgs", "received_events_url": "https://api.github.com/users/chameleonTK/received_events", "repos_url": "https://api.github.com/users/chameleonTK/repos", "site_admin": false, "starred_url": "https://api.github.com/users/chameleonTK/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chameleonTK/subscriptions", "type": "User", "url": "https://api.github.com/users/chameleonTK", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1064/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1064/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
41 days, 9:46:42
https://api.github.com/repos/huggingface/datasets/issues/1046
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1046/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1046/comments
https://api.github.com/repos/huggingface/datasets/issues/1046/events
https://github.com/huggingface/datasets/issues/1046
756,122,709
MDU6SXNzdWU3NTYxMjI3MDk=
1,046
Dataset.map() turns tensors into lists?
{ "avatar_url": "https://avatars.githubusercontent.com/u/5270804?v=4", "events_url": "https://api.github.com/users/tombosc/events{/privacy}", "followers_url": "https://api.github.com/users/tombosc/followers", "following_url": "https://api.github.com/users/tombosc/following{/other_user}", "gists_url": "https://api.github.com/users/tombosc/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tombosc", "id": 5270804, "login": "tombosc", "node_id": "MDQ6VXNlcjUyNzA4MDQ=", "organizations_url": "https://api.github.com/users/tombosc/orgs", "received_events_url": "https://api.github.com/users/tombosc/received_events", "repos_url": "https://api.github.com/users/tombosc/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tombosc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tombosc/subscriptions", "type": "User", "url": "https://api.github.com/users/tombosc", "user_view_type": "public" }
[]
closed
false
null
[]
[ "A solution is to have the tokenizer return a list instead of a tensor, and then use `dataset_tok.set_format(type = 'torch')` to convert that list into a tensor. Still not sure if bug.", "It is expected behavior, you should set the format to `\"torch\"` as you mentioned to get pytorch tensors back.\r\nBy default datasets returns pure python objects." ]
2020-12-03T11:43:46
2022-10-05T12:12:41
2022-10-05T12:12:41
NONE
null
null
null
null
I apply `Dataset.map()` to a function that returns a dict of torch tensors (like a tokenizer from the transformers repo). However, in the mapped dataset, these tensors have turned into lists!

```python
import datasets
import torch
from datasets import load_dataset

print("version datasets", datasets.__version__)

dataset = load_dataset("snli", split='train[0:50]')

def tokenizer_fn(example):
    # actually uses a tokenizer which does something like:
    return {'input_ids': torch.tensor([[0, 1, 2]])}

print("First item in dataset:\n", dataset[0])
tokenized = tokenizer_fn(dataset[0])
print("Tokenized hyp:\n", tokenized)

dataset_tok = dataset.map(tokenizer_fn, batched=False, remove_columns=['label', 'premise', 'hypothesis'])
print("Tokenized using map:\n", dataset_tok[0])
print(type(tokenized['input_ids']), type(dataset_tok[0]['input_ids']))
```

The output is:

```
version datasets 1.1.3
Reusing dataset snli (/home/tom/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c)
First item in dataset:
 {'premise': 'A person on a horse jumps over a broken down airplane.', 'hypothesis': 'A person is training his horse for a competition.', 'label': 1}
Tokenized hyp:
 {'input_ids': tensor([[0, 1, 2]])}
Loading cached processed dataset at /home/tom/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-fe38f449fe9ac46f.arrow
Tokenized using map:
 {'input_ids': [[0, 1, 2]]}
<class 'torch.Tensor'> <class 'list'>
```

Or am I doing something wrong?
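A short sketch of the fix pointed out in the comments, not part of the original post: keep the mapped column as plain lists and ask the dataset for PyTorch tensors via `set_format`:

```python
from datasets import load_dataset

dataset = load_dataset("snli", split="train[0:50]")

def tokenizer_fn(example):
    # Return plain Python lists; Arrow stores them as lists anyway.
    return {"input_ids": [[0, 1, 2]]}

dataset_tok = dataset.map(
    tokenizer_fn, batched=False, remove_columns=["label", "premise", "hypothesis"]
)

# Convert to torch tensors on access instead of storing tensors.
dataset_tok.set_format(type="torch", columns=["input_ids"])
print(type(dataset_tok[0]["input_ids"]))  # <class 'torch.Tensor'>
```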
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 4, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 4, "url": "https://api.github.com/repos/huggingface/datasets/issues/1046/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1046/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
671 days, 0:28:55
https://api.github.com/repos/huggingface/datasets/issues/1027
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1027/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1027/comments
https://api.github.com/repos/huggingface/datasets/issues/1027/events
https://github.com/huggingface/datasets/issues/1027
755,695,420
MDU6SXNzdWU3NTU2OTU0MjA=
1,027
Hi
{ "avatar_url": "https://avatars.githubusercontent.com/u/75398394?v=4", "events_url": "https://api.github.com/users/suemori87/events{/privacy}", "followers_url": "https://api.github.com/users/suemori87/followers", "following_url": "https://api.github.com/users/suemori87/following{/other_user}", "gists_url": "https://api.github.com/users/suemori87/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/suemori87", "id": 75398394, "login": "suemori87", "node_id": "MDQ6VXNlcjc1Mzk4Mzk0", "organizations_url": "https://api.github.com/users/suemori87/orgs", "received_events_url": "https://api.github.com/users/suemori87/received_events", "repos_url": "https://api.github.com/users/suemori87/repos", "site_admin": false, "starred_url": "https://api.github.com/users/suemori87/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/suemori87/subscriptions", "type": "User", "url": "https://api.github.com/users/suemori87", "user_view_type": "public" }
[]
closed
false
null
[]
[]
2020-12-02T23:47:14
2020-12-03T16:42:41
2020-12-03T16:42:41
NONE
null
null
null
null
## Adding a Dataset

- **Name:** *name of the dataset*
- **Description:** *short description of the dataset (or link to social media or blog post)*
- **Paper:** *link to the dataset paper if available*
- **Data:** *link to the Github repository or current dataset location*
- **Motivation:** *what are some good reasons to have this dataset*

Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1027/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1027/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
16:55:27
https://api.github.com/repos/huggingface/datasets/issues/1026
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1026/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1026/comments
https://api.github.com/repos/huggingface/datasets/issues/1026/events
https://github.com/huggingface/datasets/issues/1026
755,689,195
MDU6SXNzdWU3NTU2ODkxOTU=
1,026
Lío o
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[]
closed
false
null
[]
[]
2020-12-02T23:32:25
2020-12-03T16:42:47
2020-12-03T16:42:47
NONE
null
null
null
null
````l````````` ``` O ``` ````` Ño ``` ```` ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1026/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1026/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
17:10:22
https://api.github.com/repos/huggingface/datasets/issues/1004
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1004/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1004/comments
https://api.github.com/repos/huggingface/datasets/issues/1004/events
https://github.com/huggingface/datasets/issues/1004
755,325,368
MDU6SXNzdWU3NTUzMjUzNjg=
1,004
how large datasets are handled under the hood
{ "avatar_url": "https://avatars.githubusercontent.com/u/73364383?v=4", "events_url": "https://api.github.com/users/rabeehkarimimahabadi/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehkarimimahabadi/followers", "following_url": "https://api.github.com/users/rabeehkarimimahabadi/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehkarimimahabadi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehkarimimahabadi", "id": 73364383, "login": "rabeehkarimimahabadi", "node_id": "MDQ6VXNlcjczMzY0Mzgz", "organizations_url": "https://api.github.com/users/rabeehkarimimahabadi/orgs", "received_events_url": "https://api.github.com/users/rabeehkarimimahabadi/received_events", "repos_url": "https://api.github.com/users/rabeehkarimimahabadi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehkarimimahabadi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehkarimimahabadi/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehkarimimahabadi", "user_view_type": "public" }
[]
closed
false
null
[]
[ "This library uses Apache Arrow under the hood to store datasets on disk.\r\nThe advantage of Apache Arrow is that it allows to memory map the dataset. This allows to load datasets bigger than memory and with almost no RAM usage. It also offers excellent I/O speed.\r\n\r\nFor example when you access one element or one batch\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nsquad = load_dataset(\"squad\", split=\"train\")\r\nfirst_element = squad[0]\r\none_batch = squad[:8]\r\n```\r\n\r\nthen only this element/batch is loaded in memory, while the rest of the dataset is memory mapped.", "How can we change how much data is loaded to memory with Arrow? I think that I am having some performance issue with it. When Arrow loads the data from disk it does it in multiprocess? It's almost twice slower training with arrow than in memory.\r\n\r\nEDIT:\r\nMy fault! I had not seen the `dataloader_num_workers` in `TrainingArguments` ! Now I can parallelize and go fast! Sorry, and thanks.", "> How can we change how much data is loaded to memory with Arrow? I think that I am having some performance issue with it. When Arrow loads the data from disk it does it in multiprocess? It's almost twice slower training with arrow than in memory.\r\n\r\nLoading arrow data from disk is done with memory-mapping. This allows to load huge datasets without filling your RAM.\r\nMemory mapping is almost instantaneous and is done within one process.\r\n\r\nThen, the speed of querying examples from the dataset is I/O bounded depending on your disk. If it's an SSD then fetching examples from the dataset will be very fast.\r\nBut since the I/O speed of an SSD is lower than the one of RAM it's expected to be slower to fetch data from disk than from memory.\r\nStill, if you load the dataset in different processes then it can be faster but there will still be the I/O bottleneck of the disk.\r\n\r\n> EDIT:\r\n> My fault! I had not seen the `dataloader_num_workers` in `TrainingArguments` ! Now I can parallelize and go fast! Sorry, and thanks.\r\n\r\nOk let me know if that helps !\r\n" ]
2020-12-02T14:32:40
2022-10-05T12:13:29
2022-10-05T12:13:29
NONE
null
null
null
null
Hi, I want to use multiple large datasets with a map-style dataloader, and they cannot fit into memory. Could you tell me how the datasets are handled under the hood? Do you bring everything into memory in the map-style case, or is there some sharding under the hood so that data is only brought into memory when necessary? Thanks.
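A tiny sketch of the memory-mapped access pattern described in the reply below; nothing here is specific to the question beyond using a public dataset as a stand-in:

```python
from datasets import load_dataset

# The Arrow file on disk is memory-mapped, so loading does not
# pull the whole split into RAM.
squad = load_dataset("squad", split="train")

first_element = squad[0]   # only this example is materialized
one_batch = squad[:8]      # only this slice is materialized
print(len(squad), list(first_element.keys()))
```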
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1004/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1004/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
671 days, 21:40:49
https://api.github.com/repos/huggingface/datasets/issues/996
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/996/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/996/comments
https://api.github.com/repos/huggingface/datasets/issues/996/events
https://github.com/huggingface/datasets/issues/996
755,176,084
MDU6SXNzdWU3NTUxNzYwODQ=
996
NotADirectoryError while loading the CNN/Dailymail dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/75367920?v=4", "events_url": "https://api.github.com/users/arc-bu/events{/privacy}", "followers_url": "https://api.github.com/users/arc-bu/followers", "following_url": "https://api.github.com/users/arc-bu/following{/other_user}", "gists_url": "https://api.github.com/users/arc-bu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/arc-bu", "id": 75367920, "login": "arc-bu", "node_id": "MDQ6VXNlcjc1MzY3OTIw", "organizations_url": "https://api.github.com/users/arc-bu/orgs", "received_events_url": "https://api.github.com/users/arc-bu/received_events", "repos_url": "https://api.github.com/users/arc-bu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/arc-bu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arc-bu/subscriptions", "type": "User", "url": "https://api.github.com/users/arc-bu", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Looks like the google drive download failed.\r\nI'm getting a `Google Drive - Quota exceeded` error while looking at the downloaded file.\r\n\r\nWe should consider finding a better host than google drive for this dataset imo\r\nrelated : #873 #864 ", "It is working now, thank you. \r\n\r\nShould I leave this issue open to address the Quota-exceeded error?", "Yes please. It's been happening several times, we definitely need to address it", "Any updates on this one? I'm facing a similar issue trying to add CelebA.", "I've looked into it and couldn't find a solution. This looks like a Google Drive limitation..\r\nPlease try to use other hosts when possible", "The original links are google drive links. Would it be feasible for HF to maintain their own servers for this? Also, I think the same issue must also exist with TFDS.", "It's possible to host data on our side but we should ask the authors. TFDS has the same issue and doesn't have a solution either afaik.\r\nOtherwise you can use the google drive link, but it it's not that convenient because of this quota issue.", "Okay. I imagine asking every author who shares their dataset on Google Drive will also be cumbersome.", "I am getting this error as well. Is there a fix?", "Not as long as the data is stored on GG drive unfortunately.\r\nMaybe we can ask if there's a mirror ?\r\n\r\nHi @JafferWilson is there a download link to get cnn dailymail from another host than GG drive ?\r\n\r\nTo give you some context, this library provides tools to download and process datasets. For CNN DailyMail the data are downloaded from the link you provide on your github repository. Unfortunately because of GG drive quotas, many users are not able to load this dataset.", "The following copy of CNN/DM dataset, fixed the problem for me:\r\nhttps://huggingface.co/datasets/ccdv/cnn_dailymail", "Thanks for the link @mrazizi !\r\n\r\nApparently the original authors don't host the dataset themselves (\"for legal reasons\", source [here](https://github.com/abisee/cnn-dailymail/issues/9))." ]
2020-12-02T11:07:56
2022-02-17T14:13:39
2022-02-17T14:13:39
NONE
null
null
null
null
```
Downloading and preparing dataset cnn_dailymail/3.0.0 (download: 558.32 MiB, generated: 1.28 GiB, post-processed: Unknown size, total: 1.82 GiB) to /root/.cache/huggingface/datasets/cnn_dailymail/3.0.0/3.0.0/0128610a44e10f25b4af6689441c72af86205282d26399642f7db38fa7535602...
---------------------------------------------------------------------------
NotADirectoryError                        Traceback (most recent call last)
<ipython-input-9-cd4bf8bea840> in <module>()
     22
     23
---> 24 train = load_dataset('cnn_dailymail', '3.0.0', split='train')
     25 validation = load_dataset('cnn_dailymail', '3.0.0', split='validation')
     26 test = load_dataset('cnn_dailymail', '3.0.0', split='test')

5 frames
/root/.cache/huggingface/modules/datasets_modules/datasets/cnn_dailymail/0128610a44e10f25b4af6689441c72af86205282d26399642f7db38fa7535602/cnn_dailymail.py in _find_files(dl_paths, publisher, url_dict)
    132   else:
    133     logging.fatal("Unsupported publisher: %s", publisher)
--> 134   files = sorted(os.listdir(top_dir))
    135
    136   ret_files = []

NotADirectoryError: [Errno 20] Not a directory: '/root/.cache/huggingface/datasets/downloads/1bc05d24fa6dda2468e83a73cf6dc207226e01e3c48a507ea716dc0421da583b/cnn/stories'
```
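A hedged sketch of the two mitigations that came out of the discussion below: force a clean re-download once the Google Drive quota resets, or fall back to the mirrored copy on the Hub (using the 3.0.0 config name for the mirror is an assumption):

```python
from datasets import load_dataset

# Option 1: discard the truncated "quota exceeded" download and retry later.
train = load_dataset("cnn_dailymail", "3.0.0", split="train",
                     download_mode="force_redownload")

# Option 2 (suggested in the comments): use the mirror hosted on the Hub.
train = load_dataset("ccdv/cnn_dailymail", "3.0.0", split="train")
```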
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/996/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/996/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
442 days, 3:05:43
https://api.github.com/repos/huggingface/datasets/issues/993
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/993/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/993/comments
https://api.github.com/repos/huggingface/datasets/issues/993/events
https://github.com/huggingface/datasets/issues/993
755,135,768
MDU6SXNzdWU3NTUxMzU3Njg=
993
Problem downloading amazon_reviews_multi
{ "avatar_url": "https://avatars.githubusercontent.com/u/29229602?v=4", "events_url": "https://api.github.com/users/hfawaz/events{/privacy}", "followers_url": "https://api.github.com/users/hfawaz/followers", "following_url": "https://api.github.com/users/hfawaz/following{/other_user}", "gists_url": "https://api.github.com/users/hfawaz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hfawaz", "id": 29229602, "login": "hfawaz", "node_id": "MDQ6VXNlcjI5MjI5NjAy", "organizations_url": "https://api.github.com/users/hfawaz/orgs", "received_events_url": "https://api.github.com/users/hfawaz/received_events", "repos_url": "https://api.github.com/users/hfawaz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hfawaz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hfawaz/subscriptions", "type": "User", "url": "https://api.github.com/users/hfawaz", "user_view_type": "public" }
[]
closed
false
null
[]
[ "Hi @hfawaz ! This is working fine for me. Is it a repeated occurence? Have you tried from the latest verion?", "Hi, it seems a connection problem. \r\nNow it says: \r\n`ConnectionError: Couldn't reach https://amazon-reviews-ml.s3-us-west-2.amazonaws.com/json/train/dataset_ja_train.json`" ]
2020-12-02T10:15:57
2022-10-05T12:21:34
2022-10-05T12:21:34
CONTRIBUTOR
null
null
null
null
Thanks for adding the dataset. After trying to load the dataset, I am getting the following error:

`ConnectionError: Couldn't reach https://amazon-reviews-ml.s3-us-west-2.amazonaws.com/json/train/dataset_fr_train.json`

I used the following code to load the dataset:

```python
load_dataset(dataset_name, "all_languages", cache_dir=".data")
```

I am using version 1.1.3 of `datasets`. Note that I can perform a successful `wget https://amazon-reviews-ml.s3-us-west-2.amazonaws.com/json/train/dataset_fr_train.json`.
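A small sketch, not from the original report, of a naive retry wrapper around the failing call while the S3-hosted files are intermittently unreachable; the retry count and sleep are arbitrary:

```python
import time
from datasets import load_dataset

def load_with_retries(name, config, retries=3, wait=30):
    # Retry the transient ConnectionError raised while the JSON files
    # on S3 are unreachable.
    for attempt in range(1, retries + 1):
        try:
            return load_dataset(name, config, cache_dir=".data")
        except ConnectionError as err:
            print(f"attempt {attempt} failed: {err}")
            time.sleep(wait)
    raise RuntimeError("all download attempts failed")

ds = load_with_retries("amazon_reviews_multi", "all_languages")
```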
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/993/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/993/timeline
null
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
672 days, 2:05:37