Dataset schema (column, type, and the viewer's observed value ranges):

| Column | Type | Observed values |
|---|---|---|
| html_url | string | lengths 46 to 51 |
| number | int64 | 1 to 7.85k |
| title | string | lengths 1 to 290 |
| user | dict | |
| labels | list | lengths 0 to 4 |
| state | string | 2 classes |
| locked | bool | 1 class |
| comments | list | lengths 0 to 30 |
| created_at | timestamp[ns, tz=UTC] | 2020-04-14 10:18:02 to 2025-11-05 18:11:12 |
| updated_at | timestamp[ns, tz=UTC] | 2020-04-27 16:04:17 to 2025-11-06 09:44:34 |
| closed_at | timestamp[ns, tz=UTC] | 2020-04-14 12:01:40 to 2025-11-05 16:02:32, nullable |
| author_association | string | 4 classes |
| draft | bool | 2 classes |
| pull_request | dict | |
| body | string | lengths 0 to 228k, nullable |
| closed_by | dict | |
| reactions | dict | |
| state_reason | string | 4 classes |
| sub_issues_summary | dict | |
| issue_dependencies_summary | dict | |
| is_pull_request | bool | 2 classes |

Sample rows:
html_url: https://github.com/huggingface/datasets/pull/6813
number: 6813
title: Add Dataset.take and Dataset.skip
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
labels: []
state: closed
locked: false
comments:
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6813). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005153 / 0.011353 (-0.006200) | 0.003560 / 0.011008 (-0.007448) | 0.063142 / 0.038508 (0.024634) | 0.030799 / 0.023109 (0.007690) | 0.241754 / 0.275898 (-0.034144) | 0.264874 / 0.323480 (-0.058606) | 0.003099 / 0.007986 (-0.004887) | 0.002629 / 0.004328 (-0.001700) | 0.049006 / 0.004250 (0.044756) | 0.044831 / 0.037052 (0.007779) | 0.258961 / 0.258489 (0.000472) | 0.286939 / 0.293841 (-0.006902) | 0.026756 / 0.128546 (-0.101791) | 0.010443 / 0.075646 (-0.065204) | 0.207264 / 0.419271 (-0.212007) | 0.035242 / 0.043533 (-0.008291) | 0.250440 / 0.255139 (-0.004699) | 0.265405 / 0.283200 (-0.017794) | 0.018924 / 0.141683 (-0.122759) | 1.138607 / 1.452155 (-0.313547) | 1.203017 / 1.492716 (-0.289700) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091293 / 0.018006 (0.073286) | 0.303937 / 0.000490 (0.303447) | 0.000266 / 0.000200 (0.000066) | 0.000056 / 0.000054 (0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018667 / 0.037411 (-0.018744) | 0.061310 / 0.014526 (0.046784) | 0.073565 / 0.176557 (-0.102991) | 0.119044 / 0.737135 (-0.618091) | 0.074484 / 0.296338 (-0.221854) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286324 / 0.215209 (0.071114) | 2.836637 / 2.077655 (0.758982) | 1.458531 / 1.504120 (-0.045589) | 1.333081 / 1.541195 (-0.208114) | 1.328398 / 1.468490 (-0.140092) | 0.571467 / 4.584777 (-4.013310) | 2.409869 / 3.745712 (-1.335843) | 2.760241 / 5.269862 (-2.509621) | 1.728153 / 4.565676 (-2.837523) | 0.063008 / 0.424275 (-0.361267) | 0.005375 / 0.007607 (-0.002232) | 0.338574 / 0.226044 (0.112530) | 3.355485 / 2.268929 (1.086556) | 1.812741 / 55.444624 (-53.631884) | 1.507435 / 6.876477 (-5.369041) | 1.516957 / 2.142072 (-0.625116) | 0.643790 / 4.805227 (-4.161437) | 0.117465 / 6.500664 (-6.383199) | 0.041960 / 0.075469 (-0.033509) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.993787 / 1.841788 (-0.848001) | 11.439076 / 8.074308 (3.364768) | 9.636815 / 10.191392 (-0.554577) | 0.131292 / 0.680424 (-0.549132) | 0.014916 / 0.534201 (-0.519285) | 0.287309 / 0.579283 (-0.291974) | 0.261971 / 0.434364 (-0.172392) | 0.324453 / 0.540337 (-0.215885) | 0.420306 / 1.386936 (-0.966630) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005138 / 0.011353 (-0.006215) | 0.003719 / 0.011008 (-0.007289) | 0.050411 / 0.038508 (0.011903) | 0.031334 / 0.023109 (0.008225) | 0.281752 / 0.275898 (0.005854) | 0.299445 / 0.323480 (-0.024035) | 0.004194 / 0.007986 (-0.003792) | 0.002737 / 0.004328 (-0.001591) | 0.048527 / 0.004250 (0.044277) | 0.040294 / 0.037052 (0.003242) | 0.291763 / 0.258489 (0.033274) | 0.317597 / 0.293841 (0.023757) | 0.029014 / 0.128546 (-0.099532) | 0.010372 / 0.075646 (-0.065274) | 0.058704 / 0.419271 (-0.360568) | 0.033259 / 0.043533 (-0.010273) | 0.278109 / 0.255139 (0.022970) | 0.299593 / 0.283200 (0.016393) | 0.018048 / 0.141683 (-0.123635) | 1.185558 / 1.452155 (-0.266597) | 1.203481 / 1.492716 (-0.289236) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.091149 / 0.018006 (0.073143) | 0.306152 / 0.000490 (0.305662) | 0.000246 / 0.000200 (0.000046) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022082 / 0.037411 (-0.015330) | 0.074487 / 0.014526 (0.059961) | 0.086112 / 0.176557 (-0.090444) | 0.124303 / 0.737135 (-0.612832) | 0.088831 / 0.296338 (-0.207508) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291745 / 0.215209 (0.076536) | 2.878397 / 2.077655 (0.800742) | 1.606920 / 1.504120 (0.102801) | 1.492352 / 1.541195 (-0.048843) | 1.509725 / 1.468490 (0.041235) | 0.567087 / 4.584777 (-4.017690) | 2.436423 / 3.745712 (-1.309290) | 2.793930 / 5.269862 (-2.475932) | 1.748329 / 4.565676 (-2.817347) | 0.063424 / 0.424275 (-0.360851) | 0.005476 / 0.007607 (-0.002131) | 0.346211 / 0.226044 (0.120167) | 3.461288 / 2.268929 (1.192360) | 1.979362 / 55.444624 (-53.465262) | 1.702877 / 6.876477 (-5.173600) | 1.699087 / 2.142072 (-0.442985) | 0.645116 / 4.805227 (-4.160112) | 0.116186 / 6.500664 (-6.384478) | 0.041246 / 0.075469 (-0.034223) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.017540 / 1.841788 (-0.824248) | 12.016640 / 8.074308 (3.942332) | 10.234085 / 10.191392 (0.042693) | 0.147558 / 0.680424 (-0.532866) | 0.015096 / 0.534201 (-0.519105) | 0.288077 / 0.579283 (-0.291206) | 0.274629 / 0.434364 (-0.159735) | 0.334097 / 0.540337 (-0.206241) | 0.425476 / 1.386936 (-0.961460) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#55eb1d9a34a91dbf2418166f9f1d92f7181e778b \"CML watermark\")\n" ]
created_at: 2024-04-16T09:53:42Z
updated_at: 2024-04-16T14:12:14Z
closed_at: 2024-04-16T14:06:07Z
author_association: MEMBER
draft: false
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/6813.diff", "html_url": "https://github.com/huggingface/datasets/pull/6813", "merged_at": "2024-04-16T14:06:07Z", "patch_url": "https://github.com/huggingface/datasets/pull/6813.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6813" }
body: ...to be aligned with IterableDataset.take and IterableDataset.skip
closed_by:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6813/reactions" }
null
null
null
true
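
For context, the record above is the PR that added `Dataset.take` and `Dataset.skip` to the map-style `Dataset`, mirroring `IterableDataset.take` and `IterableDataset.skip`. A minimal usage sketch, assuming a `datasets` release that includes this PR:

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(10))})

# Mirror of IterableDataset.take / IterableDataset.skip on a map-style Dataset:
head = ds.take(3)   # new Dataset with the first 3 rows
tail = ds.skip(3)   # new Dataset without the first 3 rows

print(head["x"])  # [0, 1, 2]
print(tail["x"])  # [3, 4, 5, 6, 7, 8, 9]
```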

html_url: https://github.com/huggingface/datasets/pull/6812
number: 6812
title: Run CI
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/1309177?v=4", "events_url": "https://api.github.com/users/charliermarsh/events{/privacy}", "followers_url": "https://api.github.com/users/charliermarsh/followers", "following_url": "https://api.github.com/users/charliermarsh/following{/other_user}", "gists_url": "https://api.github.com/users/charliermarsh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/charliermarsh", "id": 1309177, "login": "charliermarsh", "node_id": "MDQ6VXNlcjEzMDkxNzc=", "organizations_url": "https://api.github.com/users/charliermarsh/orgs", "received_events_url": "https://api.github.com/users/charliermarsh/received_events", "repos_url": "https://api.github.com/users/charliermarsh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/charliermarsh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/charliermarsh/subscriptions", "type": "User", "url": "https://api.github.com/users/charliermarsh", "user_view_type": "public" }
labels: []
state: closed
locked: false
comments:
[ "(Sorry, meant to open this against my own fork. I'm attempting to debug this issue (https://github.com/astral-sh/uv/issues/1921#issuecomment-2058056192) reported by `huggingface/datasets` on the uv repo.)" ]
created_at: 2024-04-16T01:12:36Z
updated_at: 2024-04-16T01:14:16Z
closed_at: 2024-04-16T01:12:41Z
author_association: NONE
draft: false
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/6812.diff", "html_url": "https://github.com/huggingface/datasets/pull/6812", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6812.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6812" }
body: null
closed_by:
{ "avatar_url": "https://avatars.githubusercontent.com/u/1309177?v=4", "events_url": "https://api.github.com/users/charliermarsh/events{/privacy}", "followers_url": "https://api.github.com/users/charliermarsh/followers", "following_url": "https://api.github.com/users/charliermarsh/following{/other_user}", "gists_url": "https://api.github.com/users/charliermarsh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/charliermarsh", "id": 1309177, "login": "charliermarsh", "node_id": "MDQ6VXNlcjEzMDkxNzc=", "organizations_url": "https://api.github.com/users/charliermarsh/orgs", "received_events_url": "https://api.github.com/users/charliermarsh/received_events", "repos_url": "https://api.github.com/users/charliermarsh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/charliermarsh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/charliermarsh/subscriptions", "type": "User", "url": "https://api.github.com/users/charliermarsh", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6812/reactions" }
null
null
null
true

html_url: https://github.com/huggingface/datasets/pull/6811
number: 6811
title: add allow_primitive_to_str and allow_decimal_to_str instead of allow_number_to_str
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4", "events_url": "https://api.github.com/users/Modexus/events{/privacy}", "followers_url": "https://api.github.com/users/Modexus/followers", "following_url": "https://api.github.com/users/Modexus/following{/other_user}", "gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Modexus", "id": 37351874, "login": "Modexus", "node_id": "MDQ6VXNlcjM3MzUxODc0", "organizations_url": "https://api.github.com/users/Modexus/orgs", "received_events_url": "https://api.github.com/users/Modexus/received_events", "repos_url": "https://api.github.com/users/Modexus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Modexus/subscriptions", "type": "User", "url": "https://api.github.com/users/Modexus", "user_view_type": "public" }
labels: []
state: closed
locked: false
comments:
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6811). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "@mariosasko pytest seems to be missing on windows?", "CI is not behaving well today πŸ™‚ ", "I couldn't find an instance of the `allow_number_to_str` parameter (or `array_cast`/`cast_array_to_feature` more generally) being used in the wild. So, I think simply removing `allow_number_to_str` instead of deprecating it should be fine, considering `array_cast`/`cast_array_to_feature` are somewhat hidden. Do you agree @lhoestq? ", "Yup we can remove without any deprecation cycle", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005253 / 0.011353 (-0.006100) | 0.003767 / 0.011008 (-0.007241) | 0.064599 / 0.038508 (0.026091) | 0.030758 / 0.023109 (0.007649) | 0.237437 / 0.275898 (-0.038461) | 0.277580 / 0.323480 (-0.045900) | 0.004220 / 0.007986 (-0.003766) | 0.002738 / 0.004328 (-0.001591) | 0.049393 / 0.004250 (0.045143) | 0.045283 / 0.037052 (0.008231) | 0.249907 / 0.258489 (-0.008582) | 0.283301 / 0.293841 (-0.010540) | 0.027722 / 0.128546 (-0.100825) | 0.010842 / 0.075646 (-0.064804) | 0.219197 / 0.419271 (-0.200074) | 0.036449 / 0.043533 (-0.007084) | 0.237774 / 0.255139 (-0.017365) | 0.257981 / 0.283200 (-0.025218) | 0.018098 / 0.141683 (-0.123585) | 1.161778 / 1.452155 (-0.290376) | 1.212707 / 1.492716 (-0.280010) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.096462 / 0.018006 (0.078456) | 0.305322 / 0.000490 (0.304832) | 0.000218 / 0.000200 (0.000018) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018438 / 0.037411 (-0.018973) | 0.061633 / 0.014526 (0.047107) | 0.073678 / 0.176557 (-0.102879) | 0.122033 / 0.737135 (-0.615103) | 0.074846 / 0.296338 (-0.221493) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | 
read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.279564 / 0.215209 (0.064355) | 2.756984 / 2.077655 (0.679330) | 1.486525 / 1.504120 (-0.017595) | 1.366474 / 1.541195 (-0.174721) | 1.370192 / 1.468490 (-0.098298) | 0.576940 / 4.584777 (-4.007837) | 2.414088 / 3.745712 (-1.331624) | 2.788423 / 5.269862 (-2.481439) | 1.738695 / 4.565676 (-2.826982) | 0.064456 / 0.424275 (-0.359819) | 0.005536 / 0.007607 (-0.002071) | 0.337266 / 0.226044 (0.111222) | 3.327140 / 2.268929 (1.058212) | 1.837553 / 55.444624 (-53.607072) | 1.538955 / 6.876477 (-5.337521) | 1.575624 / 2.142072 (-0.566448) | 0.639960 / 4.805227 (-4.165267) | 0.117607 / 6.500664 (-6.383057) | 0.042077 / 0.075469 (-0.033393) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.960488 / 1.841788 (-0.881300) | 11.565280 / 8.074308 (3.490972) | 9.702633 / 10.191392 (-0.488759) | 0.139106 / 0.680424 (-0.541318) | 0.013601 / 0.534201 (-0.520600) | 0.291499 / 0.579283 (-0.287784) | 0.277433 / 0.434364 (-0.156930) | 0.325700 / 0.540337 (-0.214637) | 0.421036 / 1.386936 (-0.965900) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005405 / 0.011353 (-0.005948) | 0.003816 / 0.011008 (-0.007192) | 0.050422 / 0.038508 (0.011914) | 0.030473 / 0.023109 (0.007364) | 0.275975 / 0.275898 (0.000077) | 0.298002 / 0.323480 (-0.025478) | 0.004280 / 0.007986 (-0.003706) | 0.002746 / 0.004328 (-0.001583) | 0.049649 / 0.004250 (0.045398) | 0.040675 / 0.037052 (0.003623) | 0.287496 / 0.258489 (0.029007) | 0.315140 / 0.293841 (0.021299) | 0.029835 / 0.128546 (-0.098711) 
| 0.010443 / 0.075646 (-0.065204) | 0.058299 / 0.419271 (-0.360972) | 0.032944 / 0.043533 (-0.010588) | 0.279468 / 0.255139 (0.024329) | 0.296336 / 0.283200 (0.013136) | 0.018572 / 0.141683 (-0.123111) | 1.177622 / 1.452155 (-0.274532) | 1.238240 / 1.492716 (-0.254477) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091867 / 0.018006 (0.073861) | 0.299982 / 0.000490 (0.299492) | 0.000217 / 0.000200 (0.000017) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022649 / 0.037411 (-0.014762) | 0.074948 / 0.014526 (0.060422) | 0.087949 / 0.176557 (-0.088607) | 0.125875 / 0.737135 (-0.611261) | 0.089295 / 0.296338 (-0.207044) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290387 / 0.215209 (0.075178) | 2.820969 / 2.077655 (0.743315) | 1.614607 / 1.504120 (0.110487) | 1.496959 / 1.541195 (-0.044236) | 1.526475 / 1.468490 (0.057985) | 0.570087 / 4.584777 (-4.014690) | 2.423106 / 3.745712 (-1.322606) | 2.825321 / 5.269862 (-2.444540) | 1.765580 / 4.565676 (-2.800097) | 0.063289 / 0.424275 (-0.360986) | 0.005456 / 0.007607 (-0.002151) | 0.344100 / 0.226044 (0.118055) | 3.395733 / 2.268929 (1.126804) | 1.951794 / 55.444624 (-53.492830) | 1.677689 / 6.876477 (-5.198787) | 1.684448 / 2.142072 (-0.457624) | 0.644343 / 4.805227 (-4.160885) | 0.115796 / 6.500664 (-6.384868) | 0.041052 / 0.075469 (-0.034417) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.031487 / 1.841788 (-0.810301) | 12.116156 / 8.074308 (4.041848) | 10.472247 / 10.191392 (0.280855) | 0.142934 / 0.680424 (-0.537490) | 0.015470 / 0.534201 (-0.518731) | 0.290402 / 0.579283 (-0.288882) | 0.272594 / 0.434364 (-0.161770) | 0.328311 / 0.540337 (-0.212027) | 0.424694 / 1.386936 (-0.962242) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#8983a3b4dec315bf25331a6065cb74de9017f0e8 \"CML watermark\")\n" ]
created_at: 2024-04-15T13:14:38Z
updated_at: 2024-07-03T14:59:42Z
closed_at: 2024-04-16T17:03:17Z
author_association: CONTRIBUTOR
draft: false
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/6811.diff", "html_url": "https://github.com/huggingface/datasets/pull/6811", "merged_at": "2024-04-16T17:03:17Z", "patch_url": "https://github.com/huggingface/datasets/pull/6811.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6811" }
body: Fix #6805
closed_by:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6811/reactions" }
null
null
null
true
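
The record above tracks a rename of the internal casting flag `allow_number_to_str` into `allow_primitive_to_str` and `allow_decimal_to_str` (in `array_cast` / `cast_array_to_feature`). Those flags are internal, so the sketch below goes through the public `cast_column` API instead; column names and values are made up for illustration:

```python
from datasets import Dataset, Value

ds = Dataset.from_dict({"id": [1, 2, 3]})

# Casting a primitive (numeric) column to string exercises the
# primitive-to-str path that this PR renames internally.
ds = ds.cast_column("id", Value("string"))

print(ds["id"])  # ['1', '2', '3']
```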

html_url: https://github.com/huggingface/datasets/issues/6810
number: 6810
title: Allow deleting a subset/config from a no-script dataset
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
[ "Probably best to implement this as a CLI command?", "Thanks for your comment, @mariosasko. Or maybe both (in Python and as CLI command)? The Python command would be just the reverse of `push_to_hub`...\r\n\r\nI am working on a draft implementation, so we can discuss about the API and UX.", "2nd time I've come here in the past few days so for reference this doesn't work for me at the moment (token auth error despite successful `whoami` and all auths applied) so I resorted to [reverting](https://discuss.huggingface.co/t/how-to-revert-to-an-earlier-commit-on-a-repo/65735/2) to a previous commit (after installing git lfs)\r\n\r\n```\r\n# git lfs install\r\nGIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/datasets/USER/REPO\r\ncd REPO\r\ngit reset --hard HASH\r\ngit push --force\r\n```" ]
created_at: 2024-04-15T07:53:26Z
updated_at: 2025-01-11T18:40:40Z
closed_at: 2024-04-30T09:44:25Z
author_association: MEMBER
draft: null
pull_request: null
body: As proposed by @BramVanroy, it would be neat to have this functionality through the API.
closed_by:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6810/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
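
The feature request above asks for an API to delete a subset/config from a no-script (Parquet) dataset. Independent of whatever API eventually closed the issue, the underlying Hub operation can be sketched with `huggingface_hub` primitives; the repo id, config folder name, and data layout below are assumptions for illustration only:

```python
from huggingface_hub import HfApi

api = HfApi()

# Hypothetical repo and config: remove the folder that holds the config's
# Parquet shards in a single commit.
api.delete_folder(
    path_in_repo="my_config",
    repo_id="username/my_dataset",
    repo_type="dataset",
    commit_message="Delete the 'my_config' subset",
)

# The dataset's README.md YAML header ("configs" / "dataset_info" entries)
# would also need to be edited so the deleted subset is no longer listed.
```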

html_url: https://github.com/huggingface/datasets/pull/6809
number: 6809
title: Make convert_to_parquet CLI command create script branch
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
labels: []
state: closed
locked: false
comments:
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6809). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "@huggingface/datasets once this PR is merged, I would suggest making a release. Do you agree?\r\n- This PR is a follow-up of #6795", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004963 / 0.011353 (-0.006390) | 0.003121 / 0.011008 (-0.007888) | 0.063421 / 0.038508 (0.024913) | 0.030727 / 0.023109 (0.007618) | 0.237698 / 0.275898 (-0.038200) | 0.266613 / 0.323480 (-0.056867) | 0.004237 / 0.007986 (-0.003749) | 0.002715 / 0.004328 (-0.001614) | 0.049503 / 0.004250 (0.045253) | 0.043705 / 0.037052 (0.006653) | 0.247818 / 0.258489 (-0.010671) | 0.287545 / 0.293841 (-0.006296) | 0.027232 / 0.128546 (-0.101314) | 0.009952 / 0.075646 (-0.065695) | 0.208678 / 0.419271 (-0.210593) | 0.035494 / 0.043533 (-0.008039) | 0.260900 / 0.255139 (0.005761) | 0.264738 / 0.283200 (-0.018461) | 0.018093 / 0.141683 (-0.123590) | 1.130924 / 1.452155 (-0.321231) | 1.178982 / 1.492716 (-0.313734) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094610 / 0.018006 (0.076604) | 0.304674 / 0.000490 (0.304184) | 0.000215 / 0.000200 (0.000015) | 0.000048 / 0.000054 (-0.000007) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018168 / 0.037411 (-0.019243) | 0.062040 / 0.014526 (0.047514) | 0.075634 / 0.176557 (-0.100922) | 0.119488 / 0.737135 (-0.617647) | 0.074790 / 0.296338 (-0.221548) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted 
numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.282449 / 0.215209 (0.067240) | 2.773231 / 2.077655 (0.695576) | 1.455156 / 1.504120 (-0.048964) | 1.332652 / 1.541195 (-0.208543) | 1.340795 / 1.468490 (-0.127695) | 0.576588 / 4.584777 (-4.008189) | 2.415513 / 3.745712 (-1.330199) | 2.801569 / 5.269862 (-2.468292) | 1.741039 / 4.565676 (-2.824637) | 0.064386 / 0.424275 (-0.359890) | 0.005293 / 0.007607 (-0.002314) | 0.329732 / 0.226044 (0.103688) | 3.227275 / 2.268929 (0.958347) | 1.793121 / 55.444624 (-53.651503) | 1.515115 / 6.876477 (-5.361362) | 1.518738 / 2.142072 (-0.623335) | 0.664465 / 4.805227 (-4.140762) | 0.118813 / 6.500664 (-6.381851) | 0.041715 / 0.075469 (-0.033754) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.974371 / 1.841788 (-0.867416) | 11.432869 / 8.074308 (3.358561) | 9.607939 / 10.191392 (-0.583453) | 0.143996 / 0.680424 (-0.536427) | 0.014624 / 0.534201 (-0.519577) | 0.286899 / 0.579283 (-0.292384) | 0.265965 / 0.434364 (-0.168399) | 0.324727 / 0.540337 (-0.215611) | 0.420917 / 1.386936 (-0.966019) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005145 / 0.011353 (-0.006207) | 0.003723 / 0.011008 (-0.007286) | 0.050387 / 0.038508 (0.011879) | 0.030734 / 0.023109 (0.007625) | 0.274331 / 0.275898 (-0.001567) | 0.295045 / 0.323480 (-0.028435) | 0.004187 / 0.007986 (-0.003799) | 0.002781 / 0.004328 (-0.001547) | 0.049698 / 0.004250 (0.045448) | 0.040049 / 0.037052 (0.002996) | 0.284016 / 0.258489 (0.025527) | 0.309908 / 0.293841 (0.016067) | 0.028994 / 0.128546 (-0.099552) | 0.010625 / 0.075646 (-0.065021) | 0.059305 / 0.419271 (-0.359967) | 0.032982 / 0.043533 (-0.010551) | 0.273342 / 0.255139 (0.018203) | 0.291726 / 0.283200 (0.008527) | 0.018084 / 0.141683 (-0.123599) | 1.136864 / 1.452155 (-0.315290) | 1.163656 / 1.492716 (-0.329061) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | 
get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094868 / 0.018006 (0.076862) | 0.302900 / 0.000490 (0.302410) | 0.000226 / 0.000200 (0.000026) | 0.000053 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022142 / 0.037411 (-0.015269) | 0.077457 / 0.014526 (0.062932) | 0.087989 / 0.176557 (-0.088568) | 0.127354 / 0.737135 (-0.609781) | 0.092027 / 0.296338 (-0.204312) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291196 / 0.215209 (0.075987) | 2.840386 / 2.077655 (0.762731) | 1.571201 / 1.504120 (0.067081) | 1.449429 / 1.541195 (-0.091765) | 1.467189 / 1.468490 (-0.001301) | 0.580991 / 4.584777 (-4.003786) | 2.422566 / 3.745712 (-1.323146) | 2.839621 / 5.269862 (-2.430240) | 1.782987 / 4.565676 (-2.782689) | 0.064765 / 0.424275 (-0.359510) | 0.005338 / 0.007607 (-0.002269) | 0.349148 / 0.226044 (0.123104) | 3.421283 / 2.268929 (1.152355) | 1.943503 / 55.444624 (-53.501122) | 1.653881 / 6.876477 (-5.222596) | 1.698141 / 2.142072 (-0.443931) | 0.667628 / 4.805227 (-4.137599) | 0.118469 / 6.500664 (-6.382195) | 0.041693 / 0.075469 (-0.033776) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.026385 / 1.841788 (-0.815403) | 12.225049 / 8.074308 (4.150741) | 10.363072 / 10.191392 (0.171680) | 0.142682 / 0.680424 (-0.537742) | 0.015698 / 0.534201 (-0.518502) | 0.288148 / 0.579283 (-0.291135) | 0.272639 / 0.434364 (-0.161724) | 0.325305 / 0.540337 (-0.215032) | 0.421395 / 1.386936 (-0.965541) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#2a14271263da2fda9f966af41c7bd885bfa42256 \"CML watermark\")\n" ]
created_at: 2024-04-15T07:47:26Z
updated_at: 2024-04-17T08:44:26Z
closed_at: 2024-04-17T08:38:18Z
author_association: MEMBER
draft: false
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/6809.diff", "html_url": "https://github.com/huggingface/datasets/pull/6809", "merged_at": "2024-04-17T08:38:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/6809.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6809" }
body: Make convert_to_parquet CLI command create a "script" branch and keep the script file on it. This PR proposes the simplest UX approach: whenever `--revision` is not explicitly passed (i.e., when the script is in the main branch), try to create a "script" branch from the "main" branch; if the "script" branch exists already, then do nothing. Follow-up of: - #6795 Close #6808. CC: @severo
closed_by:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6809/reactions" }
null
null
null
true
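
The PR body above describes creating a "script" branch from "main" before the conversion, and doing nothing if the branch already exists. A sketch of that branch-creation step using `huggingface_hub`, with a hypothetical dataset repo id (this is not the command's actual implementation):

```python
from huggingface_hub import create_branch

create_branch(
    "username/my_dataset",
    repo_type="dataset",
    branch="script",   # branch that keeps the loading script
    revision="main",   # branched off "main", as described in the PR body
    exist_ok=True,     # if the "script" branch already exists, do nothing
)
```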

html_url: https://github.com/huggingface/datasets/issues/6808
number: 6808
title: Make convert_to_parquet CLI command create script branch
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
[]
created_at: 2024-04-15T06:46:07Z
updated_at: 2024-04-17T08:38:19Z
closed_at: 2024-04-17T08:38:19Z
author_association: MEMBER
draft: null
pull_request: null
body: As proposed by @severo, maybe we should add this functionality as well to the CLI command to convert a script-dataset to Parquet. See: https://github.com/huggingface/datasets/pull/6795#discussion_r1562819168 > When providing support, we sometimes suggest that users store their script in a script branch. What do you think of this alternative to deleting the files?
closed_by:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6808/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false

html_url: https://github.com/huggingface/datasets/pull/6806
number: 6806
title: Fix hf-internal-testing/dataset_with_script commit SHA in CI test
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
labels: []
state: closed
locked: false
comments:
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6806). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005068 / 0.011353 (-0.006285) | 0.003613 / 0.011008 (-0.007395) | 0.063226 / 0.038508 (0.024718) | 0.030653 / 0.023109 (0.007544) | 0.243981 / 0.275898 (-0.031918) | 0.268596 / 0.323480 (-0.054884) | 0.003109 / 0.007986 (-0.004876) | 0.003292 / 0.004328 (-0.001036) | 0.048857 / 0.004250 (0.044606) | 0.043929 / 0.037052 (0.006876) | 0.264002 / 0.258489 (0.005513) | 0.289028 / 0.293841 (-0.004813) | 0.028053 / 0.128546 (-0.100493) | 0.010837 / 0.075646 (-0.064809) | 0.208084 / 0.419271 (-0.211188) | 0.035592 / 0.043533 (-0.007941) | 0.252639 / 0.255139 (-0.002500) | 0.267599 / 0.283200 (-0.015600) | 0.018097 / 0.141683 (-0.123585) | 1.150811 / 1.452155 (-0.301344) | 1.219449 / 1.492716 (-0.273267) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095427 / 0.018006 (0.077421) | 0.307270 / 0.000490 (0.306781) | 0.000218 / 0.000200 (0.000018) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018713 / 0.037411 (-0.018698) | 0.065238 / 0.014526 (0.050712) | 0.074650 / 0.176557 (-0.101906) | 0.120130 / 0.737135 (-0.617005) | 0.078457 / 0.296338 (-0.217882) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.283666 / 0.215209 (0.068457) | 2.852818 / 2.077655 (0.775163) | 1.459790 / 1.504120 (-0.044330) | 1.326732 / 1.541195 (-0.214463) | 1.373530 / 1.468490 (-0.094960) | 0.579136 / 4.584777 (-4.005641) | 2.388369 / 3.745712 (-1.357343) | 2.813786 / 5.269862 (-2.456075) | 1.730079 / 4.565676 (-2.835597) | 0.063445 / 0.424275 (-0.360831) | 0.005355 / 0.007607 (-0.002252) | 0.340169 / 0.226044 (0.114124) | 3.391220 / 2.268929 (1.122291) | 1.838003 / 55.444624 (-53.606621) | 1.523518 / 6.876477 (-5.352959) | 1.574007 / 2.142072 (-0.568065) | 0.650265 / 4.805227 (-4.154962) | 0.117114 / 6.500664 (-6.383550) | 0.042430 / 0.075469 (-0.033039) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.955596 / 1.841788 (-0.886191) | 11.546544 / 8.074308 (3.472236) | 9.593613 / 10.191392 (-0.597779) | 0.141502 / 0.680424 (-0.538922) | 0.014251 / 0.534201 (-0.519950) | 0.293825 / 0.579283 (-0.285458) | 0.263088 / 0.434364 (-0.171276) | 0.325035 / 0.540337 (-0.215302) | 0.419372 / 1.386936 (-0.967564) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005567 / 0.011353 (-0.005785) | 0.003670 / 0.011008 (-0.007338) | 0.050338 / 0.038508 (0.011830) | 0.031730 / 0.023109 (0.008621) | 0.278307 / 0.275898 (0.002409) | 0.303170 / 0.323480 (-0.020310) | 0.004276 / 0.007986 (-0.003709) | 0.002720 / 0.004328 (-0.001609) | 0.048675 / 0.004250 (0.044425) | 0.041026 / 0.037052 (0.003974) | 0.291353 / 0.258489 (0.032864) | 0.318487 / 0.293841 (0.024646) | 0.029676 / 0.128546 (-0.098870) | 0.010428 / 0.075646 (-0.065218) | 0.057443 / 0.419271 (-0.361828) | 0.032735 / 0.043533 (-0.010798) | 0.282900 / 0.255139 (0.027761) | 0.297539 / 0.283200 (0.014339) | 0.018237 / 0.141683 (-0.123446) | 1.188047 / 1.452155 (-0.264107) | 1.223283 / 1.492716 (-0.269433) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.090629 / 0.018006 (0.072623) | 0.300898 / 0.000490 (0.300408) | 0.000212 / 0.000200 (0.000012) | 0.000133 / 0.000054 (0.000078) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022200 / 0.037411 (-0.015211) | 0.075310 / 0.014526 (0.060784) | 0.086790 / 0.176557 (-0.089766) | 0.127392 / 0.737135 (-0.609744) | 0.088435 / 0.296338 (-0.207903) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.301308 / 0.215209 (0.086099) | 2.963126 / 2.077655 (0.885471) | 1.639604 / 1.504120 (0.135484) | 1.508776 / 1.541195 (-0.032419) | 1.553280 / 1.468490 (0.084789) | 0.567256 / 4.584777 (-4.017520) | 2.445231 / 3.745712 (-1.300482) | 2.884071 / 5.269862 (-2.385791) | 1.777321 / 4.565676 (-2.788355) | 0.063659 / 0.424275 (-0.360616) | 0.005435 / 0.007607 (-0.002172) | 0.361786 / 0.226044 (0.135742) | 3.624264 / 2.268929 (1.355335) | 2.022661 / 55.444624 (-53.421963) | 1.740581 / 6.876477 (-5.135896) | 1.748503 / 2.142072 (-0.393570) | 0.660783 / 4.805227 (-4.144444) | 0.118045 / 6.500664 (-6.382619) | 0.040940 / 0.075469 (-0.034529) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.015614 / 1.841788 (-0.826174) | 12.094985 / 8.074308 (4.020677) | 10.435581 / 10.191392 (0.244189) | 0.140239 / 0.680424 (-0.540185) | 0.014992 / 0.534201 (-0.519209) | 0.290549 / 0.579283 (-0.288735) | 0.274718 / 0.434364 (-0.159645) | 0.334783 / 0.540337 (-0.205554) | 0.426540 / 1.386936 (-0.960396) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#828aff908450ac7af3a1820bb2eb7b438f2692f5 \"CML watermark\")\n" ]
2024-04-12T08:47:50Z
2024-04-12T09:08:23Z
2024-04-12T09:02:12Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6806.diff", "html_url": "https://github.com/huggingface/datasets/pull/6806", "merged_at": "2024-04-12T09:02:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/6806.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6806" }
Fix test using latest commit SHA in hf-internal-testing/dataset_with_script dataset: https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/commits/refs%2Fconvert%2Fparquet Fix #6796.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6806/reactions" }
null
null
null
true
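PR #6806 in the record above fixes a test by using the latest commit SHA of the `refs/convert/parquet` revision of the `hf-internal-testing/dataset_with_script` dataset instead of a stale one. A minimal sketch of how such a SHA can be resolved with `huggingface_hub` (an illustration, not the PR's actual test code):

```python
# Resolve the latest commit SHA of the parquet-conversion branch of the
# internal test dataset. Illustrative only; the test fixed in #6806 may
# obtain the SHA differently.
from huggingface_hub import HfApi

info = HfApi().dataset_info(
    "hf-internal-testing/dataset_with_script",
    revision="refs/convert/parquet",
)
print(info.sha)  # latest commit SHA on refs/convert/parquet
```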
https://github.com/huggingface/datasets/issues/6805
6,805
Batched mapping of existing string column casts boolean to string
{ "avatar_url": "https://avatars.githubusercontent.com/u/46891489?v=4", "events_url": "https://api.github.com/users/starmpcc/events{/privacy}", "followers_url": "https://api.github.com/users/starmpcc/followers", "following_url": "https://api.github.com/users/starmpcc/following{/other_user}", "gists_url": "https://api.github.com/users/starmpcc/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/starmpcc", "id": 46891489, "login": "starmpcc", "node_id": "MDQ6VXNlcjQ2ODkxNDg5", "organizations_url": "https://api.github.com/users/starmpcc/orgs", "received_events_url": "https://api.github.com/users/starmpcc/received_events", "repos_url": "https://api.github.com/users/starmpcc/repos", "site_admin": false, "starred_url": "https://api.github.com/users/starmpcc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/starmpcc/subscriptions", "type": "User", "url": "https://api.github.com/users/starmpcc", "user_view_type": "public" }
[]
closed
false
[ "This seems to be hardcoded behavior in table.py `array_cast`.\r\n```python\r\nif (\r\n not allow_number_to_str\r\n and pa.types.is_string(pa_type)\r\n and (pa.types.is_floating(array.type) or pa.types.is_integer(array.type))\r\n ):\r\n raise TypeError(\r\n f\"Couldn't cast array of type {array.type} to {pa_type} since allow_number_to_str is set to {allow_number_to_str}\"\r\n )\r\n if pa.types.is_null(pa_type) and not pa.types.is_null(array.type):\r\n raise TypeError(f\"Couldn't cast array of type {array.type} to {pa_type}\")\r\n return array.cast(pa_type)\r\n```\r\nwhere floats and integers are not cast to string but booleans are.\r\nMaybe this should be extended to booleans?", "Thanks for reporting! @Modexus Do you want to open a PR with the suggested fix?", "I'll gladly create a PR but not sure what the behavior should be.\r\n\r\nShould a value returned from map be cast to the current feature?\r\nAt the moment this seems very inconsistent since `datetime `is also cast (this would only fix `boolean`) but nested structures are not.\r\n\r\n```python\r\ndset = Dataset.from_dict({\"a\": [\"Hello world!\"]})\r\ndset = dset.map(lambda x: {\"a\": date(2021, 1, 1)})\r\n# dset[0][\"a\"] == '2021-01-01'\r\n```\r\n```python\r\ndset = Dataset.from_dict({\"a\": [\"Hello world!\"]})\r\ndset = dset.map(lambda x: {\"a\": [True]})\r\n# dset[0][\"a\"] == [True]\r\n```\r\n\r\nIs there are reason to cast the value if the user doesn't specify it explicitly?\r\nSeems tricky that some things are cast and some are not.", "Indeed, it also makes sense to raise a `TypeError` for temporal and decimal types.\r\n\r\n> Is there are reason to cast the value if the user doesn't specify it explicitly?\r\n\r\nThis is how PyArrow's built-in `cast` behaves - it allows casting from primitive types to strings. Hence, we need `allow_number_to_str` to disallow such casts (e.g., in the [scenario](https://github.com/huggingface/datasets/blob/a3bc89d8bfd47c2a175c3ce16d92b7307cdeafd6/src/datasets/arrow_writer.py#L208) when we are \"trying a type\" to preserve the original type if there is a column in the output dataset with the same name as in the input one).\r\n\r\nPS: In the PR, we can introduce `allow_numeric_to_str` (for floats, integers, decimals, booleans) and `allow_temporal_to_str` (for dates, timestamps, ...) and deprecate `allow_number_to_str` to make it clear what each parameter does.", "Would just `allow_primitive_to_str` work?\r\nThis should include all `numeric`, `boolean `and `temporal`formats.\r\n\r\nNote that at least in the [ C++ implementation](https://arrow.apache.org/docs/cpp/api/utilities.html#_CPPv410is_numericRK8DataType) `numeric `seems to exclude `boolean`.\r\n[](https://arrow.apache.org/docs/cpp/api/utilities.html#_CPPv410is_numericRK8DataType)", "Indeed, `allow_primitive_to_str` sounds better.\r\n\r\nPS: PyArrow's `pa.types.is_primitive` returns `False` for decimal types, but I think is okay for us to treat decimals as primitive types (or we can have `allow_decimal_to_str` to be fully consistent with PyArrow)", "Fixed by:\r\n- #6811" ]
2024-04-12T04:21:41Z
2024-07-03T15:00:07Z
2024-07-03T15:00:07Z
NONE
null
null
### Describe the bug Let the dataset contain a column named 'a', which is of the string type. If 'a' is converted to a boolean using batched mapping, the mapper automatically casts the boolean to a string (e.g., True -> 'true'). It only happens when the original column and the mapped column name are identical. Thank you! ### Steps to reproduce the bug ```python from datasets import Dataset dset = Dataset.from_dict({'a': ['11', '22']}) dset = dset.map(lambda x: {'a': [True for _ in x['a']]}, batched=True) print(dset['a']) ``` ``` > ['true', 'true'] ``` ### Expected behavior [True, True] ### Environment info - `datasets` version: 2.18.0 - Platform: Linux-5.4.0-148-generic-x86_64-with-glibc2.31 - Python version: 3.10.13 - `huggingface_hub` version: 0.21.4 - PyArrow version: 15.0.2 - Pandas version: 2.2.1 - `fsspec` version: 2023.12.2
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6805/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
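To make the behaviour reported in #6805 above concrete, here is a runnable sketch of the symptom together with one possible workaround; `features=` is a real `Dataset.map` parameter, but its use as a workaround here is an assumption, not the fix that eventually landed in #6811:

```python
from datasets import Dataset, Features, Value

dset = Dataset.from_dict({"a": ["11", "22"]})

# Reported symptom (datasets 2.18.0): the booleans come back as the strings
# ['true', 'true'] because the existing string feature of column "a" is reused.
cast_back = dset.map(lambda x: {"a": [True for _ in x["a"]]}, batched=True)
print(cast_back["a"])

# Possible workaround (assumption): declare the output schema explicitly so
# the mapped column is written as bool rather than cast back to string.
kept_bool = dset.map(
    lambda x: {"a": [True for _ in x["a"]]},
    batched=True,
    features=Features({"a": Value("bool")}),
)
print(kept_bool["a"])  # expected: [True, True]
```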
https://github.com/huggingface/datasets/pull/6804
6,804
Fix --repo-type order in cli upload docs
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6804). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005222 / 0.011353 (-0.006131) | 0.003306 / 0.011008 (-0.007702) | 0.063326 / 0.038508 (0.024818) | 0.031371 / 0.023109 (0.008261) | 0.244947 / 0.275898 (-0.030951) | 0.264141 / 0.323480 (-0.059339) | 0.004186 / 0.007986 (-0.003800) | 0.002676 / 0.004328 (-0.001653) | 0.048690 / 0.004250 (0.044440) | 0.045172 / 0.037052 (0.008120) | 0.256597 / 0.258489 (-0.001892) | 0.284348 / 0.293841 (-0.009493) | 0.026855 / 0.128546 (-0.101691) | 0.009947 / 0.075646 (-0.065699) | 0.206311 / 0.419271 (-0.212961) | 0.035178 / 0.043533 (-0.008355) | 0.251501 / 0.255139 (-0.003638) | 0.261314 / 0.283200 (-0.021886) | 0.018000 / 0.141683 (-0.123683) | 1.144588 / 1.452155 (-0.307566) | 1.193627 / 1.492716 (-0.299089) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091629 / 0.018006 (0.073623) | 0.298959 / 0.000490 (0.298469) | 0.000207 / 0.000200 (0.000007) | 0.000042 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018053 / 0.037411 (-0.019358) | 0.061280 / 0.014526 (0.046754) | 0.074138 / 0.176557 (-0.102419) | 0.119048 / 0.737135 (-0.618088) | 0.074572 / 0.296338 (-0.221767) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.282440 / 0.215209 (0.067231) | 2.762017 / 2.077655 (0.684362) | 1.474452 / 1.504120 (-0.029668) | 1.361489 / 1.541195 (-0.179706) | 1.359696 / 1.468490 (-0.108795) | 0.569640 / 4.584777 (-4.015137) | 2.398098 / 3.745712 (-1.347614) | 2.731399 / 5.269862 (-2.538462) | 1.697432 / 4.565676 (-2.868245) | 0.063330 / 0.424275 (-0.360945) | 0.005416 / 0.007607 (-0.002191) | 0.346510 / 0.226044 (0.120465) | 3.276473 / 2.268929 (1.007544) | 1.837605 / 55.444624 (-53.607019) | 1.538654 / 6.876477 (-5.337822) | 1.553943 / 2.142072 (-0.588129) | 0.640571 / 4.805227 (-4.164657) | 0.116736 / 6.500664 (-6.383928) | 0.041701 / 0.075469 (-0.033768) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.975846 / 1.841788 (-0.865942) | 11.151727 / 8.074308 (3.077419) | 9.436281 / 10.191392 (-0.755111) | 0.141027 / 0.680424 (-0.539397) | 0.014389 / 0.534201 (-0.519812) | 0.285575 / 0.579283 (-0.293708) | 0.263753 / 0.434364 (-0.170610) | 0.321893 / 0.540337 (-0.218444) | 0.420280 / 1.386936 (-0.966656) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005148 / 0.011353 (-0.006205) | 0.003264 / 0.011008 (-0.007744) | 0.049828 / 0.038508 (0.011320) | 0.031234 / 0.023109 (0.008125) | 0.271079 / 0.275898 (-0.004819) | 0.295256 / 0.323480 (-0.028224) | 0.004128 / 0.007986 (-0.003857) | 0.002637 / 0.004328 (-0.001692) | 0.048145 / 0.004250 (0.043895) | 0.039691 / 0.037052 (0.002638) | 0.287229 / 0.258489 (0.028740) | 0.310477 / 0.293841 (0.016636) | 0.028936 / 0.128546 (-0.099610) | 0.010392 / 0.075646 (-0.065254) | 0.057774 / 0.419271 (-0.361497) | 0.032557 / 0.043533 (-0.010975) | 0.275146 / 0.255139 (0.020007) | 0.291283 / 0.283200 (0.008084) | 0.017724 / 0.141683 (-0.123958) | 1.186831 / 1.452155 (-0.265324) | 1.220086 / 1.492716 (-0.272630) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.093575 / 0.018006 (0.075569) | 0.297198 / 0.000490 (0.296709) | 0.000216 / 0.000200 (0.000016) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021683 / 0.037411 (-0.015728) | 0.075347 / 0.014526 (0.060821) | 0.085453 / 0.176557 (-0.091103) | 0.125422 / 0.737135 (-0.611713) | 0.087185 / 0.296338 (-0.209153) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.301520 / 0.215209 (0.086311) | 2.951614 / 2.077655 (0.873959) | 1.659897 / 1.504120 (0.155777) | 1.528097 / 1.541195 (-0.013097) | 1.552031 / 1.468490 (0.083541) | 0.576297 / 4.584777 (-4.008480) | 2.492349 / 3.745712 (-1.253363) | 2.805999 / 5.269862 (-2.463862) | 1.757556 / 4.565676 (-2.808121) | 0.064940 / 0.424275 (-0.359335) | 0.005314 / 0.007607 (-0.002293) | 0.358838 / 0.226044 (0.132793) | 3.576890 / 2.268929 (1.307961) | 2.030788 / 55.444624 (-53.413837) | 1.743650 / 6.876477 (-5.132826) | 1.745229 / 2.142072 (-0.396844) | 0.647840 / 4.805227 (-4.157387) | 0.116637 / 6.500664 (-6.384027) | 0.040555 / 0.075469 (-0.034915) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.009130 / 1.841788 (-0.832657) | 11.951145 / 8.074308 (3.876836) | 9.968355 / 10.191392 (-0.223037) | 0.139959 / 0.680424 (-0.540465) | 0.015985 / 0.534201 (-0.518216) | 0.286594 / 0.579283 (-0.292689) | 0.275805 / 0.434364 (-0.158559) | 0.328484 / 0.540337 (-0.211854) | 0.419818 / 1.386936 (-0.967118) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#89a58cdfc59ecc83662a47b638cf82a5b99f4a48 \"CML watermark\")\n" ]
2024-04-11T15:39:09Z
2024-04-11T16:24:57Z
2024-04-11T16:18:47Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6804.diff", "html_url": "https://github.com/huggingface/datasets/pull/6804", "merged_at": "2024-04-11T16:18:47Z", "patch_url": "https://github.com/huggingface/datasets/pull/6804.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6804" }
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6804/reactions" }
null
null
null
true
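PR #6804 above corrects the argument order shown in the docs for the upload CLI (positional arguments before options such as `--repo-type`). As a hedged, Python-side equivalent of that command, the same upload can be done with `huggingface_hub`; the repo id and paths below are made-up placeholders:

```python
# Programmatic equivalent of `huggingface-cli upload ... --repo-type dataset`.
# Requires being logged in (e.g. via `huggingface_hub.login()`); names are
# hypothetical.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    repo_id="username/my-dataset",  # hypothetical dataset repo
    folder_path="./data",           # local folder to upload
    path_in_repo="data",            # destination inside the repo
    repo_type="dataset",            # what --repo-type dataset selects
)
```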
https://github.com/huggingface/datasets/pull/6803
6,803
#6791 Improve type checking around FAISS
{ "avatar_url": "https://avatars.githubusercontent.com/u/8976546?v=4", "events_url": "https://api.github.com/users/Dref360/events{/privacy}", "followers_url": "https://api.github.com/users/Dref360/followers", "following_url": "https://api.github.com/users/Dref360/following{/other_user}", "gists_url": "https://api.github.com/users/Dref360/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Dref360", "id": 8976546, "login": "Dref360", "node_id": "MDQ6VXNlcjg5NzY1NDY=", "organizations_url": "https://api.github.com/users/Dref360/orgs", "received_events_url": "https://api.github.com/users/Dref360/received_events", "repos_url": "https://api.github.com/users/Dref360/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Dref360/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Dref360/subscriptions", "type": "User", "url": "https://api.github.com/users/Dref360", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6803). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "CI failures are unrelated.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005063 / 0.011353 (-0.006290) | 0.003598 / 0.011008 (-0.007410) | 0.062929 / 0.038508 (0.024421) | 0.031723 / 0.023109 (0.008614) | 0.246503 / 0.275898 (-0.029395) | 0.268742 / 0.323480 (-0.054738) | 0.003249 / 0.007986 (-0.004737) | 0.002613 / 0.004328 (-0.001715) | 0.049001 / 0.004250 (0.044751) | 0.045740 / 0.037052 (0.008687) | 0.261182 / 0.258489 (0.002693) | 0.297328 / 0.293841 (0.003487) | 0.026925 / 0.128546 (-0.101621) | 0.010588 / 0.075646 (-0.065059) | 0.208954 / 0.419271 (-0.210317) | 0.035286 / 0.043533 (-0.008246) | 0.277678 / 0.255139 (0.022539) | 0.269313 / 0.283200 (-0.013887) | 0.019865 / 0.141683 (-0.121818) | 1.145883 / 1.452155 (-0.306272) | 1.196766 / 1.492716 (-0.295950) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093886 / 0.018006 (0.075879) | 0.305118 / 0.000490 (0.304629) | 0.000207 / 0.000200 (0.000008) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018473 / 0.037411 (-0.018938) | 0.061719 / 0.014526 (0.047193) | 0.074980 / 0.176557 (-0.101577) | 0.122354 / 0.737135 (-0.614781) | 0.076111 / 0.296338 (-0.220227) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.280222 / 0.215209 (0.065013) | 2.692820 / 2.077655 (0.615165) | 1.440897 / 1.504120 (-0.063223) | 1.313829 / 1.541195 (-0.227366) | 1.324392 / 1.468490 (-0.144098) | 0.570114 / 4.584777 (-4.014662) | 2.373946 / 3.745712 (-1.371766) | 2.804485 / 5.269862 (-2.465377) | 1.753595 / 4.565676 (-2.812081) | 0.062660 / 0.424275 (-0.361615) | 0.005267 / 0.007607 (-0.002340) | 0.323108 / 0.226044 (0.097063) | 3.257302 / 2.268929 (0.988373) | 1.802613 / 55.444624 (-53.642011) | 1.510590 / 6.876477 (-5.365886) | 1.567452 / 2.142072 (-0.574621) | 0.649872 / 4.805227 (-4.155355) | 0.117245 / 6.500664 (-6.383419) | 0.042260 / 0.075469 (-0.033209) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.976068 / 1.841788 (-0.865720) | 11.565981 / 8.074308 (3.491672) | 9.598650 / 10.191392 (-0.592742) | 0.129903 / 0.680424 (-0.550520) | 0.014925 / 0.534201 (-0.519276) | 0.290732 / 0.579283 (-0.288551) | 0.271236 / 0.434364 (-0.163128) | 0.325450 / 0.540337 (-0.214888) | 0.420218 / 1.386936 (-0.966718) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005404 / 0.011353 (-0.005949) | 0.003710 / 0.011008 (-0.007298) | 0.050982 / 0.038508 (0.012474) | 0.031340 / 0.023109 (0.008231) | 0.279221 / 0.275898 (0.003323) | 0.300936 / 0.323480 (-0.022544) | 0.004251 / 0.007986 (-0.003735) | 0.002697 / 0.004328 (-0.001631) | 0.049335 / 0.004250 (0.045085) | 0.040979 / 0.037052 (0.003926) | 0.287121 / 0.258489 (0.028632) | 0.315100 / 0.293841 (0.021259) | 0.029093 / 0.128546 (-0.099454) | 0.010618 / 0.075646 (-0.065028) | 0.059095 / 0.419271 (-0.360177) | 0.032953 / 0.043533 (-0.010580) | 0.274861 / 0.255139 (0.019722) | 0.292284 / 0.283200 (0.009085) | 0.017882 / 0.141683 (-0.123801) | 1.150590 / 1.452155 (-0.301565) | 1.203501 / 1.492716 (-0.289215) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.096868 / 0.018006 (0.078862) | 0.306460 / 0.000490 (0.305971) | 0.000230 / 0.000200 (0.000030) | 0.000058 / 0.000054 (0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022031 / 0.037411 (-0.015381) | 0.074847 / 0.014526 (0.060321) | 0.086951 / 0.176557 (-0.089606) | 0.125706 / 0.737135 (-0.611429) | 0.088244 / 0.296338 (-0.208094) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.297861 / 0.215209 (0.082652) | 2.923172 / 2.077655 (0.845518) | 1.628511 / 1.504120 (0.124391) | 1.499907 / 1.541195 (-0.041288) | 1.490060 / 1.468490 (0.021570) | 0.564087 / 4.584777 (-4.020690) | 2.441201 / 3.745712 (-1.304511) | 2.805283 / 5.269862 (-2.464578) | 1.762703 / 4.565676 (-2.802974) | 0.063038 / 0.424275 (-0.361237) | 0.005276 / 0.007607 (-0.002331) | 0.343413 / 0.226044 (0.117369) | 3.400858 / 2.268929 (1.131930) | 2.039937 / 55.444624 (-53.404687) | 1.674622 / 6.876477 (-5.201855) | 1.688371 / 2.142072 (-0.453702) | 0.635321 / 4.805227 (-4.169907) | 0.120235 / 6.500664 (-6.380429) | 0.041106 / 0.075469 (-0.034363) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.017469 / 1.841788 (-0.824319) | 12.383734 / 8.074308 (4.309426) | 10.352393 / 10.191392 (0.161001) | 0.131981 / 0.680424 (-0.548443) | 0.015204 / 0.534201 (-0.518997) | 0.286157 / 0.579283 (-0.293126) | 0.278270 / 0.434364 (-0.156094) | 0.325105 / 0.540337 (-0.215233) | 0.422301 / 1.386936 (-0.964635) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#9323521505b7fab098fbe2a304389ee2d59783ff \"CML watermark\")\n" ]
2024-04-11T14:54:30Z
2024-04-11T15:44:09Z
2024-04-11T15:38:04Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6803.diff", "html_url": "https://github.com/huggingface/datasets/pull/6803", "merged_at": "2024-04-11T15:38:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/6803.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6803" }
Fixes #6791. Small PR to raise a better error when a dataset is not embedded properly.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6803/reactions" }
null
null
null
true
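PR #6803 above improves the error raised when FAISS search is used on a dataset that was never embedded properly. A small sketch of the happy path it guards (random vectors stand in for a real encoder, `faiss` must be installed, and this is not the PR's own code):

```python
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello world", "goodbye world"]})

# Add a float32 embedding column; a real setup would use an encoder model.
ds = ds.map(lambda x: {"embeddings": np.random.rand(8).astype("float32")})

# Build the FAISS index over the embedding column and query it. Skipping
# add_faiss_index (or pointing at a missing column) is the kind of misuse
# that #6803 now reports with a clearer error.
ds.add_faiss_index(column="embeddings")
scores, examples = ds.get_nearest_examples(
    "embeddings", np.random.rand(8).astype("float32"), k=1
)
print(examples["text"])
```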
https://github.com/huggingface/datasets/pull/6802
6,802
Fix typo in docs (upload CLI)
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6802). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004991 / 0.011353 (-0.006362) | 0.003574 / 0.011008 (-0.007434) | 0.062369 / 0.038508 (0.023861) | 0.029966 / 0.023109 (0.006857) | 0.256140 / 0.275898 (-0.019758) | 0.283705 / 0.323480 (-0.039775) | 0.003170 / 0.007986 (-0.004816) | 0.002732 / 0.004328 (-0.001597) | 0.048048 / 0.004250 (0.043798) | 0.044497 / 0.037052 (0.007445) | 0.273206 / 0.258489 (0.014717) | 0.294593 / 0.293841 (0.000752) | 0.027251 / 0.128546 (-0.101295) | 0.010205 / 0.075646 (-0.065441) | 0.205979 / 0.419271 (-0.213293) | 0.035416 / 0.043533 (-0.008117) | 0.256260 / 0.255139 (0.001121) | 0.270580 / 0.283200 (-0.012620) | 0.019659 / 0.141683 (-0.122024) | 1.138722 / 1.452155 (-0.313432) | 1.170535 / 1.492716 (-0.322182) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091588 / 0.018006 (0.073582) | 0.301280 / 0.000490 (0.300791) | 0.000209 / 0.000200 (0.000009) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019684 / 0.037411 (-0.017727) | 0.061166 / 0.014526 (0.046640) | 0.072999 / 0.176557 (-0.103558) | 0.119264 / 0.737135 (-0.617871) | 0.074555 / 0.296338 (-0.221784) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.283210 / 0.215209 (0.068001) | 2.762284 / 2.077655 (0.684629) | 1.472700 / 1.504120 (-0.031420) | 1.352734 / 1.541195 (-0.188461) | 1.363287 / 1.468490 (-0.105203) | 0.558175 / 4.584777 (-4.026602) | 2.391648 / 3.745712 (-1.354064) | 2.787109 / 5.269862 (-2.482752) | 1.725635 / 4.565676 (-2.840042) | 0.061827 / 0.424275 (-0.362448) | 0.005351 / 0.007607 (-0.002256) | 0.337540 / 0.226044 (0.111496) | 3.353181 / 2.268929 (1.084252) | 1.829599 / 55.444624 (-53.615026) | 1.567691 / 6.876477 (-5.308786) | 1.605680 / 2.142072 (-0.536393) | 0.642182 / 4.805227 (-4.163045) | 0.117321 / 6.500664 (-6.383343) | 0.042555 / 0.075469 (-0.032915) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.991099 / 1.841788 (-0.850689) | 11.545219 / 8.074308 (3.470911) | 9.777574 / 10.191392 (-0.413818) | 0.130237 / 0.680424 (-0.550186) | 0.015068 / 0.534201 (-0.519133) | 0.286029 / 0.579283 (-0.293254) | 0.266778 / 0.434364 (-0.167586) | 0.321468 / 0.540337 (-0.218869) | 0.425371 / 1.386936 (-0.961565) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005144 / 0.011353 (-0.006208) | 0.004046 / 0.011008 (-0.006962) | 0.050552 / 0.038508 (0.012043) | 0.030716 / 0.023109 (0.007607) | 0.273462 / 0.275898 (-0.002436) | 0.290649 / 0.323480 (-0.032831) | 0.004093 / 0.007986 (-0.003893) | 0.002700 / 0.004328 (-0.001628) | 0.048833 / 0.004250 (0.044582) | 0.040059 / 0.037052 (0.003007) | 0.282496 / 0.258489 (0.024007) | 0.309176 / 0.293841 (0.015335) | 0.029207 / 0.128546 (-0.099339) | 0.010740 / 0.075646 (-0.064907) | 0.057692 / 0.419271 (-0.361580) | 0.032570 / 0.043533 (-0.010963) | 0.269048 / 0.255139 (0.013909) | 0.287351 / 0.283200 (0.004151) | 0.017565 / 0.141683 (-0.124118) | 1.161628 / 1.452155 (-0.290526) | 1.187236 / 1.492716 (-0.305480) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.095552 / 0.018006 (0.077546) | 0.312449 / 0.000490 (0.311959) | 0.000219 / 0.000200 (0.000019) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022425 / 0.037411 (-0.014986) | 0.074941 / 0.014526 (0.060416) | 0.086784 / 0.176557 (-0.089772) | 0.125630 / 0.737135 (-0.611506) | 0.088632 / 0.296338 (-0.207706) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.293003 / 0.215209 (0.077794) | 2.881826 / 2.077655 (0.804172) | 1.612840 / 1.504120 (0.108720) | 1.492727 / 1.541195 (-0.048468) | 1.520023 / 1.468490 (0.051532) | 0.558715 / 4.584777 (-4.026062) | 2.431093 / 3.745712 (-1.314619) | 2.782672 / 5.269862 (-2.487189) | 1.721611 / 4.565676 (-2.844065) | 0.063466 / 0.424275 (-0.360809) | 0.005221 / 0.007607 (-0.002386) | 0.352917 / 0.226044 (0.126873) | 3.443742 / 2.268929 (1.174814) | 1.981190 / 55.444624 (-53.463435) | 1.695396 / 6.876477 (-5.181081) | 1.709959 / 2.142072 (-0.432113) | 0.649267 / 4.805227 (-4.155960) | 0.116604 / 6.500664 (-6.384060) | 0.040688 / 0.075469 (-0.034781) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.023182 / 1.841788 (-0.818605) | 12.046760 / 8.074308 (3.972452) | 10.294706 / 10.191392 (0.103314) | 0.132323 / 0.680424 (-0.548101) | 0.016141 / 0.534201 (-0.518060) | 0.286620 / 0.579283 (-0.292663) | 0.272299 / 0.434364 (-0.162065) | 0.320995 / 0.540337 (-0.219343) | 0.424138 / 1.386936 (-0.962798) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#873b7c8e354bfbd1873272a03d1392550d2cac39 \"CML watermark\")\n", "> Should it also be applied to this example a few lines later ?\r\n\r\nYes!", "done in https://github.com/huggingface/datasets/pull/6804" ]
2024-04-11T10:05:05Z
2024-04-11T16:19:00Z
2024-04-11T13:19:43Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6802.diff", "html_url": "https://github.com/huggingface/datasets/pull/6802", "merged_at": "2024-04-11T13:19:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/6802.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6802" }
Related to https://huggingface.slack.com/archives/C04RG8YRVB8/p1712643948574129 (internal). Positional args must be placed before optional args. Feel free to merge whenever it's ready.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6802/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6801
6,801
got fileNotFound
{ "avatar_url": "https://avatars.githubusercontent.com/u/93729155?v=4", "events_url": "https://api.github.com/users/laoniandisko/events{/privacy}", "followers_url": "https://api.github.com/users/laoniandisko/followers", "following_url": "https://api.github.com/users/laoniandisko/following{/other_user}", "gists_url": "https://api.github.com/users/laoniandisko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/laoniandisko", "id": 93729155, "login": "laoniandisko", "node_id": "U_kgDOBZYxgw", "organizations_url": "https://api.github.com/users/laoniandisko/orgs", "received_events_url": "https://api.github.com/users/laoniandisko/received_events", "repos_url": "https://api.github.com/users/laoniandisko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/laoniandisko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/laoniandisko/subscriptions", "type": "User", "url": "https://api.github.com/users/laoniandisko", "user_view_type": "public" }
[]
closed
false
[ "Hi! I'll open a PR on the Hub to fix this, but please use the Hub's [Community tab](https://huggingface.co/datasets/nyanko7/danbooru2023/discussions) to report such issues in the future.", "I've opened a [PR](https://huggingface.co/datasets/nyanko7/danbooru2023/discussions/8) in the repo, so let's continue the discussion there" ]
2024-04-11T04:57:41Z
2024-04-12T16:47:43Z
2024-04-12T16:47:43Z
NONE
null
null
### Describe the bug When I use load_dataset to load the nyanko7/danbooru2023 dataset, the cache is read in the form of a symlink. There may be a problem with the arrow_dataset initialization process and I get FileNotFoundError: [Errno 2] No such file or directory: '2945000.jpg'. ### Steps to reproduce the bug Code shown below: from datasets import load_dataset data = load_dataset("nyanko7/danbooru2023", cache_dir=<symlink>) data["train"][0] ### Expected behavior I should get this result: {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=365x256 at 0x7FB730CB4070>, 'label': 0} ### Environment info datasets==2.12.0 python==3.10.14
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6801/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6800
6,800
High overhead when loading lots of subsets from the same dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/53355258?v=4", "events_url": "https://api.github.com/users/loicmagne/events{/privacy}", "followers_url": "https://api.github.com/users/loicmagne/followers", "following_url": "https://api.github.com/users/loicmagne/following{/other_user}", "gists_url": "https://api.github.com/users/loicmagne/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/loicmagne", "id": 53355258, "login": "loicmagne", "node_id": "MDQ6VXNlcjUzMzU1MjU4", "organizations_url": "https://api.github.com/users/loicmagne/orgs", "received_events_url": "https://api.github.com/users/loicmagne/received_events", "repos_url": "https://api.github.com/users/loicmagne/repos", "site_admin": false, "starred_url": "https://api.github.com/users/loicmagne/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/loicmagne/subscriptions", "type": "User", "url": "https://api.github.com/users/loicmagne", "user_view_type": "public" }
[]
open
false
[ "Hi !\r\n\r\nIt's possible to multiple files at once:\r\n\r\n```python\r\ndata_files = \"data/*.jsonl\"\r\n# Or pass a list of files\r\nlangs = ['ka-ml', 'br-sr', 'ka-pt', 'id-ko', ..., 'fi-ze_zh', 'he-kk', 'ka-tr']\r\ndata_files = [f\"data/{lang}.jsonl\" for lang in langs]\r\nds = load_dataset(\"loicmagne/open-subtitles-250-bitext-mining\", data_files=data_files, split=\"train\")\r\n```\r\n\r\nAlso maybe you can add a subset called \"all\" for people that want to load all the data without having to list all the languages ?\r\n\r\n```yaml\r\n - config_name: all\r\n data_files: data/*.jsonl\r\n```\r\n", "Thanks for your reply, it is indeed much faster, however the result is a dataset where all the subsets are \"merged\" together, the language pair is lost:\r\n```\r\nDatasetDict({\r\n train: Dataset({\r\n features: ['sentence1', 'sentence2'],\r\n num_rows: 247809\r\n })\r\n})\r\n```\r\nI guess I could add a 'lang' feature for each row in the dataset, is there a better way to do it ?", "Hi @lhoestq over at https://github.com/embeddings-benchmark/mteb/issues/530 we have started examining these issues and would love to make a PR for datasets if we believe there is a way to improve the speed. As I assume you have a better overview than me @lhoestq, would you be interested in a PR, and might you have an idea about where we would start working on it?\r\n\r\nWe see a speed comparison of \r\n1. 15 minutes (for ~20% of the languages) when loaded using a for loop\r\n2. 17 minutes using the your suggestion\r\n3. ~30 seconds when using @loicmagne \"merged\" method.\r\n\r\nWorth mentioning is that solution 2 looses the language information.", "Can you retry using `datasets` 2.19 ? We improved a lot the speed of downloading datasets with tons of small files.\r\n\r\n```\r\npip install -U datasets\r\n```\r\n\r\nNow this takes 17sec on my side instead of the 17min minutes @loicmagne mentioned :)\r\n\r\n```python\r\n>>> %time ds = load_dataset(\"loicmagne/open-subtitles-250-bitext-mining\", data_files=\"data/*.jsonl\")\r\nDownloading readme: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 13.7k/13.7k [00:00<00:00, 5.47MB/s]\r\nResolving data files: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 250/250 [00:00<00:00, 612.51it/s]\r\nDownloading data: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 250/250 [00:12<00:00, 19.68files/s]\r\nGenerating train split: 247809 examples [00:00, 1057071.08 examples/s]\r\nCPU times: user 4.95 s, sys: 3.1 s, total: 8.05 s\r\nWall time: 17.4 s\r\n```", "> Can you retry using `datasets` 2.19 ? 
We improved a lot the speed of downloading datasets with tons of small files.\r\n> \r\n> ```\r\n> pip install -U datasets\r\n> ```\r\n> \r\n> Now this takes 17sec on my side instead of the 17min minutes @loicmagne mentioned :)\r\n> \r\n> ```python\r\n> >>> %time ds = load_dataset(\"loicmagne/open-subtitles-250-bitext-mining\", data_files=\"data/*.jsonl\")\r\n> Downloading readme: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 13.7k/13.7k [00:00<00:00, 5.47MB/s]\r\n> Resolving data files: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 250/250 [00:00<00:00, 612.51it/s]\r\n> Downloading data: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 250/250 [00:12<00:00, 19.68files/s]\r\n> Generating train split: 247809 examples [00:00, 1057071.08 examples/s]\r\n> CPU times: user 4.95 s, sys: 3.1 s, total: 8.05 s\r\n> Wall time: 17.4 s\r\n> ```\r\n\r\nI was actually just noticing that, I bumped from 2.18 to 2.19 and got a massive speedup, amazing!\r\n\r\nAbout the fact that subset names are lost when loading all files at once, currently my solution is to add a 'lang' feature to each rows, convert to polars and use:\r\n\r\n```python\r\nds_split = ds.to_polars().group_by('lang')\r\n```\r\n\r\nIt's fast so I think it's an acceptable solution, but is there a better way to do it ?", "It's the fastest way I think :)\r\n\r\nAlternatively you can download the dataset repository locally using [huggingface_hub](https://huggingface.co/docs/huggingface_hub/guides/download) (either via CLI or in python) and load the subsets one by one locally using a for loop as you were doing before (just pass the directory path to load_dataset instead of the dataset_id). " ]
2024-04-10T21:08:57Z
2024-04-24T13:48:05Z
null
NONE
null
null
### Describe the bug I have a multilingual dataset that contains a lot of subsets. Each subset corresponds to a pair of languages, you can see here an example with 250 subsets: [https://hf.co/datasets/loicmagne/open-subtitles-250-bitext-mining](). As part of the MTEB benchmark, we may need to load all the subsets of the dataset. The dataset is relatively small and contains only ~45MB of data, but when I try to load every subset, it takes 15 minutes from the HF hub and 13 minutes from the cache This issue https://github.com/huggingface/datasets/issues/5499 also referenced this overhead, but I'm wondering if there is anything I can do to speedup loading different subsets of the same dataset, both when loading from disk and from the HF hub? Currently each subset is stored in a jsonl file ### Steps to reproduce the bug ``` from datasets import load_dataset for subset in ['ka-ml', 'br-sr', 'bg-br', 'kk-lv', 'br-sk', 'br-fi', 'eu-ze_zh', 'kk-nl', 'kk-vi', 'ja-kk', 'br-sv', 'kk-zh_cn', 'kk-ms', 'br-et', 'br-hu', 'eo-kk', 'br-tr', 'ko-tl', 'te-zh_tw', 'br-hr', 'br-nl', 'ka-si', 'br-cs', 'br-is', 'br-ro', 'br-de', 'et-kk', 'fr-hy', 'br-no', 'is-ko', 'br-da', 'br-en', 'eo-lt', 'is-ze_zh', 'eu-ko', 'br-it', 'br-id', 'eu-zh_cn', 'is-ja', 'br-sl', 'br-gl', 'br-pt_br', 'br-es', 'br-pt', 'is-th', 'fa-is', 'br-ca', 'eu-ka', 'is-zh_cn', 'eu-ur', 'id-kk', 'br-sq', 'eu-ja', 'uk-ur', 'is-zh_tw', 'ka-ko', 'eu-zh_tw', 'eu-th', 'eu-is', 'is-tl', 'br-eo', 'eo-ze_zh', 'eu-te', 'ar-kk', 'eo-lv', 'ko-ze_zh', 'ml-ze_zh', 'is-lt', 'br-fr', 'ko-te', 'kk-sl', 'eu-fa', 'eo-ko', 'ka-ze_en', 'eo-eu', 'ta-zh_tw', 'eu-lv', 'ko-lv', 'lt-tl', 'eu-si', 'hy-ru', 'ar-is', 'eu-lt', 'eu-tl', 'eu-uk', 'ka-ze_zh', 'si-ze_zh', 'el-is', 'bn-is', 'ko-ze_en', 'eo-si', 'cs-kk', 'is-uk', 'eu-ze_en', 'ta-ze_zh', 'is-pl', 'is-mk', 'eu-ta', 'ko-lt', 'is-lv', 'fa-ko', 'bn-ko', 'hi-is', 'bn-ze_zh', 'bn-eu', 'bn-ja', 'is-ml', 'eu-ru', 'ko-ta', 'is-vi', 'ja-tl', 'eu-mk', 'eu-he', 'ka-zh_tw', 'ka-zh_cn', 'si-tl', 'is-kk', 'eu-fi', 'fi-ko', 'is-ur', 'ka-th', 'ko-ur', 'eo-ja', 'he-is', 'is-tr', 'ka-ur', 'et-ko', 'eu-vi', 'is-sk', 'gl-is', 'fr-is', 'is-sq', 'hu-is', 'fr-kk', 'eu-sq', 'is-ru', 'ja-ka', 'fi-tl', 'ka-lv', 'fi-is', 'is-si', 'ar-ko', 'ko-sl', 'ar-eu', 'ko-si', 'bg-is', 'eu-hu', 'ko-sv', 'bn-hu', 'kk-ro', 'eu-hi', 'ka-ms', 'ko-th', 'ko-sr', 'ko-mk', 'fi-kk', 'ka-vi', 'eu-ml', 'ko-ml', 'de-ko', 'fa-ze_zh', 'eu-sk', 'is-sl', 'et-is', 'eo-is', 'is-sr', 'is-ze_en', 'kk-pt_br', 'hr-hy', 'kk-pl', 'ja-ta', 'is-ms', 'hi-ze_en', 'is-ro', 'ko-zh_cn', 'el-eu', 'ka-pl', 'ka-sq', 'eu-sl', 'fa-ka', 'ko-no', 'si-ze_en', 'ko-uk', 'ja-ze_zh', 'hu-ko', 'kk-no', 'eu-pl', 'is-pt_br', 'bn-lv', 'tl-zh_cn', 'is-nl', 'he-ko', 'ko-sq', 'ta-th', 'lt-ta', 'da-ko', 'ca-is', 'is-ta', 'bn-fi', 'ja-ml', 'lv-si', 'eu-sv', 'ja-te', 'bn-ur', 'bn-ca', 'bs-ko', 'bs-is', 'eu-sr', 'ko-vi', 'ko-zh_tw', 'et-tl', 'kk-tr', 'eo-vi', 'is-it', 'ja-ko', 'eo-et', 'id-is', 'bn-et', 'bs-eu', 'bn-lt', 'tl-uk', 'bn-zh_tw', 'da-eu', 'el-ko', 'no-tl', 'ko-sk', 'is-pt', 'hu-kk', 'si-zh_tw', 'si-te', 'ka-ru', 'lt-ml', 'af-ja', 'bg-eu', 'eo-th', 'cs-is', 'pl-ze_zh', 'el-kk', 'kk-sv', 'ka-nl', 'ko-pl', 'bg-ko', 'ka-pt_br', 'et-eu', 'tl-zh_tw', 'ka-pt', 'id-ko', 'fi-ze_zh', 'he-kk', 'ka-tr']: load_dataset('loicmagne/open-subtitles-250-bitext-mining', subset) ``` ### Expected behavior Faster loading? ### Environment info Copy-and-paste the text below in your GitHub issue. 
- `datasets` version: 2.18.0 - Platform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35 - Python version: 3.10.12 - `huggingface_hub` version: 0.22.2 - PyArrow version: 15.0.2 - Pandas version: 2.2.2 - `fsspec` version: 2023.5.0
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6800/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6799
6,799
Fix `DatasetBuilder._split_generators` incomplete type annotation
{ "avatar_url": "https://avatars.githubusercontent.com/u/33965649?v=4", "events_url": "https://api.github.com/users/JonasLoos/events{/privacy}", "followers_url": "https://api.github.com/users/JonasLoos/followers", "following_url": "https://api.github.com/users/JonasLoos/following{/other_user}", "gists_url": "https://api.github.com/users/JonasLoos/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JonasLoos", "id": 33965649, "login": "JonasLoos", "node_id": "MDQ6VXNlcjMzOTY1NjQ5", "organizations_url": "https://api.github.com/users/JonasLoos/orgs", "received_events_url": "https://api.github.com/users/JonasLoos/received_events", "repos_url": "https://api.github.com/users/JonasLoos/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JonasLoos/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JonasLoos/subscriptions", "type": "User", "url": "https://api.github.com/users/JonasLoos", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6799). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "The CI failures are unrelated to the changes", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004974 / 0.011353 (-0.006378) | 0.003153 / 0.011008 (-0.007856) | 0.062785 / 0.038508 (0.024277) | 0.029504 / 0.023109 (0.006395) | 0.245558 / 0.275898 (-0.030340) | 0.274022 / 0.323480 (-0.049457) | 0.003173 / 0.007986 (-0.004813) | 0.002643 / 0.004328 (-0.001686) | 0.048917 / 0.004250 (0.044667) | 0.042965 / 0.037052 (0.005912) | 0.261266 / 0.258489 (0.002777) | 0.291546 / 0.293841 (-0.002295) | 0.027860 / 0.128546 (-0.100686) | 0.010397 / 0.075646 (-0.065249) | 0.205981 / 0.419271 (-0.213290) | 0.035663 / 0.043533 (-0.007870) | 0.250466 / 0.255139 (-0.004673) | 0.273947 / 0.283200 (-0.009253) | 0.016659 / 0.141683 (-0.125023) | 1.147884 / 1.452155 (-0.304270) | 1.187609 / 1.492716 (-0.305107) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095564 / 0.018006 (0.077558) | 0.300086 / 0.000490 (0.299597) | 0.000212 / 0.000200 (0.000012) | 0.000049 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018100 / 0.037411 (-0.019311) | 0.061342 / 0.014526 (0.046816) | 0.073747 / 0.176557 (-0.102810) | 0.120577 / 0.737135 (-0.616559) | 0.075797 / 0.296338 (-0.220541) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled 
read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.288766 / 0.215209 (0.073557) | 2.835274 / 2.077655 (0.757620) | 1.515288 / 1.504120 (0.011168) | 1.396097 / 1.541195 (-0.145098) | 1.424293 / 1.468490 (-0.044197) | 0.568356 / 4.584777 (-4.016421) | 2.393171 / 3.745712 (-1.352541) | 2.756219 / 5.269862 (-2.513642) | 1.731343 / 4.565676 (-2.834334) | 0.062542 / 0.424275 (-0.361733) | 0.005385 / 0.007607 (-0.002223) | 0.340876 / 0.226044 (0.114832) | 3.376649 / 2.268929 (1.107720) | 1.856135 / 55.444624 (-53.588490) | 1.581802 / 6.876477 (-5.294675) | 1.591081 / 2.142072 (-0.550992) | 0.647963 / 4.805227 (-4.157264) | 0.119218 / 6.500664 (-6.381446) | 0.042660 / 0.075469 (-0.032809) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.005017 / 1.841788 (-0.836770) | 11.670779 / 8.074308 (3.596471) | 9.533790 / 10.191392 (-0.657602) | 0.141571 / 0.680424 (-0.538853) | 0.013987 / 0.534201 (-0.520214) | 0.286598 / 0.579283 (-0.292685) | 0.260123 / 0.434364 (-0.174240) | 0.324186 / 0.540337 (-0.216151) | 0.421246 / 1.386936 (-0.965690) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005196 / 0.011353 (-0.006157) | 0.003697 / 0.011008 (-0.007311) | 0.049530 / 0.038508 (0.011022) | 0.030892 / 0.023109 (0.007783) | 0.284787 / 0.275898 (0.008889) | 0.302833 / 0.323480 (-0.020647) | 0.004203 / 0.007986 (-0.003783) | 0.002736 / 0.004328 (-0.001592) | 0.050203 / 0.004250 (0.045953) | 0.040335 / 0.037052 (0.003283) | 0.292508 / 0.258489 (0.034019) | 0.317918 / 0.293841 (0.024077) | 0.029144 / 0.128546 (-0.099403) | 0.010171 / 0.075646 (-0.065475) | 0.058130 / 0.419271 (-0.361141) | 0.032743 / 0.043533 (-0.010790) | 0.281354 / 0.255139 (0.026215) | 0.296951 / 0.283200 (0.013751) | 0.018399 / 0.141683 (-0.123284) | 1.158852 / 1.452155 (-0.293303) | 1.189750 / 1.492716 (-0.302966) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | 
get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093073 / 0.018006 (0.075066) | 0.301779 / 0.000490 (0.301290) | 0.000209 / 0.000200 (0.000009) | 0.000051 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021565 / 0.037411 (-0.015846) | 0.075237 / 0.014526 (0.060711) | 0.087368 / 0.176557 (-0.089188) | 0.126955 / 0.737135 (-0.610180) | 0.088456 / 0.296338 (-0.207883) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291225 / 0.215209 (0.076016) | 2.863220 / 2.077655 (0.785565) | 1.616936 / 1.504120 (0.112817) | 1.500553 / 1.541195 (-0.040641) | 1.501693 / 1.468490 (0.033203) | 0.560118 / 4.584777 (-4.024659) | 2.439241 / 3.745712 (-1.306472) | 2.786804 / 5.269862 (-2.483058) | 1.737772 / 4.565676 (-2.827905) | 0.063668 / 0.424275 (-0.360607) | 0.005320 / 0.007607 (-0.002287) | 0.344539 / 0.226044 (0.118495) | 3.418803 / 2.268929 (1.149874) | 1.981791 / 55.444624 (-53.462834) | 1.698484 / 6.876477 (-5.177993) | 1.686815 / 2.142072 (-0.455258) | 0.646911 / 4.805227 (-4.158316) | 0.116969 / 6.500664 (-6.383696) | 0.040380 / 0.075469 (-0.035089) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.017337 / 1.841788 (-0.824451) | 11.858212 / 8.074308 (3.783904) | 10.270287 / 10.191392 (0.078895) | 0.154266 / 0.680424 (-0.526158) | 0.014886 / 0.534201 (-0.519315) | 0.292354 / 0.579283 (-0.286929) | 0.270888 / 0.434364 (-0.163476) | 0.333289 / 0.540337 (-0.207049) | 0.423001 / 1.386936 (-0.963935) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#d9cc95f6d0513bbc692bb73c669346e3d1825cb0 \"CML watermark\")\n" ]
2024-04-10T17:46:08Z
2024-04-11T15:41:06Z
2024-04-11T15:34:58Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6799.diff", "html_url": "https://github.com/huggingface/datasets/pull/6799", "merged_at": "2024-04-11T15:34:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/6799.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6799" }
Solve #6798: add the missing `StreamingDownloadManager` type annotation to the `dl_manager` argument of the `DatasetBuilder._split_generators` function
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6799/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6798
6,798
`DatasetBuilder._split_generators` incomplete type annotation
{ "avatar_url": "https://avatars.githubusercontent.com/u/33965649?v=4", "events_url": "https://api.github.com/users/JonasLoos/events{/privacy}", "followers_url": "https://api.github.com/users/JonasLoos/followers", "following_url": "https://api.github.com/users/JonasLoos/following{/other_user}", "gists_url": "https://api.github.com/users/JonasLoos/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JonasLoos", "id": 33965649, "login": "JonasLoos", "node_id": "MDQ6VXNlcjMzOTY1NjQ5", "organizations_url": "https://api.github.com/users/JonasLoos/orgs", "received_events_url": "https://api.github.com/users/JonasLoos/received_events", "repos_url": "https://api.github.com/users/JonasLoos/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JonasLoos/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JonasLoos/subscriptions", "type": "User", "url": "https://api.github.com/users/JonasLoos", "user_view_type": "public" }
[]
closed
false
[ "Good catch! Feel free to open a PR with the suggested fix :).", "There is also the [`MockDownloadManager`](https://github.com/JonasLoos/datasets/blob/main/src/datasets/download/mock_download_manager.py#L33), which seems like it might get passed here too. However, to me, it doesn't really seem relevant to the users of the datasets library, so I would just ignore it. What do you think, @mariosasko?", "The API (`dummy_data` CLI command ) that uses the `MockDownloadManager` has been deprecated, so ignoring it sounds good!" ]
2024-04-10T14:38:50Z
2024-04-11T15:34:59Z
2024-04-11T15:34:59Z
CONTRIBUTOR
null
null
### Describe the bug The [`DatasetBuilder._split_generators`](https://github.com/huggingface/datasets/blob/0f27d7b77c73412cfc50b24354bfd7a3e838202f/src/datasets/builder.py#L1449) function has currently the following signature: ```python class DatasetBuilder: def _split_generators(self, dl_manager: DownloadManager): ... ``` However, the `dl_manager` argument can also be of type [`StreamingDownloadManager`](https://github.com/huggingface/datasets/blob/0f27d7b77c73412cfc50b24354bfd7a3e838202f/src/datasets/download/streaming_download_manager.py#L962), which has different functionality. For example, the `download` function doesn't download, but rather just returns the given url(s). I suggest changing the function signature to: ```python class DatasetBuilder: def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]): ... ``` and also adjust the docstring accordingly. I would like to create a Pull Request to fix this, and have the following questions: * Are there also other options than `DownloadManager`, and `StreamingDownloadManager`? * Should this also be changed in other functions? ### Steps to reproduce the bug Minimal example to print the different class names: ```python import tempfile from datasets import load_dataset example = b''' from datasets import GeneratorBasedBuilder, DatasetInfo, Features, Value, SplitGenerator class Test(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"x": Value("int64")})) def _split_generators(self, dl_manager): print(type(dl_manager)) return [SplitGenerator('test')] def _generate_examples(self): yield 0, {'x': 42} ''' with tempfile.NamedTemporaryFile(suffix='.py') as f: f.write(example) f.flush() load_dataset(f.name, streaming=False) load_dataset(f.name, streaming=True) ``` ### Expected behavior complete type annotations ### Environment info /
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6798/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6797
6,797
Fix CI test_load_dataset_distributed_with_script
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6797). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Finally:\r\n- the initial issue seems it was temporary\r\n- there is a different issue now \r\n\r\n```\r\nFAILED tests/test_load.py::ModuleFactoryTest::test_HubDatasetModuleFactoryWithParquetExport - datasets.utils._dataset_viewer.DatasetViewerError: No exported Parquet files available.\r\nFAILED tests/test_load.py::ModuleFactoryTest::test_HubDatasetModuleFactoryWithParquetExport_errors_on_wrong_sha - datasets.utils._dataset_viewer.DatasetViewerError: No exported Parquet files available.\r\nFAILED tests/test_load.py::test_load_dataset_builder_for_community_dataset_with_script - AssertionError: assert 'dataset_with_script' == 'parquet'\r\n \r\n - parquet\r\n + dataset_with_script\r\n```" ]
2024-04-10T06:57:48Z
2024-04-10T08:25:00Z
2024-04-10T08:18:01Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6797.diff", "html_url": "https://github.com/huggingface/datasets/pull/6797", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6797.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6797" }
Fix #6796.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6797/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6796
6,796
CI is broken due to hf-internal-testing/dataset_with_script
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
[ "Finally:\r\n- the initial issue seems it was temporary\r\n- there is a different issue now: https://github.com/huggingface/datasets/actions/runs/8627153993/job/23646584590?pr=6797\r\n```\r\nFAILED tests/test_load.py::ModuleFactoryTest::test_HubDatasetModuleFactoryWithParquetExport - datasets.utils._dataset_viewer.DatasetViewerError: No exported Parquet files available.\r\nFAILED tests/test_load.py::ModuleFactoryTest::test_HubDatasetModuleFactoryWithParquetExport_errors_on_wrong_sha - datasets.utils._dataset_viewer.DatasetViewerError: No exported Parquet files available.\r\nFAILED tests/test_load.py::test_load_dataset_builder_for_community_dataset_with_script - AssertionError: assert 'dataset_with_script' == 'parquet'\r\n \r\n - parquet\r\n + dataset_with_script\r\n```\r\n\r\nMaybe related to `hf-internal-testing/dataset_with_script` dataset: https://huggingface.co/datasets/hf-internal-testing/dataset_with_script", "This URL: https://datasets-server.huggingface.co/parquet?dataset=hf-internal-testing/dataset_with_script\r\nraises:\r\n> {\"error\":\"The dataset viewer doesn't support this dataset because it runs arbitrary python code. Please open a discussion in the discussion tab if you think this is an error and tag @lhoestq and @severo.\"}\r\n\r\nWas there a recent change on the Hub enforcing this behavior?", "OK, I just saw this PR:\r\n- https://github.com/huggingface/dataset-viewer/pull/2689\r\n\r\nOnce merged and deployed, it should fix the issue.", "Once the script-dataset has been allowed in the dataset-viewer, we should fix our test to make the CI pass.\r\n\r\nI am addressing this." ]
2024-04-10T06:56:02Z
2024-04-12T09:02:13Z
2024-04-12T09:02:13Z
MEMBER
null
null
CI is broken for test_load_dataset_distributed_with_script. See: https://github.com/huggingface/datasets/actions/runs/8614926216/job/23609378127 ``` FAILED tests/test_load.py::test_load_dataset_distributed_with_script[None] - assert False + where False = all(<generator object test_load_dataset_distributed_with_script.<locals>.<genexpr> at 0x7f0c741de3b0>) FAILED tests/test_load.py::test_load_dataset_distributed_with_script[force_redownload] - assert False + where False = all(<generator object test_load_dataset_distributed_with_script.<locals>.<genexpr> at 0x7f0be45f6ea0>) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6796/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6795
6,795
Add CLI function to convert script-dataset to Parquet
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6795). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "@huggingface/datasets once this PR is merged, I would suggest making a release. Do you agree?", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005367 / 0.011353 (-0.005986) | 0.003161 / 0.011008 (-0.007847) | 0.063259 / 0.038508 (0.024751) | 0.030550 / 0.023109 (0.007441) | 0.243789 / 0.275898 (-0.032109) | 0.262474 / 0.323480 (-0.061006) | 0.003157 / 0.007986 (-0.004829) | 0.002586 / 0.004328 (-0.001742) | 0.049336 / 0.004250 (0.045085) | 0.046434 / 0.037052 (0.009382) | 0.249142 / 0.258489 (-0.009347) | 0.282953 / 0.293841 (-0.010888) | 0.027881 / 0.128546 (-0.100666) | 0.010069 / 0.075646 (-0.065578) | 0.207937 / 0.419271 (-0.211334) | 0.036005 / 0.043533 (-0.007528) | 0.251850 / 0.255139 (-0.003288) | 0.265156 / 0.283200 (-0.018044) | 0.019780 / 0.141683 (-0.121903) | 1.124301 / 1.452155 (-0.327853) | 1.177392 / 1.492716 (-0.315324) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091045 / 0.018006 (0.073039) | 0.301258 / 0.000490 (0.300769) | 0.000214 / 0.000200 (0.000014) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018726 / 0.037411 (-0.018686) | 0.061623 / 0.014526 (0.047097) | 0.073905 / 0.176557 (-0.102651) | 0.119444 / 0.737135 (-0.617692) | 0.074614 / 0.296338 (-0.221725) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled 
read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.287313 / 0.215209 (0.072104) | 2.772864 / 2.077655 (0.695209) | 1.465267 / 1.504120 (-0.038853) | 1.343666 / 1.541195 (-0.197528) | 1.329390 / 1.468490 (-0.139100) | 0.570222 / 4.584777 (-4.014555) | 2.421835 / 3.745712 (-1.323877) | 2.747282 / 5.269862 (-2.522579) | 1.728733 / 4.565676 (-2.836943) | 0.063671 / 0.424275 (-0.360604) | 0.005343 / 0.007607 (-0.002264) | 0.335078 / 0.226044 (0.109033) | 3.334305 / 2.268929 (1.065376) | 1.779496 / 55.444624 (-53.665129) | 1.496475 / 6.876477 (-5.380002) | 1.507848 / 2.142072 (-0.634224) | 0.653653 / 4.805227 (-4.151575) | 0.118373 / 6.500664 (-6.382291) | 0.041727 / 0.075469 (-0.033742) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.981985 / 1.841788 (-0.859803) | 11.290978 / 8.074308 (3.216670) | 9.499217 / 10.191392 (-0.692175) | 0.131353 / 0.680424 (-0.549071) | 0.014416 / 0.534201 (-0.519785) | 0.288381 / 0.579283 (-0.290902) | 0.265483 / 0.434364 (-0.168880) | 0.323438 / 0.540337 (-0.216900) | 0.417946 / 1.386936 (-0.968990) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005272 / 0.011353 (-0.006081) | 0.003551 / 0.011008 (-0.007457) | 0.050173 / 0.038508 (0.011665) | 0.031291 / 0.023109 (0.008182) | 0.278658 / 0.275898 (0.002760) | 0.301812 / 0.323480 (-0.021668) | 0.004237 / 0.007986 (-0.003748) | 0.002713 / 0.004328 (-0.001615) | 0.049483 / 0.004250 (0.045233) | 0.039995 / 0.037052 (0.002943) | 0.293101 / 0.258489 (0.034612) | 0.319956 / 0.293841 (0.026116) | 0.029127 / 0.128546 (-0.099419) | 0.010247 / 0.075646 (-0.065400) | 0.057929 / 0.419271 (-0.361342) | 0.032942 / 0.043533 (-0.010591) | 0.281677 / 0.255139 (0.026538) | 0.297937 / 0.283200 (0.014737) | 0.018285 / 0.141683 (-0.123398) | 1.272858 / 1.452155 (-0.179297) | 1.213375 / 1.492716 (-0.279342) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | 
get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091110 / 0.018006 (0.073104) | 0.302589 / 0.000490 (0.302099) | 0.000214 / 0.000200 (0.000014) | 0.000070 / 0.000054 (0.000015) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021520 / 0.037411 (-0.015891) | 0.075013 / 0.014526 (0.060487) | 0.088695 / 0.176557 (-0.087862) | 0.128281 / 0.737135 (-0.608854) | 0.090611 / 0.296338 (-0.205727) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.297457 / 0.215209 (0.082248) | 2.928612 / 2.077655 (0.850957) | 1.613245 / 1.504120 (0.109125) | 1.485263 / 1.541195 (-0.055931) | 1.496885 / 1.468490 (0.028395) | 0.570120 / 4.584777 (-4.014657) | 2.487532 / 3.745712 (-1.258180) | 2.761552 / 5.269862 (-2.508309) | 1.731864 / 4.565676 (-2.833812) | 0.062989 / 0.424275 (-0.361286) | 0.005428 / 0.007607 (-0.002179) | 0.354932 / 0.226044 (0.128888) | 3.524475 / 2.268929 (1.255547) | 1.977684 / 55.444624 (-53.466941) | 1.692568 / 6.876477 (-5.183909) | 1.673003 / 2.142072 (-0.469069) | 0.643976 / 4.805227 (-4.161251) | 0.116499 / 6.500664 (-6.384165) | 0.040772 / 0.075469 (-0.034697) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.020354 / 1.841788 (-0.821434) | 12.143991 / 8.074308 (4.069683) | 10.354058 / 10.191392 (0.162666) | 0.145460 / 0.680424 (-0.534964) | 0.015356 / 0.534201 (-0.518845) | 0.307190 / 0.579283 (-0.272093) | 0.276664 / 0.434364 (-0.157699) | 0.350068 / 0.540337 (-0.190269) | 0.440824 / 1.386936 (-0.946112) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#a3bc89d8bfd47c2a175c3ce16d92b7307cdeafd6 \"CML watermark\")\n" ]
2024-04-09T14:45:12Z
2024-04-17T08:41:23Z
2024-04-12T15:27:04Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6795.diff", "html_url": "https://github.com/huggingface/datasets/pull/6795", "merged_at": "2024-04-12T15:27:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/6795.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6795" }
Close #6690.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6795/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6794
6,794
Multithreaded downloads
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6794). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "CI is failing because of the missing parquet export of one test dataset, PR to fix this at https://github.com/huggingface/dataset-viewer/pull/2689", "I took your comments into account :) lmk what you think @mariosasko ", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004956 / 0.011353 (-0.006397) | 0.003282 / 0.011008 (-0.007726) | 0.064028 / 0.038508 (0.025520) | 0.030420 / 0.023109 (0.007311) | 0.240097 / 0.275898 (-0.035801) | 0.266356 / 0.323480 (-0.057124) | 0.003116 / 0.007986 (-0.004869) | 0.002597 / 0.004328 (-0.001731) | 0.050230 / 0.004250 (0.045980) | 0.043864 / 0.037052 (0.006812) | 0.258711 / 0.258489 (0.000222) | 0.290816 / 0.293841 (-0.003025) | 0.027898 / 0.128546 (-0.100648) | 0.009941 / 0.075646 (-0.065705) | 0.208917 / 0.419271 (-0.210355) | 0.035891 / 0.043533 (-0.007642) | 0.253332 / 0.255139 (-0.001807) | 0.274300 / 0.283200 (-0.008900) | 0.019466 / 0.141683 (-0.122217) | 1.133896 / 1.452155 (-0.318259) | 1.178130 / 1.492716 (-0.314586) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091093 / 0.018006 (0.073087) | 0.293632 / 0.000490 (0.293142) | 0.000216 / 0.000200 (0.000016) | 0.000042 / 0.000054 (-0.000013) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017722 / 0.037411 (-0.019689) | 0.060241 / 0.014526 (0.045715) | 0.072024 / 0.176557 (-0.104533) | 0.118521 / 0.737135 (-0.618615) | 0.071107 / 0.296338 (-0.225232) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | 
shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.280950 / 0.215209 (0.065741) | 2.781361 / 2.077655 (0.703706) | 1.477949 / 1.504120 (-0.026171) | 1.356388 / 1.541195 (-0.184807) | 1.361808 / 1.468490 (-0.106682) | 0.565499 / 4.584777 (-4.019278) | 2.389206 / 3.745712 (-1.356506) | 2.712782 / 5.269862 (-2.557079) | 1.701402 / 4.565676 (-2.864274) | 0.063619 / 0.424275 (-0.360656) | 0.005321 / 0.007607 (-0.002286) | 0.336783 / 0.226044 (0.110739) | 3.299628 / 2.268929 (1.030699) | 1.794686 / 55.444624 (-53.649939) | 1.504207 / 6.876477 (-5.372270) | 1.524637 / 2.142072 (-0.617436) | 0.642833 / 4.805227 (-4.162395) | 0.117808 / 6.500664 (-6.382856) | 0.041539 / 0.075469 (-0.033930) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.960193 / 1.841788 (-0.881595) | 11.229147 / 8.074308 (3.154839) | 9.380653 / 10.191392 (-0.810739) | 0.137184 / 0.680424 (-0.543240) | 0.013399 / 0.534201 (-0.520802) | 0.314904 / 0.579283 (-0.264379) | 0.262539 / 0.434364 (-0.171825) | 0.354007 / 0.540337 (-0.186331) | 0.451698 / 1.386936 (-0.935238) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005207 / 0.011353 (-0.006146) | 0.003660 / 0.011008 (-0.007348) | 0.049931 / 0.038508 (0.011423) | 0.030918 / 0.023109 (0.007809) | 0.271243 / 0.275898 (-0.004655) | 0.295706 / 0.323480 (-0.027774) | 0.004106 / 0.007986 (-0.003879) | 0.002750 / 0.004328 (-0.001578) | 0.048337 / 0.004250 (0.044086) | 0.039944 / 0.037052 (0.002892) | 0.284013 / 0.258489 (0.025524) | 0.306827 / 0.293841 (0.012987) | 0.029183 / 0.128546 (-0.099363) | 0.010033 / 0.075646 (-0.065613) | 0.058126 / 0.419271 (-0.361146) | 0.032427 / 0.043533 (-0.011106) | 0.276471 / 0.255139 (0.021332) | 0.288428 / 0.283200 (0.005229) | 0.017549 / 0.141683 (-0.124134) | 1.142361 / 1.452155 (-0.309793) | 1.184514 / 1.492716 
(-0.308202) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090350 / 0.018006 (0.072344) | 0.292511 / 0.000490 (0.292021) | 0.000215 / 0.000200 (0.000015) | 0.000041 / 0.000054 (-0.000013) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021572 / 0.037411 (-0.015840) | 0.074310 / 0.014526 (0.059784) | 0.086102 / 0.176557 (-0.090455) | 0.123507 / 0.737135 (-0.613629) | 0.087397 / 0.296338 (-0.208941) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.294038 / 0.215209 (0.078829) | 2.889662 / 2.077655 (0.812007) | 1.591775 / 1.504120 (0.087655) | 1.468815 / 1.541195 (-0.072379) | 1.470226 / 1.468490 (0.001736) | 0.574557 / 4.584777 (-4.010220) | 2.481377 / 3.745712 (-1.264335) | 2.763368 / 5.269862 (-2.506493) | 1.713707 / 4.565676 (-2.851969) | 0.064158 / 0.424275 (-0.360117) | 0.005553 / 0.007607 (-0.002054) | 0.353480 / 0.226044 (0.127436) | 3.447689 / 2.268929 (1.178760) | 1.975802 / 55.444624 (-53.468822) | 1.673561 / 6.876477 (-5.202915) | 1.637212 / 2.142072 (-0.504860) | 0.640667 / 4.805227 (-4.164560) | 0.114618 / 6.500664 (-6.386046) | 0.038912 / 0.075469 (-0.036557) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.007581 / 1.841788 (-0.834207) | 11.874250 / 8.074308 (3.799942) | 10.312692 / 10.191392 (0.121300) | 0.142705 / 0.680424 (-0.537719) | 0.015438 / 0.534201 (-0.518763) | 0.285919 / 0.579283 (-0.293364) | 0.278223 / 0.434364 (-0.156141) | 0.323806 / 0.540337 (-0.216531) | 0.415007 / 1.386936 (-0.971929) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#0f1f27c69f6cc8d085b66a8a2ba0440a39bc5bce \"CML watermark\")\n" ]
2024-04-09T11:13:19Z
2024-04-15T21:24:13Z
2024-04-15T21:18:08Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6794.diff", "html_url": "https://github.com/huggingface/datasets/pull/6794", "merged_at": "2024-04-15T21:18:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/6794.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6794" }
...for faster dataset download when there are many, many small files (e.g. imagefolder, audiofolder) ### Benchmark for example on [lhoestq/tmp-images-writer_batch_size](https://hf.co/datasets/lhoestq/tmp-images-writer_batch_size) (128 images) | | duration of the download step in `load_dataset()` | |--| ----------------------------------------------------------------------| | Before | 58s | | Now | 3s | This should fix issues with the Dataset Viewer taking too much time to show up for imagefolder/audiofolder datasets. ### Implementation details The main change is in the `DownloadManager`: ```diff - download_func = partial(self._download, download_config=download_config) + download_func = partial(self._download_batched, download_config=download_config) downloaded_path_or_paths = map_nested( download_func, url_or_urls, map_tuple=True, num_proc=download_config.num_proc, desc="Downloading data files", + batched=True, + batch_size=-1, ) ``` and `_download_batched` is a multithreaded function. I only enable multithreading if there are more than 16 files and files are small though, otherwise the progress bar that counts the number of downloaded files is not fluid (updating when a big batch of big files are done downloading). To do so I simply check if the first file is smaller than 20MB. I also had to tweak `map_nested` to support batching. In particular it slices the data correctly if the user also enables multiprocessing.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 1, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6794/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6793
6,793
Loading just one particular split is not possible for imagenet-1k
{ "avatar_url": "https://avatars.githubusercontent.com/u/165930106?v=4", "events_url": "https://api.github.com/users/PaulPSta/events{/privacy}", "followers_url": "https://api.github.com/users/PaulPSta/followers", "following_url": "https://api.github.com/users/PaulPSta/following{/other_user}", "gists_url": "https://api.github.com/users/PaulPSta/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PaulPSta", "id": 165930106, "login": "PaulPSta", "node_id": "U_kgDOCePkeg", "organizations_url": "https://api.github.com/users/PaulPSta/orgs", "received_events_url": "https://api.github.com/users/PaulPSta/received_events", "repos_url": "https://api.github.com/users/PaulPSta/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PaulPSta/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PaulPSta/subscriptions", "type": "User", "url": "https://api.github.com/users/PaulPSta", "user_view_type": "public" }
[]
open
false
[ "+1", "+1\n" ]
2024-04-08T14:39:14Z
2025-06-23T09:55:08Z
null
NONE
null
null
### Describe the bug I'd expect the following code to download just the validation split but instead I get all data on my disk (train, test and validation splits) ` from datasets import load_dataset dataset = load_dataset("imagenet-1k", split="validation", trust_remote_code=True) ` Is it expected to work like that? ### Steps to reproduce the bug 1. Install the required libraries (python, datasets, huggingface_hub) 2. Login using huggingface cli 3. Run the code in the description ### Expected behavior Just a single (validation) split should be downloaded. ### Environment info python: 3.12.2 datasets: 2.18.0 huggingface_hub: 0.22.2
null
{ "+1": 5, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 5, "url": "https://api.github.com/repos/huggingface/datasets/issues/6793/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6792
6,792
Fix cache conflict in `_check_legacy_cache2`
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6792). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005212 / 0.011353 (-0.006141) | 0.003536 / 0.011008 (-0.007472) | 0.063042 / 0.038508 (0.024534) | 0.032654 / 0.023109 (0.009545) | 0.242040 / 0.275898 (-0.033858) | 0.267735 / 0.323480 (-0.055745) | 0.003188 / 0.007986 (-0.004797) | 0.002697 / 0.004328 (-0.001631) | 0.050127 / 0.004250 (0.045877) | 0.045960 / 0.037052 (0.008908) | 0.260926 / 0.258489 (0.002437) | 0.293953 / 0.293841 (0.000112) | 0.028352 / 0.128546 (-0.100194) | 0.010558 / 0.075646 (-0.065088) | 0.208104 / 0.419271 (-0.211167) | 0.035889 / 0.043533 (-0.007644) | 0.246265 / 0.255139 (-0.008874) | 0.271819 / 0.283200 (-0.011381) | 0.018491 / 0.141683 (-0.123192) | 1.299274 / 1.452155 (-0.152881) | 1.205932 / 1.492716 (-0.286784) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095574 / 0.018006 (0.077568) | 0.306493 / 0.000490 (0.306003) | 0.000216 / 0.000200 (0.000016) | 0.000042 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018304 / 0.037411 (-0.019107) | 0.061312 / 0.014526 (0.046786) | 0.074483 / 0.176557 (-0.102073) | 0.122231 / 0.737135 (-0.614905) | 0.075315 / 0.296338 (-0.221024) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.275632 / 0.215209 (0.060423) | 2.696402 / 2.077655 (0.618747) | 1.418657 / 1.504120 (-0.085463) | 1.300014 / 1.541195 (-0.241181) | 1.299148 / 1.468490 (-0.169342) | 0.561893 / 4.584777 (-4.022884) | 2.410710 / 3.745712 (-1.335002) | 2.749058 / 5.269862 (-2.520803) | 1.712835 / 4.565676 (-2.852841) | 0.062278 / 0.424275 (-0.361997) | 0.005040 / 0.007607 (-0.002567) | 0.330352 / 0.226044 (0.104308) | 3.291274 / 2.268929 (1.022345) | 1.780987 / 55.444624 (-53.663638) | 1.514764 / 6.876477 (-5.361713) | 1.533892 / 2.142072 (-0.608181) | 0.632307 / 4.805227 (-4.172921) | 0.116011 / 6.500664 (-6.384653) | 0.041964 / 0.075469 (-0.033505) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.982713 / 1.841788 (-0.859075) | 11.521597 / 8.074308 (3.447289) | 9.713063 / 10.191392 (-0.478329) | 0.132115 / 0.680424 (-0.548309) | 0.014564 / 0.534201 (-0.519637) | 0.294087 / 0.579283 (-0.285196) | 0.267399 / 0.434364 (-0.166965) | 0.327967 / 0.540337 (-0.212370) | 0.419279 / 1.386936 (-0.967657) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005098 / 0.011353 (-0.006255) | 0.003513 / 0.011008 (-0.007495) | 0.050121 / 0.038508 (0.011613) | 0.030842 / 0.023109 (0.007732) | 0.271323 / 0.275898 (-0.004575) | 0.293592 / 0.323480 (-0.029887) | 0.004225 / 0.007986 (-0.003761) | 0.002802 / 0.004328 (-0.001527) | 0.049035 / 0.004250 (0.044785) | 0.040748 / 0.037052 (0.003696) | 0.282542 / 0.258489 (0.024053) | 0.303779 / 0.293841 (0.009938) | 0.029213 / 0.128546 (-0.099333) | 0.010578 / 0.075646 (-0.065068) | 0.058053 / 0.419271 (-0.361219) | 0.032830 / 0.043533 (-0.010703) | 0.272226 / 0.255139 (0.017087) | 0.290485 / 0.283200 (0.007285) | 0.017968 / 0.141683 (-0.123714) | 1.166998 / 1.452155 (-0.285156) | 1.256354 / 1.492716 (-0.236362) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.096126 / 0.018006 (0.078120) | 0.306303 / 0.000490 (0.305813) | 0.000246 / 0.000200 (0.000047) | 0.000049 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022413 / 0.037411 (-0.014998) | 0.075008 / 0.014526 (0.060482) | 0.087703 / 0.176557 (-0.088854) | 0.127358 / 0.737135 (-0.609777) | 0.088817 / 0.296338 (-0.207521) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.301103 / 0.215209 (0.085894) | 2.965441 / 2.077655 (0.887787) | 1.608075 / 1.504120 (0.103955) | 1.479214 / 1.541195 (-0.061981) | 1.492039 / 1.468490 (0.023549) | 0.574455 / 4.584777 (-4.010322) | 2.483234 / 3.745712 (-1.262478) | 2.795901 / 5.269862 (-2.473961) | 1.742034 / 4.565676 (-2.823642) | 0.064170 / 0.424275 (-0.360105) | 0.005572 / 0.007607 (-0.002035) | 0.349500 / 0.226044 (0.123456) | 3.482161 / 2.268929 (1.213232) | 1.950065 / 55.444624 (-53.494559) | 1.675270 / 6.876477 (-5.201207) | 1.674534 / 2.142072 (-0.467538) | 0.657478 / 4.805227 (-4.147749) | 0.117534 / 6.500664 (-6.383130) | 0.040880 / 0.075469 (-0.034589) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.035276 / 1.841788 (-0.806511) | 12.035581 / 8.074308 (3.961273) | 10.127778 / 10.191392 (-0.063614) | 0.142289 / 0.680424 (-0.538134) | 0.014702 / 0.534201 (-0.519499) | 0.288206 / 0.579283 (-0.291077) | 0.282251 / 0.434364 (-0.152113) | 0.323479 / 0.540337 (-0.216858) | 0.419019 / 1.386936 (-0.967917) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#0f27d7b77c73412cfc50b24354bfd7a3e838202f \"CML watermark\")\n" ]
2024-04-08T14:05:42Z
2024-04-09T11:34:08Z
2024-04-09T11:27:58Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6792.diff", "html_url": "https://github.com/huggingface/datasets/pull/6792", "merged_at": "2024-04-09T11:27:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/6792.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6792" }
It was reloading from the wrong cache dir because of a bug in `_check_legacy_cache2`. This function should not trigger if there are config_kwargs like `sample_by=`. Fix https://github.com/huggingface/datasets/issues/6758
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6792/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6791
6,791
`add_faiss_index` raises ValueError: not enough values to unpack (expected 2, got 1)
{ "avatar_url": "https://avatars.githubusercontent.com/u/40491005?v=4", "events_url": "https://api.github.com/users/NeuralFlux/events{/privacy}", "followers_url": "https://api.github.com/users/NeuralFlux/followers", "following_url": "https://api.github.com/users/NeuralFlux/following{/other_user}", "gists_url": "https://api.github.com/users/NeuralFlux/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/NeuralFlux", "id": 40491005, "login": "NeuralFlux", "node_id": "MDQ6VXNlcjQwNDkxMDA1", "organizations_url": "https://api.github.com/users/NeuralFlux/orgs", "received_events_url": "https://api.github.com/users/NeuralFlux/received_events", "repos_url": "https://api.github.com/users/NeuralFlux/repos", "site_admin": false, "starred_url": "https://api.github.com/users/NeuralFlux/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NeuralFlux/subscriptions", "type": "User", "url": "https://api.github.com/users/NeuralFlux", "user_view_type": "public" }
[]
closed
false
[ "I realized I was passing a string column to this instead of float. Is it possible to add a warning or error to prevent users from falsely believing there's a bug?", "Hello!\r\n\r\nI agree that we could add some safeguards around the type of `ds[column]`. At least for FAISS, we need the column to be made of embeddings as FAISS doesn't perform the embeddings itself.\r\n\r\nI can propose a PR sometime this week.", "@Dref360 thanks for the initiative!" ]
2024-04-08T01:57:03Z
2024-04-11T15:38:05Z
2024-04-11T15:38:05Z
NONE
null
null
### Describe the bug Calling `add_faiss_index` on a `Dataset` with a column argument raises a ValueError. The following is the trace ```python 214 def replacement_add(self, x): 215 """Adds vectors to the index. 216 The index must be trained before vectors can be added to it. 217 The vectors are implicitly numbered in sequence. When `n` vectors are (...) 224 `dtype` must be float32. 225 """ --> 227 n, d = x.shape 228 assert d == self.d 229 x = np.ascontiguousarray(x, dtype='float32') ValueError: not enough values to unpack (expected 2, got 1) ``` ### Steps to reproduce the bug 1. Load any dataset like `ds = datasets.load_dataset("wikimedia/wikipedia", "20231101.en")["train"]` 2. Add an FAISS index on any column `ds.add_faiss_index('title')` ### Expected behavior The index should be created ### Environment info - `datasets` version: 2.18.0 - Platform: Linux-6.5.0-26-generic-x86_64-with-glibc2.35 - Python version: 3.9.19 - `huggingface_hub` version: 0.22.2 - PyArrow version: 15.0.2 - Pandas version: 2.2.1 - `fsspec` version: 2024.2.0 - `faiss-cpu` version: 1.8.0
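As the discussion above points out, the column handed to `add_faiss_index` must hold fixed-size float vectors (embeddings), not raw strings — FAISS does not embed text itself. A minimal sketch of the intended usage, with random vectors standing in for a real encoder (the 64-dim size and column names are arbitrary choices for the example):

```python
import numpy as np
from datasets import Dataset

titles = ["Alan Turing", "Ada Lovelace", "Grace Hopper"]
rng = np.random.default_rng(0)

# In practice the embeddings would come from a sentence-encoder model, not an RNG.
ds = Dataset.from_dict(
    {
        "title": titles,
        "embeddings": [rng.random(64, dtype=np.float32) for _ in titles],
    }
)

# Index the vector column, not the string column.
ds.add_faiss_index(column="embeddings")

query = rng.random(64, dtype=np.float32)
scores, examples = ds.get_nearest_examples("embeddings", query, k=2)
print(examples["title"])
```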
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6791/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6790
6,790
PyArrow 'Memory mapping file failed: Cannot allocate memory' bug
{ "avatar_url": "https://avatars.githubusercontent.com/u/25725697?v=4", "events_url": "https://api.github.com/users/lasuomela/events{/privacy}", "followers_url": "https://api.github.com/users/lasuomela/followers", "following_url": "https://api.github.com/users/lasuomela/following{/other_user}", "gists_url": "https://api.github.com/users/lasuomela/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lasuomela", "id": 25725697, "login": "lasuomela", "node_id": "MDQ6VXNlcjI1NzI1Njk3", "organizations_url": "https://api.github.com/users/lasuomela/orgs", "received_events_url": "https://api.github.com/users/lasuomela/received_events", "repos_url": "https://api.github.com/users/lasuomela/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lasuomela/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lasuomela/subscriptions", "type": "User", "url": "https://api.github.com/users/lasuomela", "user_view_type": "public" }
[]
open
false
[ "Thanks for a very clean explanation. This happened to me too, and I don't have sudo access to update the value. I wonder if there might be another workaround.", "One option is to just have more data in each file - /proc/sys/vm/max_map_count limits the maximum number of concurrently open files, but I don't know if the size of a single file is restricted in any way. E.g. 5000 files with 1GB each is 5TB of data. https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.concatenate_datasets can come in handy.", "> One option is to just have more data in each file - /proc/sys/vm/max_map_count limits the maximum number of concurrently open files, but I don't know if the size of a single file is restricted in any way. E.g. 5000 files with 1GB each is 5TB of data. https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.concatenate_datasets can come in handy.\n\nI still got the same error even if i only got 25k arrow files and i found this error will raise when each process of \"load_from_disk\" was allocated vram more than 128T", "Another option is to use `load_from_disk` for a certain limit (let's say, 50,000 for safety), and the other calls for the function could include the argument `keep_in_memory=True` (e.g. disable mmap).\n\nExample:\n```python\nimport os\nfrom datasets import load_from_disk\n\npath = \"datasets_dir\"\nfiles = os.listdir(path)\nmmap_threshold = 50000 # how many `load_from_disk` calls should include mmap\nfor i, file in enumerate(files):\n full_path = os.path.join(path, file)\n \n disable_mmap = i >= mmap_threshold\n _ = load_from_disk(full_path, keep_in_memory=disable_mmap)\n```" ]
2024-04-07T19:25:39Z
2025-09-08T14:04:59Z
null
NONE
null
null
### Describe the bug Hello, I've been struggling with a problem using Huggingface datasets caused by PyArrow memory allocation. I finally managed to solve it, and thought to document it since similar issues have been raised here before (https://github.com/huggingface/datasets/issues/5710, https://github.com/huggingface/datasets/issues/6176). In my case, I was trying to load ~70k dataset files from disk using `datasets.load_from_disk(data_path)` (meaning 70k repeated calls to load_from_disk). This triggered an (uninformative) exception around 64k loaded files: ``` File "pyarrow/io.pxi", line 1053, in pyarrow.lib.memory_map File "pyarrow/io.pxi", line 1000, in pyarrow.lib.MemoryMappedFile._open File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status OSError: Memory mapping file failed: Cannot allocate memory ``` Despite system RAM usage being very low. After a lot of digging around, I discovered that my Ubuntu machine had a limit on the maximum number of memory mapped files in `/proc/sys/vm/max_map_count` set to 65530, which was causing my data loader to crash. Increasing the limit in the file (`echo <new_mmap_size> | sudo tee /proc/sys/vm/max_map_count`) made the issue go away. While this isn't a bug as such in either Datasets or PyArrow, this behavior can be very confusing to users. Maybe this should be mentioned in documentation? I suspect the other issues raised here about memory mapping OOM errors could actually be consequence of system configuration. Br, Lauri ### Steps to reproduce the bug ``` import numpy as np import pyarrow as pa import tqdm # Write some data to disk arr = pa.array(np.arange(100)) schema = pa.schema([ pa.field('nums', arr.type) ]) with pa.OSFile('arraydata.arrow', 'wb') as sink: with pa.ipc.new_file(sink, schema=schema) as writer: batch = pa.record_batch([arr], schema=schema) writer.write(batch) # Number of times to open the memory map nums = 70000 # Read the data back arrays = [pa.memory_map('arraydata.arrow', 'r') for _ in tqdm.tqdm(range(nums))] ``` ### Expected behavior No errors. ### Environment info datasets: 2.18.0 pyarrow: 15.0.0
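A small companion check in the spirit of the workaround above: read the current `vm.max_map_count` before opening tens of thousands of memory maps and warn if the limit is too low. This is Linux-specific and purely illustrative; the 70k figure is just the example from this report.

```python
from pathlib import Path


def max_map_count() -> int:
    """Current per-process limit on memory-mapped regions (Linux only)."""
    return int(Path("/proc/sys/vm/max_map_count").read_text())


n_files = 70_000  # number of files we intend to memory-map
limit = max_map_count()
if n_files >= limit:
    print(
        f"About to memory-map {n_files} files but vm.max_map_count is {limit}; "
        "raise it with e.g. `sudo sysctl -w vm.max_map_count=262144`."
    )
```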
null
{ "+1": 4, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 4, "url": "https://api.github.com/repos/huggingface/datasets/issues/6790/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6789
6,789
Issue with map
{ "avatar_url": "https://avatars.githubusercontent.com/u/102672238?v=4", "events_url": "https://api.github.com/users/Nsohko/events{/privacy}", "followers_url": "https://api.github.com/users/Nsohko/followers", "following_url": "https://api.github.com/users/Nsohko/following{/other_user}", "gists_url": "https://api.github.com/users/Nsohko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Nsohko", "id": 102672238, "login": "Nsohko", "node_id": "U_kgDOBh6nbg", "organizations_url": "https://api.github.com/users/Nsohko/orgs", "received_events_url": "https://api.github.com/users/Nsohko/received_events", "repos_url": "https://api.github.com/users/Nsohko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Nsohko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Nsohko/subscriptions", "type": "User", "url": "https://api.github.com/users/Nsohko", "user_view_type": "public" }
[]
open
false
[ "Default `writer_batch_size `is set to 1000 (see [map](https://huggingface.co/docs/datasets/v2.16.1/en/package_reference/main_classes#datasets.Dataset.map)).\r\nThe \"tmp1335llua\" is probably the temp file it creates while writing to disk.\r\nMaybe try lowering the `writer_batch_size`.\r\n\r\nFor multi-processing you should probably pass the `processor `as an argument (with e.g. partial) to the function or create it inside so that the sub-processes have access to it and maybe add `if __name__ == \"__main__\"` (not sure that's necessary?).\r\n", "Hi @Modexus,\r\n\r\nThank you very much for the help! Yep after playing around with map, I managed to get the parallel processing to work by implementing it like you suggested.\r\n\r\nRegarding the temp files, it seems like the temp files just keep growing in size as the map continues. Eventually, once map finishes, the temp files are deleted, but they are instead saved as cache .arrow files. These cache files are absolutely gigantic (~ 30-50x the size of the initial dataset!).\r\n\r\nAfter playing around with the `prepare_dataset()` function above, it seems this issue is caused by the following line in the function, where the log-Mel spectrogram of the audio is calculated:\r\n\r\n`# compute log-Mel input features from input audio array\r\n batch[\"input_features\"] = processor.feature_extractor(audio[\"array\"], \r\n sampling_rate=audio[\"sampling_rate\"]).input_features[0]\r\n`\r\n\r\nWhen I remove this line, the final cache files are approximately the same size as the initial dataset.\r\n\r\nCan I check whether this is expected behavior with the whisper feature extractor? I cant imagine the spectrograms are that large!\r\n\r\nThank you so much for the help!", "I'm having a similar issue with the spectrographs taking up an incredibly large amount of space. (i.e. 100GB for 3GB of audio). Is this really normal behavior?", "Upon taking a look at the hex contents of the mapped dataset files I found that the overwhelming majority of the data contained within them was duplicated junk similar to this. I'm not very familiar with the inner workings of AI but I have to assume this is an inefficient way of storing data at best and a bug at worst.\r\n![image](https://github.com/huggingface/datasets/assets/157770431/70bcbf59-d9ac-4fbf-9b8c-c9e3acc1b539)\r\n", "Same problem, dataset.map takes long time to process 12GB raw audio data and create 200GB cache file. Is there any method can run process(map) during train, instead current run \r\nonce and save cache file ? ", "Same issue here. Just trying to normalise image data for a 300MB dataset, ends up with an 11GB cache. The initial .map() call takes 80s over the 15000 images, but then simply iterating over the dataset takes almost 2 minutes. It should be doing no processing here! Something seems wrong.\r\nkeep_in_memory=True also offers no speedup.\r\nEDIT: Running the normalisation with set_transform (i.e. on the fly) iterates through the dataset in 18s. With no normalisation it takes around 14s. No reason for .map() to take 5 mins!", "@eufrizz How you handle this using set_transform?\r\nI have a really big dataset of size 1.2TB and i am going to use it for fine-tunning whisper model. if i use map for dataset_preparing function it will take over 20 days!!!", "> @eufrizz How you handle this using set_transform?\n> I have a really big dataset of size 1.2TB and i am going to use it for fine-tunning whisper model. 
if i use map for dataset_preparing function it will take over 20 days!!!\n\nJust give the preprocessing function you were using for map to set_transform. Just look at the set_transform documentation. If you're going to do lots of epochs you might be better off just saving the preprocessed data into a new dataset. " ]
2024-04-07T02:52:06Z
2024-07-23T12:41:38Z
null
NONE
null
null
### Describe the bug Map has been taking extremely long to preprocess my data. It seems to process 1000 examples (which it does really fast in about 10 seconds), then it hangs for a good 1-2 minutes, before it moves on to the next batch of 1000 examples. It also keeps eating up my hard drive space for some reason by creating a file named tmp1335llua that is over 300GB. Trying to set num_proc to be >1 also gives me the following error: NameError: name 'processor' is not defined Please advise on how I could optimise this? ### Steps to reproduce the bug In general, I have been using map as per normal. Here is a snippet of my code: ```` ########################### DATASET LOADING AND PREP ######################### def load_custom_dataset(split): ds = [] if split == 'train': for dset in args.train_datasets: ds.append(load_from_disk(dset)) if split == 'test': for dset in args.test_datasets: ds.append(load_from_disk(dset)) ds_to_return = concatenate_datasets(ds) ds_to_return = ds_to_return.shuffle(seed=22) return ds_to_return def prepare_dataset(batch): # load and (possibly) resample audio data to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = processor.feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] # compute input length of audio sample in seconds batch["input_length"] = len(audio["array"]) / audio["sampling_rate"] # optional pre-processing steps transcription = batch["sentence"] if do_lower_case: transcription = transcription.lower() if do_remove_punctuation: transcription = normalizer(transcription).strip() # encode target text to label ids batch["labels"] = processor.tokenizer(transcription).input_ids return batch print('DATASET PREPARATION IN PROGRESS...') # case 3: combine_and_shuffle is true, only train provided # load train datasets train_set = load_custom_dataset('train') # split dataset raw_dataset = DatasetDict() raw_dataset = train_set.train_test_split(test_size = args.test_size, shuffle=True, seed=42) raw_dataset = raw_dataset.cast_column("audio", Audio(sampling_rate=args.sampling_rate)) print("Before Map:") print(raw_dataset) raw_dataset = raw_dataset.map(prepare_dataset, num_proc=1) print("After Map:") print(raw_dataset) ```` ### Expected behavior Based on the speed at which map is processing examples, I would expect a 5-6 hours completion for all mapping However, because it hangs every 1000 examples, I instead roughly estimate it would take about 40 hours! Moreover, i cant even finish the map because it keeps exponentially eating up my hard drive space ### Environment info - `datasets` version: 2.18.0 - Platform: Windows-10-10.0.22631-SP0 - Python version: 3.10.14 - `huggingface_hub` version: 0.22.2 - PyArrow version: 15.0.2 - Pandas version: 2.2.1 - `fsspec` version: 2024.2.0
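Following the suggestion in the comments above, a rough sketch of moving the Whisper feature extraction into `set_transform` so it runs on the fly at access time instead of writing a huge mapped cache. The column names mirror the snippet above; the checkpoint name and the rest are assumptions for the sketch:

```python
from datasets import load_from_disk
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small")


def prepare_batch(batch):
    # Called lazily on each accessed batch; nothing is written to the cache.
    audio = batch["audio"]
    features = processor.feature_extractor(
        [a["array"] for a in audio],
        sampling_rate=audio[0]["sampling_rate"],
    ).input_features
    labels = processor.tokenizer(batch["sentence"]).input_ids
    return {"input_features": features, "labels": labels}


ds = load_from_disk("path/to/audio_dataset")
ds.set_transform(prepare_batch)  # applied at __getitem__ time, no cache files
first = ds[0]  # features are computed here, on demand
```

If the same preprocessing is reused over many epochs, materializing it once with `map` (and a smaller `writer_batch_size`) can still be the better trade-off, as noted in the thread.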
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6789/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6788
6,788
A Question About the Map Function
{ "avatar_url": "https://avatars.githubusercontent.com/u/87431052?v=4", "events_url": "https://api.github.com/users/yslanprime/events{/privacy}", "followers_url": "https://api.github.com/users/yslanprime/followers", "following_url": "https://api.github.com/users/yslanprime/following{/other_user}", "gists_url": "https://api.github.com/users/yslanprime/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yslanprime", "id": 87431052, "login": "yslanprime", "node_id": "MDQ6VXNlcjg3NDMxMDUy", "organizations_url": "https://api.github.com/users/yslanprime/orgs", "received_events_url": "https://api.github.com/users/yslanprime/received_events", "repos_url": "https://api.github.com/users/yslanprime/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yslanprime/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yslanprime/subscriptions", "type": "User", "url": "https://api.github.com/users/yslanprime", "user_view_type": "public" }
[]
closed
false
[ "All data is saved in the arrow format on disk.\r\nIf you return a tensor it gets converted to arrow before saving to disk when using map.\r\n\r\nTo get a tensor when you access data elements you can use `dataset.set_format(\"pt\")`.\r\nNote that this just changes how the data is loaded, not how it is stored.", "> All data is saved in the arrow format on disk. If you return a tensor it gets converted to arrow before saving to disk when using map.\r\n> \r\n> To get a tensor when you access data elements you can use `dataset.set_format(\"pt\")`. Note that this just changes how the data is loaded, not how it is stored.\r\n\r\nThank you very much for your explanation, I understand what you mean now. So you're saying that when streaming=True, there's no need to convert it to the arrow format and save it to disk. But if we directly load all formats and then convert them into the arrow format after passing through the map function, it will convert torch.Tensor into a List. I see." ]
2024-04-06T11:45:23Z
2024-04-11T05:29:35Z
2024-04-11T05:29:35Z
NONE
null
null
### Describe the bug Hello, I have a question regarding the map function in the Hugging Face datasets. The situation is as follows: when I load a jsonl file using load_dataset(..., streaming=False), and then utilize the map function to process it, I specify that the returned example should be of type Torch.tensor. However, I noticed that after applying the map function, the datatype automatically changes to List, which leads to errors in my program. I attempted to use load_dataset(..., streaming=True), and this issue no longer occurs. I'm not entirely clear on why this happens. Could you please provide some insights into this? ### Steps to reproduce the bug 1. dataset = load_dataset(xxx, streaming = False) 2. dataset.map(function), where function returns torch.Tensor. 3. You will find that the data in the dataset is now of type List. ### Expected behavior I expected the data to be returned as torch.Tensor. ### Environment info 2.18.0
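As noted in the answer above, `map` writes its output to Arrow, so tensors come back as lists; the storage format is fixed, and `set_format` only changes how rows are loaded. A small sketch of the difference (toy data, column name chosen for the example):

```python
import torch
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})

# Returning a tensor from map does not change what gets stored:
# the values are converted to Arrow (i.e. lists) when they are written.
ds = ds.map(lambda example: {"x": torch.tensor(example["x"]) * 2})
print(type(ds[0]["x"]))  # <class 'list'>

# set_format changes how rows are loaded, not how they are stored.
ds.set_format("torch")
print(type(ds[0]["x"]))  # <class 'torch.Tensor'>
```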
{ "avatar_url": "https://avatars.githubusercontent.com/u/87431052?v=4", "events_url": "https://api.github.com/users/yslanprime/events{/privacy}", "followers_url": "https://api.github.com/users/yslanprime/followers", "following_url": "https://api.github.com/users/yslanprime/following{/other_user}", "gists_url": "https://api.github.com/users/yslanprime/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yslanprime", "id": 87431052, "login": "yslanprime", "node_id": "MDQ6VXNlcjg3NDMxMDUy", "organizations_url": "https://api.github.com/users/yslanprime/orgs", "received_events_url": "https://api.github.com/users/yslanprime/received_events", "repos_url": "https://api.github.com/users/yslanprime/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yslanprime/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yslanprime/subscriptions", "type": "User", "url": "https://api.github.com/users/yslanprime", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6788/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6787
6,787
TimeoutError in map
{ "avatar_url": "https://avatars.githubusercontent.com/u/48146603?v=4", "events_url": "https://api.github.com/users/Jiaxin-Wen/events{/privacy}", "followers_url": "https://api.github.com/users/Jiaxin-Wen/followers", "following_url": "https://api.github.com/users/Jiaxin-Wen/following{/other_user}", "gists_url": "https://api.github.com/users/Jiaxin-Wen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Jiaxin-Wen", "id": 48146603, "login": "Jiaxin-Wen", "node_id": "MDQ6VXNlcjQ4MTQ2NjAz", "organizations_url": "https://api.github.com/users/Jiaxin-Wen/orgs", "received_events_url": "https://api.github.com/users/Jiaxin-Wen/received_events", "repos_url": "https://api.github.com/users/Jiaxin-Wen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Jiaxin-Wen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Jiaxin-Wen/subscriptions", "type": "User", "url": "https://api.github.com/users/Jiaxin-Wen", "user_view_type": "public" }
[]
open
false
[ "From my current understanding, this timeout is only used when we need to get the results.\r\n\r\nOne of:\r\n1. All tasks are done\r\n2. One worker died\r\n\r\nYour function should work fine and it's definitely a bug if it doesn't.", "When one of the `map`'s worker processes crashes, the linked code re-raises an error from the crash and returns it to the caller.\r\n\r\nIf your question is how to limit the time of long-running tasks/worker processes, such functionality doesn't exist in `datasets` (yet), which means you need to implement it yourself.\r\n\r\nE.g., you can implement it using the built-in `signal` module like this:\r\n```python\r\nimport time\r\nimport signal\r\nfrom contextlib import contextmanager\r\n\r\nfrom datasets import Dataset\r\n\r\n\r\n@contextmanager\r\ndef max_exec_time(t):\r\n def raise_timeout_handler(signum, frame):\r\n raise TimeoutError\r\n \r\n orig_handler = signal.getsignal(signal.SIGALRM)\r\n signal.signal(signal.SIGALRM, raise_timeout_handler)\r\n try:\r\n signal.alarm(t)\r\n yield\r\n finally:\r\n signal.alarm(0)\r\n signal.signal(signal.SIGALRM, orig_handler)\r\n\r\n\r\ndef worker(example, rank):\r\n try:\r\n with max_exec_time(20): # 20 sec execution limit\r\n if rank % 2 == 0:\r\n time.sleep(50) # simulate a long-running task\r\n example[\"a\"] = 100\r\n except TimeoutError:\r\n example[\"a\"] = None # Or return empty batches here in the \"batched\" mode\r\n return example\r\n\r\ndata = Dataset.from_list([{\"a\": 1}, {\"a\": 2}])\r\ndata = data.map(worker, num_proc=2, with_rank=True)\r\nprint(data[0])\r\n```", "> From my current understanding, this timeout is only used when we need to get the results.\r\n> \r\n> One of:\r\n> \r\n> 1. All tasks are done\r\n> 2. One worker died\r\n> \r\n> Your function should work fine and it's definitely a bug if it doesn't.\r\n\r\nthanks for responding! can you reproduce the stuck with the above example code?", "> When one of the `map`'s worker processes crashes, the linked code re-raises an error from the crash and returns it to the caller.\r\n> \r\n> If your question is how to limit the time of long-running tasks/worker processes, such functionality doesn't exist in `datasets` (yet), which means you need to implement it yourself.\r\n> \r\n> E.g., you can implement it using the built-in `signal` module like this:\r\n> \r\n> ```python\r\n> import time\r\n> import signal\r\n> from contextlib import contextmanager\r\n> \r\n> from datasets import Dataset\r\n> \r\n> \r\n> @contextmanager\r\n> def max_exec_time(t):\r\n> def raise_timeout_handler(signum, frame):\r\n> raise TimeoutError\r\n> \r\n> orig_handler = signal.getsignal(signal.SIGALRM)\r\n> signal.signal(signal.SIGALRM, raise_timeout_handler)\r\n> try:\r\n> signal.alarm(t)\r\n> yield\r\n> finally:\r\n> signal.alarm(0)\r\n> signal.signal(signal.SIGALRM, orig_handler)\r\n> \r\n> \r\n> def worker(example, rank):\r\n> try:\r\n> with max_exec_time(20): # 20 sec execution limit\r\n> if rank % 2 == 0:\r\n> time.sleep(50) # simulate a long-running task\r\n> example[\"a\"] = 100\r\n> except TimeoutError:\r\n> example[\"a\"] = None # Or return empty batches here in the \"batched\" mode\r\n> return example\r\n> \r\n> data = Dataset.from_list([{\"a\": 1}, {\"a\": 2}])\r\n> data = data.map(worker, num_proc=2, with_rank=True)\r\n> print(data[0])\r\n> ```\r\n\r\nthanks for responding! 
However, I don't think we should use `signal` in the context of multiprocessing since sometimes it will crash one process and raise the following error\r\nhttps://github.com/huggingface/datasets/blob/c3ddb1ef00334a6f973679a51e783905fbc9ef0b/src/datasets/utils/py_utils.py#L664", "> thanks for responding! However, I don't think we should use signal in the context of multiprocessing since sometimes it will crash one process and raise the following error\r\n\r\nThe above code has `try/except` to catch the error from the handler. Or do you get an error other than `TimeoutError`?", "> > thanks for responding! However, I don't think we should use signal in the context of multiprocessing since sometimes it will crash one process and raise the following error\r\n> \r\n> The above code has `try/except` to catch the error from the handler. Or do you get an error other than `TimeoutError`?\r\n\r\nyup, it will raise the RuntimeError: https://github.com/huggingface/datasets/blob/c3ddb1ef00334a6f973679a51e783905fbc9ef0b/src/datasets/utils/py_utils.py#L667C19-L670C22\r\n\r\n```\r\n raise RuntimeError(\r\n \"One of the subprocesses has abruptly died during map operation.\"\r\n \"To debug the error, disable multiprocessing.\"\r\n )\r\n```", "What @mariosasko proposed it's very useful for debugging. Thank you!" ]
2024-04-06T06:25:39Z
2024-08-14T02:09:57Z
null
CONTRIBUTOR
null
null
### Describe the bug ```python from datasets import Dataset def worker(example): while True: continue example['a'] = 100 return example data = Dataset.from_list([{"a": 1}, {"a": 2}]) data = data.map(worker) print(data[0]) ``` I'm implementing a worker function whose runtime will depend on specific examples (e.g., while most examples take 0.01s in worker, several examples may take 50s). Therefore, I would like to know how the current implementation will handle those subprocesses that require a long (e.g., >= 5min) or even infinite time. I notice that the current implementation set a timeout of 0.05 second https://github.com/huggingface/datasets/blob/c3ddb1ef00334a6f973679a51e783905fbc9ef0b/src/datasets/utils/py_utils.py#L674 However, this example code still gets stuck. ### Steps to reproduce the bug run the example above ### Expected behavior I want to set a default worker to handle these timeout cases, instead of getting stuck ### Environment info main branch version
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6787/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6786
6,786
Make Image cast storage faster
{ "avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4", "events_url": "https://api.github.com/users/Modexus/events{/privacy}", "followers_url": "https://api.github.com/users/Modexus/followers", "following_url": "https://api.github.com/users/Modexus/following{/other_user}", "gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Modexus", "id": 37351874, "login": "Modexus", "node_id": "MDQ6VXNlcjM3MzUxODc0", "organizations_url": "https://api.github.com/users/Modexus/orgs", "received_events_url": "https://api.github.com/users/Modexus/received_events", "repos_url": "https://api.github.com/users/Modexus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Modexus/subscriptions", "type": "User", "url": "https://api.github.com/users/Modexus", "user_view_type": "public" }
[]
open
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6786). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Hi ! Thanks for diving into this, this conversion to python lists is indeed quite slow.\r\n\r\nArray2DExtensionType and Array3DExtensionType currently rely on pyarrow lists, but we will soon modify them to use FixedShapeTensorArray instead which is more efficient (e.g. doesn't need to store an offset for each value). So ideally it would be cool to speed this code up without using those extension types or it will be blocking to improve Array2DExtensionType and Array3DExtensionType.\r\n\r\nIf I understand correctly you just need the logic from ArrayExtensionArray.to_numpy ? If so feel free to make a separate function and ArrayExtensionArray.to_numpy can call it", "Hey! I didn't have time to look into this but I just stumbled upon another problem. \r\nWhile my fix kind of made it usable I now pre-embedded the images and even as Array3D they are really slow to load. \r\nDon't think this can be resolved with using ArrayExtensionArray.to_numpy.\r\n\r\nI think actually making the Array3DExtensionType faster would probably resolve both issues as you mentioned.\r\nIs there an update on using FixedShapeTensorArray?\r\nI'd gladly help implementing/testing it if there is some outline how to do it.", "No one is working on this atm afaik (and actually we don't have any ETA unfortunately).\r\n\r\nTo do this change I think we need to:\r\n- update the `_ArrayXD` parent class of all the `Array2D`, `Array3D` types to use `pa.fixed_shape_tensor` type\r\n ```diff\r\n - pa_type = globals()[self.__class__.__name__ + \"ExtensionType\"](self.shape, self.dtype)\r\n + pa_type = pa.fixed_shape_tensor(self.shape, string_to_arrow(self.dtype))\r\n ```\r\n- remove the old extension type `_ArrayXDExtensionType` and extension array `ArrayExtensionArray`\r\n- probably update some functions in `features.py` that were using those types and use the new ones instead", "Thanks, I have looked into this and have a working solution at least for my specific case.\r\nBut I had quite a few issues along the way that are not solved nicely.\r\nIt follows your suggestion though internally it is then just a fixed_shape_tensor as there is no ExtensionType anymore.\r\n\r\nHopefully, I can create a separate PR with these changes soon.", "Nice, thanks @Modexus ! ", "I have run into some issues, notably I don't think `FixedShapeTensorArray` is completely supported by `pandas `and `polars`.\r\nWell it seems to work for `pandas `but one loses the actual shape of the extension.\r\n`Polars `just throws an error and this cannot be changed with `schema_overrides` as they are applied after.\r\n\r\nI have tried to somehow cast the `FixedShapeTensorArray` to something else like a nested FixedSizeLists, however I have not found a clean solution to do that.\r\nIf somebody has a clean solution to cast it to something such that the shape survives the roundtrip to `pandas`/`polars `and back, it may be possible.\r\n", "Can we start using FixedShapeTensor or FixedSizeList even if pandas/polars don't support them fully yet ?\r\n\r\nWe would still get the benefit of optimized conversion to numpy" ]
2024-04-05T17:00:46Z
2024-10-01T09:09:14Z
null
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6786.diff", "html_url": "https://github.com/huggingface/datasets/pull/6786", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6786.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6786" }
PR for issue #6782. Makes `cast_storage` of the `Image` class faster by removing the slow call to `.pylist`. Instead directly convert each `ListArray` item to either `Array2DExtensionType` or `Array3DExtensionType`. This also preserves the `dtype` removing the warning if the array is already `uint8`.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6786/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6785
6,785
rename datasets-server to dataset-viewer
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6785). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005224 / 0.011353 (-0.006129) | 0.003938 / 0.011008 (-0.007070) | 0.063829 / 0.038508 (0.025321) | 0.030975 / 0.023109 (0.007865) | 0.265090 / 0.275898 (-0.010808) | 0.290994 / 0.323480 (-0.032486) | 0.003083 / 0.007986 (-0.004902) | 0.002810 / 0.004328 (-0.001518) | 0.048860 / 0.004250 (0.044609) | 0.044663 / 0.037052 (0.007611) | 0.272161 / 0.258489 (0.013672) | 0.306966 / 0.293841 (0.013125) | 0.028028 / 0.128546 (-0.100518) | 0.010616 / 0.075646 (-0.065031) | 0.211649 / 0.419271 (-0.207623) | 0.035906 / 0.043533 (-0.007626) | 0.251779 / 0.255139 (-0.003360) | 0.275543 / 0.283200 (-0.007657) | 0.017710 / 0.141683 (-0.123973) | 1.127015 / 1.452155 (-0.325139) | 1.173319 / 1.492716 (-0.319397) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090625 / 0.018006 (0.072619) | 0.301973 / 0.000490 (0.301483) | 0.000217 / 0.000200 (0.000017) | 0.000053 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018868 / 0.037411 (-0.018543) | 0.062402 / 0.014526 (0.047876) | 0.074053 / 0.176557 (-0.102504) | 0.121484 / 0.737135 (-0.615652) | 0.078674 / 0.296338 (-0.217664) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.277821 / 0.215209 (0.062612) | 2.761642 / 2.077655 (0.683987) | 1.452735 / 1.504120 (-0.051385) | 1.336303 / 1.541195 (-0.204891) | 1.343045 / 1.468490 (-0.125445) | 0.560917 / 4.584777 (-4.023860) | 2.353427 / 3.745712 (-1.392286) | 2.699067 / 5.269862 (-2.570795) | 1.704752 / 4.565676 (-2.860925) | 0.062668 / 0.424275 (-0.361607) | 0.005120 / 0.007607 (-0.002487) | 0.330455 / 0.226044 (0.104410) | 3.264604 / 2.268929 (0.995675) | 1.791940 / 55.444624 (-53.652685) | 1.526083 / 6.876477 (-5.350394) | 1.541429 / 2.142072 (-0.600643) | 0.630343 / 4.805227 (-4.174884) | 0.115189 / 6.500664 (-6.385475) | 0.041716 / 0.075469 (-0.033753) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.975008 / 1.841788 (-0.866779) | 11.326924 / 8.074308 (3.252616) | 9.810300 / 10.191392 (-0.381092) | 0.141068 / 0.680424 (-0.539356) | 0.013950 / 0.534201 (-0.520251) | 0.285691 / 0.579283 (-0.293592) | 0.257968 / 0.434364 (-0.176396) | 0.322976 / 0.540337 (-0.217361) | 0.411114 / 1.386936 (-0.975822) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005176 / 0.011353 (-0.006177) | 0.003631 / 0.011008 (-0.007377) | 0.050006 / 0.038508 (0.011498) | 0.030622 / 0.023109 (0.007513) | 0.277364 / 0.275898 (0.001466) | 0.299752 / 0.323480 (-0.023728) | 0.004110 / 0.007986 (-0.003876) | 0.002694 / 0.004328 (-0.001634) | 0.048966 / 0.004250 (0.044715) | 0.039634 / 0.037052 (0.002582) | 0.289959 / 0.258489 (0.031470) | 0.320689 / 0.293841 (0.026848) | 0.029285 / 0.128546 (-0.099261) | 0.010435 / 0.075646 (-0.065211) | 0.057432 / 0.419271 (-0.361840) | 0.032554 / 0.043533 (-0.010979) | 0.277354 / 0.255139 (0.022215) | 0.296872 / 0.283200 (0.013673) | 0.017338 / 0.141683 (-0.124344) | 1.134174 / 1.452155 (-0.317981) | 1.184695 / 1.492716 (-0.308021) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.089953 / 0.018006 (0.071947) | 0.299372 / 0.000490 (0.298882) | 0.000212 / 0.000200 (0.000012) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021349 / 0.037411 (-0.016062) | 0.075167 / 0.014526 (0.060641) | 0.085910 / 0.176557 (-0.090647) | 0.124729 / 0.737135 (-0.612406) | 0.088313 / 0.296338 (-0.208025) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291939 / 0.215209 (0.076730) | 2.851077 / 2.077655 (0.773423) | 1.609382 / 1.504120 (0.105262) | 1.469656 / 1.541195 (-0.071539) | 1.490469 / 1.468490 (0.021979) | 0.570421 / 4.584777 (-4.014356) | 2.441438 / 3.745712 (-1.304274) | 2.756514 / 5.269862 (-2.513347) | 1.714202 / 4.565676 (-2.851474) | 0.063656 / 0.424275 (-0.360619) | 0.005640 / 0.007607 (-0.001967) | 0.336240 / 0.226044 (0.110196) | 3.355434 / 2.268929 (1.086505) | 1.947553 / 55.444624 (-53.497072) | 1.672776 / 6.876477 (-5.203700) | 1.685316 / 2.142072 (-0.456757) | 0.638849 / 4.805227 (-4.166378) | 0.116304 / 6.500664 (-6.384360) | 0.041588 / 0.075469 (-0.033881) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.026700 / 1.841788 (-0.815088) | 12.044628 / 8.074308 (3.970319) | 10.464007 / 10.191392 (0.272615) | 0.156169 / 0.680424 (-0.524255) | 0.015624 / 0.534201 (-0.518577) | 0.287233 / 0.579283 (-0.292050) | 0.270374 / 0.434364 (-0.163990) | 0.325255 / 0.540337 (-0.215083) | 0.412021 / 1.386936 (-0.974915) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#6f7f1718e3db54d7923ebe4383301fdd380c18b9 \"CML watermark\")\n" ]
2024-04-05T16:37:05Z
2024-04-08T12:41:13Z
2024-04-08T12:35:02Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6785.diff", "html_url": "https://github.com/huggingface/datasets/pull/6785", "merged_at": "2024-04-08T12:35:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/6785.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6785" }
See https://github.com/huggingface/dataset-viewer/issues/2650. Tell me if it's OK, or if it's a breaking change that must be handled differently. Also note that the docs page is still https://huggingface.co/docs/datasets-server/, so I didn't change it. And the API URL is still https://datasets-server.huggingface.co/ (and [might always be](https://github.com/huggingface/dataset-viewer/issues/2666)), so I left it as is too.
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6785/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6784
6,784
Extract data on the fly in packaged builders
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6784). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "CI failures are unrelated, so this is ready for the review", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005130 / 0.011353 (-0.006223) | 0.003784 / 0.011008 (-0.007224) | 0.064899 / 0.038508 (0.026391) | 0.029456 / 0.023109 (0.006347) | 0.253384 / 0.275898 (-0.022514) | 0.273509 / 0.323480 (-0.049971) | 0.004116 / 0.007986 (-0.003870) | 0.002713 / 0.004328 (-0.001615) | 0.053984 / 0.004250 (0.049733) | 0.043538 / 0.037052 (0.006485) | 0.264696 / 0.258489 (0.006207) | 0.298321 / 0.293841 (0.004480) | 0.027916 / 0.128546 (-0.100630) | 0.010734 / 0.075646 (-0.064912) | 0.208284 / 0.419271 (-0.210988) | 0.035873 / 0.043533 (-0.007659) | 0.251028 / 0.255139 (-0.004111) | 0.270835 / 0.283200 (-0.012364) | 0.017475 / 0.141683 (-0.124208) | 1.130728 / 1.452155 (-0.321426) | 1.188672 / 1.492716 (-0.304044) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094191 / 0.018006 (0.076185) | 0.304064 / 0.000490 (0.303575) | 0.000251 / 0.000200 (0.000051) | 0.000058 / 0.000054 (0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018414 / 0.037411 (-0.018998) | 0.061550 / 0.014526 (0.047024) | 0.074200 / 0.176557 (-0.102357) | 0.120250 / 0.737135 (-0.616885) | 0.076018 / 0.296338 (-0.220321) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled 
read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.302517 / 0.215209 (0.087308) | 2.943936 / 2.077655 (0.866282) | 1.584847 / 1.504120 (0.080727) | 1.464501 / 1.541195 (-0.076694) | 1.472402 / 1.468490 (0.003912) | 0.570971 / 4.584777 (-4.013806) | 2.383207 / 3.745712 (-1.362505) | 2.811520 / 5.269862 (-2.458342) | 1.746997 / 4.565676 (-2.818680) | 0.063391 / 0.424275 (-0.360884) | 0.005296 / 0.007607 (-0.002311) | 0.358948 / 0.226044 (0.132903) | 3.604704 / 2.268929 (1.335776) | 1.935813 / 55.444624 (-53.508812) | 1.659944 / 6.876477 (-5.216533) | 1.687151 / 2.142072 (-0.454922) | 0.658044 / 4.805227 (-4.147183) | 0.120425 / 6.500664 (-6.380240) | 0.042694 / 0.075469 (-0.032775) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.986308 / 1.841788 (-0.855479) | 11.727945 / 8.074308 (3.653637) | 9.532785 / 10.191392 (-0.658607) | 0.140071 / 0.680424 (-0.540352) | 0.013472 / 0.534201 (-0.520729) | 0.285828 / 0.579283 (-0.293455) | 0.261571 / 0.434364 (-0.172793) | 0.323114 / 0.540337 (-0.217223) | 0.418132 / 1.386936 (-0.968804) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005428 / 0.011353 (-0.005925) | 0.003954 / 0.011008 (-0.007054) | 0.050336 / 0.038508 (0.011828) | 0.029941 / 0.023109 (0.006831) | 0.281483 / 0.275898 (0.005585) | 0.304822 / 0.323480 (-0.018658) | 0.004151 / 0.007986 (-0.003835) | 0.002862 / 0.004328 (-0.001466) | 0.049196 / 0.004250 (0.044945) | 0.040266 / 0.037052 (0.003213) | 0.293515 / 0.258489 (0.035026) | 0.319165 / 0.293841 (0.025324) | 0.029186 / 0.128546 (-0.099360) | 0.010838 / 0.075646 (-0.064809) | 0.058789 / 0.419271 (-0.360483) | 0.032847 / 0.043533 (-0.010686) | 0.280164 / 0.255139 (0.025025) | 0.299609 / 0.283200 (0.016410) | 0.018291 / 0.141683 (-0.123392) | 1.153858 / 1.452155 (-0.298297) | 1.219108 / 1.492716 (-0.273608) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row 
|\n|--------|---|---|---|---|\n| new / old (diff) | 0.093783 / 0.018006 (0.075777) | 0.301526 / 0.000490 (0.301037) | 0.000211 / 0.000200 (0.000011) | 0.000055 / 0.000054 (0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022105 / 0.037411 (-0.015306) | 0.074844 / 0.014526 (0.060318) | 0.087147 / 0.176557 (-0.089409) | 0.127678 / 0.737135 (-0.609457) | 0.088630 / 0.296338 (-0.207709) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286805 / 0.215209 (0.071596) | 2.828664 / 2.077655 (0.751009) | 1.579771 / 1.504120 (0.075651) | 1.463137 / 1.541195 (-0.078058) | 1.509238 / 1.468490 (0.040748) | 0.583425 / 4.584777 (-4.001352) | 2.424905 / 3.745712 (-1.320807) | 2.819354 / 5.269862 (-2.450508) | 1.784695 / 4.565676 (-2.780981) | 0.063374 / 0.424275 (-0.360901) | 0.005337 / 0.007607 (-0.002270) | 0.342291 / 0.226044 (0.116247) | 3.404319 / 2.268929 (1.135390) | 1.956909 / 55.444624 (-53.487716) | 1.694317 / 6.876477 (-5.182160) | 1.696256 / 2.142072 (-0.445817) | 0.655748 / 4.805227 (-4.149480) | 0.116785 / 6.500664 (-6.383879) | 0.040930 / 0.075469 (-0.034539) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.034463 / 1.841788 (-0.807325) | 12.252041 / 8.074308 (4.177733) | 10.593960 / 10.191392 (0.402568) | 0.139311 / 0.680424 (-0.541112) | 0.016177 / 0.534201 (-0.518023) | 0.288910 / 0.579283 (-0.290373) | 0.281588 / 0.434364 (-0.152776) | 0.323066 / 0.540337 (-0.217272) | 0.427604 / 1.386936 (-0.959332) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#a188022dc43a76a119d90c03832d51d6e4a94d91 \"CML watermark\")\n" ]
2024-04-05T16:12:25Z
2024-04-16T16:37:47Z
2024-04-16T16:31:29Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6784.diff", "html_url": "https://github.com/huggingface/datasets/pull/6784", "merged_at": "2024-04-16T16:31:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/6784.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6784" }
Instead of waiting for data files to be extracted in the packaged builders, we can prepend the compression prefix and extract them as they are being read (using `fsspec`). This saves disk space (extracted archives are not deleted by default) and slightly speeds up dataset generation (fewer disk reads).
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6784/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6783
6,783
AttributeError: module 'numpy' has no attribute 'object'. in Kaggle Notebook
{ "avatar_url": "https://avatars.githubusercontent.com/u/26062262?v=4", "events_url": "https://api.github.com/users/petrov826/events{/privacy}", "followers_url": "https://api.github.com/users/petrov826/followers", "following_url": "https://api.github.com/users/petrov826/following{/other_user}", "gists_url": "https://api.github.com/users/petrov826/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/petrov826", "id": 26062262, "login": "petrov826", "node_id": "MDQ6VXNlcjI2MDYyMjYy", "organizations_url": "https://api.github.com/users/petrov826/orgs", "received_events_url": "https://api.github.com/users/petrov826/received_events", "repos_url": "https://api.github.com/users/petrov826/repos", "site_admin": false, "starred_url": "https://api.github.com/users/petrov826/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/petrov826/subscriptions", "type": "User", "url": "https://api.github.com/users/petrov826", "user_view_type": "public" }
[]
closed
false
[ "Hi! You can fix this by updating the `datasets` package with `pip install -U datasets` and restarting the notebook.\r\n", "Kaggle removed the problematic `datasets==2.1.0` pin last week, so I'm closing this issue (now it pre-installs the latest version)." ]
2024-04-05T14:31:48Z
2024-04-11T17:18:53Z
2024-04-11T17:18:53Z
NONE
null
null
### Describe the bug # problem I can't resample audio dataset in Kaggle Notebook. It looks like some code in `datasets` library use aliases that were deprecated in NumPy 1.20. ## code for resampling ``` from datasets import load_dataset, Audio from transformers import AutoFeatureExtractor from transformers import AutoModelForAudioClassification, TrainingArguments, Trainer minds = load_dataset("PolyAI/minds14", name="en-US", split="train") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True ) return inputs dataset = dataset.map(preprocess_function, remove_columns="audio", batched=True, batch_size=100) ``` ## the error I got <details> <summary>Click to expand</summary> ``` --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) Cell In[20], line 1 ----> 1 dataset = dataset.map(preprocess_function, remove_columns="audio", batched=True, batch_size=100) 2 dataset File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:1955, in Dataset.map(self, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc) 1952 disable_tqdm = not logging.is_progress_bar_enabled() 1954 if num_proc is None or num_proc == 1: -> 1955 return self._map_single( 1956 function=function, 1957 with_indices=with_indices, 1958 with_rank=with_rank, 1959 input_columns=input_columns, 1960 batched=batched, 1961 batch_size=batch_size, 1962 drop_last_batch=drop_last_batch, 1963 remove_columns=remove_columns, 1964 keep_in_memory=keep_in_memory, 1965 load_from_cache_file=load_from_cache_file, 1966 cache_file_name=cache_file_name, 1967 writer_batch_size=writer_batch_size, 1968 features=features, 1969 disable_nullable=disable_nullable, 1970 fn_kwargs=fn_kwargs, 1971 new_fingerprint=new_fingerprint, 1972 disable_tqdm=disable_tqdm, 1973 desc=desc, 1974 ) 1975 else: 1977 def format_cache_file_name(cache_file_name, rank): File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:520, in transmit_tasks.<locals>.wrapper(*args, **kwargs) 518 self: "Dataset" = kwargs.pop("self") 519 # apply actual function --> 520 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 521 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 522 for dataset in datasets: 523 # Remove task templates if a column mapping of the template is no longer valid File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:487, in transmit_format.<locals>.wrapper(*args, **kwargs) 480 self_format = { 481 "type": self._format_type, 482 "format_kwargs": self._format_kwargs, 483 "columns": self._format_columns, 484 "output_all_columns": self._output_all_columns, 485 } 486 # apply actual function --> 487 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 488 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 489 # re-apply format to the output File /opt/conda/lib/python3.10/site-packages/datasets/fingerprint.py:458, in fingerprint_transform.<locals>._fingerprint.<locals>.wrapper(*args, **kwargs) 452 kwargs[fingerprint_name] = 
update_fingerprint( 453 self._fingerprint, transform, kwargs_for_fingerprint 454 ) 456 # Call actual function --> 458 out = func(self, *args, **kwargs) 460 # Update fingerprint of in-place transforms + update in-place history of transforms 462 if inplace: # update after calling func so that the fingerprint doesn't change if the function fails File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:2356, in Dataset._map_single(self, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, disable_tqdm, desc, cache_only) 2354 writer.write_table(batch) 2355 else: -> 2356 writer.write_batch(batch) 2357 if update_data and writer is not None: 2358 writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file File /opt/conda/lib/python3.10/site-packages/datasets/arrow_writer.py:507, in ArrowWriter.write_batch(self, batch_examples, writer_batch_size) 505 col_try_type = try_features[col] if try_features is not None and col in try_features else None 506 typed_sequence = OptimizedTypedSequence(batch_examples[col], type=col_type, try_type=col_try_type, col=col) --> 507 arrays.append(pa.array(typed_sequence)) 508 inferred_features[col] = typed_sequence.get_inferred_type() 509 schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema File /opt/conda/lib/python3.10/site-packages/pyarrow/array.pxi:236, in pyarrow.lib.array() File /opt/conda/lib/python3.10/site-packages/pyarrow/array.pxi:110, in pyarrow.lib._handle_arrow_array_protocol() File /opt/conda/lib/python3.10/site-packages/datasets/arrow_writer.py:184, in TypedSequence.__arrow_array__(self, type) 182 out = numpy_to_pyarrow_listarray(data) 183 elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray): --> 184 out = list_of_np_array_to_pyarrow_listarray(data) 185 else: 186 trying_cast_to_python_objects = True File /opt/conda/lib/python3.10/site-packages/datasets/features/features.py:1174, in list_of_np_array_to_pyarrow_listarray(l_arr, type) 1172 """Build a PyArrow ListArray from a possibly nested list of NumPy arrays""" 1173 if len(l_arr) > 0: -> 1174 return list_of_pa_arrays_to_pyarrow_listarray( 1175 [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr] 1176 ) 1177 else: 1178 return pa.array([], type=type) File /opt/conda/lib/python3.10/site-packages/datasets/features/features.py:1163, in list_of_pa_arrays_to_pyarrow_listarray(l_arr) 1160 null_indices = [i for i, arr in enumerate(l_arr) if arr is None] 1161 l_arr = [arr for arr in l_arr if arr is not None] 1162 offsets = np.cumsum( -> 1163 [0] + [len(arr) for arr in l_arr], dtype=np.object 1164 ) # convert to dtype object to allow None insertion 1165 offsets = np.insert(offsets, null_indices, None) 1166 offsets = pa.array(offsets, type=pa.int32()) File /opt/conda/lib/python3.10/site-packages/numpy/__init__.py:324, in __getattr__(attr) 319 warnings.warn( 320 f"In the future `np.{attr}` will be defined as the " 321 "corresponding NumPy scalar.", FutureWarning, stacklevel=2) 323 if attr in __former_attrs__: --> 324 raise AttributeError(__former_attrs__[attr]) 326 if attr == 'testing': 327 import numpy.testing as testing AttributeError: module 'numpy' has no attribute 'object'. `np.object` was a deprecated alias for the builtin `object`. 
To avoid this error in existing code, use `object` by itself. Doing this will not modify any behavior and is safe. The aliases was originally deprecated in NumPy 1.20; for more details and guidance see the original release note at: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations ``` </details> ### Steps to reproduce the bug Run above code in Kaggle Notebook. ### Expected behavior I can resample audio data without fail. ### Environment info - `datasets` version: 2.1.0 - Platform: Linux-5.15.133+-x86_64-with-glibc2.31 - Python version: 3.10.13 - PyArrow version: 11.0.0 - Pandas version: 2.2.1
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6783/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6782
6,782
Image cast_storage very slow for arrays (e.g. numpy, tensors)
{ "avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4", "events_url": "https://api.github.com/users/Modexus/events{/privacy}", "followers_url": "https://api.github.com/users/Modexus/followers", "following_url": "https://api.github.com/users/Modexus/following{/other_user}", "gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Modexus", "id": 37351874, "login": "Modexus", "node_id": "MDQ6VXNlcjM3MzUxODc0", "organizations_url": "https://api.github.com/users/Modexus/orgs", "received_events_url": "https://api.github.com/users/Modexus/received_events", "repos_url": "https://api.github.com/users/Modexus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Modexus/subscriptions", "type": "User", "url": "https://api.github.com/users/Modexus", "user_view_type": "public" }
[]
open
false
[ "This may be a solution that only changes `cast_storage` of `Image`.\r\nHowever, I'm not totally sure that the assumptions hold that are made about the `ListArray`.\r\n\r\n```python\r\nelif pa.types.is_list(storage.type):\r\n from .features import Array3DExtensionType\r\n\r\n def get_shapes(arr):\r\n shape = ()\r\n while isinstance(arr, pa.ListArray):\r\n len_curr = len(arr)\r\n arr = arr.flatten()\r\n len_new = len(arr)\r\n shape = shape + (len_new // len_curr,)\r\n return shape\r\n\r\n def get_dtypes(arr):\r\n dtype = storage.type\r\n while hasattr(dtype, \"value_type\"):\r\n dtype = dtype.value_type\r\n return dtype\r\n\r\n arrays = []\r\n for i, is_null in enumerate(storage.is_null()):\r\n if not is_null.as_py():\r\n storage_part = storage.take([i])\r\n shape = get_shapes(storage_part)\r\n dtype = get_dtypes(storage_part)\r\n\r\n extension_type = Array3DExtensionType(shape=shape, dtype=str(dtype))\r\n array = pa.ExtensionArray.from_storage(extension_type, storage_part)\r\n arrays.append(array.to_numpy().squeeze(0))\r\n else:\r\n arrays.append(None)\r\n\r\n bytes_array = pa.array(\r\n [encode_np_array(arr)[\"bytes\"] if arr is not None else None for arr in arrays],\r\n type=pa.binary(),\r\n )\r\n path_array = pa.array([None] * len(storage), type=pa.string())\r\n storage = pa.StructArray.from_arrays(\r\n [bytes_array, path_array], [\"bytes\", \"path\"], mask=bytes_array.is_null()\r\n )\r\n```\r\n(Edited): to handle nulls\r\n\r\nNotably this doesn't change anything about the passing through of data or other things, just in the `Image` class.\r\nSeems quite fast:\r\n```bash\r\nFri Apr 5 17:55:51 2024 restats\r\n\r\n 63818 function calls (61995 primitive calls) in 0.812 seconds\r\n\r\n Ordered by: cumulative time\r\n List reduced from 1051 to 20 due to restriction <20>\r\n\r\n ncalls tottime percall cumtime percall filename:lineno(function)\r\n 47/1 0.000 0.000 0.810 0.810 {built-in method builtins.exec}\r\n 2/1 0.000 0.000 0.810 0.810 <string>:1(<module>)\r\n 2/1 0.000 0.000 0.809 0.809 arrow_dataset.py:594(wrapper)\r\n 2/1 0.000 0.000 0.809 0.809 arrow_dataset.py:551(wrapper)\r\n 2/1 0.000 0.000 0.809 0.809 arrow_dataset.py:2916(map)\r\n 3 0.000 0.000 0.807 0.269 arrow_dataset.py:3277(_map_single)\r\n 1 0.000 0.000 0.760 0.760 arrow_writer.py:589(finalize)\r\n 1 0.000 0.000 0.760 0.760 arrow_writer.py:423(write_examples_on_file)\r\n 1 0.000 0.000 0.759 0.759 arrow_writer.py:527(write_batch)\r\n 1 0.001 0.001 0.754 0.754 arrow_writer.py:161(__arrow_array__)\r\n 2/1 0.000 0.000 0.719 0.719 table.py:1800(wrapper)\r\n 1 0.000 0.000 0.719 0.719 table.py:1950(cast_array_to_feature)\r\n 1 0.006 0.006 0.718 0.718 image.py:209(cast_storage)\r\n 1 0.000 0.000 0.451 0.451 image.py:361(encode_np_array)\r\n 1 0.000 0.000 0.444 0.444 image.py:343(image_to_bytes)\r\n 1 0.000 0.000 0.413 0.413 Image.py:2376(save)\r\n 1 0.000 0.000 0.413 0.413 PngImagePlugin.py:1233(_save)\r\n 1 0.000 0.000 0.413 0.413 ImageFile.py:517(_save)\r\n 1 0.000 0.000 0.413 0.413 ImageFile.py:545(_encode_tile)\r\n 397 0.409 0.001 0.409 0.001 {method 'encode' of 'ImagingEncoder' objects}\r\n```", "Also encounter this problem. 
Has been strugging with it for a long time...", "This actually applies to all arrays (numpy or tensors like in torch), not only from external files.\r\n```python\r\nimport numpy as np\r\nimport datasets\r\n\r\nds = datasets.Dataset.from_dict(\r\n {\"image\": [np.random.randint(0, 255, (2048, 2048, 3), dtype=np.uint8)]},\r\n features=datasets.Features({\"image\": datasets.Image(decode=True)}),\r\n)\r\nds.set_format(\"numpy\")\r\n\r\nds = ds.map(load_from_cache_file=False)\r\n```" ]
2024-04-05T13:46:54Z
2024-04-10T14:36:13Z
null
CONTRIBUTOR
null
null
Update: see comments below ### Describe the bug Operations that save an image from a path are very slow. I believe the reason for this is that the image data (`numpy`) is converted into `pyarrow` format but then back to python using `.pylist()` before being converted to a numpy array again. `pylist` is already slow but used on a multi-dimensional numpy array such as an image it takes a very long time. From the trace below we can see that `__arrow_array__` takes a long time. It is currently also called in `get_inferred_type`, this should be removable #6781 but doesn't change the underyling issue. The conversion to `pyarrow` and back also leads to the `numpy` array having type `int64` which causes a warning message because the image type excepts `uint8`. However, originally the `numpy` image array was in `uint8`. ### Steps to reproduce the bug ```python from PIL import Image import numpy as np import datasets import cProfile image = Image.fromarray(np.random.randint(0, 255, (2048, 2048, 3), dtype=np.uint8)) image.save("test_image.jpg") ds = datasets.Dataset.from_dict( {"image": ["test_image.jpg"]}, features=datasets.Features({"image": datasets.Image(decode=True)}), ) # load as numpy array, e.g. for further processing with map # same result as map returning numpy arrays ds.set_format("numpy") cProfile.run("ds.map(writer_batch_size=1, load_from_cache_file=False)", "restats") ``` ```bash Fri Apr 5 14:56:17 2024 restats 66817 function calls (64992 primitive calls) in 33.382 seconds Ordered by: cumulative time List reduced from 1073 to 20 due to restriction <20> ncalls tottime percall cumtime percall filename:lineno(function) 46/1 0.000 0.000 33.382 33.382 {built-in method builtins.exec} 1 0.000 0.000 33.382 33.382 <string>:1(<module>) 1 0.000 0.000 33.382 33.382 arrow_dataset.py:594(wrapper) 1 0.000 0.000 33.382 33.382 arrow_dataset.py:551(wrapper) 1 0.000 0.000 33.379 33.379 arrow_dataset.py:2916(map) 4 0.000 0.000 33.327 8.332 arrow_dataset.py:3277(_map_single) 1 0.000 0.000 33.311 33.311 arrow_writer.py:465(write) 2 0.000 0.000 33.311 16.656 arrow_writer.py:423(write_examples_on_file) 1 0.000 0.000 33.311 33.311 arrow_writer.py:527(write_batch) 2 14.484 7.242 33.260 16.630 arrow_writer.py:161(__arrow_array__) 1 0.001 0.001 16.438 16.438 arrow_writer.py:121(get_inferred_type) 1 0.000 0.000 14.398 14.398 threading.py:637(wait) 1 0.000 0.000 14.398 14.398 threading.py:323(wait) 8 14.398 1.800 14.398 1.800 {method 'acquire' of '_thread.lock' objects} 4/2 0.000 0.000 4.337 2.169 table.py:1800(wrapper) 2 0.000 0.000 4.337 2.169 table.py:1950(cast_array_to_feature) 2 0.475 0.238 4.337 2.169 image.py:209(cast_storage) 9 2.583 0.287 2.583 0.287 {built-in method numpy.array} 2 0.000 0.000 1.284 0.642 image.py:319(encode_np_array) 2 0.000 0.000 1.246 0.623 image.py:301(image_to_bytes) ``` ### Expected behavior The `numpy` image data should be passed through as it will be directly consumed by `pillow` to convert it to bytes. As an example one can replace `list_of_np_array_to_pyarrow_listarray(data)` in `__arrow_array__` with just `out = data` as a test. 
We have to change `cast_storage` of the `Image` feature so it handles the passed through data (& if to handle type before) ```python bytes_array = pa.array( [encode_np_array(arr)["bytes"] if arr is not None else None for arr in storage], type=pa.binary(), ) ``` Leading to the following: ```bash Fri Apr 5 15:44:27 2024 restats 66419 function calls (64595 primitive calls) in 0.937 seconds Ordered by: cumulative time List reduced from 1023 to 20 due to restriction <20> ncalls tottime percall cumtime percall filename:lineno(function) 47/1 0.000 0.000 0.935 0.935 {built-in method builtins.exec} 2/1 0.000 0.000 0.935 0.935 <string>:1(<module>) 2/1 0.000 0.000 0.934 0.934 arrow_dataset.py:594(wrapper) 2/1 0.000 0.000 0.934 0.934 arrow_dataset.py:551(wrapper) 2/1 0.000 0.000 0.934 0.934 arrow_dataset.py:2916(map) 4 0.000 0.000 0.933 0.233 arrow_dataset.py:3277(_map_single) 1 0.000 0.000 0.883 0.883 arrow_writer.py:466(write) 2 0.000 0.000 0.883 0.441 arrow_writer.py:424(write_examples_on_file) 1 0.000 0.000 0.882 0.882 arrow_writer.py:528(write_batch) 2 0.000 0.000 0.877 0.439 arrow_writer.py:161(__arrow_array__) 4/2 0.000 0.000 0.877 0.439 table.py:1800(wrapper) 2 0.000 0.000 0.877 0.439 table.py:1950(cast_array_to_feature) 2 0.009 0.005 0.877 0.439 image.py:209(cast_storage) 2 0.000 0.000 0.868 0.434 image.py:335(encode_np_array) 2 0.000 0.000 0.856 0.428 image.py:317(image_to_bytes) 2 0.000 0.000 0.822 0.411 Image.py:2376(save) 2 0.000 0.000 0.822 0.411 PngImagePlugin.py:1233(_save) 2 0.000 0.000 0.822 0.411 ImageFile.py:517(_save) 2 0.000 0.000 0.821 0.411 ImageFile.py:545(_encode_tile) 589 0.803 0.001 0.803 0.001 {method 'encode' of 'ImagingEncoder' objects} ``` This is of course only a test as it passes through all `numpy` arrays irrespective of if they should be an image. Also I guess `cast_storage` is meant for casting `pyarrow` storage exclusively. Converting to `pyarrow` array seems like a good solution as it also handles `pytorch` tensors etc., maybe there is a more efficient way to create a PIL image from a `pyarrow` array? Not sure how this should be handled but I would be happy to help if there is a good solution. ### Environment info - `datasets` version: 2.18.1.dev0 - Platform: Linux-6.7.11-200.fc39.x86_64-x86_64-with-glibc2.38 - Python version: 3.12.2 - `huggingface_hub` version: 0.22.2 - PyArrow version: 15.0.2 - Pandas version: 2.2.1 - `fsspec` version: 2024.3.1
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6782/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6781
6,781
Remove get_inferred_type from ArrowWriter write_batch
{ "avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4", "events_url": "https://api.github.com/users/Modexus/events{/privacy}", "followers_url": "https://api.github.com/users/Modexus/followers", "following_url": "https://api.github.com/users/Modexus/following{/other_user}", "gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Modexus", "id": 37351874, "login": "Modexus", "node_id": "MDQ6VXNlcjM3MzUxODc0", "organizations_url": "https://api.github.com/users/Modexus/orgs", "received_events_url": "https://api.github.com/users/Modexus/received_events", "repos_url": "https://api.github.com/users/Modexus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Modexus/subscriptions", "type": "User", "url": "https://api.github.com/users/Modexus", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6781). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Close in favor of #6786." ]
2024-04-05T13:21:05Z
2024-04-09T07:49:11Z
2024-04-09T07:49:11Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6781.diff", "html_url": "https://github.com/huggingface/datasets/pull/6781", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6781.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6781" }
Inferring the type seems to be unnecessary given that the pyarrow array has already been created. Because pyarrow array creation is sometimes extremely slow, this doubles the time `write_batch` takes.
{ "avatar_url": "https://avatars.githubusercontent.com/u/37351874?v=4", "events_url": "https://api.github.com/users/Modexus/events{/privacy}", "followers_url": "https://api.github.com/users/Modexus/followers", "following_url": "https://api.github.com/users/Modexus/following{/other_user}", "gists_url": "https://api.github.com/users/Modexus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Modexus", "id": 37351874, "login": "Modexus", "node_id": "MDQ6VXNlcjM3MzUxODc0", "organizations_url": "https://api.github.com/users/Modexus/orgs", "received_events_url": "https://api.github.com/users/Modexus/received_events", "repos_url": "https://api.github.com/users/Modexus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Modexus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Modexus/subscriptions", "type": "User", "url": "https://api.github.com/users/Modexus", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6781/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6780
6,780
Fix CI
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6780). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005074 / 0.011353 (-0.006279) | 0.003395 / 0.011008 (-0.007614) | 0.062358 / 0.038508 (0.023849) | 0.031041 / 0.023109 (0.007932) | 0.244039 / 0.275898 (-0.031859) | 0.266361 / 0.323480 (-0.057119) | 0.003201 / 0.007986 (-0.004785) | 0.002609 / 0.004328 (-0.001719) | 0.049269 / 0.004250 (0.045018) | 0.045713 / 0.037052 (0.008661) | 0.264075 / 0.258489 (0.005586) | 0.295428 / 0.293841 (0.001587) | 0.027882 / 0.128546 (-0.100664) | 0.010424 / 0.075646 (-0.065222) | 0.208417 / 0.419271 (-0.210854) | 0.035728 / 0.043533 (-0.007805) | 0.246803 / 0.255139 (-0.008336) | 0.267169 / 0.283200 (-0.016031) | 0.019797 / 0.141683 (-0.121885) | 1.163299 / 1.452155 (-0.288856) | 1.196118 / 1.492716 (-0.296599) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.106091 / 0.018006 (0.088085) | 0.303970 / 0.000490 (0.303480) | 0.000219 / 0.000200 (0.000019) | 0.000042 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017955 / 0.037411 (-0.019456) | 0.060539 / 0.014526 (0.046013) | 0.072884 / 0.176557 (-0.103673) | 0.119205 / 0.737135 (-0.617931) | 0.074072 / 0.296338 (-0.222266) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.272676 / 0.215209 (0.057467) | 2.715169 / 2.077655 (0.637514) | 1.419090 / 1.504120 (-0.085030) | 1.303903 / 1.541195 (-0.237292) | 1.311903 / 1.468490 (-0.156587) | 0.562005 / 4.584777 (-4.022772) | 2.432817 / 3.745712 (-1.312896) | 2.770599 / 5.269862 (-2.499263) | 1.723043 / 4.565676 (-2.842633) | 0.064341 / 0.424275 (-0.359934) | 0.004923 / 0.007607 (-0.002684) | 0.330507 / 0.226044 (0.104463) | 3.240829 / 2.268929 (0.971901) | 1.787638 / 55.444624 (-53.656986) | 1.522971 / 6.876477 (-5.353506) | 1.529496 / 2.142072 (-0.612576) | 0.645768 / 4.805227 (-4.159459) | 0.116405 / 6.500664 (-6.384259) | 0.041524 / 0.075469 (-0.033945) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.968515 / 1.841788 (-0.873272) | 11.628911 / 8.074308 (3.554603) | 9.495023 / 10.191392 (-0.696369) | 0.142219 / 0.680424 (-0.538204) | 0.013859 / 0.534201 (-0.520342) | 0.285727 / 0.579283 (-0.293556) | 0.276842 / 0.434364 (-0.157522) | 0.321247 / 0.540337 (-0.219090) | 0.409958 / 1.386936 (-0.976978) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005102 / 0.011353 (-0.006251) | 0.003213 / 0.011008 (-0.007796) | 0.049250 / 0.038508 (0.010742) | 0.030649 / 0.023109 (0.007540) | 0.276629 / 0.275898 (0.000731) | 0.297315 / 0.323480 (-0.026165) | 0.004198 / 0.007986 (-0.003787) | 0.002744 / 0.004328 (-0.001585) | 0.047899 / 0.004250 (0.043649) | 0.040596 / 0.037052 (0.003544) | 0.287248 / 0.258489 (0.028759) | 0.313573 / 0.293841 (0.019732) | 0.029067 / 0.128546 (-0.099480) | 0.010122 / 0.075646 (-0.065524) | 0.058869 / 0.419271 (-0.360402) | 0.033012 / 0.043533 (-0.010521) | 0.272995 / 0.255139 (0.017856) | 0.297102 / 0.283200 (0.013903) | 0.018209 / 0.141683 (-0.123474) | 1.157785 / 1.452155 (-0.294369) | 1.184999 / 1.492716 (-0.307717) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.094228 / 0.018006 (0.076221) | 0.302055 / 0.000490 (0.301565) | 0.000221 / 0.000200 (0.000021) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022020 / 0.037411 (-0.015391) | 0.074970 / 0.014526 (0.060444) | 0.087682 / 0.176557 (-0.088875) | 0.126506 / 0.737135 (-0.610629) | 0.092046 / 0.296338 (-0.204293) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295634 / 0.215209 (0.080425) | 2.891554 / 2.077655 (0.813899) | 1.579963 / 1.504120 (0.075843) | 1.462924 / 1.541195 (-0.078271) | 1.463806 / 1.468490 (-0.004684) | 0.558371 / 4.584777 (-4.026406) | 2.513500 / 3.745712 (-1.232212) | 2.754146 / 5.269862 (-2.515716) | 1.762317 / 4.565676 (-2.803360) | 0.063965 / 0.424275 (-0.360310) | 0.005538 / 0.007607 (-0.002069) | 0.348114 / 0.226044 (0.122070) | 3.484558 / 2.268929 (1.215630) | 1.940002 / 55.444624 (-53.504623) | 1.658469 / 6.876477 (-5.218008) | 1.645777 / 2.142072 (-0.496295) | 0.639367 / 4.805227 (-4.165861) | 0.115605 / 6.500664 (-6.385059) | 0.040647 / 0.075469 (-0.034822) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.036002 / 1.841788 (-0.805786) | 12.286895 / 8.074308 (4.212587) | 10.146719 / 10.191392 (-0.044673) | 0.140867 / 0.680424 (-0.539557) | 0.015517 / 0.534201 (-0.518684) | 0.290126 / 0.579283 (-0.289157) | 0.298702 / 0.434364 (-0.135662) | 0.325518 / 0.540337 (-0.214819) | 0.412597 / 1.386936 (-0.974339) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#c3ddb1ef00334a6f973679a51e783905fbc9ef0b \"CML watermark\")\n" ]
2024-04-04T17:45:04Z
2024-04-04T18:46:04Z
2024-04-04T18:23:34Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6780.diff", "html_url": "https://github.com/huggingface/datasets/pull/6780", "merged_at": "2024-04-04T18:23:34Z", "patch_url": "https://github.com/huggingface/datasets/pull/6780.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6780" }
Updates the `wmt_t2t` test to pin the `revision` to the version with a loading script (cc @albertvillanova). Additionally, it replaces the occurrences of the `lhoestq/test` repo id with `hf-internal-testing/dataset_with_script` and re-enables logging checks in the `Dataset.from_sql` tests.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6780/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6779
6,779
Install dependencies with `uv` in CI
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6779). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005336 / 0.011353 (-0.006017) | 0.004052 / 0.011008 (-0.006956) | 0.063475 / 0.038508 (0.024967) | 0.032963 / 0.023109 (0.009854) | 0.243906 / 0.275898 (-0.031992) | 0.269048 / 0.323480 (-0.054432) | 0.003363 / 0.007986 (-0.004622) | 0.002802 / 0.004328 (-0.001527) | 0.049487 / 0.004250 (0.045236) | 0.046990 / 0.037052 (0.009938) | 0.260169 / 0.258489 (0.001680) | 0.289145 / 0.293841 (-0.004696) | 0.028030 / 0.128546 (-0.100517) | 0.010706 / 0.075646 (-0.064940) | 0.213640 / 0.419271 (-0.205632) | 0.035866 / 0.043533 (-0.007667) | 0.245106 / 0.255139 (-0.010033) | 0.269588 / 0.283200 (-0.013612) | 0.019791 / 0.141683 (-0.121892) | 1.117684 / 1.452155 (-0.334470) | 1.183389 / 1.492716 (-0.309327) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095736 / 0.018006 (0.077730) | 0.302586 / 0.000490 (0.302097) | 0.000220 / 0.000200 (0.000020) | 0.000051 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018985 / 0.037411 (-0.018426) | 0.062097 / 0.014526 (0.047571) | 0.075617 / 0.176557 (-0.100939) | 0.120570 / 0.737135 (-0.616566) | 0.075949 / 0.296338 (-0.220390) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.279597 / 0.215209 (0.064388) | 2.754319 / 2.077655 (0.676665) | 1.444147 / 1.504120 (-0.059973) | 1.328414 / 1.541195 (-0.212781) | 1.371073 / 1.468490 (-0.097417) | 0.553851 / 4.584777 (-4.030926) | 2.351694 / 3.745712 (-1.394018) | 2.860771 / 5.269862 (-2.409091) | 1.749664 / 4.565676 (-2.816013) | 0.061736 / 0.424275 (-0.362539) | 0.005073 / 0.007607 (-0.002534) | 0.329974 / 0.226044 (0.103930) | 3.300487 / 2.268929 (1.031558) | 1.812809 / 55.444624 (-53.631815) | 1.559018 / 6.876477 (-5.317458) | 1.628664 / 2.142072 (-0.513408) | 0.635757 / 4.805227 (-4.169471) | 0.116468 / 6.500664 (-6.384196) | 0.042641 / 0.075469 (-0.032828) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.972048 / 1.841788 (-0.869740) | 11.952721 / 8.074308 (3.878412) | 9.754274 / 10.191392 (-0.437118) | 0.132026 / 0.680424 (-0.548398) | 0.015352 / 0.534201 (-0.518849) | 0.290574 / 0.579283 (-0.288709) | 0.275384 / 0.434364 (-0.158980) | 0.330688 / 0.540337 (-0.209650) | 0.414868 / 1.386936 (-0.972068) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005412 / 0.011353 (-0.005941) | 0.003814 / 0.011008 (-0.007194) | 0.049988 / 0.038508 (0.011480) | 0.031617 / 0.023109 (0.008507) | 0.278975 / 0.275898 (0.003077) | 0.303540 / 0.323480 (-0.019940) | 0.004265 / 0.007986 (-0.003721) | 0.002804 / 0.004328 (-0.001525) | 0.049518 / 0.004250 (0.045268) | 0.041176 / 0.037052 (0.004123) | 0.291248 / 0.258489 (0.032759) | 0.317401 / 0.293841 (0.023560) | 0.029501 / 0.128546 (-0.099045) | 0.010392 / 0.075646 (-0.065255) | 0.057906 / 0.419271 (-0.361365) | 0.033056 / 0.043533 (-0.010477) | 0.280202 / 0.255139 (0.025063) | 0.298684 / 0.283200 (0.015484) | 0.018071 / 0.141683 (-0.123612) | 1.167691 / 1.452155 (-0.284464) | 1.211322 / 1.492716 (-0.281394) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.092325 / 0.018006 (0.074318) | 0.301209 / 0.000490 (0.300719) | 0.000221 / 0.000200 (0.000021) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021432 / 0.037411 (-0.015980) | 0.074556 / 0.014526 (0.060031) | 0.086049 / 0.176557 (-0.090508) | 0.125151 / 0.737135 (-0.611984) | 0.088279 / 0.296338 (-0.208059) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.296755 / 0.215209 (0.081546) | 2.922650 / 2.077655 (0.844995) | 1.606031 / 1.504120 (0.101911) | 1.489692 / 1.541195 (-0.051502) | 1.530206 / 1.468490 (0.061716) | 0.577827 / 4.584777 (-4.006950) | 2.459716 / 3.745712 (-1.285997) | 2.825192 / 5.269862 (-2.444669) | 1.788110 / 4.565676 (-2.777566) | 0.064011 / 0.424275 (-0.360264) | 0.005616 / 0.007607 (-0.001991) | 0.341612 / 0.226044 (0.115568) | 3.455123 / 2.268929 (1.186194) | 1.961635 / 55.444624 (-53.482990) | 1.688107 / 6.876477 (-5.188370) | 1.725490 / 2.142072 (-0.416583) | 0.656011 / 4.805227 (-4.149216) | 0.117633 / 6.500664 (-6.383031) | 0.041386 / 0.075469 (-0.034083) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.025786 / 1.841788 (-0.816002) | 12.294598 / 8.074308 (4.220290) | 10.241136 / 10.191392 (0.049744) | 0.130577 / 0.680424 (-0.549847) | 0.016094 / 0.534201 (-0.518107) | 0.291193 / 0.579283 (-0.288090) | 0.273016 / 0.434364 (-0.161348) | 0.327553 / 0.540337 (-0.212784) | 0.418556 / 1.386936 (-0.968380) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#3575036af2fd5cccff7fa60de30e2e444cf8a54e \"CML watermark\")\n" ]
2024-04-04T17:02:51Z
2024-04-08T13:34:01Z
2024-04-08T13:27:44Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6779.diff", "html_url": "https://github.com/huggingface/datasets/pull/6779", "merged_at": "2024-04-08T13:27:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/6779.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6779" }
`diffusers` (https://github.com/huggingface/diffusers/pull/7116) and `huggingface_hub` (https://github.com/huggingface/huggingface_hub/pull/2072) also use `uv` to install their dependencies, so we can do the same here. It seems to make the "Install dependencies" step in the `ubuntu` jobs 5-8x faster and 1.5-2x faster in the `windows` one. Besides introducing `uv` in CI, this PR bumps the `tensorflow` minimum version requirement to align with Transformers and simplifies the SpaCy hashing tests (using blank language models instead of the pre-trained ones).
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6779/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6778
6,778
Dataset.to_csv() missing commas in columns with lists
{ "avatar_url": "https://avatars.githubusercontent.com/u/100041276?v=4", "events_url": "https://api.github.com/users/mpickard-dataprof/events{/privacy}", "followers_url": "https://api.github.com/users/mpickard-dataprof/followers", "following_url": "https://api.github.com/users/mpickard-dataprof/following{/other_user}", "gists_url": "https://api.github.com/users/mpickard-dataprof/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mpickard-dataprof", "id": 100041276, "login": "mpickard-dataprof", "node_id": "U_kgDOBfaCPA", "organizations_url": "https://api.github.com/users/mpickard-dataprof/orgs", "received_events_url": "https://api.github.com/users/mpickard-dataprof/received_events", "repos_url": "https://api.github.com/users/mpickard-dataprof/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mpickard-dataprof/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mpickard-dataprof/subscriptions", "type": "User", "url": "https://api.github.com/users/mpickard-dataprof", "user_view_type": "public" }
[]
open
false
[ "Hello!\r\n\r\nThis is due to how pandas write numpy arrays to csv. [Source](https://stackoverflow.com/questions/54753179/to-csv-saves-np-array-as-string-instead-of-as-a-list)\r\nTo fix this, you can convert them to list yourselves.\r\n\r\n```python\r\ndf = ds.to_pandas()\r\ndf['int'] = df['int'].apply(lambda arr: list(arr))\r\ndf.to_csv(index=False, '../output/temp.csv')\r\n```\r\n\r\nI think it would be good if `datasets` would do the conversion itself, but it's a breaking change and I would wait for the greenlight from someone from HF." ]
2024-04-04T16:46:13Z
2024-04-08T15:24:41Z
null
NONE
null
null
### Describe the bug The `to_csv()` method does not output commas in lists. So when the Dataset is loaded back in, the data structure of the column with a list is not correct. Here's an example below. Obviously, it's not as trivial as inserting commas in the list, since it's a comma-separated file. But hopefully there's a way to export the list in a way that it'll be imported by `load_dataset()` correctly. ### Steps to reproduce the bug Here's some code to reproduce the bug: ```python from datasets import Dataset ds = Dataset.from_dict( { "pokemon": ["bulbasaur", "squirtle"], "type": ["grass", "water"] } ) def ascii_to_hex(text): return [ord(c) for c in text] ds = ds.map(lambda x: {"int": ascii_to_hex(x['pokemon'])}) ds.to_csv('../output/temp.csv') ``` temp.csv then contains the ACTUAL OUTPUT shown below. ### Expected behavior ACTUAL OUTPUT: ``` pokemon,type,int bulbasaur,grass,[ 98 117 108 98 97 115 97 117 114] squirtle,water,[115 113 117 105 114 116 108 101] ``` EXPECTED OUTPUT: ``` pokemon,type,int bulbasaur,grass,[98, 117, 108, 98, 97, 115, 97, 117, 114] squirtle,water,[115, 113, 117, 105, 114, 116, 108, 101] ``` or probably something more like this since it's a CSV file: ``` pokemon,type,int bulbasaur,grass,"[98, 117, 108, 98, 97, 115, 97, 117, 114]" squirtle,water,"[115, 113, 117, 105, 114, 116, 108, 101]" ``` ### Environment info ### Package Version Name: datasets Version: 2.16.1 ### Python version: 3.10.12 ### OS Info PRETTY_NAME="Ubuntu 22.04.4 LTS" NAME="Ubuntu" VERSION_ID="22.04" VERSION="22.04.4 LTS (Jammy Jellyfish)" VERSION_CODENAME=jammy ID=ubuntu ID_LIKE=debian ... UBUNTU_CODENAME=jammy
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6778/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6777
6,777
.Jsonl metadata not detected
{ "avatar_url": "https://avatars.githubusercontent.com/u/81643693?v=4", "events_url": "https://api.github.com/users/nighting0le01/events{/privacy}", "followers_url": "https://api.github.com/users/nighting0le01/followers", "following_url": "https://api.github.com/users/nighting0le01/following{/other_user}", "gists_url": "https://api.github.com/users/nighting0le01/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nighting0le01", "id": 81643693, "login": "nighting0le01", "node_id": "MDQ6VXNlcjgxNjQzNjkz", "organizations_url": "https://api.github.com/users/nighting0le01/orgs", "received_events_url": "https://api.github.com/users/nighting0le01/received_events", "repos_url": "https://api.github.com/users/nighting0le01/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nighting0le01/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nighting0le01/subscriptions", "type": "User", "url": "https://api.github.com/users/nighting0le01", "user_view_type": "public" }
[]
open
false
[ "Hi! `metadata.jsonl` (or `metadata.csv`) is the only allowed name for the `imagefolder`'s metadata files.", "@mariosasko hey i tried with metadata.jsonl also and it still doesn't get the right columns", "@mariosasko it says metadata.csv not found\r\n<img width=\"1150\" alt=\"image\" src=\"https://github.com/huggingface/datasets/assets/81643693/3754980c-6185-4413-88fa-b499bcdd4195\">\r\n\r\ndataset = load_dataset('/dataset',metadata.csv) \r\n\r\n| workspace\r\n|| source code\r\n| dataset\r\n| |-- images\r\n| |-- metadata.csv\r\n| |-- metadata.jsonl\r\n| |-- padded_images\r\n\r\nExample of metadata.jsonl file\r\n{\"caption\": \"a drawing depicts a full shot of a black t-shirt with a triangular pattern on the front there is a white label on the left side of the triangle\", \"image\": \"images/212734.png\", \"gaussian_padded_image\": \"padded_images/p_212734.png\"}\r\n{\"caption\": \"an eye-level full shot of a large elephant and a baby elephant standing in a watering hole on the left side is a small elephant with its head turned to the right of dry land, trees, and bushes\", \"image\": \"images/212735.png\", \"gaussian_padded_image\": \"padded_images/p_212735.png\"}\r\n", "Loading more than one image per row with `imagefolder` is not supported currently. You can subscribe to https://github.com/huggingface/datasets/issues/5760 to see when it will be.\r\n\r\nInstead, you can load the dataset with `Dataset.from_generator`:\r\n```python\r\nimport json\r\nfrom datasets import Dataset, Value, Image, Features\r\n\r\ndef gen():\r\n with open(\"./dataset/metadata.jsonl\") as f:\r\n for line in f:\r\n line = json.loads(line)\r\n yield {\"caption\": line[\"caption\"], \"image\": os.path.join(\"./dataset\", line[\"image\"], \"gaussian_padded_image\": os.path.join(\"./dataset\", line[\"gaussian_padded_image\"]))}\r\n\r\nfeatures = Features({\"caption\": Value(\"string\"), \"image\": Image(), \"gaussian_padded_image\": Image()})\r\ndataset = Dataset.from_generator(gen, features=features)\r\n```\r\n(E.g., if you want to share this dataset on the Hub, you can call `dataset.push_to_hub(...)` afterward)", "hi Thanks for sharing this, Actually I was trying with a webdataset format of the data as well and it did'nt work. Could you share how i can create Dataset object from webdataset format of this data?" ]
2024-04-04T06:31:53Z
2024-04-05T21:14:48Z
null
NONE
null
null
### Describe the bug Hi, I have the following directory structure: |--dataset | |-- images | |-- metadata1000.csv | |-- metadata1000.jsonl | |-- padded_images Example of metadata1000.jsonl file {"caption": "a drawing depicts a full shot of a black t-shirt with a triangular pattern on the front there is a white label on the left side of the triangle", "image": "images/212734.png", "gaussian_padded_image": "padded_images/p_212734.png"} {"caption": "an eye-level full shot of a large elephant and a baby elephant standing in a watering hole on the left side is a small elephant with its head turned to the right of dry land, trees, and bushes", "image": "images/212735.png", "gaussian_padded_image": "padded_images/p_212735.png"} . . . I'm trying to use dataset = load_dataset("imagefolder", data_dir='/dataset/', split='train') to load the dataset, however it is not able to load according to the fields in metadata1000.jsonl. Please assist with loading the data properly. I am also getting ``` File "/workspace/train_trans_vae.py", line 1089, in <module> print(get_metadata_patterns('/dataset/')) File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 499, in get_metadata_patterns raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None FileNotFoundError: The directory at /dataset/ doesn't contain any metadata file ``` when trying ``` from datasets.data_files import get_metadata_patterns print(get_metadata_patterns('/dataset/')) ``` ### Steps to reproduce the bug dataset Version: 2.18.0. Make a similar jsonl and similar directory format. ### Expected behavior Creates a dataset object with the column names caption, image, gaussian_padded_image ### Environment info dataset Version: 2.18.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6777/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6775
6,775
IndexError: Invalid key: 0 is out of bounds for size 0
{ "avatar_url": "https://avatars.githubusercontent.com/u/38481564?v=4", "events_url": "https://api.github.com/users/kk2491/events{/privacy}", "followers_url": "https://api.github.com/users/kk2491/followers", "following_url": "https://api.github.com/users/kk2491/following{/other_user}", "gists_url": "https://api.github.com/users/kk2491/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kk2491", "id": 38481564, "login": "kk2491", "node_id": "MDQ6VXNlcjM4NDgxNTY0", "organizations_url": "https://api.github.com/users/kk2491/orgs", "received_events_url": "https://api.github.com/users/kk2491/received_events", "repos_url": "https://api.github.com/users/kk2491/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kk2491/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kk2491/subscriptions", "type": "User", "url": "https://api.github.com/users/kk2491", "user_view_type": "public" }
[]
open
false
[ "Same problem.", "Hi! You should be able to fix this by passing `remove_unused_columns=False` to the `transformers` `TrainingArguments` as explained in https://github.com/huggingface/peft/issues/1299.\r\n\r\n(I'm not familiar with Vertex AI, but I'd assume `remove_unused_columns` can be passed as a flag to the docker container) ", "I had the same problem, but I spent a whole day trying different combination with my own dataset with the example data set and found the reason: the example data is multi-turn conversation between human and assistant, so # Humman or # Assistant appear at least twice. If your own custom data only has single turn conversation, it might end up with the same error. What you can do is repeat your single turn conversation twice in your training data (keep the key 'text' the same) and maybe it works. I guess the reason is the specific way processing the data requires and counts multi-turn only (single turn will be discarded so it ends up with no training data), but since I am using Google Vertex AI, I don't have direct access to the underlying code so that was just my guess. ", "> Hi! You should be able to fix this by passing `remove_unused_columns=False` to the `transformers` `TrainingArguments` as explained in [huggingface/peft#1299](https://github.com/huggingface/peft/issues/1299).\r\n> \r\n> (I'm not familiar with Vertex AI, but I'd assume `remove_unused_columns` can be passed as a flag to the docker container)\r\n\r\n@mariosasko Thanks for the response and suggestion. \r\nWhen I set `remove_unused_columns` as `False` , I end up getting different error (will post the error soon). \r\nEither the Vertex-AI does not support `remove_unused_columns` or my dataset is completely wrong. \r\n\r\nThank you, \r\nKK", "> I had the same problem, but I spent a whole day trying different combination with my own dataset with the example data set and found the reason: the example data is multi-turn conversation between human and assistant, so # Humman or # Assistant appear at least twice. If your own custom data only has single turn conversation, it might end up with the same error. What you can do is repeat your single turn conversation twice in your training data (keep the key 'text' the same) and maybe it works. I guess the reason is the specific way processing the data requires and counts multi-turn only (single turn will be discarded so it ends up with no training data), but since I am using Google Vertex AI, I don't have direct access to the underlying code so that was just my guess.\r\n\r\n@cyberyu Thanks for your suggestions. \r\nI have tried the approach you suggested, copied the same conversation in each jsonl element so every jsonl item has 2 `HUMAN` and `ASSISTANT`. \r\nHowever in my case, the issue persists. I am gonna give few more tries, and post the results here. \r\nYou can find my dataset [here](https://huggingface.co/datasets/kk2491/test/tree/main) \r\n\r\nThank you, \r\nKK ", "> > I had the same problem, but I spent a whole day trying different combination with my own dataset with the example data set and found the reason: the example data is multi-turn conversation between human and assistant, so # Humman or # Assistant appear at least twice. If your own custom data only has single turn conversation, it might end up with the same error. What you can do is repeat your single turn conversation twice in your training data (keep the key 'text' the same) and maybe it works. 
I guess the reason is the specific way processing the data requires and counts multi-turn only (single turn will be discarded so it ends up with no training data), but since I am using Google Vertex AI, I don't have direct access to the underlying code so that was just my guess.\r\n> \r\n> @cyberyu Thanks for your suggestions. I have tried the approach you suggested, copied the same conversation in each jsonl element so every jsonl item has 2 `HUMAN` and `ASSISTANT`. However in my case, the issue persists. I am gonna give few more tries, and post the results here. You can find my dataset [here](https://huggingface.co/datasets/kk2491/test/tree/main)\r\n> \r\n> Thank you, KK\r\n\r\nI think another reason is your training sample length is too short. I saw a relevant report (https://discuss.huggingface.co/t/indexerror-invalid-key-16-is-out-of-bounds-for-size-0/14298/16) stating that the processing code might have a bug discarding sequence length short than max_seq_length, which is 512. Not sure the Vertex AI backend code has fixed that bug or not. So I tried to add some garbage content in your data, and extended the length longer than 512 for a single turn, and repeated twice. You can copy the following line as 5 repeated lines as your training data jsonl file of five samples (no eval or test needed, for speed up, set evaluation step to 5 and training step to 10,), and it will pass.\r\n\r\n{\"text\":\"### Human: You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You will handle customers queries and provide effective help message. Please provide response to 'Can Interplai software optimize routes for minimizing package handling and transfer times in distribution centers'? ### Assistant: Yes, Interplai software can optimize routes for distribution centers by streamlining package handling processes, minimizing transfer times between loading docks and storage areas, and optimizing warehouse layouts for efficient order fulfillment. ### Human: You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. You are a helpful AI Assistant familiar with customer service. 
You are a helpful AI Assistant familiar with customer service. You will handle customers queries and provide effective help message. Please provide response to 'Can Interplai software optimize routes for minimizing package handling and transfer times in distribution centers'? ### Assistant: Yes, Interplai software can optimize routes for distribution centers by streamlining package handling processes, minimizing transfer times between loading docks and storage areas, and optimizing warehouse layouts for efficient order fulfillment.\"}\r\n", "@cyberyu **Thank you so much, You saved my day (+ so many days)**. \r\nI tried the example you provided above, and the training is successfully completed in Vertex-AI (through GUI). \r\nI never thought there would be constraints on the length of the samples and also on the number of turns. \r\nI will update my complete dataset and see update here once the training is completed. \r\n\r\nThank you, \r\nKK " ]
2024-04-03T17:06:30Z
2024-04-08T01:24:35Z
null
NONE
null
null
### Describe the bug I am trying to fine-tune llama2-7b model in GCP. The notebook I am using for this can be found [here](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/model_garden/model_garden_pytorch_llama2_peft_finetuning.ipynb). When I use the dataset given in the example, the training gets successfully completed (example dataset can be found [here](https://huggingface.co/datasets/timdettmers/openassistant-guanaco)). However when I use my own dataset which is in the same format as the example dataset, I get the below error (my dataset can be found [here](https://huggingface.co/datasets/kk2491/finetune_dataset_002)). ![image](https://github.com/huggingface/datasets/assets/38481564/47fa2de3-95e0-478b-a35f-58cbaf90427a) I see the files are being read correctly from the logs: ![image](https://github.com/huggingface/datasets/assets/38481564/b0b6316c-2cc7-476c-9674-ca2222c8f4e3) ### Steps to reproduce the bug 1. Clone the [vertex-ai-samples](https://github.com/GoogleCloudPlatform/vertex-ai-samples) repository. 2. Run the [llama2-7b peft fine-tuning](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/model_garden/model_garden_pytorch_llama2_peft_finetuning.ipynb). 3. Change the dataset `kk2491/finetune_dataset_002` ### Expected behavior The training should complete successfully, and model gets deployed to an endpoint. ### Environment info Python version : Python 3.10.12 Dataset : https://huggingface.co/datasets/kk2491/finetune_dataset_002
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6775/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6774
6,774
Generating split is very slow when Image format is PNG
{ "avatar_url": "https://avatars.githubusercontent.com/u/22740819?v=4", "events_url": "https://api.github.com/users/Tramac/events{/privacy}", "followers_url": "https://api.github.com/users/Tramac/followers", "following_url": "https://api.github.com/users/Tramac/following{/other_user}", "gists_url": "https://api.github.com/users/Tramac/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Tramac", "id": 22740819, "login": "Tramac", "node_id": "MDQ6VXNlcjIyNzQwODE5", "organizations_url": "https://api.github.com/users/Tramac/orgs", "received_events_url": "https://api.github.com/users/Tramac/received_events", "repos_url": "https://api.github.com/users/Tramac/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Tramac/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Tramac/subscriptions", "type": "User", "url": "https://api.github.com/users/Tramac", "user_view_type": "public" }
[]
open
false
[ "I think this is due to the speed of reading a `png` image using pillow compared to a `jpg` image.\r\nNotably the same is true with `tiff`, it is even faster than `jpg` in my case." ]
2024-04-03T07:47:31Z
2024-04-10T17:28:17Z
null
NONE
null
null
### Describe the bug When I create a dataset, it gets stuck while generating cached data. The image format is PNG, and it will not get stuck when the image format is jpeg. ![image](https://github.com/huggingface/datasets/assets/22740819/3b888fd8-e6d6-488f-b828-95a8f206a152) After debugging, I know that it is because of the `pa.array` operation in [arrow_writer](https://github.com/huggingface/datasets/blob/2.13.0/src/datasets/arrow_writer.py#L553), but I don't know why. ### Steps to reproduce the bug ``` from PIL import Image from datasets import Dataset def generator(lines): for line in lines: img = Image.open(open(line["url"], "rb")) # print(img.format) # "PNG" yield { "image": img, } lines = open(dataset_path, "r") dataset = Dataset.from_generator( generator, gen_kwargs={"lines": lines} ) ``` ### Expected behavior Generating split done. ### Environment info datasets 2.13.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6774/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6773
6,773
Dataset on Hub re-downloads every time?
{ "avatar_url": "https://avatars.githubusercontent.com/u/9099139?v=4", "events_url": "https://api.github.com/users/manestay/events{/privacy}", "followers_url": "https://api.github.com/users/manestay/followers", "following_url": "https://api.github.com/users/manestay/following{/other_user}", "gists_url": "https://api.github.com/users/manestay/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/manestay", "id": 9099139, "login": "manestay", "node_id": "MDQ6VXNlcjkwOTkxMzk=", "organizations_url": "https://api.github.com/users/manestay/orgs", "received_events_url": "https://api.github.com/users/manestay/received_events", "repos_url": "https://api.github.com/users/manestay/repos", "site_admin": false, "starred_url": "https://api.github.com/users/manestay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/manestay/subscriptions", "type": "User", "url": "https://api.github.com/users/manestay", "user_view_type": "public" }
[]
closed
false
[ "The caching works as expected when I try to reproduce this locally or on Colab...", "hi @mariosasko , Thank you for checking. I also tried running this again just now, and it seems like the `load_dataset()` caches properly (though I'll double check later).\r\n\r\nI think the issue might be in the caching of the function output for `territories.map(lambda row: {'Claimants': row['Claimants'].split(';')})`. My current run re-ran this, even though I have run this many times before, and as demonstrated by loading from cache, the loaded dataset is the same.\r\n\r\nI wonder if the issue stems from using CSV output. Do you recommend changing to Parquet, and if so, is there an easy way to take the already uploaded data on the Hub and reformat?", "This issue seems similar to https://github.com/huggingface/datasets/issues/6184 (`dill` serializes objects defined outside the `__main__` module by reference). You should be able to work around this limitation by defining the lambdas outside of `load_borderlines_hf` (as module variables) and then setting their `__module__` attribute's value to `None` to force serializing them by value, e.g., like this: \r\n```python\r\nsplit_Claimants_row = lambda row: {'Claimants': row['Claimants'].split(';')}\r\nsplit_Claimants_row.__module__ = None\r\n```", "Thank you, I'll give this a try. Your fix makes sense to me, so this issue can be closed for now.\r\n\r\nUnrelated comment -- for \"Downloads last month\" on the hub page, I'm assuming for this project that each downloaded CSV is 1 download? The dataset consists of 51 CSVs, so I'm trying to see why it's incrementing so quickly (1125 2 days ago, 1246 right now).", "This doc explains how we count \"Downloads last month\": https://huggingface.co/docs/hub/datasets-download-stats" ]
2024-04-02T17:23:22Z
2024-04-08T18:43:45Z
2024-04-08T18:43:45Z
NONE
null
null
### Describe the bug Hi, I have a dataset on the hub [here](https://huggingface.co/datasets/manestay/borderlines). It has 1k+ downloads, which I'm sure is mostly just me and my colleagues working with it. It should have far fewer, since I'm using the same machine with a properly set up HF_HOME variable. However, whenever I run the below function `load_borderlines_hf`, it downloads the entire dataset from the hub and then does the other logic: https://github.com/manestay/borderlines/blob/4e161f444661e2ebfe643f3fe149d9258d63a57d/run_gpt/lib.py#L80 Let me know what I'm doing wrong here, or if it's a bug with the `datasets` library itself. On the hub I have my data stored in CSVs, but several columns are lists, so that's why I have the code to map splitting on `;`. I looked into dataset loading scripts, but it seemed difficult to set up. I have verified that other `datasets` and `models` on my system are using the cache properly (e.g. I have a 13B parameter model and large datasets, but those are cached and don't redownload). __EDIT:__ as pointed out in the discussion below, it may be the `map()` calls that aren't being cached properly. Supposing the `load_dataset()` call retrieves from the cache, the `map()` calls should also retrieve from the cached output. But the `map()` commands re-execute sometimes. ### Steps to reproduce the bug 1. Copy and paste the function from [here](https://github.com/manestay/borderlines/blob/4e161f444661e2ebfe643f3fe149d9258d63a57d/run_gpt/lib.py#L80) (lines 80-100) 2. Run it in Python `load_borderlines_hf(None)` 3. It completes successfully, downloading from HF hub, then doing the mapping logic etc. 4. If you run it again after some time, it will re-download, ignoring the cache ### Expected behavior Re-running the code, which calls `datasets.load_dataset('manestay/borderlines', 'territories')`, should use the cached version ### Environment info - `datasets` version: 2.16.1 - Platform: Linux-5.14.21-150500.55.7-default-x86_64-with-glibc2.31 - Python version: 3.10.13 - `huggingface_hub` version: 0.20.3 - PyArrow version: 15.0.0 - Pandas version: 1.5.3 - `fsspec` version: 2023.10.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/9099139?v=4", "events_url": "https://api.github.com/users/manestay/events{/privacy}", "followers_url": "https://api.github.com/users/manestay/followers", "following_url": "https://api.github.com/users/manestay/following{/other_user}", "gists_url": "https://api.github.com/users/manestay/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/manestay", "id": 9099139, "login": "manestay", "node_id": "MDQ6VXNlcjkwOTkxMzk=", "organizations_url": "https://api.github.com/users/manestay/orgs", "received_events_url": "https://api.github.com/users/manestay/received_events", "repos_url": "https://api.github.com/users/manestay/repos", "site_admin": false, "starred_url": "https://api.github.com/users/manestay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/manestay/subscriptions", "type": "User", "url": "https://api.github.com/users/manestay", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6773/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6772
6,772
`remove_columns`/`rename_columns` doc fixes
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6772). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005728 / 0.011353 (-0.005624) | 0.003809 / 0.011008 (-0.007199) | 0.062930 / 0.038508 (0.024422) | 0.032320 / 0.023109 (0.009211) | 0.251072 / 0.275898 (-0.024826) | 0.275397 / 0.323480 (-0.048083) | 0.003314 / 0.007986 (-0.004671) | 0.002869 / 0.004328 (-0.001460) | 0.049070 / 0.004250 (0.044819) | 0.049282 / 0.037052 (0.012229) | 0.263546 / 0.258489 (0.005057) | 0.291471 / 0.293841 (-0.002370) | 0.028462 / 0.128546 (-0.100084) | 0.010528 / 0.075646 (-0.065119) | 0.211249 / 0.419271 (-0.208023) | 0.036840 / 0.043533 (-0.006693) | 0.250038 / 0.255139 (-0.005101) | 0.268883 / 0.283200 (-0.014317) | 0.021417 / 0.141683 (-0.120266) | 1.139754 / 1.452155 (-0.312400) | 1.197319 / 1.492716 (-0.295397) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094191 / 0.018006 (0.076185) | 0.302413 / 0.000490 (0.301923) | 0.000220 / 0.000200 (0.000020) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018490 / 0.037411 (-0.018922) | 0.063361 / 0.014526 (0.048835) | 0.075854 / 0.176557 (-0.100702) | 0.121499 / 0.737135 (-0.615637) | 0.075982 / 0.296338 (-0.220356) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286030 / 0.215209 (0.070821) | 2.778487 / 2.077655 (0.700832) | 1.440963 / 1.504120 (-0.063157) | 1.326217 / 1.541195 (-0.214977) | 1.359228 / 1.468490 (-0.109262) | 0.566999 / 4.584777 (-4.017778) | 2.453344 / 3.745712 (-1.292368) | 2.841448 / 5.269862 (-2.428413) | 1.825197 / 4.565676 (-2.740479) | 0.062301 / 0.424275 (-0.361974) | 0.004948 / 0.007607 (-0.002659) | 0.334578 / 0.226044 (0.108534) | 3.302327 / 2.268929 (1.033399) | 1.799808 / 55.444624 (-53.644817) | 1.529693 / 6.876477 (-5.346783) | 1.564684 / 2.142072 (-0.577389) | 0.632891 / 4.805227 (-4.172336) | 0.116594 / 6.500664 (-6.384070) | 0.042695 / 0.075469 (-0.032774) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.999994 / 1.841788 (-0.841794) | 12.767365 / 8.074308 (4.693057) | 10.550439 / 10.191392 (0.359047) | 0.133437 / 0.680424 (-0.546986) | 0.015252 / 0.534201 (-0.518949) | 0.293285 / 0.579283 (-0.285998) | 0.274773 / 0.434364 (-0.159590) | 0.328718 / 0.540337 (-0.211619) | 0.428021 / 1.386936 (-0.958915) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005538 / 0.011353 (-0.005815) | 0.003738 / 0.011008 (-0.007271) | 0.050179 / 0.038508 (0.011671) | 0.032441 / 0.023109 (0.009332) | 0.294721 / 0.275898 (0.018823) | 0.322616 / 0.323480 (-0.000864) | 0.004255 / 0.007986 (-0.003731) | 0.002913 / 0.004328 (-0.001416) | 0.049044 / 0.004250 (0.044794) | 0.042361 / 0.037052 (0.005309) | 0.304162 / 0.258489 (0.045673) | 0.332757 / 0.293841 (0.038916) | 0.029355 / 0.128546 (-0.099191) | 0.010546 / 0.075646 (-0.065100) | 0.058213 / 0.419271 (-0.361058) | 0.032648 / 0.043533 (-0.010885) | 0.298241 / 0.255139 (0.043102) | 0.313710 / 0.283200 (0.030510) | 0.017836 / 0.141683 (-0.123847) | 1.135050 / 1.452155 (-0.317104) | 1.178277 / 1.492716 (-0.314439) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.094387 / 0.018006 (0.076381) | 0.301955 / 0.000490 (0.301466) | 0.000220 / 0.000200 (0.000020) | 0.000052 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023135 / 0.037411 (-0.014276) | 0.078109 / 0.014526 (0.063583) | 0.087519 / 0.176557 (-0.089037) | 0.127815 / 0.737135 (-0.609320) | 0.090107 / 0.296338 (-0.206231) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289149 / 0.215209 (0.073940) | 2.832354 / 2.077655 (0.754699) | 1.574003 / 1.504120 (0.069883) | 1.449190 / 1.541195 (-0.092005) | 1.465798 / 1.468490 (-0.002692) | 0.561953 / 4.584777 (-4.022824) | 2.445788 / 3.745712 (-1.299924) | 2.882453 / 5.269862 (-2.387409) | 1.813267 / 4.565676 (-2.752409) | 0.063163 / 0.424275 (-0.361112) | 0.005785 / 0.007607 (-0.001822) | 0.340125 / 0.226044 (0.114081) | 3.355370 / 2.268929 (1.086442) | 1.924226 / 55.444624 (-53.520398) | 1.643242 / 6.876477 (-5.233234) | 1.650149 / 2.142072 (-0.491924) | 0.654818 / 4.805227 (-4.150409) | 0.114968 / 6.500664 (-6.385696) | 0.042044 / 0.075469 (-0.033425) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.024867 / 1.841788 (-0.816921) | 12.656140 / 8.074308 (4.581832) | 10.927014 / 10.191392 (0.735622) | 0.155929 / 0.680424 (-0.524495) | 0.015356 / 0.534201 (-0.518845) | 0.289834 / 0.579283 (-0.289449) | 0.280889 / 0.434364 (-0.153475) | 0.331490 / 0.540337 (-0.208847) | 0.418037 / 1.386936 (-0.968899) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#ad3467e9b138d1a9b87b661828a71139f4e46ece \"CML watermark\")\n" ]
2024-04-02T15:41:28Z
2024-04-02T16:28:45Z
2024-04-02T16:17:46Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6772.diff", "html_url": "https://github.com/huggingface/datasets/pull/6772", "merged_at": "2024-04-02T16:17:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/6772.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6772" }
Use more consistent wording in `remove_columns` to explain why it's faster than `map` and update `remove_columns`/`rename_columns` docstrings to fix in-place calls. Reported in https://github.com/huggingface/datasets/issues/6700
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6772/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6771
6,771
Datasets FileNotFoundError when trying to generate examples.
{ "avatar_url": "https://avatars.githubusercontent.com/u/26197115?v=4", "events_url": "https://api.github.com/users/RitchieP/events{/privacy}", "followers_url": "https://api.github.com/users/RitchieP/followers", "following_url": "https://api.github.com/users/RitchieP/following{/other_user}", "gists_url": "https://api.github.com/users/RitchieP/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RitchieP", "id": 26197115, "login": "RitchieP", "node_id": "MDQ6VXNlcjI2MTk3MTE1", "organizations_url": "https://api.github.com/users/RitchieP/orgs", "received_events_url": "https://api.github.com/users/RitchieP/received_events", "repos_url": "https://api.github.com/users/RitchieP/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RitchieP/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RitchieP/subscriptions", "type": "User", "url": "https://api.github.com/users/RitchieP", "user_view_type": "public" }
[]
closed
false
[ "Hi! I've opened a PR in the repo to fix this issue: https://huggingface.co/datasets/RitchieP/VerbaLex_voice/discussions/6", "@mariosasko Thanks for the PR and help! Guess I could close the issue for now. Appreciate the help!" ]
2024-04-02T10:24:57Z
2024-04-04T14:22:03Z
2024-04-04T14:22:03Z
NONE
null
null
### Discussed in https://github.com/huggingface/datasets/discussions/6768 <div type='discussions-op-text'> <sup>Originally posted by **RitchieP** April 1, 2024</sup> Currently, I have a dataset hosted on Huggingface with a custom script [here](https://huggingface.co/datasets/RitchieP/VerbaLex_voice). I'm loading my dataset as below. ```py from datasets import load_dataset, IterableDatasetDict dataset = IterableDatasetDict() dataset["train"] = load_dataset("RitchieP/VerbaLex_voice", "ar", split="train", use_auth_token=True, streaming=True) dataset["test"] = load_dataset("RitchieP/VerbaLex_voice", "ar", split="test", use_auth_token=True, streaming=True) ``` And when I try to see the data I have loaded with ```py list(dataset["train"].take(1)) ``` And it gives me this stack trace ``` --------------------------------------------------------------------------- FileNotFoundError Traceback (most recent call last) Cell In[2], line 1 ----> 1 list(dataset["train"].take(1)) File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:1388, in IterableDataset.__iter__(self) 1385 yield formatter.format_row(pa_table) 1386 return -> 1388 for key, example in ex_iterable: 1389 if self.features: 1390 # `IterableDataset` automatically fills missing columns with None. 1391 # This is done with `_apply_feature_types_on_example`. 1392 example = _apply_feature_types_on_example( 1393 example, self.features, token_per_repo_id=self._token_per_repo_id 1394 ) File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:1044, in TakeExamplesIterable.__iter__(self) 1043 def __iter__(self): -> 1044 yield from islice(self.ex_iterable, self.n) File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:234, in ExamplesIterable.__iter__(self) 233 def __iter__(self): --> 234 yield from self.generate_examples_fn(**self.kwargs) File ~/.cache/huggingface/modules/datasets_modules/datasets/RitchieP--VerbaLex_voice/9465eaee58383cf9d7c3e14111d7abaea56398185a641b646897d6df4e4732f7/VerbaLex_voice.py:127, in VerbaLexVoiceDataset._generate_examples(self, local_extracted_archive_paths, archives, meta_path) 125 for i, audio_archive in enumerate(archives): 126 print(audio_archive) --> 127 for path, file in audio_archive: 128 _, filename = os.path.split(path) 129 if filename in metadata: File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:869, in _IterableFromGenerator.__iter__(self) 868 def __iter__(self): --> 869 yield from self.generator(*self.args, **self.kwargs) File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:919, in ArchiveIterable._iter_from_urlpath(cls, urlpath, download_config) 915 @classmethod 916 def _iter_from_urlpath( 917 cls, urlpath: str, download_config: Optional[DownloadConfig] = None 918 ) -> Generator[Tuple, None, None]: --> 919 compression = _get_extraction_protocol(urlpath, download_config=download_config) 920 # Set block_size=0 to get faster streaming 921 # (e.g. 
for hf:// and https:// it uses streaming Requests file-like instances) 922 with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f: File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:400, in _get_extraction_protocol(urlpath, download_config) 398 urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) 399 try: --> 400 with fsspec.open(urlpath, **(storage_options or {})) as f: 401 return _get_extraction_protocol_with_magic_number(f) 402 except FileNotFoundError: File /opt/conda/lib/python3.10/site-packages/fsspec/core.py:100, in OpenFile.__enter__(self) 97 def __enter__(self): 98 mode = self.mode.replace("t", "").replace("b", "") + "b" --> 100 f = self.fs.open(self.path, mode=mode) 102 self.fobjects = [f] 104 if self.compression is not None: File /opt/conda/lib/python3.10/site-packages/fsspec/spec.py:1307, in AbstractFileSystem.open(self, path, mode, block_size, cache_options, compression, **kwargs) 1305 else: 1306 ac = kwargs.pop("autocommit", not self._intrans) -> 1307 f = self._open( 1308 path, 1309 mode=mode, 1310 block_size=block_size, 1311 autocommit=ac, 1312 cache_options=cache_options, 1313 **kwargs, 1314 ) 1315 if compression is not None: 1316 from fsspec.compression import compr File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:180, in LocalFileSystem._open(self, path, mode, block_size, **kwargs) 178 if self.auto_mkdir and "w" in mode: 179 self.makedirs(self._parent(path), exist_ok=True) --> 180 return LocalFileOpener(path, mode, fs=self, **kwargs) File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:302, in LocalFileOpener.__init__(self, path, mode, autocommit, fs, compression, **kwargs) 300 self.compression = get_compression(path, compression) 301 self.blocksize = io.DEFAULT_BUFFER_SIZE --> 302 self._open() File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:307, in LocalFileOpener._open(self) 305 if self.f is None or self.f.closed: 306 if self.autocommit or "w" not in self.mode: --> 307 self.f = open(self.path, mode=self.mode) 308 if self.compression: 309 compress = compr[self.compression] FileNotFoundError: [Errno 2] No such file or directory: '/kaggle/working/h' ``` After looking into the stack trace, and referring to the source codes, it looks like its trying to access a directory in the notebook's environment and I don't understand why. Not sure if its a bug in Datasets library, so I'm opening a discussions first. Feel free to ask for more information if needed. Appreciate any help in advance!</div> Hi, referring to the discussion title above, after further digging, I think it's an issue within the datasets library. But not quite sure where it is. If you require any more info or actions from me, please let me know. Appreciate any help in advance!
{ "avatar_url": "https://avatars.githubusercontent.com/u/26197115?v=4", "events_url": "https://api.github.com/users/RitchieP/events{/privacy}", "followers_url": "https://api.github.com/users/RitchieP/followers", "following_url": "https://api.github.com/users/RitchieP/following{/other_user}", "gists_url": "https://api.github.com/users/RitchieP/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RitchieP", "id": 26197115, "login": "RitchieP", "node_id": "MDQ6VXNlcjI2MTk3MTE1", "organizations_url": "https://api.github.com/users/RitchieP/orgs", "received_events_url": "https://api.github.com/users/RitchieP/received_events", "repos_url": "https://api.github.com/users/RitchieP/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RitchieP/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RitchieP/subscriptions", "type": "User", "url": "https://api.github.com/users/RitchieP", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6771/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6770
6,770
[Bug Report] `datasets==2.18.0` is not compatible with `fsspec==2023.12.2`
{ "avatar_url": "https://avatars.githubusercontent.com/u/19348888?v=4", "events_url": "https://api.github.com/users/fshp971/events{/privacy}", "followers_url": "https://api.github.com/users/fshp971/followers", "following_url": "https://api.github.com/users/fshp971/following{/other_user}", "gists_url": "https://api.github.com/users/fshp971/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fshp971", "id": 19348888, "login": "fshp971", "node_id": "MDQ6VXNlcjE5MzQ4ODg4", "organizations_url": "https://api.github.com/users/fshp971/orgs", "received_events_url": "https://api.github.com/users/fshp971/received_events", "repos_url": "https://api.github.com/users/fshp971/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fshp971/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fshp971/subscriptions", "type": "User", "url": "https://api.github.com/users/fshp971", "user_view_type": "public" }
[]
closed
false
[ "You should be able to fix this by updating `huggingface_hub` with `pip install -U huggingface_hub`. We use this package under the hood to resolve the Hub's files." ]
2024-04-01T20:17:48Z
2024-04-11T17:31:44Z
2024-04-11T17:31:44Z
NONE
null
null
### Describe the bug `Datasets==2.18.0` is not compatible with `fsspec==2023.12.2`. I have to downgrade fsspec to `fsspec==2023.10.0` to make `Datasets==2.18.0` work properly. ### Steps to reproduce the bug To reproduce the bug: 1. Make sure that `Datasets==2.18.0` and `fsspec==2023.12.2`. 2. Run the following code: ``` from datasets import load_dataset dataset = load_dataset("trec") ``` 3. Then one will get the following error message: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/conda/lib/python3.10/site-packages/datasets/load.py", line 2556, in load_dataset builder_instance = load_dataset_builder( File "/opt/conda/lib/python3.10/site-packages/datasets/load.py", line 2265, in load_dataset_builder builder_instance: DatasetBuilder = builder_cls( File "/opt/conda/lib/python3.10/site-packages/datasets/builder.py", line 371, in __init__ self.config, self.config_id = self._create_builder_config( File "/opt/conda/lib/python3.10/site-packages/datasets/builder.py", line 620, in _create_builder_config builder_config._resolve_data_files( File "/opt/conda/lib/python3.10/site-packages/datasets/builder.py", line 211, in _resolve_data_files self.data_files = self.data_files.resolve(base_path, download_config) File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 799, in resolve out[key] = data_files_patterns_list.resolve(base_path, download_config) File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 752, in resolve resolve_pattern( File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 393, in resolve_pattern raise FileNotFoundError(error_msg) FileNotFoundError: Unable to find 'hf://datasets/trec@65752bf53af25bc935a0dce92fb5b6c930728450/default/train/0000.parquet' with any supported extension ['.csv', '.tsv', '.json', '.jsonl', '.parquet', '.geoparquet', '.gpq', '.arrow', '.txt', '.tar', '.blp', '.bmp', '.dib', '.bufr', '.cur', '.pcx', '.dcx', '.dds', '.ps', '.eps', '.fit', '.fits', '.fli', '.flc', '.ftc', '.ftu', '.gbr', '.gif', '.grib', '.h5', '.hdf', '.png', '.apng', '.jp2', '.j2k', '.jpc', '.jpf', '.jpx', '.j2c', '.icns', '.ico', '.im', '.iim', '.tif', '.tiff', '.jfif', '.jpe', '.jpg', '.jpeg', '.mpg', '.mpeg', '.msp', '.pcd', '.pxr', '.pbm', '.pgm', '.ppm', '.pnm', '.psd', '.bw', '.rgb', '.rgba', '.sgi', '.ras', '.tga', '.icb', '.vda', '.vst', '.webp', '.wmf', '.emf', '.xbm', '.xpm', '.BLP', '.BMP', '.DIB', '.BUFR', '.CUR', '.PCX', '.DCX', '.DDS', '.PS', '.EPS', '.FIT', '.FITS', '.FLI', '.FLC', '.FTC', '.FTU', '.GBR', '.GIF', '.GRIB', '.H5', '.HDF', '.PNG', '.APNG', '.JP2', '.J2K', '.JPC', '.JPF', '.JPX', '.J2C', '.ICNS', '.ICO', '.IM', '.IIM', '.TIF', '.TIFF', '.JFIF', '.JPE', '.JPG', '.JPEG', '.MPG', '.MPEG', '.MSP', '.PCD', '.PXR', '.PBM', '.PGM', '.PPM', '.PNM', '.PSD', '.BW', '.RGB', '.RGBA', '.SGI', '.RAS', '.TGA', '.ICB', '.VDA', '.VST', '.WEBP', '.WMF', '.EMF', '.XBM', '.XPM', '.aiff', '.au', '.avr', '.caf', '.flac', '.htk', '.svx', '.mat4', '.mat5', '.mpc2k', '.ogg', '.paf', '.pvf', '.raw', '.rf64', '.sd2', '.sds', '.ircam', '.voc', '.w64', '.wav', '.nist', '.wavex', '.wve', '.xi', '.mp3', '.opus', '.AIFF', '.AU', '.AVR', '.CAF', '.FLAC', '.HTK', '.SVX', '.MAT4', '.MAT5', '.MPC2K', '.OGG', '.PAF', '.PVF', '.RAW', '.RF64', '.SD2', '.SDS', '.IRCAM', '.VOC', '.W64', '.WAV', '.NIST', '.WAVEX', '.WVE', '.XI', '.MP3', '.OPUS', '.zip'] ``` 4. 
Similar issue also found for the following code: ``` dataset = load_dataset("sst", "default") ``` ### Expected behavior If the dataset is loaded correctly, one will have: ``` >>> print(dataset) DatasetDict({ train: Dataset({ features: ['text', 'coarse_label', 'fine_label'], num_rows: 5452 }) test: Dataset({ features: ['text', 'coarse_label', 'fine_label'], num_rows: 500 }) }) >>> ``` ### Environment info - `datasets` version: 2.18.0 - Platform: Linux-6.2.0-35-generic-x86_64-with-glibc2.31 - Python version: 3.10.13 - `huggingface_hub` version: 0.20.3 - PyArrow version: 15.0.1 - Pandas version: 2.2.1 - `fsspec` version: 2023.12.2
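A quick way to check whether an environment matches the fix suggested in the comments above (updating `huggingface_hub`) is a small pre-flight snippet. The version threshold used here is only an assumption for illustration, not the exact pin used by `datasets 2.18.0`:

```python
# Pre-flight check: an outdated huggingface_hub is the usual culprit when
# hf:// parquet paths cannot be resolved, even though fsspec itself is within
# the declared range.
from importlib.metadata import version
from packaging.version import Version

for pkg in ("datasets", "fsspec", "huggingface_hub"):
    print(pkg, version(pkg))

if Version(version("huggingface_hub")) < Version("0.21.2"):  # assumed threshold
    print("Consider: pip install -U huggingface_hub")
```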
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6770/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6769
6,769
(Willing to PR) Datasets with custom Python objects
{ "avatar_url": "https://avatars.githubusercontent.com/u/5236035?v=4", "events_url": "https://api.github.com/users/fzyzcjy/events{/privacy}", "followers_url": "https://api.github.com/users/fzyzcjy/followers", "following_url": "https://api.github.com/users/fzyzcjy/following{/other_user}", "gists_url": "https://api.github.com/users/fzyzcjy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fzyzcjy", "id": 5236035, "login": "fzyzcjy", "node_id": "MDQ6VXNlcjUyMzYwMzU=", "organizations_url": "https://api.github.com/users/fzyzcjy/orgs", "received_events_url": "https://api.github.com/users/fzyzcjy/received_events", "repos_url": "https://api.github.com/users/fzyzcjy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fzyzcjy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fzyzcjy/subscriptions", "type": "User", "url": "https://api.github.com/users/fzyzcjy", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
[]
2024-04-01T13:18:47Z
2024-04-01T13:36:58Z
null
CONTRIBUTOR
null
null
### Feature request Hi, thanks for the library! I would like to have a Hugging Face Dataset in which one of the columns holds custom (non-serializable) Python objects. For example, a minimal snippet: ``` class MyClass: pass dataset = datasets.Dataset.from_list([ dict(a=MyClass(), b='hello'), ]) ``` It gives this error: ``` ArrowInvalid: Could not convert <__main__.MyClass object at 0x7a852830d050> with type MyClass: did not recognize Python value type when inferring an Arrow data type ``` I guess this is because Dataset forces everything to be converted into the Arrow format. However, is there any way to make this scenario work? Thanks! ### Motivation (see above) ### Your contribution Yes, I am happy to PR! Cross-posted: https://discuss.huggingface.co/t/datasets-with-custom-python-objects/79050?u=fzyzcjy EDIT: possibly related https://github.com/huggingface/datasets/issues/5766
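One workaround that is often suggested for this kind of request (sketched here under the assumption that the objects are picklable; it is not an official `datasets` feature) is to store the objects as pickled bytes so Arrow only ever sees a binary column:

```python
import pickle
import datasets

class MyClass:
    pass

# Serialize the custom objects so Arrow only sees bytes
dataset = datasets.Dataset.from_list(
    [dict(a=pickle.dumps(MyClass()), b="hello")]
)

# Deserialize when reading back
obj = pickle.loads(dataset[0]["a"])
print(type(obj))  # <class '__main__.MyClass'>
```

The obvious trade-offs: the column is opaque to `datasets`/Arrow (no filtering or casting on it), and unpickling untrusted data is unsafe.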
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6769/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6767
6,767
Fixing issue 6755 (small typo)
{ "avatar_url": "https://avatars.githubusercontent.com/u/63234112?v=4", "events_url": "https://api.github.com/users/JINO-ROHIT/events{/privacy}", "followers_url": "https://api.github.com/users/JINO-ROHIT/followers", "following_url": "https://api.github.com/users/JINO-ROHIT/following{/other_user}", "gists_url": "https://api.github.com/users/JINO-ROHIT/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JINO-ROHIT", "id": 63234112, "login": "JINO-ROHIT", "node_id": "MDQ6VXNlcjYzMjM0MTEy", "organizations_url": "https://api.github.com/users/JINO-ROHIT/orgs", "received_events_url": "https://api.github.com/users/JINO-ROHIT/received_events", "repos_url": "https://api.github.com/users/JINO-ROHIT/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JINO-ROHIT/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JINO-ROHIT/subscriptions", "type": "User", "url": "https://api.github.com/users/JINO-ROHIT", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6767). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005526 / 0.011353 (-0.005827) | 0.003839 / 0.011008 (-0.007169) | 0.064027 / 0.038508 (0.025519) | 0.032316 / 0.023109 (0.009206) | 0.250707 / 0.275898 (-0.025191) | 0.269222 / 0.323480 (-0.054258) | 0.004335 / 0.007986 (-0.003651) | 0.002703 / 0.004328 (-0.001626) | 0.049621 / 0.004250 (0.045370) | 0.047499 / 0.037052 (0.010446) | 0.262362 / 0.258489 (0.003873) | 0.292765 / 0.293841 (-0.001076) | 0.028661 / 0.128546 (-0.099885) | 0.010835 / 0.075646 (-0.064811) | 0.208910 / 0.419271 (-0.210362) | 0.036624 / 0.043533 (-0.006909) | 0.247448 / 0.255139 (-0.007691) | 0.270593 / 0.283200 (-0.012607) | 0.018988 / 0.141683 (-0.122695) | 1.141224 / 1.452155 (-0.310931) | 1.204944 / 1.492716 (-0.287772) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.096324 / 0.018006 (0.078318) | 0.292495 / 0.000490 (0.292006) | 0.000232 / 0.000200 (0.000032) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018379 / 0.037411 (-0.019032) | 0.065216 / 0.014526 (0.050690) | 0.074071 / 0.176557 (-0.102486) | 0.120793 / 0.737135 (-0.616343) | 0.075882 / 0.296338 (-0.220456) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286354 / 0.215209 (0.071145) | 2.800766 / 2.077655 (0.723111) | 1.474126 / 1.504120 (-0.029994) | 1.358232 / 1.541195 (-0.182963) | 1.400639 / 1.468490 (-0.067851) | 0.578354 / 4.584777 (-4.006423) | 2.454441 / 3.745712 (-1.291271) | 2.927003 / 5.269862 (-2.342859) | 1.826127 / 4.565676 (-2.739550) | 0.063049 / 0.424275 (-0.361226) | 0.005010 / 0.007607 (-0.002597) | 0.342174 / 0.226044 (0.116129) | 3.415900 / 2.268929 (1.146971) | 1.854096 / 55.444624 (-53.590528) | 1.568626 / 6.876477 (-5.307851) | 1.660138 / 2.142072 (-0.481934) | 0.664059 / 4.805227 (-4.141168) | 0.120496 / 6.500664 (-6.380168) | 0.044664 / 0.075469 (-0.030805) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.988434 / 1.841788 (-0.853353) | 12.525563 / 8.074308 (4.451255) | 10.016862 / 10.191392 (-0.174530) | 0.134043 / 0.680424 (-0.546381) | 0.014349 / 0.534201 (-0.519852) | 0.287173 / 0.579283 (-0.292110) | 0.266499 / 0.434364 (-0.167865) | 0.325425 / 0.540337 (-0.214912) | 0.418772 / 1.386936 (-0.968164) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005675 / 0.011353 (-0.005678) | 0.004238 / 0.011008 (-0.006770) | 0.051048 / 0.038508 (0.012540) | 0.033428 / 0.023109 (0.010319) | 0.283406 / 0.275898 (0.007508) | 0.309321 / 0.323480 (-0.014159) | 0.004354 / 0.007986 (-0.003631) | 0.003101 / 0.004328 (-0.001228) | 0.049369 / 0.004250 (0.045119) | 0.043252 / 0.037052 (0.006200) | 0.293097 / 0.258489 (0.034608) | 0.324392 / 0.293841 (0.030551) | 0.030524 / 0.128546 (-0.098022) | 0.010977 / 0.075646 (-0.064669) | 0.058546 / 0.419271 (-0.360726) | 0.033295 / 0.043533 (-0.010238) | 0.284929 / 0.255139 (0.029790) | 0.302925 / 0.283200 (0.019726) | 0.018586 / 0.141683 (-0.123097) | 1.156552 / 1.452155 (-0.295602) | 1.208856 / 1.492716 (-0.283860) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.096938 / 0.018006 (0.078932) | 0.305375 / 0.000490 (0.304886) | 0.000227 / 0.000200 (0.000027) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022658 / 0.037411 (-0.014754) | 0.078125 / 0.014526 (0.063599) | 0.087892 / 0.176557 (-0.088665) | 0.127745 / 0.737135 (-0.609390) | 0.089806 / 0.296338 (-0.206533) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.292434 / 0.215209 (0.077225) | 2.862329 / 2.077655 (0.784674) | 1.607948 / 1.504120 (0.103828) | 1.487179 / 1.541195 (-0.054016) | 1.542234 / 1.468490 (0.073744) | 0.579446 / 4.584777 (-4.005331) | 2.478549 / 3.745712 (-1.267163) | 2.923493 / 5.269862 (-2.346369) | 1.833161 / 4.565676 (-2.732515) | 0.064289 / 0.424275 (-0.359986) | 0.005638 / 0.007607 (-0.001969) | 0.350111 / 0.226044 (0.124067) | 3.436035 / 2.268929 (1.167107) | 1.970592 / 55.444624 (-53.474032) | 1.717474 / 6.876477 (-5.159002) | 1.753150 / 2.142072 (-0.388922) | 0.660495 / 4.805227 (-4.144732) | 0.119302 / 6.500664 (-6.381362) | 0.042633 / 0.075469 (-0.032836) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.018761 / 1.841788 (-0.823027) | 12.859834 / 8.074308 (4.785525) | 10.547789 / 10.191392 (0.356397) | 0.131986 / 0.680424 (-0.548438) | 0.016469 / 0.534201 (-0.517732) | 0.288585 / 0.579283 (-0.290698) | 0.270499 / 0.434364 (-0.163865) | 0.325801 / 0.540337 (-0.214537) | 0.416551 / 1.386936 (-0.970385) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#7599f15537b094bfd18de5af7bb2a482c06d7a0e \"CML watermark\")\n" ]
2024-03-31T16:13:37Z
2024-04-02T14:14:02Z
2024-04-02T14:01:18Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6767.diff", "html_url": "https://github.com/huggingface/datasets/pull/6767", "merged_at": "2024-04-02T14:01:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/6767.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6767" }
Fixes the small typo reported in issue #6755.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6767/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6765
6,765
Compatibility issue between s3fs, fsspec, and datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/33383515?v=4", "events_url": "https://api.github.com/users/njbrake/events{/privacy}", "followers_url": "https://api.github.com/users/njbrake/followers", "following_url": "https://api.github.com/users/njbrake/following{/other_user}", "gists_url": "https://api.github.com/users/njbrake/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/njbrake", "id": 33383515, "login": "njbrake", "node_id": "MDQ6VXNlcjMzMzgzNTE1", "organizations_url": "https://api.github.com/users/njbrake/orgs", "received_events_url": "https://api.github.com/users/njbrake/received_events", "repos_url": "https://api.github.com/users/njbrake/repos", "site_admin": false, "starred_url": "https://api.github.com/users/njbrake/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/njbrake/subscriptions", "type": "User", "url": "https://api.github.com/users/njbrake", "user_view_type": "public" }
[]
closed
false
[ "Hi! Instead of running `pip install` separately for each package, you should pass all the packages to a single `pip install` call (in this case, `pip install datasets s3fs`) to let `pip` properly resolve their versions.", "> Hi! Instead of running `pip install` separately for each package, you should pass all the packages to a single `pip install` call (in this case, `pip install datasets s3fs`) to let `pip` properly resolve their versions.\r\n\r\nThanks so much! My inexperience with pip is showing πŸ˜† πŸ™ˆ ", "> Hi! Instead of running `pip install` separately for each package, you should pass all the packages to a single `pip install` call (in this case, `pip install datasets s3fs`) to let `pip` properly resolve their versions.\r\n\r\nyou are awesome bro", "Hey, the suggestion by @mariosasko unfortunately only address this issue via pip. The original message was about poetry and I am still facing a dependency conflict with that.\r\n\r\nThe following command complains first about `fsspec` (`... no versions of fsspec match ...`) and then I get an error.\r\n\r\nCommand:\r\n`poetry add datasets s3fs` \r\n\r\nError: \r\n` ... your project ... depends on both datasets (^3.1.0) and s3fs (^2024.10.0), version solving failed`\r\n\r\nInstalling first `s3fs` and then the rest of the huggingface libraries, like `datasets`, also did not help." ]
2024-03-29T19:57:24Z
2024-11-12T14:50:48Z
2024-04-03T14:33:12Z
NONE
null
null
### Describe the bug Here is the full error stack when installing: ``` ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. datasets 2.18.0 requires fsspec[http]<=2024.2.0,>=2023.1.0, but you have fsspec 2024.3.1 which is incompatible. Successfully installed aiobotocore-2.12.1 aioitertools-0.11.0 botocore-1.34.51 fsspec-2024.3.1 jmespath-1.0.1 s3fs-2024.3.1 urllib3-2.0.7 wrapt-1.16.0 ``` When I install with pip, pip allows this error to exist while still installing s3fs, but this error breaks poetry, since poetry will refuse to install s3fs because of the dependency conflict. Maybe I'm missing something and this isn't a bug but a mistake on my end? Any input would be helpful. Thanks! ### Steps to reproduce the bug 1. conda create -n tmp python=3.10 -y 2. conda activate tmp 3. pip install datasets 4. pip install s3fs ### Expected behavior I would expect there to be no error. ### Environment info macOS (ARM), Python 3.10, conda 23.11.0.
{ "avatar_url": "https://avatars.githubusercontent.com/u/33383515?v=4", "events_url": "https://api.github.com/users/njbrake/events{/privacy}", "followers_url": "https://api.github.com/users/njbrake/followers", "following_url": "https://api.github.com/users/njbrake/following{/other_user}", "gists_url": "https://api.github.com/users/njbrake/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/njbrake", "id": 33383515, "login": "njbrake", "node_id": "MDQ6VXNlcjMzMzgzNTE1", "organizations_url": "https://api.github.com/users/njbrake/orgs", "received_events_url": "https://api.github.com/users/njbrake/received_events", "repos_url": "https://api.github.com/users/njbrake/repos", "site_admin": false, "starred_url": "https://api.github.com/users/njbrake/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/njbrake/subscriptions", "type": "User", "url": "https://api.github.com/users/njbrake", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6765/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6764
6,764
load_dataset doesn't work with symbolic links
{ "avatar_url": "https://avatars.githubusercontent.com/u/13640533?v=4", "events_url": "https://api.github.com/users/VladimirVincan/events{/privacy}", "followers_url": "https://api.github.com/users/VladimirVincan/followers", "following_url": "https://api.github.com/users/VladimirVincan/following{/other_user}", "gists_url": "https://api.github.com/users/VladimirVincan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VladimirVincan", "id": 13640533, "login": "VladimirVincan", "node_id": "MDQ6VXNlcjEzNjQwNTMz", "organizations_url": "https://api.github.com/users/VladimirVincan/orgs", "received_events_url": "https://api.github.com/users/VladimirVincan/received_events", "repos_url": "https://api.github.com/users/VladimirVincan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VladimirVincan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VladimirVincan/subscriptions", "type": "User", "url": "https://api.github.com/users/VladimirVincan", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
[ "In fact,You can use a hard link instead of a symbolic link.Hard link works" ]
2024-03-29T17:49:28Z
2025-04-29T15:06:28Z
null
NONE
null
null
### Feature request Enable the `load_dataset` function to load local datasets with symbolic links. E.g, this dataset can be loaded: β”œβ”€β”€ example_dataset/ β”‚ β”œβ”€β”€ data/ β”‚ β”‚ β”œβ”€β”€ train/ β”‚ β”‚ β”‚ β”œβ”€β”€ file0 β”‚ β”‚ β”‚ β”œβ”€β”€ file1 β”‚ β”‚ β”œβ”€β”€ dev/ β”‚ β”‚ β”‚ β”œβ”€β”€ file2 β”‚ β”‚ β”‚ β”œβ”€β”€ file3 β”‚ β”œβ”€β”€ metadata.csv while this dataset can't: β”œβ”€β”€ example_dataset_symlink/ β”‚ β”œβ”€β”€ data/ β”‚ β”‚ β”œβ”€β”€ train/ β”‚ β”‚ β”‚ β”œβ”€β”€ sym0 -> file0 β”‚ β”‚ β”‚ β”œβ”€β”€ sym1 -> file1 β”‚ β”‚ β”œβ”€β”€ dev/ β”‚ β”‚ β”‚ β”œβ”€β”€ sym2 -> file2 β”‚ β”‚ β”‚ β”œβ”€β”€ sym3 -> file3 β”‚ β”œβ”€β”€ metadata.csv I have created an example dataset in order to reproduce the problem: 1. Unzip `example_dataset.zip`. 2. Run `no_symlink.sh`. Training should start without issues. 3. Run `symlink.sh`. You will see that all four examples will be in train split, instead of having two examples in train and two examples in dev. The script won't load the correct audio files. [example_dataset.zip](https://github.com/huggingface/datasets/files/14807053/example_dataset.zip) ### Motivation I have a very large dataset locally. Instead of initiating training on the entire dataset, I need to start training on smaller subsets of the data. Due to the purpose of the experiments I am running, I will need to create many smaller datasets with overlapping data. Instead of copying the all the files for each subset, I would prefer copying symbolic links of the data. This way, the memory usage would not significantly increase beyond the initial dataset size. Advantages of this approach: - It would leave a smaller memory footprint on the hard drive - Creating smaller datasets would be much faster ### Your contribution I would gladly contribute, if this is something useful to the community. It seems like a simple change of code, something like `file_path = os.path.realpath(file_path)` should be added before loading the files. If anyone has insights on how to incorporate this functionality, I would greatly appreciate your knowledge and input.
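To make the proposal concrete, here is a minimal sketch of the kind of change the issue suggests (the helper name is hypothetical and not part of the `datasets` API); it simply resolves each symlink to its target before the files are handed to the loader:

```python
import os

def resolve_symlinks(file_paths):
    """Hypothetical helper: map every path (symlink or regular file) to its
    real location on disk so downstream code reads the actual audio files."""
    return [os.path.realpath(p) for p in file_paths]

print(resolve_symlinks(["example_dataset_symlink/data/train/sym0"]))
# -> ['/abs/path/example_dataset/data/train/file0'] if the link exists
```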
null
{ "+1": 9, "-1": 0, "confused": 0, "eyes": 3, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 12, "url": "https://api.github.com/repos/huggingface/datasets/issues/6764/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6763
6,763
Fix issue with case sensitivity when loading dataset from local cache
{ "avatar_url": "https://avatars.githubusercontent.com/u/58537872?v=4", "events_url": "https://api.github.com/users/Sumsky21/events{/privacy}", "followers_url": "https://api.github.com/users/Sumsky21/followers", "following_url": "https://api.github.com/users/Sumsky21/following{/other_user}", "gists_url": "https://api.github.com/users/Sumsky21/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Sumsky21", "id": 58537872, "login": "Sumsky21", "node_id": "MDQ6VXNlcjU4NTM3ODcy", "organizations_url": "https://api.github.com/users/Sumsky21/orgs", "received_events_url": "https://api.github.com/users/Sumsky21/received_events", "repos_url": "https://api.github.com/users/Sumsky21/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Sumsky21/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Sumsky21/subscriptions", "type": "User", "url": "https://api.github.com/users/Sumsky21", "user_view_type": "public" }
[]
open
false
[ "I also need this feature for [\"Cnam-LMSSC/vibravox \"](https://huggingface.co/datasets/Cnam-LMSSC/vibravox)\r\n\r\n\r\nEDIT: Upgrading to `2.19.0` fixed my problem thanks to [this PR](https://github.com/huggingface/datasets/pull/6754)" ]
2024-03-28T14:52:35Z
2024-04-20T12:16:45Z
null
NONE
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6763.diff", "html_url": "https://github.com/huggingface/datasets/pull/6763", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6763.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6763" }
When a dataset with upper-cases in its name is first loaded using `load_dataset()`, the local cache directory is created with all lowercase letters. However, upon subsequent loads, the current version attempts to locate the cache directory using the dataset's original name, which includes uppercase letters. This discrepancy can lead to confusion and, particularly in offline mode, results in errors. ### Reproduce ```bash ~$ python Python 3.9.19 (main, Mar 21 2024, 17:11:28) [GCC 11.2.0] :: Anaconda, Inc. on linux Type "help", "copyright", "credits" or "license" for more information. >>> from datasets import load_dataset >>> dataset = load_dataset("locuslab/TOFU", "full") >>> quit() ~$ export HF_DATASETS_OFFLINE=1 ~$ python Python 3.9.19 (main, Mar 21 2024, 17:11:28) [GCC 11.2.0] :: Anaconda, Inc. on linux Type "help", "copyright", "credits" or "license" for more information. >>> from datasets import load_dataset >>> dataset = load_dataset("locuslab/TOFU", "full") Traceback (most recent call last): File "<stdin>", line 1, in <module> File "xxxxxx/anaconda3/envs/llm/lib/python3.9/site-packages/datasets/load.py", line 2556, in load_dataset builder_instance = load_dataset_builder( File "xxxxxx/anaconda3/envs/llm/lib/python3.9/site-packages/datasets/load.py", line 2228, in load_dataset_builder dataset_module = dataset_module_factory( File "xxxxxx/anaconda3/envs/llm/lib/python3.9/site-packages/datasets/load.py", line 1871, in dataset_module_factory raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None ConnectionError: Couldn't reach the Hugging Face Hub for dataset 'locuslab/TOFU': Offline mode is enabled. >>> ``` I fix this issue by lowering the dataset name (`.lower()`) when generating cache_dir.
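A simplified sketch of the mismatch this PR targets (the function below is a stand-in for the real, more involved cache-path logic in `datasets`, illustration only):

```python
import os

def cache_dir_for(repo_id: str, lowercase: bool, base="~/.cache/huggingface/datasets"):
    # Hypothetical stand-in for the real cache-directory naming logic
    name = repo_id.replace("/", "___")
    if lowercase:
        name = name.lower()
    return os.path.join(os.path.expanduser(base), name)

on_disk = cache_dir_for("locuslab/TOFU", lowercase=True)     # what the first (online) load wrote
looked_up = cache_dir_for("locuslab/TOFU", lowercase=False)  # what the offline load searches for
print(on_disk == looked_up)  # False -> cache miss, hence the offline ConnectionError
```

Applying `.lower()` on both sides, as this PR proposes, makes the two paths agree.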
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6763/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6762
6,762
Allow polars as valid output type
{ "avatar_url": "https://avatars.githubusercontent.com/u/11325244?v=4", "events_url": "https://api.github.com/users/psmyth94/events{/privacy}", "followers_url": "https://api.github.com/users/psmyth94/followers", "following_url": "https://api.github.com/users/psmyth94/following{/other_user}", "gists_url": "https://api.github.com/users/psmyth94/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/psmyth94", "id": 11325244, "login": "psmyth94", "node_id": "MDQ6VXNlcjExMzI1MjQ0", "organizations_url": "https://api.github.com/users/psmyth94/orgs", "received_events_url": "https://api.github.com/users/psmyth94/received_events", "repos_url": "https://api.github.com/users/psmyth94/repos", "site_admin": false, "starred_url": "https://api.github.com/users/psmyth94/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/psmyth94/subscriptions", "type": "User", "url": "https://api.github.com/users/psmyth94", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6762). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Hello @lhoestq, I added the test and modified [update_data](https://github.com/huggingface/datasets/blob/bececdac927160b5c7e883736d7cc79d5699ad0a/src/datasets/arrow_dataset.py#L3437) to include `polars` as an updatable type. Although, it seems pretty redundant to do the type checks both before `validate_function_output` and then immediately afterward within the call stack. Could consider adding `allowable_types` in `validation_function_output`.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005530 / 0.011353 (-0.005823) | 0.004012 / 0.011008 (-0.006996) | 0.062474 / 0.038508 (0.023966) | 0.031896 / 0.023109 (0.008787) | 0.239620 / 0.275898 (-0.036278) | 0.264694 / 0.323480 (-0.058785) | 0.003199 / 0.007986 (-0.004786) | 0.003141 / 0.004328 (-0.001187) | 0.048726 / 0.004250 (0.044475) | 0.044795 / 0.037052 (0.007743) | 0.250661 / 0.258489 (-0.007828) | 0.279658 / 0.293841 (-0.014183) | 0.029857 / 0.128546 (-0.098689) | 0.012293 / 0.075646 (-0.063353) | 0.203626 / 0.419271 (-0.215646) | 0.036284 / 0.043533 (-0.007249) | 0.241678 / 0.255139 (-0.013461) | 0.259380 / 0.283200 (-0.023820) | 0.020400 / 0.141683 (-0.121283) | 1.142334 / 1.452155 (-0.309821) | 1.199068 / 1.492716 (-0.293648) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.097348 / 0.018006 (0.079341) | 0.303468 / 0.000490 (0.302978) | 0.000219 / 0.000200 (0.000019) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018646 / 0.037411 (-0.018766) | 0.062374 / 0.014526 (0.047848) | 0.074585 / 0.176557 (-0.101972) | 0.120380 / 0.737135 (-0.616755) | 0.075685 / 0.296338 (-0.220653) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | 
read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.277488 / 0.215209 (0.062279) | 2.741734 / 2.077655 (0.664080) | 1.451901 / 1.504120 (-0.052219) | 1.341712 / 1.541195 (-0.199482) | 1.395209 / 1.468490 (-0.073282) | 0.736334 / 4.584777 (-3.848443) | 2.358225 / 3.745712 (-1.387487) | 2.951838 / 5.269862 (-2.318023) | 1.892027 / 4.565676 (-2.673649) | 0.077913 / 0.424275 (-0.346362) | 0.005188 / 0.007607 (-0.002419) | 0.328790 / 0.226044 (0.102745) | 3.259387 / 2.268929 (0.990459) | 1.826102 / 55.444624 (-53.618522) | 1.526635 / 6.876477 (-5.349842) | 1.576392 / 2.142072 (-0.565680) | 0.786244 / 4.805227 (-4.018983) | 0.133909 / 6.500664 (-6.366756) | 0.044544 / 0.075469 (-0.030925) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.965314 / 1.841788 (-0.876474) | 11.786831 / 8.074308 (3.712523) | 9.568519 / 10.191392 (-0.622873) | 0.140628 / 0.680424 (-0.539796) | 0.014442 / 0.534201 (-0.519759) | 0.300876 / 0.579283 (-0.278407) | 0.262647 / 0.434364 (-0.171717) | 0.339141 / 0.540337 (-0.201196) | 0.430254 / 1.386936 (-0.956683) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006020 / 0.011353 (-0.005333) | 0.004191 / 0.011008 (-0.006818) | 0.050006 / 0.038508 (0.011498) | 0.033247 / 0.023109 (0.010138) | 0.270677 / 0.275898 (-0.005221) | 0.299539 / 0.323480 (-0.023941) | 0.004391 / 0.007986 (-0.003595) | 0.002825 / 0.004328 (-0.001504) | 0.048573 / 0.004250 (0.044322) | 0.042461 / 0.037052 (0.005409) | 0.283812 / 0.258489 (0.025323) | 0.324302 / 0.293841 (0.030461) | 0.033264 / 0.128546 (-0.095282) | 0.012405 / 0.075646 
(-0.063241) | 0.060298 / 0.419271 (-0.358973) | 0.034833 / 0.043533 (-0.008700) | 0.271133 / 0.255139 (0.015994) | 0.290712 / 0.283200 (0.007512) | 0.019762 / 0.141683 (-0.121920) | 1.138644 / 1.452155 (-0.313511) | 1.204628 / 1.492716 (-0.288088) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.096171 / 0.018006 (0.078164) | 0.308916 / 0.000490 (0.308427) | 0.000213 / 0.000200 (0.000013) | 0.000046 / 0.000054 (-0.000009) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023077 / 0.037411 (-0.014334) | 0.078865 / 0.014526 (0.064339) | 0.091031 / 0.176557 (-0.085526) | 0.133536 / 0.737135 (-0.603599) | 0.093308 / 0.296338 (-0.203030) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.301466 / 0.215209 (0.086257) | 2.995190 / 2.077655 (0.917535) | 1.616545 / 1.504120 (0.112425) | 1.472572 / 1.541195 (-0.068622) | 1.477191 / 1.468490 (0.008701) | 0.730240 / 4.584777 (-3.854537) | 0.966591 / 3.745712 (-2.779121) | 2.979970 / 5.269862 (-2.289892) | 1.908275 / 4.565676 (-2.657401) | 0.081346 / 0.424275 (-0.342929) | 0.005150 / 0.007607 (-0.002458) | 0.349066 / 0.226044 (0.123022) | 3.504363 / 2.268929 (1.235435) | 1.973355 / 55.444624 (-53.471270) | 1.659337 / 6.876477 (-5.217139) | 1.701282 / 2.142072 (-0.440790) | 0.813493 / 4.805227 (-3.991735) | 0.133537 / 6.500664 (-6.367127) | 0.041207 / 0.075469 (-0.034262) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.020368 / 1.841788 (-0.821420) | 12.444848 / 8.074308 (4.370540) | 10.113832 / 10.191392 (-0.077560) | 0.137782 / 0.680424 (-0.542642) | 0.015217 / 0.534201 (-0.518984) | 0.300419 / 0.579283 (-0.278864) | 0.128868 / 0.434364 (-0.305496) | 0.342831 / 0.540337 (-0.197506) | 0.443036 / 1.386936 (-0.943900) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#5f42139a2c5583a55d34a2f60d537f5fba285c28 \"CML watermark\")\n" ]
2024-03-28T13:40:28Z
2024-08-16T15:54:37Z
2024-08-16T13:10:37Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6762.diff", "html_url": "https://github.com/huggingface/datasets/pull/6762", "merged_at": "2024-08-16T13:10:37Z", "patch_url": "https://github.com/huggingface/datasets/pull/6762.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6762" }
I was trying out polars as an output for a map function and found that it wasn't a valid return type in `validate_function_output`. Thought that we should accommodate this by creating and adding it to the `allowed_processed_input_types` variable.
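For context, the intended usage once this change is in place (a sketch; it assumes `polars` is installed and a `datasets` version that includes this PR):

```python
import polars as pl
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "bb", "ccc"]})

def add_len(batch):
    # With this PR, a batched map function may return a polars DataFrame
    # instead of a dict / pandas DataFrame / pyarrow Table.
    return pl.DataFrame({"text": batch["text"], "length": [len(t) for t in batch["text"]]})

ds = ds.map(add_len, batched=True)
print(ds[0])  # {'text': 'a', 'length': 1}
```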
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6762/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6761
6,761
Remove deprecated code
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6761). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Thanks for cleaning this :) I'm also fine with renaming `hf_dataset_url` (and not `get_dataset_url` as you said in your OP)", "(Yep, `hf_dataset_url` is fine, made a mistake writing the PR description)", "@albertvillanova Sorry about that, tests are now fixed! :)", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005357 / 0.011353 (-0.005995) | 0.003788 / 0.011008 (-0.007220) | 0.063630 / 0.038508 (0.025122) | 0.031353 / 0.023109 (0.008244) | 0.247525 / 0.275898 (-0.028373) | 0.282052 / 0.323480 (-0.041428) | 0.004247 / 0.007986 (-0.003739) | 0.002750 / 0.004328 (-0.001579) | 0.049467 / 0.004250 (0.045217) | 0.046663 / 0.037052 (0.009610) | 0.266440 / 0.258489 (0.007951) | 0.295230 / 0.293841 (0.001389) | 0.028271 / 0.128546 (-0.100276) | 0.011116 / 0.075646 (-0.064530) | 0.222092 / 0.419271 (-0.197179) | 0.036627 / 0.043533 (-0.006906) | 0.252607 / 0.255139 (-0.002532) | 0.271231 / 0.283200 (-0.011969) | 0.019070 / 0.141683 (-0.122613) | 1.152645 / 1.452155 (-0.299509) | 1.211267 / 1.492716 (-0.281449) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095002 / 0.018006 (0.076996) | 0.304054 / 0.000490 (0.303564) | 0.000212 / 0.000200 (0.000012) | 0.000056 / 0.000054 (0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018251 / 0.037411 (-0.019161) | 0.061929 / 0.014526 (0.047403) | 0.074641 / 0.176557 (-0.101916) | 0.122643 / 0.737135 (-0.614492) | 0.076744 / 0.296338 (-0.219594) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled 
read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.284605 / 0.215209 (0.069396) | 2.774638 / 2.077655 (0.696984) | 1.473907 / 1.504120 (-0.030213) | 1.351054 / 1.541195 (-0.190141) | 1.348840 / 1.468490 (-0.119650) | 0.576243 / 4.584777 (-4.008534) | 2.444110 / 3.745712 (-1.301602) | 2.814741 / 5.269862 (-2.455121) | 1.762666 / 4.565676 (-2.803010) | 0.063959 / 0.424275 (-0.360316) | 0.005011 / 0.007607 (-0.002596) | 0.338406 / 0.226044 (0.112361) | 3.361213 / 2.268929 (1.092284) | 1.832674 / 55.444624 (-53.611950) | 1.564229 / 6.876477 (-5.312248) | 1.570843 / 2.142072 (-0.571230) | 0.657134 / 4.805227 (-4.148093) | 0.120041 / 6.500664 (-6.380623) | 0.048594 / 0.075469 (-0.026875) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.965328 / 1.841788 (-0.876460) | 11.704441 / 8.074308 (3.630133) | 9.895462 / 10.191392 (-0.295930) | 0.131913 / 0.680424 (-0.548511) | 0.015175 / 0.534201 (-0.519026) | 0.292022 / 0.579283 (-0.287261) | 0.269752 / 0.434364 (-0.164612) | 0.330453 / 0.540337 (-0.209884) | 0.421659 / 1.386936 (-0.965277) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005472 / 0.011353 (-0.005881) | 0.003809 / 0.011008 (-0.007199) | 0.049594 / 0.038508 (0.011086) | 0.031858 / 0.023109 (0.008748) | 0.277622 / 0.275898 (0.001724) | 0.296092 / 0.323480 (-0.027388) | 0.004209 / 0.007986 (-0.003777) | 0.002726 / 0.004328 (-0.001603) | 0.048057 / 0.004250 (0.043806) | 0.043317 / 0.037052 (0.006265) | 0.288371 / 0.258489 (0.029882) | 0.312847 / 0.293841 (0.019007) | 0.029110 / 0.128546 (-0.099437) | 0.010792 / 0.075646 (-0.064854) | 0.058694 / 0.419271 (-0.360577) | 0.033315 / 0.043533 (-0.010218) | 0.281225 / 0.255139 (0.026086) | 0.297044 / 0.283200 (0.013844) | 0.018897 / 0.141683 (-0.122786) | 1.156417 / 1.452155 
(-0.295738) | 1.221393 / 1.492716 (-0.271323) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095065 / 0.018006 (0.077059) | 0.304107 / 0.000490 (0.303618) | 0.000213 / 0.000200 (0.000014) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021658 / 0.037411 (-0.015753) | 0.075948 / 0.014526 (0.061423) | 0.087019 / 0.176557 (-0.089537) | 0.127309 / 0.737135 (-0.609827) | 0.092251 / 0.296338 (-0.204087) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291906 / 0.215209 (0.076697) | 2.865007 / 2.077655 (0.787352) | 1.591647 / 1.504120 (0.087527) | 1.474499 / 1.541195 (-0.066696) | 1.496644 / 1.468490 (0.028154) | 0.575337 / 4.584777 (-4.009440) | 2.569426 / 3.745712 (-1.176287) | 2.872611 / 5.269862 (-2.397251) | 1.804278 / 4.565676 (-2.761399) | 0.064225 / 0.424275 (-0.360050) | 0.005574 / 0.007607 (-0.002033) | 0.347724 / 0.226044 (0.121680) | 3.426418 / 2.268929 (1.157490) | 1.966270 / 55.444624 (-53.478355) | 1.687790 / 6.876477 (-5.188686) | 1.728530 / 2.142072 (-0.413542) | 0.650251 / 4.805227 (-4.154977) | 0.118381 / 6.500664 (-6.382283) | 0.041693 / 0.075469 (-0.033776) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.014203 / 1.841788 (-0.827585) | 12.219496 / 8.074308 (4.145188) | 10.469677 / 10.191392 (0.278285) | 0.141840 / 0.680424 (-0.538584) | 0.015104 / 0.534201 (-0.519097) | 0.288453 / 0.579283 (-0.290830) | 0.287467 / 0.434364 (-0.146897) | 0.331046 / 0.540337 (-0.209292) | 0.423731 / 1.386936 (-0.963205) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#66d6242626eada79cfba4df39d99cd2bacb1cbea \"CML watermark\")\n" ]
2024-03-28T09:57:57Z
2024-03-29T13:27:26Z
2024-03-29T13:18:13Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6761.diff", "html_url": "https://github.com/huggingface/datasets/pull/6761", "merged_at": "2024-03-29T13:18:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/6761.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6761" }
What does this PR do? 1. Remove `list_files_info` in favor of `list_repo_tree`. As of `0.23`, `list_files_info` will be removed for good. `datasets` had a utility to support both pre-0.20 and post-0.20 versions. Since the `hfh` version is already pinned to `>=0.21.2`, I removed the legacy part. 2. `preupload_lfs_files` also had a different behavior between `<0.20` and `>=0.20`. I removed it since huggingface_hub is now pinned to `>=0.21.2`. 3. `hf_hub_url` is overwritten to default to the dataset repo_type. I do think it is misleading to keep the same method naming for it. I renamed it to `get_dataset_url` for clarity. Let me know if you prefer to see this change reverted.
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6761/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6760
6,760
Load codeparrot/apps raising UnicodeDecodeError in datasets-2.18.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/17897916?v=4", "events_url": "https://api.github.com/users/yucc-leon/events{/privacy}", "followers_url": "https://api.github.com/users/yucc-leon/followers", "following_url": "https://api.github.com/users/yucc-leon/following{/other_user}", "gists_url": "https://api.github.com/users/yucc-leon/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yucc-leon", "id": 17897916, "login": "yucc-leon", "node_id": "MDQ6VXNlcjE3ODk3OTE2", "organizations_url": "https://api.github.com/users/yucc-leon/orgs", "received_events_url": "https://api.github.com/users/yucc-leon/received_events", "repos_url": "https://api.github.com/users/yucc-leon/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yucc-leon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yucc-leon/subscriptions", "type": "User", "url": "https://api.github.com/users/yucc-leon", "user_view_type": "public" }
[]
open
false
[ "The same error with mteb datasets.", "Unfortunately, I'm unable to reproduce this error locally or on Colab.", "Here is the requirements.txt from a clean virtual environment (managed by conda) where I only install `datasets` by \r\n`pip install datasets`. \r\nThe pip list:\r\n```\r\naiohttp==3.9.3\r\naiosignal==1.3.1\r\nattrs==23.2.0\r\ncertifi==2024.2.2\r\ncharset-normalizer==3.3.2\r\ndatasets==2.18.0\r\ndill==0.3.8\r\nfilelock==3.13.3\r\nfrozenlist==1.4.1\r\nfsspec==2024.2.0\r\nhuggingface-hub==0.22.2\r\nidna==3.6\r\nmultidict==6.0.5\r\nmultiprocess==0.70.16\r\nnumpy==1.26.4\r\npackaging==24.0\r\npandas==2.2.1\r\npyarrow==15.0.2\r\npyarrow-hotfix==0.6\r\npython-dateutil==2.9.0.post0\r\npytz==2024.1\r\nPyYAML==6.0.1\r\nrequests==2.31.0\r\nsix==1.16.0\r\ntqdm==4.66.2\r\ntyping_extensions==4.11.0\r\ntzdata==2024.1\r\nurllib3==2.2.1\r\nxxhash==3.4.1\r\nyarl==1.9.4\r\n```\r\nAnd the error can be reproduced.\r\n\r\nDowngrading to datasets==2.14.6 changes some packages' versions:\r\n\r\n```\r\nSuccessfully installed datasets-2.14.6 dill-0.3.7 fsspec-2023.10.0 multiprocess-0.70.15\r\n```\r\nand the dataset can be downloaded and loaded. \r\n\r\nThen I upgrade the version to 2.18.0 again; now the dataset can be loaded with such a line:\r\n```Using the latest cached version of the module from /home/xxx/.cache/huggingface/modules/datasets_modules/datasets/codeparrot--apps/04ac807715d07d6e5cc580f59cdc8213cd7dc4529d0bb819cca72c9f8e8c1aa5 (last modified on Sun Apr 7 09:06:43 2024) since it couldn't be found locally at codeparrot/apps, or remotely on the Hugging Face Hub. ```\r\n\r\nSo the latest version works wrong when requesting the dataset info. \r\n\r\n**But if you cannot reproduce this, I may ignore some detailed information: I use `HF_ENDPOINT=https://hf-mirror.com` for some reason (if not use this I cannot connect to huggingface resources) and the error occurs when requesting the dataset's info card.** \r\nMaybe the error is caused by this environment variable.\r\nI'll open an issue in the author's repo now.", "> Here is the requirements.txt from a clean virtual environment (managed by conda) where I only install `datasets` by `pip install datasets`. 
The pip list:\r\n> \r\n> ```\r\n> aiohttp==3.9.3\r\n> aiosignal==1.3.1\r\n> attrs==23.2.0\r\n> certifi==2024.2.2\r\n> charset-normalizer==3.3.2\r\n> datasets==2.18.0\r\n> dill==0.3.8\r\n> filelock==3.13.3\r\n> frozenlist==1.4.1\r\n> fsspec==2024.2.0\r\n> huggingface-hub==0.22.2\r\n> idna==3.6\r\n> multidict==6.0.5\r\n> multiprocess==0.70.16\r\n> numpy==1.26.4\r\n> packaging==24.0\r\n> pandas==2.2.1\r\n> pyarrow==15.0.2\r\n> pyarrow-hotfix==0.6\r\n> python-dateutil==2.9.0.post0\r\n> pytz==2024.1\r\n> PyYAML==6.0.1\r\n> requests==2.31.0\r\n> six==1.16.0\r\n> tqdm==4.66.2\r\n> typing_extensions==4.11.0\r\n> tzdata==2024.1\r\n> urllib3==2.2.1\r\n> xxhash==3.4.1\r\n> yarl==1.9.4\r\n> ```\r\n> \r\n> And the error can be reproduced.\r\n> \r\n> Downgrading to datasets==2.14.6 changes some packages' versions:\r\n> \r\n> ```\r\n> Successfully installed datasets-2.14.6 dill-0.3.7 fsspec-2023.10.0 multiprocess-0.70.15\r\n> ```\r\n> \r\n> and the dataset can be downloaded and loaded.\r\n> \r\n> Then I upgrade the version to 2.18.0 again; now the dataset can be loaded with such a line: `Using the latest cached version of the module from /home/xxx/.cache/huggingface/modules/datasets_modules/datasets/codeparrot--apps/04ac807715d07d6e5cc580f59cdc8213cd7dc4529d0bb819cca72c9f8e8c1aa5 (last modified on Sun Apr 7 09:06:43 2024) since it couldn't be found locally at codeparrot/apps, or remotely on the Hugging Face Hub. `\r\n> \r\n> So the latest version works wrong when requesting the dataset info.\r\n> \r\n> **But if you cannot reproduce this, I may ignore some detailed information: I use `HF_ENDPOINT=https://hf-mirror.com` for some reason (if not use this I cannot connect to huggingface resources) and the error occurs when requesting the dataset's info card.** Maybe the error is caused by this environment variable. I'll open an issue in the author's repo now.\r\n\r\nThis is useful and my same error is settled!!!" ]
2024-03-28T03:44:26Z
2024-06-19T07:06:40Z
null
NONE
null
null
### Describe the bug This happens with datasets-2.18.0; I downgraded the version to 2.14.6 fixing this temporarily. ``` Traceback (most recent call last): File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 2556, in load_dataset builder_instance = load_dataset_builder( File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 2228, in load_dataset_builder dataset_module = dataset_module_factory( File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 1879, in dataset_module_factory raise e1 from None File "/home/xxx/miniconda3/envs/py310/lib/python3.10/site-packages/datasets/load.py", line 1831, in dataset_module_factory can_load_config_from_parquet_export = "DEFAULT_CONFIG_NAME" not in f.read() File "/home/xxx/miniconda3/envs/py310/lib/python3.10/codecs.py", line 322, in decode (result, consumed) = self._buffer_decode(data, self.errors, final) UnicodeDecodeError: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte ``` ### Steps to reproduce the bug 1. Using Python3.10/3.11 2. Install datasets-2.18.0 3. test with ``` from datasets import load_dataset dataset = load_dataset("codeparrot/apps") ``` ### Expected behavior Normally it should manage to download and load the dataset without such error. ### Environment info Ubuntu, Python3.10/3.11
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6760/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6759
6,759
Persistent multi-process Pool
{ "avatar_url": "https://avatars.githubusercontent.com/u/4337024?v=4", "events_url": "https://api.github.com/users/fostiropoulos/events{/privacy}", "followers_url": "https://api.github.com/users/fostiropoulos/followers", "following_url": "https://api.github.com/users/fostiropoulos/following{/other_user}", "gists_url": "https://api.github.com/users/fostiropoulos/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fostiropoulos", "id": 4337024, "login": "fostiropoulos", "node_id": "MDQ6VXNlcjQzMzcwMjQ=", "organizations_url": "https://api.github.com/users/fostiropoulos/orgs", "received_events_url": "https://api.github.com/users/fostiropoulos/received_events", "repos_url": "https://api.github.com/users/fostiropoulos/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fostiropoulos/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fostiropoulos/subscriptions", "type": "User", "url": "https://api.github.com/users/fostiropoulos", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
[]
2024-03-26T17:35:25Z
2024-03-26T17:35:25Z
null
NONE
null
null
### Feature request Running .map and filter functions with `num_procs` consecutively instantiates several multiprocessing pools iteratively. As instantiating a Pool is very resource intensive, it can be a bottleneck when performing iterative filtering. My ideas: 1. There should be an option to declare `persistent_workers`, similar to the pytorch DataLoader. The downside is that it would be complex to determine the correct resource allocation and deallocation of the pool, i.e. the dataset can outlive the utility of the pool. 2. Provide a pool as an argument. The downside would be the expertise required by the user. The upside is that there is better resource management. ### Motivation It is really slow to iteratively perform map and filter operations on a dataset. ### Your contribution If approved I could integrate it. I would need to know which of the two options above would be most suitable to implement.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6759/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6758
6,758
Passing `sample_by` to `load_dataset` when loading text data does not work
{ "avatar_url": "https://avatars.githubusercontent.com/u/823693?v=4", "events_url": "https://api.github.com/users/ntoxeg/events{/privacy}", "followers_url": "https://api.github.com/users/ntoxeg/followers", "following_url": "https://api.github.com/users/ntoxeg/following{/other_user}", "gists_url": "https://api.github.com/users/ntoxeg/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ntoxeg", "id": 823693, "login": "ntoxeg", "node_id": "MDQ6VXNlcjgyMzY5Mw==", "organizations_url": "https://api.github.com/users/ntoxeg/orgs", "received_events_url": "https://api.github.com/users/ntoxeg/received_events", "repos_url": "https://api.github.com/users/ntoxeg/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ntoxeg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ntoxeg/subscriptions", "type": "User", "url": "https://api.github.com/users/ntoxeg", "user_view_type": "public" }
[]
closed
false
[ "Thanks for reporting! We are working on a fix." ]
2024-03-26T14:55:33Z
2024-04-09T11:27:59Z
2024-04-09T11:27:59Z
NONE
null
null
### Describe the bug I have a dataset that consists of a bunch of text files, each representing an example. There is an undocumented `sample_by` argument for the `TextConfig` class that is used by `Text` to decide whether to split files into lines, paragraphs or take them whole. Passing `sample_by="document"` to `load_dataset` results in files getting split into lines regardless. I have edited `src/datasets/packaged_modules/text/text.py` for myself to switch the default and it works fine. As a side note, the `if-else` for `sample_by` will silently load an empty dataset if someone makes a typo in the argument, which is not ideal. ### Steps to reproduce the bug 1. Prepare data as a bunch of files in a directory. 2. Load that data via `load_dataset("text", data_files=<data_dir>/<files_glob>, ..., sample_by="document")`. 3. Inspect the resultant dataset: every item should have the form of `{"text": <a line from a file>}`. ### Expected behavior `load_dataset("text", data_files=<data_dir>/<files_glob>, ..., sample_by="document")` should result in a dataset with items of the form `{"text": <one document>}`. ### Environment info - `datasets` version: 2.18.0 - Platform: Linux-5.15.0-1046-nvidia-x86_64-with-glibc2.35 - Python version: 3.11.8 - `huggingface_hub` version: 0.21.4 - PyArrow version: 15.0.2 - Pandas version: 2.2.1 - `fsspec` version: 2024.2.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6758/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6757
6,757
Test disabling transformers containers in docs CI
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin", "user_view_type": "public" }
[]
open
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6757). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "On slack it was mentioned that it was actually slower for `datasets`, should we close this one or am I missing something ?", "@lhoestq I converted to draft. Want to make some more tests and will let you know" ]
2024-03-25T17:16:11Z
2024-03-27T16:26:35Z
null
CONTRIBUTOR
true
{ "diff_url": "https://github.com/huggingface/datasets/pull/6757.diff", "html_url": "https://github.com/huggingface/datasets/pull/6757", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6757.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6757" }
Related to https://github.com/huggingface/doc-builder/pull/487 and [internal slack thread](https://huggingface.slack.com/archives/C04F8N7FQNL/p1711384899462349?thread_ts=1711041424.720769&cid=C04F8N7FQNL). There is now a `custom_container` option when building docs in CI. When set to `""` (instead of `"huggingface/transformers-doc-builder"` by default), we don't run the CI inside a container, therefore saving ~2min of download time. The plan is to test disabling the transformers container on a few "big" repos, and if everything works correctly, we will stop making it the default container. More details on https://github.com/huggingface/doc-builder/pull/487. cc @mishig25
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6757/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6756
6,756
Support SQLite files?
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
[ "You can use `Dataset.from_sql(path_to_sql_file)` already. Though we haven't added the Sql dataset builder to the `_PACKAGED_DATASETS_MODULES` list or in `_EXTENSION_TO_MODULE` to map `.sqlite` to the Sql dataset builder\r\n\r\nThis would allow to load a dataset repository with a `.sqlite` file using `load_dataset` and enable the Dataset Viewer", "Considering `Dataset.from_sql`'s (extremely) low usage, I don't think many users are interested in using this format for their datasets. Also, SQLite files are hard/impossible to stream efficiently and require custom logic to define splits/subsets, so IMO we shouldn't encourage people to use SQLite on the Hub.\r\n\r\n@severo Do you have some real-world examples of datasets published in this format?", "No. Indeed, it seems better to explicitly not support sqlite" ]
2024-03-25T11:48:05Z
2024-03-26T16:09:32Z
2024-03-26T16:09:32Z
COLLABORATOR
null
null
### Feature request Support loading a dataset from a SQLite file https://huggingface.co/datasets/severo/test_iris_sqlite/tree/main ### Motivation SQLite is a popular file format. ### Your contribution See discussion on slack: https://huggingface.slack.com/archives/C04L6P8KNQ5/p1702481859117909 (internal) In particular: a SQLite file can contain multiple tables, which might be matched to multiple configs. Maybe the detail of splits and configs should be defined in the README YAML, or use the same format as for ZIP files: `Iris.sqlite::Iris`. See dataset here: https://huggingface.co/datasets/severo/test_iris_sqlite Note: should we also support DuckDB files?
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6756/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6755
6,755
Small typo on the documentation
{ "avatar_url": "https://avatars.githubusercontent.com/u/4337024?v=4", "events_url": "https://api.github.com/users/fostiropoulos/events{/privacy}", "followers_url": "https://api.github.com/users/fostiropoulos/followers", "following_url": "https://api.github.com/users/fostiropoulos/following{/other_user}", "gists_url": "https://api.github.com/users/fostiropoulos/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fostiropoulos", "id": 4337024, "login": "fostiropoulos", "node_id": "MDQ6VXNlcjQzMzcwMjQ=", "organizations_url": "https://api.github.com/users/fostiropoulos/orgs", "received_events_url": "https://api.github.com/users/fostiropoulos/received_events", "repos_url": "https://api.github.com/users/fostiropoulos/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fostiropoulos/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fostiropoulos/subscriptions", "type": "User", "url": "https://api.github.com/users/fostiropoulos", "user_view_type": "public" }
[ { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" } ]
closed
false
[ "Thanks for reporting @fostiropoulos! I've edited your comment to fix the link to the problematic line.\r\n", "@mariosasko can i take this up?", "#self-assign" ]
2024-03-24T21:47:52Z
2024-04-02T14:01:19Z
2024-04-02T14:01:19Z
NONE
null
null
### Describe the bug There is a small typo on https://github.com/huggingface/datasets/blob/d5468836fe94e8be1ae093397dd43d4a2503b926/src/datasets/dataset_dict.py#L938 It should be `caching is enabled`. ### Steps to reproduce the bug Please visit https://github.com/huggingface/datasets/blob/d5468836fe94e8be1ae093397dd43d4a2503b926/src/datasets/dataset_dict.py#L938 ### Expected behavior `caching is enabled` ### Environment info - `datasets` version: 2.17.1 - Platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35 - Python version: 3.11.7 - `huggingface_hub` version: 0.20.3 - PyArrow version: 15.0.0 - Pandas version: 2.2.1 - `fsspec` version: 2023.10.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6755/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6754
6,754
Fix cache path to snakecase for `CachedDatasetModuleFactory` and `Cache`
{ "avatar_url": "https://avatars.githubusercontent.com/u/26690193?v=4", "events_url": "https://api.github.com/users/izhx/events{/privacy}", "followers_url": "https://api.github.com/users/izhx/followers", "following_url": "https://api.github.com/users/izhx/following{/other_user}", "gists_url": "https://api.github.com/users/izhx/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/izhx", "id": 26690193, "login": "izhx", "node_id": "MDQ6VXNlcjI2NjkwMTkz", "organizations_url": "https://api.github.com/users/izhx/orgs", "received_events_url": "https://api.github.com/users/izhx/received_events", "repos_url": "https://api.github.com/users/izhx/repos", "site_admin": false, "starred_url": "https://api.github.com/users/izhx/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/izhx/subscriptions", "type": "User", "url": "https://api.github.com/users/izhx", "user_view_type": "public" }
[]
closed
false
[ "@lhoestq hi πŸ˜ƒ, is there something else I need to do to check this change?", "I added two tests and passed them on my server.\r\n\r\n```\r\npytest tests/packaged_modules/test_cache.py \r\n========================================================================== test session starts ==========================================================================\r\nplatform linux -- Python 3.11.5, pytest-8.1.1, pluggy-1.4.0\r\nrootdir: /mnt/nas/datasets\r\nconfigfile: pyproject.toml\r\nplugins: xdist-3.5.0, datadir-1.5.0\r\ncollected 8 items \r\n\r\ntests/packaged_modules/test_cache.py ........ [100%]\r\n\r\n========================================================================== 8 passed in 50.71s ===========================================================================\r\n\r\n```\r\n\r\n```\r\npytest tests/test_load.py\r\n========================================================================== test session starts ==========================================================================\r\nplatform linux -- Python 3.11.5, pytest-8.1.1, pluggy-1.4.0\r\nrootdir: /mnt/nas/datasets\r\nconfigfile: pyproject.toml\r\nplugins: xdist-3.5.0, datadir-1.5.0\r\ncollected 151 items \r\n\r\ntests/test_load.py .............................................................................................................................................. [ 94%]\r\n......... [100%]\r\n\r\n...\r\n\r\n============================================================= 151 passed, 29 warnings in 578.36s (0:09:38) ==============================================================\r\n```\r\n", "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6754). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Hi @izhx! I have also faced this issue, happy to see it already addressed, looking forward for PR merge :)", "@lhoestq What do you think of these tests? 
πŸ˜€", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005060 / 0.011353 (-0.006293) | 0.003251 / 0.011008 (-0.007757) | 0.063538 / 0.038508 (0.025030) | 0.031178 / 0.023109 (0.008069) | 0.249971 / 0.275898 (-0.025927) | 0.284828 / 0.323480 (-0.038652) | 0.004183 / 0.007986 (-0.003802) | 0.002656 / 0.004328 (-0.001673) | 0.049585 / 0.004250 (0.045335) | 0.042656 / 0.037052 (0.005604) | 0.270962 / 0.258489 (0.012473) | 0.296091 / 0.293841 (0.002250) | 0.028065 / 0.128546 (-0.100482) | 0.010545 / 0.075646 (-0.065102) | 0.207323 / 0.419271 (-0.211948) | 0.035977 / 0.043533 (-0.007556) | 0.257315 / 0.255139 (0.002176) | 0.272238 / 0.283200 (-0.010962) | 0.017984 / 0.141683 (-0.123699) | 1.131314 / 1.452155 (-0.320840) | 1.180259 / 1.492716 (-0.312457) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090977 / 0.018006 (0.072971) | 0.284021 / 0.000490 (0.283531) | 0.000264 / 0.000200 (0.000065) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017852 / 0.037411 (-0.019559) | 0.061288 / 0.014526 (0.046762) | 0.073844 / 0.176557 (-0.102713) | 0.121371 / 0.737135 (-0.615764) | 0.075036 / 0.296338 (-0.221303) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.287599 / 0.215209 (0.072390) | 2.821172 / 2.077655 (0.743517) | 1.459904 / 1.504120 (-0.044216) | 1.340224 / 1.541195 (-0.200970) | 
1.357350 / 1.468490 (-0.111140) | 0.557344 / 4.584777 (-4.027433) | 2.412177 / 3.745712 (-1.333535) | 2.745126 / 5.269862 (-2.524735) | 1.754600 / 4.565676 (-2.811077) | 0.062487 / 0.424275 (-0.361788) | 0.005306 / 0.007607 (-0.002301) | 0.338856 / 0.226044 (0.112811) | 3.354953 / 2.268929 (1.086024) | 1.803208 / 55.444624 (-53.641417) | 1.553051 / 6.876477 (-5.323426) | 1.554790 / 2.142072 (-0.587282) | 0.651380 / 4.805227 (-4.153847) | 0.117777 / 6.500664 (-6.382887) | 0.041992 / 0.075469 (-0.033477) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.977588 / 1.841788 (-0.864200) | 11.363058 / 8.074308 (3.288750) | 9.791770 / 10.191392 (-0.399622) | 0.130708 / 0.680424 (-0.549716) | 0.013798 / 0.534201 (-0.520403) | 0.288313 / 0.579283 (-0.290970) | 0.268170 / 0.434364 (-0.166194) | 0.324815 / 0.540337 (-0.215522) | 0.419260 / 1.386936 (-0.967676) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005187 / 0.011353 (-0.006166) | 0.003348 / 0.011008 (-0.007660) | 0.050309 / 0.038508 (0.011801) | 0.031334 / 0.023109 (0.008225) | 0.279542 / 0.275898 (0.003644) | 0.299608 / 0.323480 (-0.023872) | 0.004202 / 0.007986 (-0.003784) | 0.002735 / 0.004328 (-0.001593) | 0.050321 / 0.004250 (0.046070) | 0.039793 / 0.037052 (0.002740) | 0.289972 / 0.258489 (0.031483) | 0.313887 / 0.293841 (0.020046) | 0.028797 / 0.128546 (-0.099750) | 0.010166 / 0.075646 (-0.065480) | 0.059228 / 0.419271 (-0.360044) | 0.032667 / 0.043533 (-0.010866) | 0.278409 / 0.255139 (0.023270) | 0.292208 / 0.283200 (0.009008) | 0.017577 / 0.141683 (-0.124106) | 1.175046 / 1.452155 (-0.277109) | 1.200766 / 1.492716 (-0.291950) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092236 / 0.018006 (0.074230) | 0.298860 / 0.000490 (0.298370) | 0.000211 / 0.000200 (0.000011) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort 
| train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021475 / 0.037411 (-0.015936) | 0.074414 / 0.014526 (0.059888) | 0.087746 / 0.176557 (-0.088811) | 0.124757 / 0.737135 (-0.612378) | 0.088513 / 0.296338 (-0.207826) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.296583 / 0.215209 (0.081374) | 2.894978 / 2.077655 (0.817323) | 1.590806 / 1.504120 (0.086686) | 1.463251 / 1.541195 (-0.077944) | 1.478751 / 1.468490 (0.010261) | 0.571724 / 4.584777 (-4.013053) | 2.454356 / 3.745712 (-1.291356) | 2.789275 / 5.269862 (-2.480586) | 1.753866 / 4.565676 (-2.811811) | 0.064787 / 0.424275 (-0.359488) | 0.005321 / 0.007607 (-0.002287) | 0.348454 / 0.226044 (0.122410) | 3.453052 / 2.268929 (1.184124) | 1.972237 / 55.444624 (-53.472388) | 1.677822 / 6.876477 (-5.198655) | 1.674750 / 2.142072 (-0.467322) | 0.649353 / 4.805227 (-4.155874) | 0.117135 / 6.500664 (-6.383529) | 0.040018 / 0.075469 (-0.035451) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.029812 / 1.841788 (-0.811976) | 11.945063 / 8.074308 (3.870755) | 10.238380 / 10.191392 (0.046988) | 0.146225 / 0.680424 (-0.534199) | 0.015262 / 0.534201 (-0.518939) | 0.286632 / 0.579283 (-0.292651) | 0.272952 / 0.434364 (-0.161412) | 0.323098 / 0.540337 (-0.217239) | 0.423549 / 1.386936 (-0.963387) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#91b07b90915d7f7313d44ca3ff67673b9ad26bf4 \"CML watermark\")\n" ]
2024-03-24T06:59:15Z
2024-04-15T15:45:44Z
2024-04-15T15:38:51Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6754.diff", "html_url": "https://github.com/huggingface/datasets/pull/6754", "merged_at": "2024-04-15T15:38:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/6754.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6754" }
Fix https://github.com/huggingface/datasets/issues/6750#issuecomment-2016678729 I didn't find a guideline on how to run the tests, so I just ran the following steps to make sure that this bug is fixed. 1. `python test.py`, 2. then `HF_DATASETS_OFFLINE=1 python test.py` The `test.py` is ``` import datasets datasets.utils.logging.set_verbosity_info() ds = datasets.load_dataset('izhx/STS17-debug') print(ds) ds = datasets.load_dataset('C-MTEB/AFQMC', revision='b44c3b011063adb25877c13823db83bb193913c4') print(ds) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6754/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6753
6,753
Type error when importing datasets on Kaggle
{ "avatar_url": "https://avatars.githubusercontent.com/u/18300717?v=4", "events_url": "https://api.github.com/users/jtv199/events{/privacy}", "followers_url": "https://api.github.com/users/jtv199/followers", "following_url": "https://api.github.com/users/jtv199/following{/other_user}", "gists_url": "https://api.github.com/users/jtv199/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jtv199", "id": 18300717, "login": "jtv199", "node_id": "MDQ6VXNlcjE4MzAwNzE3", "organizations_url": "https://api.github.com/users/jtv199/orgs", "received_events_url": "https://api.github.com/users/jtv199/received_events", "repos_url": "https://api.github.com/users/jtv199/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jtv199/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jtv199/subscriptions", "type": "User", "url": "https://api.github.com/users/jtv199", "user_view_type": "public" }
[]
closed
false
[ "I have the same problem \r\nIt seems that it only appears when you are using GPU \r\nIt seems to work fine with the 2.17 version though", "Same here.", "> I have the same problem\r\n> It seems that it only appears when you are using GPU\r\n> It seems to work fine with the 2.17 version though\r\n\r\nI downgraded from 2.18 to 2.17, and it works with CPU/GPU .. except now pyarrow complains\r\n\r\n```\r\n...\r\nFile /opt/conda/lib/python3.10/site-packages/pyarrow/array.pxi:830, in pyarrow.lib._PandasConvertible.to_pandas()\r\n\r\nFile /opt/conda/lib/python3.10/site-packages/pyarrow/table.pxi:3989, in pyarrow.lib.Table._to_pandas()\r\n\r\nImportError: cannot import name table_to_blockmanager\r\n```\r\n\r\nsee also https://www.kaggle.com/competitions/pii-detection-removal-from-educational-data/discussion/487474#2722594", "Solved for me by downgrading `!pip install -U datasets==2.16.0` Works with gpu aswell", "I think you should remain open this issue. It works at the previous version but not the latter versions. It is possible as a bug that the maintainer could take note for.", "> Solved for me by downgrading `!pip install -U datasets==2.16.0` Works with gpu as well\r\n\r\nVerified it's working w/ GPU if I make these 3 updates.\r\n\r\n```\r\ndatasets==2.16.0\r\nfsspec==2023.10.0\r\ngcsfs==2023.10.0\r\n```\r\n\r\nbut the issue shouldn't be closed, this is just a workaround until they get the issue with 2.18.0 resolved.\r\n\r\nSee also: https://www.kaggle.com/competitions/pii-detection-removal-from-educational-data/discussion/487474", "> > Solved for me by downgrading `!pip install -U datasets==2.16.0` Works with gpu as well\r\n> \r\n> Verified it's working w/ GPU if I make these 3 updates.\r\n> \r\n> ```\r\n> datasets==2.16.0\r\n> fsspec==2023.10.0\r\n> gcsfs==2023.10.0\r\n> ```\r\n> \r\n> but the issue shouldn't be closed, this is just a workaround until they get the issue with 2.18.0 resolved.\r\n> \r\n> See also: https://www.kaggle.com/competitions/pii-detection-removal-from-educational-data/discussion/487474\r\n\r\nThis also works for me, thanks", "I am seeing similar error but with pandas while using kaggle kernel. \r\n`---> 38 PANDAS_VERSION = version.parse(importlib.metadata.version(\"pandas\"))\r\nTypeError: expected string or bytes-like object\r\n\r\n`" ]
2024-03-24T03:01:30Z
2024-10-02T11:49:35Z
2024-03-30T00:23:49Z
NONE
null
null
### Describe the bug When trying to run ``` import datasets print(datasets.__version__) ``` It generates the following error ``` TypeError: expected string or bytes-like object ``` It looks like It cannot find the valid versions of `fsspec` though fsspec version is fine when I checked Via command ``` import fsspec print(fsspec.__version__) ​ # output: 2024.3.1 ``` Detailed crash report ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In[1], line 1 ----> 1 import datasets 2 print(datasets.__version__) File /opt/conda/lib/python3.10/site-packages/datasets/__init__.py:18 1 # ruff: noqa 2 # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. 3 # (...) 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 16 __version__ = "2.18.0" ---> 18 from .arrow_dataset import Dataset 19 from .arrow_reader import ReadInstruction 20 from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:66 63 from multiprocess import Pool 64 from tqdm.contrib.concurrent import thread_map ---> 66 from . import config 67 from .arrow_reader import ArrowReader 68 from .arrow_writer import ArrowWriter, OptimizedTypedSequence File /opt/conda/lib/python3.10/site-packages/datasets/config.py:41 39 # Imports 40 DILL_VERSION = version.parse(importlib.metadata.version("dill")) ---> 41 FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec")) 42 PANDAS_VERSION = version.parse(importlib.metadata.version("pandas")) 43 PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow")) File /opt/conda/lib/python3.10/site-packages/packaging/version.py:49, in parse(version) 43 """ 44 Parse the given version string and return either a :class:`Version` object 45 or a :class:`LegacyVersion` object depending on if the given version is 46 a valid PEP 440 version or a legacy version. 47 """ 48 try: ---> 49 return Version(version) 50 except InvalidVersion: 51 return LegacyVersion(version) File /opt/conda/lib/python3.10/site-packages/packaging/version.py:264, in Version.__init__(self, version) 261 def __init__(self, version: str) -> None: 262 263 # Validate the version and parse it into pieces --> 264 match = self._regex.search(version) 265 if not match: 266 raise InvalidVersion(f"Invalid version: '{version}'") TypeError: expected string or bytes-like object ``` ### Steps to reproduce the bug 1. run `!pip install -U datasets` on kaggle 2. check datasets is installed via ``` import datasets print(datasets.__version__) ``` ### Expected behavior Expected to print datasets version, like `2.18.0` ### Environment info Running on Kaggle, latest enviornment , here is the notebook https://www.kaggle.com/code/jtv199/mistrial-7b-part2
{ "avatar_url": "https://avatars.githubusercontent.com/u/18300717?v=4", "events_url": "https://api.github.com/users/jtv199/events{/privacy}", "followers_url": "https://api.github.com/users/jtv199/followers", "following_url": "https://api.github.com/users/jtv199/following{/other_user}", "gists_url": "https://api.github.com/users/jtv199/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jtv199", "id": 18300717, "login": "jtv199", "node_id": "MDQ6VXNlcjE4MzAwNzE3", "organizations_url": "https://api.github.com/users/jtv199/orgs", "received_events_url": "https://api.github.com/users/jtv199/received_events", "repos_url": "https://api.github.com/users/jtv199/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jtv199/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jtv199/subscriptions", "type": "User", "url": "https://api.github.com/users/jtv199", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6753/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6752
6,752
Precision being changed from float16 to float32 unexpectedly
{ "avatar_url": "https://avatars.githubusercontent.com/u/21228908?v=4", "events_url": "https://api.github.com/users/gcervantes8/events{/privacy}", "followers_url": "https://api.github.com/users/gcervantes8/followers", "following_url": "https://api.github.com/users/gcervantes8/following{/other_user}", "gists_url": "https://api.github.com/users/gcervantes8/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gcervantes8", "id": 21228908, "login": "gcervantes8", "node_id": "MDQ6VXNlcjIxMjI4OTA4", "organizations_url": "https://api.github.com/users/gcervantes8/orgs", "received_events_url": "https://api.github.com/users/gcervantes8/received_events", "repos_url": "https://api.github.com/users/gcervantes8/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gcervantes8/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gcervantes8/subscriptions", "type": "User", "url": "https://api.github.com/users/gcervantes8", "user_view_type": "public" }
[]
open
false
[ "This is because of the formatter (`torch` in this case).\r\nIt defaults to `float32`.\r\n\r\nYou can load it in `float16` using `dataset.set_format(\"torch\", dtype=torch.float16)`.", "I am having a similar issue when _building_ a dataset given `float16`-valued data.\n\n```py\nfrom datasets import Features, Value, Dataset\n\nd = Dataset.from_dict({\"data\": np.ones(4, dtype=\"float16\")}, features=Features({\"data\": Value(\"float16\")}))\ntype(d[\"data\"][0]) # returns <class 'float'>\n```\n\nNote that I specifically indicate that my data is `float16`-valued in the `features` argument above" ]
2024-03-23T20:53:56Z
2025-11-04T22:40:01Z
null
NONE
null
null
### Describe the bug I'm loading a HuggingFace Dataset for images. I'm running a preprocessing (map operation) step that runs a few operations, one of them being conversion to float16. The Dataset features also say that the 'img' is of type float16. Whenever I take an image from that HuggingFace Dataset instance, the type turns out to be float32. ### Steps to reproduce the bug ```python import torchvision.transforms.v2 as transforms from datasets import load_dataset dataset = load_dataset('cifar10', split='test') dataset = dataset.with_format("torch") data_transform = transforms.Compose([transforms.Resize((32, 32)), transforms.ToDtype(torch.float16, scale=True), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), ]) def _preprocess(examples): # Permutes from (BS x H x W x C) to (BS x C x H x W) images = torch.permute(examples['img'], (0, 3, 2, 1)) examples['img'] = data_transform(images) return examples dataset = dataset.map(_preprocess, batched=True, batch_size=8) ``` Now at this point the dataset.features are showing float16 which is great because that's what I want. ```python print(data_loader.features['img']) Sequence(feature=Sequence(feature=Sequence(feature=Value(dtype='float16', id=None), length=-1, id=None), length=-1, id=None), length=-1, id=None) ``` But when I try to sample an image from this dataloader; I'm getting a float32 image, when I'm expecting float16: ```python print(next(iter(data_loader))['img'].dtype) torch.float32 ``` ### Expected behavior I'm expecting the images loaded after the transformation to stay in float16. ### Environment info - `datasets` version: 2.18.0 - Platform: Linux-5.15.146.1-microsoft-standard-WSL2-x86_64-with-glibc2.31 - Python version: 3.10.9 - `huggingface_hub` version: 0.21.4 - PyArrow version: 14.0.2 - Pandas version: 2.0.3 - `fsspec` version: 2023.10.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6752/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6751
6,751
Use 'with' operator for some download functions
{ "avatar_url": "https://avatars.githubusercontent.com/u/31669?v=4", "events_url": "https://api.github.com/users/Moisan/events{/privacy}", "followers_url": "https://api.github.com/users/Moisan/followers", "following_url": "https://api.github.com/users/Moisan/following{/other_user}", "gists_url": "https://api.github.com/users/Moisan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Moisan", "id": 31669, "login": "Moisan", "node_id": "MDQ6VXNlcjMxNjY5", "organizations_url": "https://api.github.com/users/Moisan/orgs", "received_events_url": "https://api.github.com/users/Moisan/received_events", "repos_url": "https://api.github.com/users/Moisan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Moisan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Moisan/subscriptions", "type": "User", "url": "https://api.github.com/users/Moisan", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6751). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "I was mistaken on the intent of those functions, closing the PR." ]
2024-03-23T16:32:08Z
2024-03-26T00:40:57Z
2024-03-26T00:40:57Z
NONE
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6751.diff", "html_url": "https://github.com/huggingface/datasets/pull/6751", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6751.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6751" }
Some functions in `streaming_download_manager.py` do not close the file they open, which leads to `Unclosed file` warnings in our code. This fixes a few of them.
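Editor's illustration, not the PR's actual diff: the general pattern being applied is to open files in a `with` block so they are closed deterministically instead of relying on garbage collection. The function and path below are made up for the example.

```python
# Minimal sketch of the `with` pattern; names here are illustrative only.
def read_first_line(path: str) -> str:
    with open(path, "rt", encoding="utf-8") as f:  # handle is closed on exit
        return f.readline()

print(read_first_line("README.md"))  # assumes such a file exists
```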
{ "avatar_url": "https://avatars.githubusercontent.com/u/31669?v=4", "events_url": "https://api.github.com/users/Moisan/events{/privacy}", "followers_url": "https://api.github.com/users/Moisan/followers", "following_url": "https://api.github.com/users/Moisan/following{/other_user}", "gists_url": "https://api.github.com/users/Moisan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Moisan", "id": 31669, "login": "Moisan", "node_id": "MDQ6VXNlcjMxNjY5", "organizations_url": "https://api.github.com/users/Moisan/orgs", "received_events_url": "https://api.github.com/users/Moisan/received_events", "repos_url": "https://api.github.com/users/Moisan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Moisan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Moisan/subscriptions", "type": "User", "url": "https://api.github.com/users/Moisan", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6751/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6750
6,750
`load_dataset` requires a network connection for local download?
{ "avatar_url": "https://avatars.githubusercontent.com/u/6306695?v=4", "events_url": "https://api.github.com/users/MiroFurtado/events{/privacy}", "followers_url": "https://api.github.com/users/MiroFurtado/followers", "following_url": "https://api.github.com/users/MiroFurtado/following{/other_user}", "gists_url": "https://api.github.com/users/MiroFurtado/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MiroFurtado", "id": 6306695, "login": "MiroFurtado", "node_id": "MDQ6VXNlcjYzMDY2OTU=", "organizations_url": "https://api.github.com/users/MiroFurtado/orgs", "received_events_url": "https://api.github.com/users/MiroFurtado/received_events", "repos_url": "https://api.github.com/users/MiroFurtado/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MiroFurtado/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MiroFurtado/subscriptions", "type": "User", "url": "https://api.github.com/users/MiroFurtado", "user_view_type": "public" }
[]
closed
false
[ "Are you using `HF_DATASETS_OFFLINE=1` ?", "> Are you using `HF_DATASETS_OFFLINE=1` ?\r\n\r\nThis doesn't work for me. `datasets=2.18.0`\r\n\r\n`test.py`:\r\n```\r\nimport datasets\r\n\r\ndatasets.utils.logging.set_verbosity_info()\r\n\r\nds = datasets.load_dataset('C-MTEB/AFQMC', revision='b44c3b011063adb25877c13823db83bb193913c4')\r\n\r\nprint(ds)\r\n```\r\n\r\nrun `python test.py`\r\n```\r\nGenerating dataset afqmc (/home/data/.cache/huggingface/datasets/C-MTEB___afqmc/default/0.0.0/b44c3b011063adb25877c13823db83bb193913c4)\r\nDownloading and preparing dataset afqmc/default to /home/data/.cache/huggingface/datasets/C-MTEB___afqmc/default/0.0.0/b44c3b011063adb25877c13823db83bb193913c4...\r\nDataset not on Hf google storage. Downloading and preparing it from source\r\nhf://datasets/C-MTEB/AFQMC@b44c3b011063adb25877c13823db83bb193913c4/data/validation-00000-of-00001-b8fc393b5ddedac7.parquet not found in cache or force_download set to True, downloading to /home/data/.cache/huggingface/datasets/downloads/78949f93104662359f4f3d5a2f7ec1ae37af5a5af44420a51212ea08c0be966b.incomplete\r\nDownloading data: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 240k/240k [00:01<00:00, 178kB/s]\r\nstoring hf://datasets/C-MTEB/AFQMC@b44c3b011063adb25877c13823db83bb193913c4/data/validation-00000-of-00001-b8fc393b5ddedac7.parquet in cache at /home/data/.cache/huggingface/datasets/downloads/78949f93104662359f4f3d5a2f7ec1ae37af5a5af44420a51212ea08c0be966b\r\ncreating metadata file for /home/data/.cache/huggingface/datasets/downloads/78949f93104662359f4f3d5a2f7ec1ae37af5a5af44420a51212ea08c0be966b\r\nDownloading took 0.0 min\r\nChecksum Computation took 0.0 min\r\nGenerating test split\r\nGenerating test split: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 3861/3861 [00:00<00:00, 3972.00 examples/s]\r\nGenerating train split\r\nGenerating train split: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 34334/34334 [00:00<00:00, 34355.50 examples/s]\r\nGenerating validation split\r\nGenerating validation split: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4316/4316 [00:00<00:00, 4477.00 examples/s]\r\nAll the splits matched successfully.\r\nDataset afqmc downloaded and prepared to 
/home/data/.cache/huggingface/datasets/C-MTEB___afqmc/default/0.0.0/b44c3b011063adb25877c13823db83bb193913c4. Subsequent calls will reuse this data.\r\nDatasetDict({\r\n test: Dataset({\r\n features: ['sentence1', 'sentence2', 'score', 'idx'],\r\n num_rows: 3861\r\n })\r\n train: Dataset({\r\n features: ['sentence1', 'sentence2', 'score', 'idx'],\r\n num_rows: 34334\r\n })\r\n validation: Dataset({\r\n features: ['sentence1', 'sentence2', 'score', 'idx'],\r\n num_rows: 4316\r\n })\r\n})\r\n```\r\n\r\nThen run `HF_DATASETS_OFFLINE=1 python test.py`\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 9, in <module>\r\n ds = datasets.load_dataset('C-MTEB/AFQMC', revision='b44c3b011063adb25877c13823db83bb193913c4')\r\n File \"/dev/shm/tmp_env/lib/python3.10/site-packages/datasets/load.py\", line 2556, in load_dataset\r\n builder_instance = load_dataset_builder(\r\n File \"/dev/shm/tmp_env/lib/python3.10/site-packages/datasets/load.py\", line 2228, in load_dataset_builder\r\n dataset_module = dataset_module_factory(\r\n File \"/dev/shm/tmp_env/lib/python3.10/site-packages/datasets/load.py\", line 1871, in dataset_module_factory\r\n raise ConnectionError(f\"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}\") from None\r\nConnectionError: Couldn't reach the Hugging Face Hub for dataset 'C-MTEB/AFQMC': Offline mode is enabled.\r\n```\r\n\r\n", "I was having similar inexplicable issues.\r\n\r\nDoing this I *think* helped, but, `datasets` still *clearly* does not want to respect the cache:\r\n\r\n```python\r\npip install --upgrade datasets # now it is 2.18.0\r\nHF_DATASETS_OFFLINE=\"1\" python blah.py\r\n```\r\n\r\nOr similarly, I must spacify that env var to resuse the cache, IE, no arg to `load_dataset` helps it reuse the cache:\r\n\r\n```python\r\n\r\nimport os\r\nos.environ[\"HF_DATASETS_OFFLINE\"] = \"1\"\r\n\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\nimport datasets\r\n# >>> datasets.__version__\r\n# '2.18.0'\r\n\r\ndatasets.utils.logging.set_verbosity_info()\r\ndata = datasets.load_dataset(\"c-s-ale/dolly-15k-instruction-alpaca-format\")\r\n```" ]
2024-03-23T01:06:32Z
2024-04-15T15:38:52Z
2024-04-15T15:38:52Z
NONE
null
null
### Describe the bug

Hi all - I see that in the past a network dependency has been mistakenly introduced into `load_dataset` even for local loads. Is it possible this has happened again?

### Steps to reproduce the bug

```
>>> import datasets
>>> datasets.load_dataset("hh-rlhf")
Repo card metadata block was not found. Setting CardData to empty.
*hangs bc i'm firewalled*
```

stack trace from ctrl-c:

```
^CTraceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/load.py", line 2582, in load_dataset
    builder_instance.download_and_prepare(
    output_path = get_from_cache(
  File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/utils/file_utils.py", line 532, in get_from_cache
    response = http_head(
  File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/utils/file_utils.py", line 419, in http_head
    response = _request_with_retry(
  File "/home/jobuser/.local/lib/python3.10/site-packages/datasets/utils/file_utils.py", line 304, in _request_with_retry
    response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/api.py", line 59, in request
    return session.request(method=method, url=url, **kwargs)
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/sessions.py", line 587, in request
    resp = self.send(prep, **send_kwargs)
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/sessions.py", line 701, in send
    r = adapter.send(request, **kwargs)
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/requests/adapters.py", line 487, in send
    resp = conn.urlopen(
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connectionpool.py", line 703, in urlopen
    httplib_response = self._make_request(
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connectionpool.py", line 386, in _make_request
    self._validate_conn(conn)
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connectionpool.py", line 1042, in _validate_conn
    conn.connect()
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connection.py", line 363, in connect
    self.sock = conn = self._new_conn()
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/connection.py", line 174, in _new_conn
    conn = connection.create_connection(
  File "/home/jobuser/build/lipy-flytekit-image/environments/satellites/python/lib/python3.10/site-packages/urllib3/util/connection.py", line 85, in create_connection
    sock.connect(sa)
KeyboardInterrupt
```

### Expected behavior

loads the dataset

### Environment info

```
> pip show datasets
Name: datasets
Version: 2.18.0
```

Python 3.10.2
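Editor's note - a hedged sketch of forcing reuse of the local cache without network access. `HF_DATASETS_OFFLINE` and `HF_HUB_OFFLINE` are real environment variables, but whether they resolve this particular hang depends on what is already cached; the dataset id below is only an example.

```python
# Hedged sketch: set the offline switches before importing datasets so a
# previously cached dataset is reused without touching the network.
import os

os.environ["HF_DATASETS_OFFLINE"] = "1"
os.environ["HF_HUB_OFFLINE"] = "1"

import datasets  # noqa: E402  (imported after setting the env vars)

# Example id only; it must already be present in the local cache.
ds = datasets.load_dataset("Anthropic/hh-rlhf")
print(ds)
```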
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6750/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6749
6,749
Fix fsspec tqdm callback
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6749). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005017 / 0.011353 (-0.006336) | 0.002958 / 0.011008 (-0.008050) | 0.063455 / 0.038508 (0.024946) | 0.028206 / 0.023109 (0.005096) | 0.230884 / 0.275898 (-0.045014) | 0.252688 / 0.323480 (-0.070792) | 0.002995 / 0.007986 (-0.004991) | 0.002613 / 0.004328 (-0.001716) | 0.046477 / 0.004250 (0.042226) | 0.040662 / 0.037052 (0.003609) | 0.241824 / 0.258489 (-0.016665) | 0.269063 / 0.293841 (-0.024778) | 0.027336 / 0.128546 (-0.101210) | 0.010614 / 0.075646 (-0.065032) | 0.216087 / 0.419271 (-0.203184) | 0.035667 / 0.043533 (-0.007866) | 0.238657 / 0.255139 (-0.016482) | 0.253433 / 0.283200 (-0.029767) | 0.017433 / 0.141683 (-0.124250) | 1.120856 / 1.452155 (-0.331299) | 1.157415 / 1.492716 (-0.335302) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.088028 / 0.018006 (0.070022) | 0.277368 / 0.000490 (0.276878) | 0.000204 / 0.000200 (0.000004) | 0.000049 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017956 / 0.037411 (-0.019455) | 0.061061 / 0.014526 (0.046535) | 0.073323 / 0.176557 (-0.103234) | 0.119254 / 0.737135 (-0.617881) | 0.074308 / 0.296338 (-0.222031) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285118 / 0.215209 (0.069908) | 2.785796 / 2.077655 (0.708142) | 1.476436 / 1.504120 (-0.027684) | 1.356505 / 1.541195 (-0.184690) | 1.362505 / 1.468490 (-0.105985) | 0.554064 / 4.584777 (-4.030713) | 2.395774 / 3.745712 (-1.349938) | 2.713703 / 5.269862 (-2.556159) | 1.701020 / 4.565676 (-2.864657) | 0.062370 / 0.424275 (-0.361905) | 0.004944 / 0.007607 (-0.002663) | 0.327948 / 0.226044 (0.101904) | 3.243739 / 2.268929 (0.974811) | 1.803881 / 55.444624 (-53.640743) | 1.551635 / 6.876477 (-5.324841) | 1.560627 / 2.142072 (-0.581446) | 0.628187 / 4.805227 (-4.177040) | 0.115824 / 6.500664 (-6.384840) | 0.041655 / 0.075469 (-0.033814) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.968797 / 1.841788 (-0.872991) | 11.220905 / 8.074308 (3.146597) | 9.322584 / 10.191392 (-0.868808) | 0.139629 / 0.680424 (-0.540795) | 0.013823 / 0.534201 (-0.520378) | 0.286700 / 0.579283 (-0.292583) | 0.263517 / 0.434364 (-0.170847) | 0.341264 / 0.540337 (-0.199074) | 0.418834 / 1.386936 (-0.968102) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005404 / 0.011353 (-0.005949) | 0.003630 / 0.011008 (-0.007378) | 0.048977 / 0.038508 (0.010469) | 0.029980 / 0.023109 (0.006871) | 0.274671 / 0.275898 (-0.001227) | 0.295671 / 0.323480 (-0.027808) | 0.004230 / 0.007986 (-0.003756) | 0.002656 / 0.004328 (-0.001672) | 0.048603 / 0.004250 (0.044353) | 0.044323 / 0.037052 (0.007271) | 0.286499 / 0.258489 (0.028010) | 0.313199 / 0.293841 (0.019358) | 0.030079 / 0.128546 (-0.098468) | 0.010480 / 0.075646 (-0.065166) | 0.058226 / 0.419271 (-0.361045) | 0.054920 / 0.043533 (0.011387) | 0.274921 / 0.255139 (0.019783) | 0.296559 / 0.283200 (0.013360) | 0.019164 / 0.141683 (-0.122519) | 1.154703 / 1.452155 (-0.297452) | 1.207015 / 1.492716 (-0.285701) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.089368 / 0.018006 (0.071362) | 0.301196 / 0.000490 (0.300706) | 0.000208 / 0.000200 (0.000008) | 0.000047 / 0.000054 (-0.000008) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021355 / 0.037411 (-0.016056) | 0.074688 / 0.014526 (0.060162) | 0.085840 / 0.176557 (-0.090716) | 0.125784 / 0.737135 (-0.611351) | 0.087103 / 0.296338 (-0.209235) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.296727 / 0.215209 (0.081518) | 2.884922 / 2.077655 (0.807267) | 1.586515 / 1.504120 (0.082395) | 1.474417 / 1.541195 (-0.066777) | 1.492105 / 1.468490 (0.023615) | 0.570016 / 4.584777 (-4.014761) | 2.435760 / 3.745712 (-1.309952) | 2.657999 / 5.269862 (-2.611863) | 1.740160 / 4.565676 (-2.825516) | 0.063743 / 0.424275 (-0.360532) | 0.005048 / 0.007607 (-0.002559) | 0.341279 / 0.226044 (0.115235) | 3.396185 / 2.268929 (1.127256) | 1.952825 / 55.444624 (-53.491800) | 1.676669 / 6.876477 (-5.199808) | 1.773158 / 2.142072 (-0.368915) | 0.650664 / 4.805227 (-4.154563) | 0.116815 / 6.500664 (-6.383849) | 0.040813 / 0.075469 (-0.034656) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.999836 / 1.841788 (-0.841952) | 11.854540 / 8.074308 (3.780232) | 10.245516 / 10.191392 (0.054124) | 0.141235 / 0.680424 (-0.539189) | 0.015562 / 0.534201 (-0.518639) | 0.287556 / 0.579283 (-0.291727) | 0.274946 / 0.434364 (-0.159418) | 0.324652 / 0.540337 (-0.215685) | 0.449204 / 1.386936 (-0.937733) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#ed2b406d045349dad16738985c947fe743260710 \"CML watermark\")\n" ]
2024-03-22T11:44:11Z
2024-03-22T14:51:45Z
2024-03-22T14:45:39Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6749.diff", "html_url": "https://github.com/huggingface/datasets/pull/6749", "merged_at": "2024-03-22T14:45:39Z", "patch_url": "https://github.com/huggingface/datasets/pull/6749.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6749" }
Following changes at https://github.com/fsspec/filesystem_spec/pull/1497 for `fsspec>=2024.2.0`
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6749/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6748
6,748
Strange slicing behavior
{ "avatar_url": "https://avatars.githubusercontent.com/u/20135317?v=4", "events_url": "https://api.github.com/users/Luciennnnnnn/events{/privacy}", "followers_url": "https://api.github.com/users/Luciennnnnnn/followers", "following_url": "https://api.github.com/users/Luciennnnnnn/following{/other_user}", "gists_url": "https://api.github.com/users/Luciennnnnnn/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Luciennnnnnn", "id": 20135317, "login": "Luciennnnnnn", "node_id": "MDQ6VXNlcjIwMTM1MzE3", "organizations_url": "https://api.github.com/users/Luciennnnnnn/orgs", "received_events_url": "https://api.github.com/users/Luciennnnnnn/received_events", "repos_url": "https://api.github.com/users/Luciennnnnnn/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Luciennnnnnn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Luciennnnnnn/subscriptions", "type": "User", "url": "https://api.github.com/users/Luciennnnnnn", "user_view_type": "public" }
[]
open
false
[ "As explained in the [docs](https://huggingface.co/docs/datasets/v2.18.0/en/access#slicing), slicing a `Dataset` returns a dictionary that maps its column names to their values. So, `len(dataset[:300])=2` is expected, assuming your dataset has 2 columns (the returned dict has 2 keys, but each value in the dict has 300 items).\r\n` " ]
2024-03-22T01:49:13Z
2024-03-22T16:43:57Z
null
NONE
null
null
### Describe the bug

I have loaded a dataset and then sliced the first 300 samples using the `:` operator; however, the result is not what I expected, as the output below shows:

```bash
len(dataset)=1050324
len(dataset[:300])=2
len(dataset[0:300])=2
len(dataset.select(range(300)))=300
```

### Steps to reproduce the bug

Load a dataset, then:

```python
dataset = load_from_disk(args.train_data_dir)
print(f"{len(dataset)=}", flush=True)
print(f"{len(dataset[:300])=}", flush=True)
print(f"{len(dataset[0:300])=}", flush=True)
print(f"{len(dataset.select(range(300)))=}", flush=True)
```

### Expected behavior

```bash
len(dataset)=1050324
len(dataset[:300])=300
len(dataset[0:300])=300
len(dataset.select(range(300)))=300
```

### Environment info

- `datasets` version: 2.16.1
- Platform: Linux-5.15.0-60-generic-x86_64-with-glibc2.35
- Python version: 3.10.11
- `huggingface_hub` version: 0.20.2
- PyArrow version: 10.0.1
- Pandas version: 1.5.3
- `fsspec` version: 2023.10.0
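Editor's illustration of the semantics explained in the comment above: slicing a `Dataset` returns a plain dict of columns, so `len()` counts columns, while `select` returns a new `Dataset` whose `len()` counts rows. Toy data, not the reporter's dataset.

```python
# Minimal sketch: len() of a slice counts columns, not rows.
from datasets import Dataset

ds = Dataset.from_dict({"a": list(range(1000)), "b": list(range(1000))})

print(len(ds))                     # 1000 -> number of rows
print(len(ds[:300]))               # 2    -> dict with one key per column
print(len(ds[:300]["a"]))          # 300  -> values of a single column
print(len(ds.select(range(300))))  # 300  -> a new Dataset with 300 rows
```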
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6748/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6747
6,747
chore(deps): bump fsspec
{ "avatar_url": "https://avatars.githubusercontent.com/u/3659196?v=4", "events_url": "https://api.github.com/users/shcheklein/events{/privacy}", "followers_url": "https://api.github.com/users/shcheklein/followers", "following_url": "https://api.github.com/users/shcheklein/following{/other_user}", "gists_url": "https://api.github.com/users/shcheklein/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shcheklein", "id": 3659196, "login": "shcheklein", "node_id": "MDQ6VXNlcjM2NTkxOTY=", "organizations_url": "https://api.github.com/users/shcheklein/orgs", "received_events_url": "https://api.github.com/users/shcheklein/received_events", "repos_url": "https://api.github.com/users/shcheklein/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shcheklein/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shcheklein/subscriptions", "type": "User", "url": "https://api.github.com/users/shcheklein", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6747). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005129 / 0.011353 (-0.006224) | 0.003788 / 0.011008 (-0.007220) | 0.063456 / 0.038508 (0.024948) | 0.029079 / 0.023109 (0.005969) | 0.237228 / 0.275898 (-0.038670) | 0.260554 / 0.323480 (-0.062926) | 0.003090 / 0.007986 (-0.004895) | 0.002730 / 0.004328 (-0.001599) | 0.049040 / 0.004250 (0.044789) | 0.042432 / 0.037052 (0.005380) | 0.256954 / 0.258489 (-0.001535) | 0.285912 / 0.293841 (-0.007929) | 0.027568 / 0.128546 (-0.100978) | 0.010402 / 0.075646 (-0.065245) | 0.206773 / 0.419271 (-0.212499) | 0.035381 / 0.043533 (-0.008152) | 0.243147 / 0.255139 (-0.011992) | 0.259419 / 0.283200 (-0.023781) | 0.019503 / 0.141683 (-0.122180) | 1.145537 / 1.452155 (-0.306618) | 1.204070 / 1.492716 (-0.288646) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092298 / 0.018006 (0.074291) | 0.300042 / 0.000490 (0.299553) | 0.000236 / 0.000200 (0.000036) | 0.000052 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018624 / 0.037411 (-0.018788) | 0.063832 / 0.014526 (0.049306) | 0.075849 / 0.176557 (-0.100707) | 0.120919 / 0.737135 (-0.616216) | 0.075878 / 0.296338 (-0.220461) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.275545 / 0.215209 (0.060336) | 2.706004 / 2.077655 (0.628349) | 1.406398 / 1.504120 (-0.097722) | 1.287154 / 1.541195 (-0.254041) | 1.298278 / 1.468490 (-0.170212) | 0.559763 / 4.584777 (-4.025014) | 2.434104 / 3.745712 (-1.311608) | 2.786338 / 5.269862 (-2.483523) | 1.720951 / 4.565676 (-2.844726) | 0.062082 / 0.424275 (-0.362193) | 0.004931 / 0.007607 (-0.002676) | 0.329998 / 0.226044 (0.103954) | 3.222105 / 2.268929 (0.953176) | 1.777539 / 55.444624 (-53.667085) | 1.533845 / 6.876477 (-5.342632) | 1.520357 / 2.142072 (-0.621715) | 0.638850 / 4.805227 (-4.166377) | 0.116718 / 6.500664 (-6.383946) | 0.042215 / 0.075469 (-0.033254) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.962791 / 1.841788 (-0.878997) | 11.509889 / 8.074308 (3.435581) | 9.507676 / 10.191392 (-0.683716) | 0.140780 / 0.680424 (-0.539644) | 0.014187 / 0.534201 (-0.520014) | 0.286363 / 0.579283 (-0.292920) | 0.263316 / 0.434364 (-0.171048) | 0.322099 / 0.540337 (-0.218239) | 0.415602 / 1.386936 (-0.971334) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005175 / 0.011353 (-0.006178) | 0.003631 / 0.011008 (-0.007377) | 0.050277 / 0.038508 (0.011769) | 0.031879 / 0.023109 (0.008770) | 0.269966 / 0.275898 (-0.005933) | 0.297229 / 0.323480 (-0.026251) | 0.004278 / 0.007986 (-0.003707) | 0.002936 / 0.004328 (-0.001393) | 0.048686 / 0.004250 (0.044436) | 0.044262 / 0.037052 (0.007209) | 0.284578 / 0.258489 (0.026089) | 0.313681 / 0.293841 (0.019840) | 0.029064 / 0.128546 (-0.099482) | 0.010700 / 0.075646 (-0.064946) | 0.058366 / 0.419271 (-0.360905) | 0.051341 / 0.043533 (0.007809) | 0.271262 / 0.255139 (0.016123) | 0.290791 / 0.283200 (0.007591) | 0.019044 / 0.141683 (-0.122639) | 1.149514 / 1.452155 (-0.302641) | 1.209277 / 1.492716 (-0.283439) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.094879 / 0.018006 (0.076872) | 0.302196 / 0.000490 (0.301707) | 0.000217 / 0.000200 (0.000018) | 0.000052 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021715 / 0.037411 (-0.015696) | 0.075122 / 0.014526 (0.060596) | 0.087393 / 0.176557 (-0.089164) | 0.125583 / 0.737135 (-0.611553) | 0.088722 / 0.296338 (-0.207617) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295158 / 0.215209 (0.079949) | 2.930208 / 2.077655 (0.852553) | 1.590197 / 1.504120 (0.086077) | 1.459038 / 1.541195 (-0.082156) | 1.471690 / 1.468490 (0.003200) | 0.570279 / 4.584777 (-4.014498) | 2.456971 / 3.745712 (-1.288741) | 2.675315 / 5.269862 (-2.594547) | 1.750122 / 4.565676 (-2.815554) | 0.062905 / 0.424275 (-0.361370) | 0.005118 / 0.007607 (-0.002489) | 0.344263 / 0.226044 (0.118219) | 3.472460 / 2.268929 (1.203532) | 1.931707 / 55.444624 (-53.512917) | 1.658537 / 6.876477 (-5.217939) | 1.785794 / 2.142072 (-0.356278) | 0.637149 / 4.805227 (-4.168078) | 0.115838 / 6.500664 (-6.384826) | 0.040771 / 0.075469 (-0.034698) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.002869 / 1.841788 (-0.838919) | 12.048825 / 8.074308 (3.974517) | 10.407979 / 10.191392 (0.216587) | 0.150300 / 0.680424 (-0.530124) | 0.015299 / 0.534201 (-0.518902) | 0.286277 / 0.579283 (-0.293006) | 0.312186 / 0.434364 (-0.122178) | 0.322633 / 0.540337 (-0.217704) | 0.438431 / 1.386936 (-0.948505) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#d5468836fe94e8be1ae093397dd43d4a2503b926 \"CML watermark\")\n" ]
2024-03-21T21:25:49Z
2024-03-22T16:40:15Z
2024-03-22T16:28:40Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6747.diff", "html_url": "https://github.com/huggingface/datasets/pull/6747", "merged_at": "2024-03-22T16:28:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/6747.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6747" }
There were a few fixes released recently, and some DVC ecosystem packages require a newer version of `fsspec`.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6747/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6746
6,746
ExpectedMoreSplits error when loading C4 dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/65165345?v=4", "events_url": "https://api.github.com/users/billwang485/events{/privacy}", "followers_url": "https://api.github.com/users/billwang485/followers", "following_url": "https://api.github.com/users/billwang485/following{/other_user}", "gists_url": "https://api.github.com/users/billwang485/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/billwang485", "id": 65165345, "login": "billwang485", "node_id": "MDQ6VXNlcjY1MTY1MzQ1", "organizations_url": "https://api.github.com/users/billwang485/orgs", "received_events_url": "https://api.github.com/users/billwang485/received_events", "repos_url": "https://api.github.com/users/billwang485/repos", "site_admin": false, "starred_url": "https://api.github.com/users/billwang485/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/billwang485/subscriptions", "type": "User", "url": "https://api.github.com/users/billwang485", "user_view_type": "public" }
[]
closed
false
[ "Hi ! We updated the `allenai/c4` repository to allow people to specify which language to load easily (the the [c4 dataset page](https://huggingface.co/datasets/allenai/c4))\r\n\r\nTo fix this issue **you can update** `datasets` and remove the mention of the legacy configuration name \"allenai--c4\":\r\n\r\n```python\r\ntraindata = load_dataset('allenai/c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train')\r\nvaldata = load_dataset('allenai/c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation')\r\n```", "Did you solve this problem?I have the same bug.It is no use to delete \"allenai--c4\".", "Did you solve it? I met this problem too.", "But after I romove allenai--c4,it still fails", "For me it works this way. I'm using datasets version 2.17.0", "First, pip install --upgrade datasets.\r\nSecond, Update the following two lines of code in data.py (in lib)\r\ntraindata = load_dataset('allenai/c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train')\r\nvaldata = load_dataset('allenai/c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation')", "The error is in the Wanda repository: https://github.com/locuslab/wanda\r\n- https://github.com/locuslab/wanda/issues/57\r\n\r\nConcretely, in these code lines:\r\nhttps://github.com/locuslab/wanda/blob/8e8fc87b4a2f9955baa7e76e64d5fce7fa8724a6/lib/data.py#L43-L44\r\n\r\nPlease report there and/or make the fix in their code.", "> traindata = load_dataset('allenai/c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train')\r\n> valdata = load_dataset('allenai/c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation')\r\n\r\nSolved for me ! Thanks!" ]
2024-03-21T02:53:04Z
2024-09-18T19:57:14Z
2024-07-29T07:21:08Z
NONE
null
null
### Describe the bug

I encounter a bug when running the example command line:

```bash
python main.py \
    --model decapoda-research/llama-7b-hf \
    --prune_method wanda \
    --sparsity_ratio 0.5 \
    --sparsity_type unstructured \
    --save out/llama_7b/unstructured/wanda/
```

The bug occurred at these lines of code (when loading the c4 dataset):

```python
traindata = load_dataset('allenai/c4', 'allenai--c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train')
valdata = load_dataset('allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation')
```

The error message states:

```
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
datasets.utils.info_utils.ExpectedMoreSplits: {'validation'}
```

### Steps to reproduce the bug

1. Run the example command line above; the bug occurs while loading c4.

### Expected behavior

The dataset should load without raising:

```
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
datasets.utils.info_utils.ExpectedMoreSplits: {'validation'}
```

### Environment info

I'm using CUDA 12.4, so I used `pip install pytorch` instead of the conda command provided in install.md. I've also tried another environment using the same commands in install.md, but the same bug occurred.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6746/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6745
6,745
Scraping the whole of github including private repos is bad; kindly stop
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
[ "It's not twitter here" ]
2024-03-20T20:54:06Z
2024-03-21T12:28:04Z
2024-03-21T10:24:56Z
NONE
null
null
### Feature request

https://github.com/bigcode-project/opt-out-v2 - opt out is not consent. kindly quit this ridiculous nonsense.

### Motivation

[EDITED: insults not tolerated]

### Your contribution

[EDITED: insults not tolerated]
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6745/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6744
6,744
Option to disable file locking
{ "avatar_url": "https://avatars.githubusercontent.com/u/35767167?v=4", "events_url": "https://api.github.com/users/VRehnberg/events{/privacy}", "followers_url": "https://api.github.com/users/VRehnberg/followers", "following_url": "https://api.github.com/users/VRehnberg/following{/other_user}", "gists_url": "https://api.github.com/users/VRehnberg/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VRehnberg", "id": 35767167, "login": "VRehnberg", "node_id": "MDQ6VXNlcjM1NzY3MTY3", "organizations_url": "https://api.github.com/users/VRehnberg/orgs", "received_events_url": "https://api.github.com/users/VRehnberg/received_events", "repos_url": "https://api.github.com/users/VRehnberg/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VRehnberg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VRehnberg/subscriptions", "type": "User", "url": "https://api.github.com/users/VRehnberg", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
[]
2024-03-20T15:59:45Z
2024-03-20T15:59:45Z
null
NONE
null
null
### Feature request

Commands such as `load_dataset` create file locks with `filelock.FileLock`. It would be good if there were a way to disable this.

### Motivation

File locking doesn't work on all file systems (in my case, NFS-mounted Weka). If the `cache_dir` only held small files, it would be possible to point it to local disk and the problem would be solved. However, as `cache_dir` is both where the small info files are written and where the processed datasets are put, this isn't a feasible solution.

Considering https://github.com/huggingface/datasets/issues/6395, I still do think this is something that belongs in Hugging Face. The possibility to control packages separately is valuable. It might be that a user has their dataset on a file system that doesn't support file locking while they are using file locking on local disk to control some other type of access.

### Your contribution

My suggested solution:

```diff
diff --git a/src/datasets/utils/_filelock.py b/src/datasets/utils/_filelock.py
index 19620e6e..58f41a02 100644
--- a/src/datasets/utils/_filelock.py
+++ b/src/datasets/utils/_filelock.py
@@ -18,11 +18,15 @@
 import os
 
 from filelock import FileLock as FileLock_
-from filelock import UnixFileLock
+from filelock import SoftFileLock, UnixFileLock
 from filelock import __version__ as _filelock_version
 from packaging import version
 
+if os.getenv('HF_USE_SOFTFILELOCK', 'false').lower() in ('true', '1'):
+    FileLock_ = SoftFileLock
+
+
 class FileLock(FileLock_):
     """
     A `filelock.FileLock` initializer that handles long paths.
```
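Editor's note - a hedged sketch of how the proposed opt-out might be used if it were adopted. `HF_USE_SOFTFILELOCK` is the variable suggested in this issue, not an existing `datasets` setting.

```python
# Hypothetical usage (the variable is proposed above, not currently supported):
# it would have to be set before importing datasets so the patched module picks
# SoftFileLock instead of the OS-level lock at import time.
import os

os.environ["HF_USE_SOFTFILELOCK"] = "1"  # proposed opt-out from this issue

import datasets  # with the suggested patch, FileLock would be a SoftFileLock
```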
null
{ "+1": 13, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 13, "url": "https://api.github.com/repos/huggingface/datasets/issues/6744/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6743
6,743
Allow null values in dict columns
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6743). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005013 / 0.011353 (-0.006340) | 0.003228 / 0.011008 (-0.007780) | 0.062763 / 0.038508 (0.024255) | 0.028937 / 0.023109 (0.005828) | 0.240777 / 0.275898 (-0.035121) | 0.266972 / 0.323480 (-0.056508) | 0.003073 / 0.007986 (-0.004913) | 0.002769 / 0.004328 (-0.001560) | 0.049265 / 0.004250 (0.045015) | 0.042061 / 0.037052 (0.005009) | 0.261714 / 0.258489 (0.003225) | 0.284896 / 0.293841 (-0.008944) | 0.027717 / 0.128546 (-0.100829) | 0.010430 / 0.075646 (-0.065216) | 0.209022 / 0.419271 (-0.210249) | 0.035941 / 0.043533 (-0.007591) | 0.246849 / 0.255139 (-0.008290) | 0.263205 / 0.283200 (-0.019994) | 0.019489 / 0.141683 (-0.122193) | 1.102595 / 1.452155 (-0.349559) | 1.170493 / 1.492716 (-0.322223) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093611 / 0.018006 (0.075604) | 0.302041 / 0.000490 (0.301551) | 0.000223 / 0.000200 (0.000023) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018720 / 0.037411 (-0.018692) | 0.062199 / 0.014526 (0.047673) | 0.074888 / 0.176557 (-0.101669) | 0.120184 / 0.737135 (-0.616951) | 0.076756 / 0.296338 (-0.219583) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.287484 / 0.215209 (0.072275) | 2.787777 / 2.077655 (0.710123) | 1.488957 / 1.504120 (-0.015163) | 1.362678 / 1.541195 (-0.178517) | 1.364571 / 1.468490 (-0.103919) | 0.563139 / 4.584777 (-4.021638) | 2.422224 / 3.745712 (-1.323488) | 2.798011 / 5.269862 (-2.471850) | 1.751159 / 4.565676 (-2.814517) | 0.062740 / 0.424275 (-0.361536) | 0.004918 / 0.007607 (-0.002689) | 0.338285 / 0.226044 (0.112240) | 3.316012 / 2.268929 (1.047083) | 1.845975 / 55.444624 (-53.598650) | 1.553187 / 6.876477 (-5.323290) | 1.564582 / 2.142072 (-0.577490) | 0.645987 / 4.805227 (-4.159240) | 0.118216 / 6.500664 (-6.382448) | 0.041243 / 0.075469 (-0.034226) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.970265 / 1.841788 (-0.871522) | 11.783152 / 8.074308 (3.708844) | 9.516584 / 10.191392 (-0.674808) | 0.148086 / 0.680424 (-0.532338) | 0.013689 / 0.534201 (-0.520512) | 0.289657 / 0.579283 (-0.289626) | 0.265966 / 0.434364 (-0.168398) | 0.328483 / 0.540337 (-0.211854) | 0.433544 / 1.386936 (-0.953392) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005235 / 0.011353 (-0.006118) | 0.003515 / 0.011008 (-0.007493) | 0.049484 / 0.038508 (0.010976) | 0.029264 / 0.023109 (0.006154) | 0.278518 / 0.275898 (0.002620) | 0.298948 / 0.323480 (-0.024532) | 0.004308 / 0.007986 (-0.003678) | 0.002751 / 0.004328 (-0.001577) | 0.048952 / 0.004250 (0.044701) | 0.045379 / 0.037052 (0.008327) | 0.292633 / 0.258489 (0.034144) | 0.319405 / 0.293841 (0.025564) | 0.030201 / 0.128546 (-0.098345) | 0.010657 / 0.075646 (-0.064990) | 0.057842 / 0.419271 (-0.361430) | 0.053359 / 0.043533 (0.009826) | 0.281136 / 0.255139 (0.025997) | 0.295388 / 0.283200 (0.012188) | 0.018786 / 0.141683 (-0.122897) | 1.187181 / 1.452155 (-0.264974) | 1.198394 / 1.492716 (-0.294323) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.093861 / 0.018006 (0.075855) | 0.304019 / 0.000490 (0.303529) | 0.000220 / 0.000200 (0.000020) | 0.000053 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021582 / 0.037411 (-0.015829) | 0.075381 / 0.014526 (0.060855) | 0.087886 / 0.176557 (-0.088671) | 0.125078 / 0.737135 (-0.612057) | 0.089339 / 0.296338 (-0.206999) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295797 / 0.215209 (0.080588) | 2.912021 / 2.077655 (0.834367) | 1.592191 / 1.504120 (0.088071) | 1.471270 / 1.541195 (-0.069925) | 1.475535 / 1.468490 (0.007045) | 0.564114 / 4.584777 (-4.020663) | 2.442882 / 3.745712 (-1.302830) | 2.679433 / 5.269862 (-2.590428) | 1.752097 / 4.565676 (-2.813579) | 0.062748 / 0.424275 (-0.361527) | 0.005068 / 0.007607 (-0.002539) | 0.345554 / 0.226044 (0.119509) | 3.456929 / 2.268929 (1.188000) | 1.962781 / 55.444624 (-53.481844) | 1.688313 / 6.876477 (-5.188164) | 1.817392 / 2.142072 (-0.324681) | 0.639588 / 4.805227 (-4.165639) | 0.116148 / 6.500664 (-6.384516) | 0.040851 / 0.075469 (-0.034618) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.009852 / 1.841788 (-0.831936) | 12.031749 / 8.074308 (3.957440) | 10.305107 / 10.191392 (0.113715) | 0.132960 / 0.680424 (-0.547464) | 0.014779 / 0.534201 (-0.519422) | 0.288903 / 0.579283 (-0.290381) | 0.275417 / 0.434364 (-0.158947) | 0.322628 / 0.540337 (-0.217709) | 0.445060 / 1.386936 (-0.941876) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f234fce40d5ffc96fac5198d8cc89817970d87ee \"CML watermark\")\n", "notify https://huggingface.co/datasets/chaoyi-wu/PMC-Inline/discussions/1 once it's merged in dataset-viewer" ]
2024-03-19T16:54:22Z
2024-04-08T13:08:42Z
2024-03-19T20:05:19Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6743.diff", "html_url": "https://github.com/huggingface/datasets/pull/6743", "merged_at": "2024-03-19T20:05:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/6743.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6743" }
Fix #6738
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6743/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6742
6,742
Fix missing download_config in get_data_patterns
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6742). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005394 / 0.011353 (-0.005959) | 0.003780 / 0.011008 (-0.007228) | 0.063459 / 0.038508 (0.024951) | 0.028883 / 0.023109 (0.005774) | 0.239159 / 0.275898 (-0.036739) | 0.258123 / 0.323480 (-0.065357) | 0.003134 / 0.007986 (-0.004851) | 0.003452 / 0.004328 (-0.000876) | 0.049255 / 0.004250 (0.045005) | 0.042727 / 0.037052 (0.005675) | 0.257387 / 0.258489 (-0.001102) | 0.280762 / 0.293841 (-0.013079) | 0.027921 / 0.128546 (-0.100625) | 0.010867 / 0.075646 (-0.064779) | 0.207878 / 0.419271 (-0.211393) | 0.036003 / 0.043533 (-0.007530) | 0.247457 / 0.255139 (-0.007682) | 0.260231 / 0.283200 (-0.022969) | 0.019741 / 0.141683 (-0.121942) | 1.143645 / 1.452155 (-0.308510) | 1.188789 / 1.492716 (-0.303927) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092065 / 0.018006 (0.074059) | 0.286021 / 0.000490 (0.285531) | 0.000220 / 0.000200 (0.000020) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018934 / 0.037411 (-0.018477) | 0.062474 / 0.014526 (0.047949) | 0.073384 / 0.176557 (-0.103172) | 0.121276 / 0.737135 (-0.615860) | 0.077792 / 0.296338 (-0.218546) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285352 / 0.215209 (0.070143) | 2.783110 / 2.077655 (0.705456) | 1.487983 / 1.504120 (-0.016137) | 1.364264 / 1.541195 (-0.176930) | 1.388757 / 1.468490 (-0.079733) | 0.568347 / 4.584777 (-4.016430) | 2.402451 / 3.745712 (-1.343261) | 2.835577 / 5.269862 (-2.434285) | 1.754853 / 4.565676 (-2.810824) | 0.063355 / 0.424275 (-0.360920) | 0.005010 / 0.007607 (-0.002598) | 0.332061 / 0.226044 (0.106016) | 3.287121 / 2.268929 (1.018193) | 1.829520 / 55.444624 (-53.615104) | 1.542669 / 6.876477 (-5.333808) | 1.560679 / 2.142072 (-0.581393) | 0.642371 / 4.805227 (-4.162856) | 0.118636 / 6.500664 (-6.382028) | 0.042262 / 0.075469 (-0.033207) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.984803 / 1.841788 (-0.856985) | 11.578044 / 8.074308 (3.503735) | 9.383428 / 10.191392 (-0.807964) | 0.141367 / 0.680424 (-0.539057) | 0.014047 / 0.534201 (-0.520154) | 0.291505 / 0.579283 (-0.287778) | 0.270199 / 0.434364 (-0.164165) | 0.329874 / 0.540337 (-0.210463) | 0.429386 / 1.386936 (-0.957550) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005322 / 0.011353 (-0.006031) | 0.004023 / 0.011008 (-0.006986) | 0.050126 / 0.038508 (0.011618) | 0.029937 / 0.023109 (0.006828) | 0.275985 / 0.275898 (0.000087) | 0.297965 / 0.323480 (-0.025515) | 0.004429 / 0.007986 (-0.003557) | 0.002729 / 0.004328 (-0.001599) | 0.048995 / 0.004250 (0.044744) | 0.044940 / 0.037052 (0.007888) | 0.288397 / 0.258489 (0.029908) | 0.317716 / 0.293841 (0.023875) | 0.029705 / 0.128546 (-0.098841) | 0.010972 / 0.075646 (-0.064674) | 0.058592 / 0.419271 (-0.360680) | 0.054640 / 0.043533 (0.011108) | 0.276456 / 0.255139 (0.021317) | 0.295119 / 0.283200 (0.011919) | 0.020032 / 0.141683 (-0.121651) | 1.175740 / 1.452155 (-0.276415) | 1.227246 / 1.492716 (-0.265471) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.092204 / 0.018006 (0.074197) | 0.300344 / 0.000490 (0.299855) | 0.000213 / 0.000200 (0.000013) | 0.000050 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021540 / 0.037411 (-0.015871) | 0.076252 / 0.014526 (0.061726) | 0.087582 / 0.176557 (-0.088975) | 0.125977 / 0.737135 (-0.611159) | 0.090649 / 0.296338 (-0.205689) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.294544 / 0.215209 (0.079335) | 2.883736 / 2.077655 (0.806082) | 1.570932 / 1.504120 (0.066812) | 1.449082 / 1.541195 (-0.092113) | 1.463262 / 1.468490 (-0.005228) | 0.559625 / 4.584777 (-4.025152) | 2.448593 / 3.745712 (-1.297119) | 2.663857 / 5.269862 (-2.606005) | 1.757812 / 4.565676 (-2.807865) | 0.061999 / 0.424275 (-0.362276) | 0.005100 / 0.007607 (-0.002507) | 0.343620 / 0.226044 (0.117575) | 3.487059 / 2.268929 (1.218130) | 1.963078 / 55.444624 (-53.481546) | 1.661758 / 6.876477 (-5.214719) | 1.799130 / 2.142072 (-0.342942) | 0.650194 / 4.805227 (-4.155034) | 0.117375 / 6.500664 (-6.383289) | 0.040957 / 0.075469 (-0.034512) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.037882 / 1.841788 (-0.803906) | 12.239784 / 8.074308 (4.165476) | 10.478186 / 10.191392 (0.286794) | 0.164446 / 0.680424 (-0.515978) | 0.014901 / 0.534201 (-0.519300) | 0.302485 / 0.579283 (-0.276798) | 0.283994 / 0.434364 (-0.150370) | 0.338473 / 0.540337 (-0.201864) | 0.468901 / 1.386936 (-0.918035) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#5fa934e275d240d9b1228b2f598bc96390299339 \"CML watermark\")\n" ]
2024-03-19T14:29:25Z
2024-03-19T18:24:39Z
2024-03-19T18:15:13Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6742.diff", "html_url": "https://github.com/huggingface/datasets/pull/6742", "merged_at": "2024-03-19T18:15:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/6742.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6742" }
Reported in https://github.com/huggingface/datasets-server/issues/2607
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6742/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6741
6,741
Fix offline mode with single config
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6741). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005093 / 0.011353 (-0.006260) | 0.003317 / 0.011008 (-0.007692) | 0.064795 / 0.038508 (0.026287) | 0.030373 / 0.023109 (0.007263) | 0.258776 / 0.275898 (-0.017122) | 0.269768 / 0.323480 (-0.053711) | 0.004186 / 0.007986 (-0.003799) | 0.002630 / 0.004328 (-0.001699) | 0.048643 / 0.004250 (0.044392) | 0.044220 / 0.037052 (0.007168) | 0.265113 / 0.258489 (0.006624) | 0.292202 / 0.293841 (-0.001639) | 0.027468 / 0.128546 (-0.101079) | 0.010123 / 0.075646 (-0.065523) | 0.226869 / 0.419271 (-0.192402) | 0.035739 / 0.043533 (-0.007794) | 0.253193 / 0.255139 (-0.001946) | 0.271002 / 0.283200 (-0.012198) | 0.017201 / 0.141683 (-0.124482) | 1.105836 / 1.452155 (-0.346318) | 1.161559 / 1.492716 (-0.331158) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090481 / 0.018006 (0.072475) | 0.299013 / 0.000490 (0.298524) | 0.000220 / 0.000200 (0.000020) | 0.000047 / 0.000054 (-0.000007) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017684 / 0.037411 (-0.019727) | 0.061580 / 0.014526 (0.047054) | 0.074370 / 0.176557 (-0.102186) | 0.119468 / 0.737135 (-0.617667) | 0.074671 / 0.296338 (-0.221668) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.284778 / 0.215209 (0.069569) | 2.780241 / 2.077655 (0.702586) | 1.504025 / 1.504120 (-0.000095) | 1.386644 / 1.541195 (-0.154550) | 1.402038 / 1.468490 (-0.066452) | 0.555180 / 4.584777 (-4.029597) | 2.410973 / 3.745712 (-1.334740) | 2.773252 / 5.269862 (-2.496610) | 1.722784 / 4.565676 (-2.842892) | 0.062773 / 0.424275 (-0.361502) | 0.004959 / 0.007607 (-0.002648) | 0.337163 / 0.226044 (0.111119) | 3.356947 / 2.268929 (1.088019) | 1.880953 / 55.444624 (-53.563671) | 1.556049 / 6.876477 (-5.320427) | 1.578589 / 2.142072 (-0.563483) | 0.641993 / 4.805227 (-4.163234) | 0.118624 / 6.500664 (-6.382040) | 0.042202 / 0.075469 (-0.033268) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.995321 / 1.841788 (-0.846467) | 12.257597 / 8.074308 (4.183289) | 9.646214 / 10.191392 (-0.545178) | 0.131124 / 0.680424 (-0.549300) | 0.014119 / 0.534201 (-0.520082) | 0.287597 / 0.579283 (-0.291686) | 0.266983 / 0.434364 (-0.167381) | 0.328165 / 0.540337 (-0.212173) | 0.422405 / 1.386936 (-0.964531) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005091 / 0.011353 (-0.006262) | 0.003358 / 0.011008 (-0.007650) | 0.049136 / 0.038508 (0.010628) | 0.031075 / 0.023109 (0.007966) | 0.275047 / 0.275898 (-0.000851) | 0.296845 / 0.323480 (-0.026635) | 0.004949 / 0.007986 (-0.003037) | 0.002586 / 0.004328 (-0.001743) | 0.048164 / 0.004250 (0.043913) | 0.040754 / 0.037052 (0.003702) | 0.288715 / 0.258489 (0.030226) | 0.312383 / 0.293841 (0.018542) | 0.029372 / 0.128546 (-0.099174) | 0.010097 / 0.075646 (-0.065549) | 0.056752 / 0.419271 (-0.362520) | 0.033128 / 0.043533 (-0.010405) | 0.274986 / 0.255139 (0.019847) | 0.292692 / 0.283200 (0.009493) | 0.018309 / 0.141683 (-0.123374) | 1.190320 / 1.452155 (-0.261834) | 1.222529 / 1.492716 (-0.270188) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.091717 / 0.018006 (0.073711) | 0.300278 / 0.000490 (0.299788) | 0.000217 / 0.000200 (0.000017) | 0.000065 / 0.000054 (0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021394 / 0.037411 (-0.016018) | 0.074918 / 0.014526 (0.060392) | 0.087461 / 0.176557 (-0.089095) | 0.125499 / 0.737135 (-0.611636) | 0.087484 / 0.296338 (-0.208854) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.296557 / 0.215209 (0.081348) | 2.905527 / 2.077655 (0.827872) | 1.624640 / 1.504120 (0.120520) | 1.505495 / 1.541195 (-0.035700) | 1.514066 / 1.468490 (0.045576) | 0.569376 / 4.584777 (-4.015401) | 2.448575 / 3.745712 (-1.297137) | 2.772805 / 5.269862 (-2.497057) | 1.757287 / 4.565676 (-2.808390) | 0.064209 / 0.424275 (-0.360066) | 0.005688 / 0.007607 (-0.001919) | 0.353175 / 0.226044 (0.127131) | 3.481591 / 2.268929 (1.212662) | 1.995384 / 55.444624 (-53.449240) | 1.684623 / 6.876477 (-5.191854) | 1.675750 / 2.142072 (-0.466323) | 0.644463 / 4.805227 (-4.160764) | 0.115393 / 6.500664 (-6.385271) | 0.040671 / 0.075469 (-0.034799) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.037487 / 1.841788 (-0.804301) | 11.902194 / 8.074308 (3.827886) | 10.148579 / 10.191392 (-0.042813) | 0.150261 / 0.680424 (-0.530163) | 0.015001 / 0.534201 (-0.519200) | 0.291008 / 0.579283 (-0.288275) | 0.278758 / 0.434364 (-0.155606) | 0.334037 / 0.540337 (-0.206301) | 0.419942 / 1.386936 (-0.966994) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#dcd01046388fc052d37acc5a450bea69e3c57afc \"CML watermark\")\n" ]
2024-03-19T10:48:32Z
2024-03-25T16:35:21Z
2024-03-25T16:23:59Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6741.diff", "html_url": "https://github.com/huggingface/datasets/pull/6741", "merged_at": "2024-03-25T16:23:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/6741.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6741" }
Reported in https://github.com/huggingface/datasets/issues/4760 The cache was not able to reload a dataset with a single config from the cache if the config name is not specified. For example ```python from datasets import load_dataset, config config.HF_DATASETS_OFFLINE = True load_dataset("openai_humaneval") ``` This was due to a regression in https://github.com/huggingface/datasets/pull/6632
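As a side note, a minimal sketch of an alternative way to force offline mode when reproducing this, via the `HF_DATASETS_OFFLINE` environment variable instead of patching `datasets.config` (the variable must be set before `datasets` is imported, and the dataset is assumed to be already present in the local cache):

```python
import os

# Read at import time by datasets.config, so it must be set before importing datasets.
os.environ["HF_DATASETS_OFFLINE"] = "1"

from datasets import load_dataset

# Assumes "openai_humaneval" has already been downloaded to the local HF cache.
ds = load_dataset("openai_humaneval")
```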
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6741/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6740
6,740
Support for loading geotiff files as a part of the ImageFolder
{ "avatar_url": "https://avatars.githubusercontent.com/u/31362090?v=4", "events_url": "https://api.github.com/users/sunny1401/events{/privacy}", "followers_url": "https://api.github.com/users/sunny1401/followers", "following_url": "https://api.github.com/users/sunny1401/following{/other_user}", "gists_url": "https://api.github.com/users/sunny1401/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sunny1401", "id": 31362090, "login": "sunny1401", "node_id": "MDQ6VXNlcjMxMzYyMDkw", "organizations_url": "https://api.github.com/users/sunny1401/orgs", "received_events_url": "https://api.github.com/users/sunny1401/received_events", "repos_url": "https://api.github.com/users/sunny1401/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sunny1401/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sunny1401/subscriptions", "type": "User", "url": "https://api.github.com/users/sunny1401", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
[]
2024-03-18T20:00:39Z
2024-03-27T18:19:48Z
2024-03-27T18:19:20Z
NONE
null
null
### Feature request Request for adding rasterio support to load geotiff files as a part of ImageFolder, instead of using PIL ### Motivation As of now, there are many datasets on the HuggingFace Hub which are predominantly focused on remote sensing or come from remote sensing. The current ImageFolder (if I have understood correctly) uses PIL. This is not really optimal, because these datasets mostly have images with many channels and additional metadata, and using PIL means that information is lost unless we provide a custom script. Hence, maybe an API could be added to handle this in a common way? ### Your contribution If the issue is accepted, I can contribute the code, because I would like to have it automated and generalised.
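For context, a minimal sketch (not part of `datasets`, and using a hypothetical file path `scene.tif`) of what reading a multi-band GeoTIFF with rasterio looks like, which is roughly what this request asks ImageFolder to do instead of decoding with PIL:

```python
import rasterio  # third-party dependency: pip install rasterio

# "scene.tif" is a hypothetical multi-band GeoTIFF used only for illustration.
with rasterio.open("scene.tif") as src:
    pixels = src.read()     # numpy array of shape (bands, height, width), all channels kept
    profile = src.profile   # geo metadata: band count, dtype, CRS, affine transform, ...

print(pixels.shape, profile["count"], profile["crs"])
```

With PIL, such a file would typically be collapsed to at most a few channels and its geospatial metadata dropped, which is the gap this feature request describes.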
{ "avatar_url": "https://avatars.githubusercontent.com/u/31362090?v=4", "events_url": "https://api.github.com/users/sunny1401/events{/privacy}", "followers_url": "https://api.github.com/users/sunny1401/followers", "following_url": "https://api.github.com/users/sunny1401/following{/other_user}", "gists_url": "https://api.github.com/users/sunny1401/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sunny1401", "id": 31362090, "login": "sunny1401", "node_id": "MDQ6VXNlcjMxMzYyMDkw", "organizations_url": "https://api.github.com/users/sunny1401/orgs", "received_events_url": "https://api.github.com/users/sunny1401/received_events", "repos_url": "https://api.github.com/users/sunny1401/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sunny1401/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sunny1401/subscriptions", "type": "User", "url": "https://api.github.com/users/sunny1401", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6740/reactions" }
not_planned
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6739
6,739
Transpose images with EXIF Orientation tag
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6739). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005295 / 0.011353 (-0.006058) | 0.003402 / 0.011008 (-0.007606) | 0.062860 / 0.038508 (0.024352) | 0.029627 / 0.023109 (0.006518) | 0.238359 / 0.275898 (-0.037539) | 0.262940 / 0.323480 (-0.060540) | 0.003077 / 0.007986 (-0.004909) | 0.002676 / 0.004328 (-0.001652) | 0.048731 / 0.004250 (0.044480) | 0.043989 / 0.037052 (0.006936) | 0.255702 / 0.258489 (-0.002787) | 0.282667 / 0.293841 (-0.011174) | 0.028019 / 0.128546 (-0.100527) | 0.010195 / 0.075646 (-0.065451) | 0.205472 / 0.419271 (-0.213800) | 0.036551 / 0.043533 (-0.006982) | 0.243282 / 0.255139 (-0.011857) | 0.261925 / 0.283200 (-0.021274) | 0.020506 / 0.141683 (-0.121177) | 1.137228 / 1.452155 (-0.314927) | 1.183935 / 1.492716 (-0.308782) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.100290 / 0.018006 (0.082284) | 0.316279 / 0.000490 (0.315790) | 0.000239 / 0.000200 (0.000039) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017979 / 0.037411 (-0.019432) | 0.061616 / 0.014526 (0.047090) | 0.072989 / 0.176557 (-0.103568) | 0.118667 / 0.737135 (-0.618468) | 0.074266 / 0.296338 (-0.222072) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.287971 / 0.215209 (0.072762) | 2.845235 / 2.077655 (0.767581) | 1.501983 / 1.504120 (-0.002137) | 1.389824 / 1.541195 (-0.151370) | 1.415616 / 1.468490 (-0.052874) | 0.568727 / 4.584777 (-4.016050) | 2.368330 / 3.745712 (-1.377382) | 2.844329 / 5.269862 (-2.425532) | 1.809038 / 4.565676 (-2.756639) | 0.063699 / 0.424275 (-0.360576) | 0.004972 / 0.007607 (-0.002635) | 0.340092 / 0.226044 (0.114048) | 3.369146 / 2.268929 (1.100217) | 1.863423 / 55.444624 (-53.581201) | 1.608334 / 6.876477 (-5.268142) | 1.624479 / 2.142072 (-0.517594) | 0.632439 / 4.805227 (-4.172788) | 0.116862 / 6.500664 (-6.383802) | 0.042558 / 0.075469 (-0.032911) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.967922 / 1.841788 (-0.873866) | 11.730612 / 8.074308 (3.656304) | 9.321333 / 10.191392 (-0.870059) | 0.142604 / 0.680424 (-0.537819) | 0.013934 / 0.534201 (-0.520267) | 0.285992 / 0.579283 (-0.293292) | 0.267639 / 0.434364 (-0.166724) | 0.324972 / 0.540337 (-0.215365) | 0.427077 / 1.386936 (-0.959859) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005806 / 0.011353 (-0.005547) | 0.003771 / 0.011008 (-0.007237) | 0.049542 / 0.038508 (0.011034) | 0.030182 / 0.023109 (0.007073) | 0.303923 / 0.275898 (0.028025) | 0.325623 / 0.323480 (0.002143) | 0.004327 / 0.007986 (-0.003659) | 0.002818 / 0.004328 (-0.001510) | 0.048237 / 0.004250 (0.043987) | 0.047490 / 0.037052 (0.010437) | 0.316556 / 0.258489 (0.058067) | 0.348352 / 0.293841 (0.054512) | 0.029444 / 0.128546 (-0.099102) | 0.010544 / 0.075646 (-0.065102) | 0.057382 / 0.419271 (-0.361890) | 0.056210 / 0.043533 (0.012677) | 0.305495 / 0.255139 (0.050356) | 0.321570 / 0.283200 (0.038370) | 0.019546 / 0.141683 (-0.122137) | 1.141732 / 1.452155 (-0.310423) | 1.223626 / 1.492716 (-0.269091) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new 
/ old (diff) | 0.093864 / 0.018006 (0.075858) | 0.309715 / 0.000490 (0.309226) | 0.000217 / 0.000200 (0.000017) | 0.000053 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022047 / 0.037411 (-0.015364) | 0.074885 / 0.014526 (0.060359) | 0.088440 / 0.176557 (-0.088117) | 0.127033 / 0.737135 (-0.610103) | 0.089048 / 0.296338 (-0.207290) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.292624 / 0.215209 (0.077415) | 2.877592 / 2.077655 (0.799937) | 1.607036 / 1.504120 (0.102916) | 1.487819 / 1.541195 (-0.053376) | 1.517318 / 1.468490 (0.048828) | 0.553321 / 4.584777 (-4.031456) | 2.415577 / 3.745712 (-1.330135) | 2.691411 / 5.269862 (-2.578450) | 1.743395 / 4.565676 (-2.822282) | 0.062187 / 0.424275 (-0.362088) | 0.005073 / 0.007607 (-0.002534) | 0.342907 / 0.226044 (0.116863) | 3.402054 / 2.268929 (1.133126) | 1.979481 / 55.444624 (-53.465143) | 1.702885 / 6.876477 (-5.173592) | 1.868279 / 2.142072 (-0.273794) | 0.640095 / 4.805227 (-4.165132) | 0.117138 / 6.500664 (-6.383526) | 0.042197 / 0.075469 (-0.033272) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.007495 / 1.841788 (-0.834292) | 12.037309 / 8.074308 (3.963001) | 10.227670 / 10.191392 (0.036278) | 0.149533 / 0.680424 (-0.530891) | 0.015282 / 0.534201 (-0.518919) | 0.287357 / 0.579283 (-0.291926) | 0.285109 / 0.434364 (-0.149255) | 0.324027 / 0.540337 (-0.216311) | 0.442482 / 1.386936 (-0.944454) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#19b40860acf3b3ba8db727fcf3b1b99ebb8d7e33 \"CML watermark\")\n", "This commit seems to crash the whole program if the EXIF tag is contaminated like that in #7668 (which is common, due to str codec misconfiguration) and thus I'd like to suggest making the parsing feature optional, or just implement some error handling." ]
2024-03-18T16:43:06Z
2025-07-03T11:33:18Z
2024-03-19T15:29:42Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6739.diff", "html_url": "https://github.com/huggingface/datasets/pull/6739", "merged_at": "2024-03-19T15:29:41Z", "patch_url": "https://github.com/huggingface/datasets/pull/6739.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6739" }
Closes https://github.com/huggingface/datasets/issues/6252
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6739/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6738
6,738
Dict feature is non-nullable while nested dict feature is
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
[ "It looks like a bug, by default every feature should be nullable.", "I've linked a PR with a fix :)", "@mariosasko awesome thank you!" ]
2024-03-18T14:31:47Z
2024-03-20T10:24:15Z
2024-03-19T20:05:20Z
CONTRIBUTOR
null
null
When i try to create a `Dataset` object with None values inside a dict column, like this: ```python from datasets import Dataset, Features, Value Dataset.from_dict( { "dict": [{"a": 0, "b": 0}, None], }, features=Features( {"dict": {"a": Value("int16"), "b": Value("int16")}} ) ) ``` i get `ValueError: Got None but expected a dictionary instead`. At the same time, having None in _nested_ dict feature works, for example, this doesn't throw any errors: ```python from datasets import Dataset, Features, Value, Sequence dataset = Dataset.from_dict( { "list_dict": [[{"a": 0, "b": 0}], None], "sequence_dict": [[{"a": 0, "b": 0}], None], }, features=Features({ "list_dict": [{"a": Value("int16"), "b": Value("int16")}], "sequence_dict": Sequence({"a": Value("int16"), "b": Value("int16")}), }) ) ``` Other types of features also seem to be nullable (but I haven't checked all of them). Version of `datasets` is the latest atm (2.18.0) Is this an expected behavior or a bug?
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6738/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6737
6,737
Invalid pattern: '**' can only be an entire path component
{ "avatar_url": "https://avatars.githubusercontent.com/u/28976175?v=4", "events_url": "https://api.github.com/users/JPonsa/events{/privacy}", "followers_url": "https://api.github.com/users/JPonsa/followers", "following_url": "https://api.github.com/users/JPonsa/following{/other_user}", "gists_url": "https://api.github.com/users/JPonsa/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JPonsa", "id": 28976175, "login": "JPonsa", "node_id": "MDQ6VXNlcjI4OTc2MTc1", "organizations_url": "https://api.github.com/users/JPonsa/orgs", "received_events_url": "https://api.github.com/users/JPonsa/received_events", "repos_url": "https://api.github.com/users/JPonsa/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JPonsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JPonsa/subscriptions", "type": "User", "url": "https://api.github.com/users/JPonsa", "user_view_type": "public" }
[]
closed
false
[ "I couldn't reproduce the issue on my side on MacOS, I guess the issue comes from the recent `fsspec` on Windows.\r\n\r\nCan you try downgrading to `fsspec==2023.9.2` for now ? It would also be great to investigate this and see if we need a fix in `datasets` or `fsspec`", "I had the same issue! \r\nDowngrading to fsspec from 2023.10.0 to 2023.9.2 solved it for me.\r\n\r\n(env: python 3.11.7, datasets version: 2.15.0, Windows 10 22H2, Build 19045.4170)\r\n\r\nThanks a lot!", "Ubuntu 20.04 had the same issue\r\npython 3.9 \r\n\r\nFile \"/home/delight-gpu/Workspace2/azuryl/FLAP/main.py\", line 112, in <module>\r\n main()\r\n File \"/home/delight-gpu/Workspace2/azuryl/FLAP/main.py\", line 85, in main\r\n prune_flap(args, model, tokenizer, device)\r\n File \"/home/delight-gpu/Workspace2/azuryl/FLAP/lib/prune.py\", line 294, in prune_flap\r\n dataloader, _ = get_loaders(\"wikitext2\", nsamples=args.nsamples,seed=args.seed,seqlen=model.seqlen,tokenizer=tokenizer)\r\n File \"/home/delight-gpu/Workspace2/azuryl/FLAP/lib/data.py\", line 159, in get_loaders\r\n return get_wikitext2(nsamples, seed, seqlen, tokenizer)\r\n File \"/home/delight-gpu/Workspace2/azuryl/FLAP/lib/data.py\", line 79, in get_wikitext2\r\n traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/load.py\", line 1767, in load_dataset\r\n builder_instance = load_dataset_builder(\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/load.py\", line 1498, in load_dataset_builder\r\n dataset_module = dataset_module_factory(\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/load.py\", line 1215, in dataset_module_factory\r\n raise e1 from None\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/load.py\", line 1192, in dataset_module_factory\r\n return HubDatasetModuleFactoryWithoutScript(\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/load.py\", line 765, in get_module\r\n else get_data_patterns_in_dataset_repository(hfh_dataset_info, self.data_dir)\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/data_files.py\", line 675, in get_data_patterns_in_dataset_repository\r\n return _get_data_files_patterns(resolver)\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/data_files.py\", line 236, in _get_data_files_patterns\r\n data_files = pattern_resolver(pattern)\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/datasets/data_files.py\", line 486, in _resolve_single_pattern_in_dataset_repository\r\n glob_iter = [PurePath(filepath) for filepath in fs.glob(PurePath(pattern).as_posix()) if fs.isfile(filepath)]\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/fsspec/spec.py\", line 606, in glob\r\n pattern = glob_translate(path + (\"/\" if ends_with_sep else \"\"))\r\n File \"/home/azuryl/anaconda3/envs/flap/lib/python3.9/site-packages/fsspec/utils.py\", line 734, in glob_translate\r\n raise ValueError(\r\nValueError: Invalid pattern: '**' can only be an entire path component", "on ubuntu you just need to have the latest `datasets` and `fsspec`\r\n\r\n```\r\npip install -U datasets fsspec\r\n```", "The issue was caused by an incompatibility between the versions of `datasets`, `huggingface-hub` and `fsspec`.\r\n\r\nThe issue was fixed in:\r\n- huggingface-hub-0.21.2: 
https://github.com/huggingface/huggingface_hub/pull/2056\r\n- and datasets-2.18.0: https://github.com/huggingface/datasets/pull/6687\r\n - datasets-2.19.1 fixed the minimum requirement huggingface-hub >= 0.21.2: https://github.com/huggingface/datasets/pull/6713", "@albertvillanova, thank you for this solution. I encountered the same issue and had to use:\r\n```\r\nconda install -c conda-forge huggingface_hub=0.21.2 datasets=2.19.1\r\n```\r\n\r\nCheers", "Cheers!" ]
2024-03-16T19:28:46Z
2024-07-23T14:23:28Z
2024-05-13T11:32:57Z
NONE
null
null
### Describe the bug ValueError: Invalid pattern: '**' can only be an entire path component when loading any dataset ### Steps to reproduce the bug import datasets ds = datasets.load_dataset("TokenBender/code_instructions_122k_alpaca_style") ### Expected behavior loading the dataset successfully ### Environment info - `datasets` version: 2.18.0 - Platform: Windows-10-10.0.22631-SP0 - Python version: 3.11.7 - `huggingface_hub` version: 0.20.3 - PyArrow version: 15.0.0 - Pandas version: 2.2.1 - `fsspec` version: 2023.12.2
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 10, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 10, "url": "https://api.github.com/repos/huggingface/datasets/issues/6737/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6736
6,736
Mosaic Streaming (MDS) Support
{ "avatar_url": "https://avatars.githubusercontent.com/u/2498509?v=4", "events_url": "https://api.github.com/users/siddk/events{/privacy}", "followers_url": "https://api.github.com/users/siddk/followers", "following_url": "https://api.github.com/users/siddk/following{/other_user}", "gists_url": "https://api.github.com/users/siddk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/siddk", "id": 2498509, "login": "siddk", "node_id": "MDQ6VXNlcjI0OTg1MDk=", "organizations_url": "https://api.github.com/users/siddk/orgs", "received_events_url": "https://api.github.com/users/siddk/received_events", "repos_url": "https://api.github.com/users/siddk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/siddk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/siddk/subscriptions", "type": "User", "url": "https://api.github.com/users/siddk", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
[ "Hi ! that would be great :) Though note that `datasets` doesn't implement format-specific resuming when streaming, so in general I think it's better if users can use the mosaic-streaming library to read their MDS datasets. I wonder if they support `hf://` paths though...\n\nAnyway for those interested, the code for WebDataset is a single file here: https://github.com/huggingface/datasets/blob/main/src/datasets/packaged_modules/webdataset/webdataset.py.\n\nIt implements `_split_generators` that downloads files and returns the lists of splits (train/validation/test) and `_split_generators` to generate examples (dicts) from the downloaded files. Streaming is automatically supported by making download steps lazy and by extending `open()` to work with remote URLs.\n\nedit: we could try with datasets like https://huggingface.co/datasets/jhu-clsp/mmBERT-pretrain-p2-fineweb2-remaining" ]
2024-03-16T18:42:04Z
2025-09-10T14:32:13Z
null
NONE
null
null
### Feature request I'm a huge fan of the current HF Datasets `webdataset` integration (especially the built-in streaming support). However, I'd love to upload some robotics and multimodal datasets I've processed for use with [Mosaic Streaming](https://docs.mosaicml.com/projects/streaming/en/stable/), specifically their [MDS Format](https://docs.mosaicml.com/projects/streaming/en/stable/fundamentals/dataset_format.html#mds). Because the shard files have similar semantics to WebDataset, I'm hoping that adding such support won't be too much trouble? ### Motivation One of the downsides with WebDataset is a lack of out-of-the-box determinism (especially for large-scale training and reproducibility), easy job resumption, and the ability to quickly debug / visualize individual examples. Mosaic Streaming provides a [great interface for this out of the box](https://docs.mosaicml.com/projects/streaming/en/stable/#key-features), so I'd love to see it supported in HF Datasets. ### Your contribution Happy to help test things / provide example data. Can potentially submit a PR if maintainers could point me to the necessary WebDataset logic / steps for adding a new streaming format!
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6736/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6735
6,735
Add `mode` parameter to `Image` feature
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6735). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005009 / 0.011353 (-0.006344) | 0.003547 / 0.011008 (-0.007461) | 0.063014 / 0.038508 (0.024506) | 0.027699 / 0.023109 (0.004589) | 0.247140 / 0.275898 (-0.028758) | 0.273610 / 0.323480 (-0.049870) | 0.003115 / 0.007986 (-0.004871) | 0.002712 / 0.004328 (-0.001616) | 0.049134 / 0.004250 (0.044883) | 0.041582 / 0.037052 (0.004530) | 0.269992 / 0.258489 (0.011503) | 0.294516 / 0.293841 (0.000675) | 0.027818 / 0.128546 (-0.100728) | 0.010568 / 0.075646 (-0.065078) | 0.207710 / 0.419271 (-0.211561) | 0.035767 / 0.043533 (-0.007766) | 0.260058 / 0.255139 (0.004919) | 0.277615 / 0.283200 (-0.005585) | 0.020192 / 0.141683 (-0.121491) | 1.116863 / 1.452155 (-0.335292) | 1.156868 / 1.492716 (-0.335848) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095087 / 0.018006 (0.077081) | 0.303249 / 0.000490 (0.302759) | 0.000215 / 0.000200 (0.000015) | 0.000053 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018866 / 0.037411 (-0.018545) | 0.063853 / 0.014526 (0.049328) | 0.073863 / 0.176557 (-0.102693) | 0.121399 / 0.737135 (-0.615737) | 0.076014 / 0.296338 (-0.220325) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289843 / 0.215209 (0.074634) | 2.844085 / 2.077655 (0.766431) | 1.528022 / 1.504120 (0.023902) | 1.397352 / 1.541195 (-0.143843) | 1.394676 / 1.468490 (-0.073814) | 0.555899 / 4.584777 (-4.028878) | 2.354010 / 3.745712 (-1.391702) | 2.737715 / 5.269862 (-2.532146) | 1.731260 / 4.565676 (-2.834416) | 0.062315 / 0.424275 (-0.361960) | 0.004920 / 0.007607 (-0.002687) | 0.342921 / 0.226044 (0.116877) | 3.416529 / 2.268929 (1.147600) | 1.862941 / 55.444624 (-53.581684) | 1.599661 / 6.876477 (-5.276816) | 1.617200 / 2.142072 (-0.524873) | 0.635129 / 4.805227 (-4.170099) | 0.121651 / 6.500664 (-6.379013) | 0.041867 / 0.075469 (-0.033602) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.990825 / 1.841788 (-0.850962) | 11.435576 / 8.074308 (3.361268) | 9.490194 / 10.191392 (-0.701198) | 0.133295 / 0.680424 (-0.547129) | 0.014061 / 0.534201 (-0.520140) | 0.288648 / 0.579283 (-0.290635) | 0.268874 / 0.434364 (-0.165490) | 0.323288 / 0.540337 (-0.217049) | 0.426090 / 1.386936 (-0.960846) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006712 / 0.011353 (-0.004641) | 0.003723 / 0.011008 (-0.007285) | 0.049814 / 0.038508 (0.011306) | 0.039323 / 0.023109 (0.016213) | 0.279244 / 0.275898 (0.003346) | 0.297139 / 0.323480 (-0.026341) | 0.004197 / 0.007986 (-0.003788) | 0.002753 / 0.004328 (-0.001576) | 0.048820 / 0.004250 (0.044569) | 0.049593 / 0.037052 (0.012541) | 0.287247 / 0.258489 (0.028758) | 0.338078 / 0.293841 (0.044237) | 0.029303 / 0.128546 (-0.099243) | 0.010292 / 0.075646 (-0.065354) | 0.057852 / 0.419271 (-0.361419) | 0.053390 / 0.043533 (0.009857) | 0.275155 / 0.255139 (0.020016) | 0.292891 / 0.283200 (0.009692) | 0.020007 / 0.141683 (-0.121676) | 1.161731 / 1.452155 (-0.290424) | 1.232162 / 1.492716 (-0.260555) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new 
/ old (diff) | 0.092848 / 0.018006 (0.074842) | 0.301180 / 0.000490 (0.300690) | 0.000236 / 0.000200 (0.000036) | 0.000050 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022477 / 0.037411 (-0.014934) | 0.077012 / 0.014526 (0.062486) | 0.087335 / 0.176557 (-0.089222) | 0.126761 / 0.737135 (-0.610374) | 0.089249 / 0.296338 (-0.207090) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290722 / 0.215209 (0.075513) | 2.884485 / 2.077655 (0.806830) | 1.565775 / 1.504120 (0.061656) | 1.442369 / 1.541195 (-0.098825) | 1.453995 / 1.468490 (-0.014495) | 0.563193 / 4.584777 (-4.021584) | 2.413610 / 3.745712 (-1.332102) | 2.684567 / 5.269862 (-2.585295) | 1.753322 / 4.565676 (-2.812354) | 0.061879 / 0.424275 (-0.362396) | 0.005080 / 0.007607 (-0.002527) | 0.347274 / 0.226044 (0.121229) | 3.435836 / 2.268929 (1.166907) | 1.937893 / 55.444624 (-53.506731) | 1.657824 / 6.876477 (-5.218653) | 1.777767 / 2.142072 (-0.364305) | 0.656757 / 4.805227 (-4.148471) | 0.117144 / 6.500664 (-6.383520) | 0.040691 / 0.075469 (-0.034778) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.012435 / 1.841788 (-0.829353) | 12.038001 / 8.074308 (3.963693) | 10.363947 / 10.191392 (0.172555) | 0.140711 / 0.680424 (-0.539713) | 0.014937 / 0.534201 (-0.519264) | 0.291070 / 0.579283 (-0.288213) | 0.277180 / 0.434364 (-0.157184) | 0.327433 / 0.540337 (-0.212904) | 0.439767 / 1.386936 (-0.947169) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#0b55ec53e980855d71ae22f8b3d12b2a0d476a51 \"CML watermark\")\n" ]
2024-03-15T17:21:12Z
2024-03-18T15:47:48Z
2024-03-18T15:41:33Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6735.diff", "html_url": "https://github.com/huggingface/datasets/pull/6735", "merged_at": "2024-03-18T15:41:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/6735.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6735" }
Fix https://github.com/huggingface/datasets/issues/6675
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6735/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6734
6,734
Tokenization slows towards end of dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/98723285?v=4", "events_url": "https://api.github.com/users/ethansmith2000/events{/privacy}", "followers_url": "https://api.github.com/users/ethansmith2000/followers", "following_url": "https://api.github.com/users/ethansmith2000/following{/other_user}", "gists_url": "https://api.github.com/users/ethansmith2000/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ethansmith2000", "id": 98723285, "login": "ethansmith2000", "node_id": "U_kgDOBeJl1Q", "organizations_url": "https://api.github.com/users/ethansmith2000/orgs", "received_events_url": "https://api.github.com/users/ethansmith2000/received_events", "repos_url": "https://api.github.com/users/ethansmith2000/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ethansmith2000/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ethansmith2000/subscriptions", "type": "User", "url": "https://api.github.com/users/ethansmith2000", "user_view_type": "public" }
[]
open
false
[ "Hi ! First note that if the dataset is not heterogeneous / shuffled, there might be places in the data with shorter texts that are faster to tokenize.\r\n\r\nMoreover, the way `num_proc` works is by slicing the dataset and passing each slice to a process to run the `map()` function. So at the very end of `map()`, some processes might have finished transforming their slice of data while others are still running, causing the throughput to become lower.", "I did see some comments about how num_proc=None could help and outputting numpy arrays can also help in the docs, but this seems quite odd now dropping down to 1it/s\r\n\r\n```bash\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46048888/46390354 [12:33:30<4:20:32, 21.84 examples/s]\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46049888/46390354 [12:36:11<8:37:59, 10.95 examples/s]\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46050888/46390354 [12:46:35<24:56:56, 3.78 examples/s]\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46051888/46390354 [12:56:43<35:08:10, 2.68 examples/s]\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46052888/46390354 [13:06:58<42:05:41, 2.23 examples/s]\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46053888/46390354 [13:16:01<44:40:18, 2.09 examples/s]\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46054888/46390354 [13:25:11<46:35:28, 2.00 examples/s]\r\nRunning tokenizer on dataset (num_proc=48): 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 46055888/46390354 [13:34:23<47:55:34, 1.94 examples/s]\r\n```\r\n\r\n", "@ethansmith2000 Hi, did you solve this problem? I'm strugging with the same problem now.", "So, is there a way to solve this problem now?" ]
2024-03-15T03:27:36Z
2025-02-20T17:40:54Z
null
NONE
null
null
### Describe the bug Mapped tokenization slows down substantially towards end of dataset. train set started off very slow, caught up to 20k then tapered off til the end. what's particularly strange is that the tokenization crashed a few times before due to errors with invalid tokens somewhere or corrupted downloads, and the speed ups/downs consistently happened the same times ```bash Running tokenizer on dataset (num_proc=48): 0%| | 847000/881416735 [12:18<252:45:45, 967.72 examples/s] Running tokenizer on dataset (num_proc=48): 0%| | 848000/881416735 [12:19<224:16:10, 1090.66 examples/s] Running tokenizer on dataset (num_proc=48): 10%|β–‰ | 84964000/881416735 [3:48:00<11:21:34, 19476.01 examples/s] Running tokenizer on dataset (num_proc=48): 10%|β–‰ | 84967000/881416735 [3:48:00<12:04:01, 18333.79 examples/s] Running tokenizer on dataset (num_proc=48): 61%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 538631977/881416735 [13:46:40<27:50:04, 3420.84 examples/s] Running tokenizer on dataset (num_proc=48): 61%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 538632977/881416735 [13:46:40<23:48:20, 3999.77 examples/s] Running tokenizer on dataset (num_proc=48): 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 881365886/881416735 [38:30:19<04:34, 185.10 examples/s] Running tokenizer on dataset (num_proc=48): 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 881366886/881416735 [38:30:25<04:36, 180.57 examples/s] ``` and validation set as well ```bash Running tokenizer on dataset (num_proc=48): 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 41544000/46390354 [28:44<02:37, 30798.76 examples/s] Running tokenizer on dataset (num_proc=48): 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 41550000/46390354 [28:44<02:08, 37698.08 examples/s] Running tokenizer on dataset (num_proc=48): 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‹| 44747422/46390354 [2:15:48<12:22:44, 36.87 examples/s] Running tokenizer on dataset (num_proc=48): 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‹| 44747422/46390354 [2:16:00<12:22:44, 36.87 examples/s] ``` ### Steps to reproduce the bug using the following kwargs ```python with accelerator.main_process_first(): lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=48 load_from_cache_file=True, desc=f"Grouping texts in chunks of {block_size}", ) ``` running through slurm script ```bash #SBATCH --partition=gpu-nvidia-a100 #SBATCH --nodes=1 #SBATCH --ntasks=1 #SBATCH --gpus-per-task=8 #SBATCH --cpus-per-task=96 ``` using this dataset https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T ### Expected behavior Constant speed throughout ### Environment info - `datasets` version: 2.15.0 - Platform: Linux-5.15.0-1049-aws-x86_64-with-glibc2.10 - Python version: 3.8.18 - `huggingface_hub` version: 0.19.4 - PyArrow version: 14.0.1 - Pandas version: 2.0.3 - `fsspec` version: 2023.10.0
null
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/6734/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6733
6,733
EmptyDatasetError when loading dataset downloaded with HuggingFace cli
{ "avatar_url": "https://avatars.githubusercontent.com/u/77196999?v=4", "events_url": "https://api.github.com/users/StwayneXG/events{/privacy}", "followers_url": "https://api.github.com/users/StwayneXG/followers", "following_url": "https://api.github.com/users/StwayneXG/following{/other_user}", "gists_url": "https://api.github.com/users/StwayneXG/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/StwayneXG", "id": 77196999, "login": "StwayneXG", "node_id": "MDQ6VXNlcjc3MTk2OTk5", "organizations_url": "https://api.github.com/users/StwayneXG/orgs", "received_events_url": "https://api.github.com/users/StwayneXG/received_events", "repos_url": "https://api.github.com/users/StwayneXG/repos", "site_admin": false, "starred_url": "https://api.github.com/users/StwayneXG/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/StwayneXG/subscriptions", "type": "User", "url": "https://api.github.com/users/StwayneXG", "user_view_type": "public" }
[]
open
false
[ "Hi! `datasets` is not compatible with `huggingface_hub`'s cache structure, hence the error.\r\n\r\nYou can track https://github.com/huggingface/datasets/issues/5080 to get notified when this is implemented." ]
2024-03-14T16:41:27Z
2024-03-15T18:09:02Z
null
NONE
null
null
### Describe the bug I am using a cluster that does not have access to the internet when given a job. I tried downloading the dataset using the huggingface-cli command and then loading it with load_dataset but I get an error: ```raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None``` The dataset I'm using is "lmsys/chatbot_arena_conversations". The folder structure is - README.md - data - train-00000-of-00001-cced8514c7ed782a.parquet ### Steps to reproduce the bug 1. Download dataset using HuggingFace CLI: ```huggingface-cli download lmsys/chatbot_arena_conversations --local-dir ./lmsys/chatbot_arena_conversations``` 2. In Python ``` from datasets import load_dataset load_dataset("lmsys/chatbot_arena_conversations") ``` ### Expected behavior Should return a Dataset Dict in the form of ``` DatasetDict({ train: Dataset({ features: [...], num_rows: 33,000 }) }) ``` ### Environment info Python 3.11.5 Datasets 2.18.0 Transformers 4.38.2 Pytorch 2.2.0 Pyarrow 15.0.1 Rocky Linux release 8.9 (Green Obsidian)
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6733/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6731
6,731
Unexpected behavior when using load_dataset with streaming=True in a for loop
{ "avatar_url": "https://avatars.githubusercontent.com/u/42908296?v=4", "events_url": "https://api.github.com/users/uApiv/events{/privacy}", "followers_url": "https://api.github.com/users/uApiv/followers", "following_url": "https://api.github.com/users/uApiv/following{/other_user}", "gists_url": "https://api.github.com/users/uApiv/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/uApiv", "id": 42908296, "login": "uApiv", "node_id": "MDQ6VXNlcjQyOTA4Mjk2", "organizations_url": "https://api.github.com/users/uApiv/orgs", "received_events_url": "https://api.github.com/users/uApiv/received_events", "repos_url": "https://api.github.com/users/uApiv/repos", "site_admin": false, "starred_url": "https://api.github.com/users/uApiv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/uApiv/subscriptions", "type": "User", "url": "https://api.github.com/users/uApiv", "user_view_type": "public" }
[]
closed
false
[ "This is normal behavior in python when using `lambda`: the `i` defined in your `lambda` refers to the global variable `i` in your loop, and `i` equals to `1` when you run your `for e in res[0]` line.\r\n\r\nYou should pass `fn_kwargs` that will be passed to your `lambda` instead of using the global variable:\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nres=[]\r\nfor i in [0,1]:\r\n di = load_dataset(\r\n \"json\", \r\n data_files='path_to.json', \r\n split='train',\r\n streaming=True, \r\n ).map(lambda x, source: {\"source\": source}, fn_kwargs={\"source\": i})\r\n\r\n res.append(di)\r\n\r\nfor e in res[0]:\r\n print(e)\r\n```\r\n\r\nThis doesn't happen in non-streaming since in that case `map` is executed while the variable `i` has the right value. In streaming mode, `map` is executed on-the-fly when you iterate on the dataset.", "Thank you very much for your answer. I think this issue can be closed now." ]
2024-03-12T23:26:43Z
2024-04-16T00:00:00Z
2024-04-16T00:00:00Z
NONE
null
null
### Describe the bug ### My Code ``` from datasets import load_dataset res=[] for i in [0,1]: di=load_dataset( "json", data_files='path_to.json', split='train', streaming=True, ).map(lambda x: {"source": i}) res.append(di) for e in res[0]: print(e) ``` ### Unexpected Behavior Data in `res[0]` has `source=1`. However the expected value is 0. ### FYI I further switch `streaming` to `False`. And the output value is as expected (0). So there may exist bugs in setting `streaming=True` in a for loop. ### Environment Python 3.8.0 datasets==2.18.0 transformers==4.28.1 ### Steps to reproduce the bug 1. Create a Json file with any content. 2. Run the provided code. 3. Switch `streaming` to `False` and run again to see the expected behavior. ### Expected behavior The expected behavior is the data are mapped with its corresponding value in the for loop. ### Environment info Python 3.8.0 datasets==2.18.0 transformers==4.28.1 Ubuntu 20.04
{ "avatar_url": "https://avatars.githubusercontent.com/u/42908296?v=4", "events_url": "https://api.github.com/users/uApiv/events{/privacy}", "followers_url": "https://api.github.com/users/uApiv/followers", "following_url": "https://api.github.com/users/uApiv/following{/other_user}", "gists_url": "https://api.github.com/users/uApiv/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/uApiv", "id": 42908296, "login": "uApiv", "node_id": "MDQ6VXNlcjQyOTA4Mjk2", "organizations_url": "https://api.github.com/users/uApiv/orgs", "received_events_url": "https://api.github.com/users/uApiv/received_events", "repos_url": "https://api.github.com/users/uApiv/repos", "site_admin": false, "starred_url": "https://api.github.com/users/uApiv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/uApiv/subscriptions", "type": "User", "url": "https://api.github.com/users/uApiv", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6731/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6730
6,730
Deprecate Pandas builder
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6730). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005301 / 0.011353 (-0.006052) | 0.003701 / 0.011008 (-0.007307) | 0.065830 / 0.038508 (0.027322) | 0.029791 / 0.023109 (0.006682) | 0.251676 / 0.275898 (-0.024222) | 0.283824 / 0.323480 (-0.039655) | 0.003083 / 0.007986 (-0.004903) | 0.004144 / 0.004328 (-0.000185) | 0.053670 / 0.004250 (0.049419) | 0.042020 / 0.037052 (0.004968) | 0.266389 / 0.258489 (0.007899) | 0.296740 / 0.293841 (0.002900) | 0.028320 / 0.128546 (-0.100226) | 0.010604 / 0.075646 (-0.065042) | 0.219881 / 0.419271 (-0.199390) | 0.036216 / 0.043533 (-0.007317) | 0.255718 / 0.255139 (0.000579) | 0.275808 / 0.283200 (-0.007392) | 0.018407 / 0.141683 (-0.123276) | 1.140007 / 1.452155 (-0.312148) | 1.174005 / 1.492716 (-0.318711) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091230 / 0.018006 (0.073224) | 0.300704 / 0.000490 (0.300215) | 0.000207 / 0.000200 (0.000007) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018950 / 0.037411 (-0.018461) | 0.062177 / 0.014526 (0.047651) | 0.073968 / 0.176557 (-0.102589) | 0.122161 / 0.737135 (-0.614974) | 0.075001 / 0.296338 (-0.221338) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285675 / 0.215209 (0.070466) | 2.794176 / 2.077655 (0.716522) | 1.478666 / 1.504120 (-0.025454) | 1.361843 / 1.541195 (-0.179351) | 1.383847 / 1.468490 (-0.084643) | 0.568610 / 4.584777 (-4.016167) | 2.402351 / 3.745712 (-1.343361) | 2.860772 / 5.269862 (-2.409089) | 1.768588 / 4.565676 (-2.797089) | 0.063257 / 0.424275 (-0.361018) | 0.004998 / 0.007607 (-0.002609) | 0.340897 / 0.226044 (0.114853) | 3.340238 / 2.268929 (1.071310) | 1.836434 / 55.444624 (-53.608190) | 1.556844 / 6.876477 (-5.319633) | 1.610685 / 2.142072 (-0.531388) | 0.644941 / 4.805227 (-4.160286) | 0.117593 / 6.500664 (-6.383072) | 0.042803 / 0.075469 (-0.032666) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.979181 / 1.841788 (-0.862607) | 11.901365 / 8.074308 (3.827057) | 9.587943 / 10.191392 (-0.603449) | 0.139648 / 0.680424 (-0.540776) | 0.013904 / 0.534201 (-0.520297) | 0.291249 / 0.579283 (-0.288034) | 0.260737 / 0.434364 (-0.173627) | 0.326000 / 0.540337 (-0.214338) | 0.433459 / 1.386936 (-0.953477) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005503 / 0.011353 (-0.005850) | 0.003738 / 0.011008 (-0.007270) | 0.049137 / 0.038508 (0.010629) | 0.031484 / 0.023109 (0.008374) | 0.265783 / 0.275898 (-0.010115) | 0.295125 / 0.323480 (-0.028354) | 0.004074 / 0.007986 (-0.003911) | 0.002707 / 0.004328 (-0.001622) | 0.048340 / 0.004250 (0.044089) | 0.045453 / 0.037052 (0.008401) | 0.276500 / 0.258489 (0.018011) | 0.312002 / 0.293841 (0.018162) | 0.029139 / 0.128546 (-0.099408) | 0.010445 / 0.075646 (-0.065201) | 0.057486 / 0.419271 (-0.361785) | 0.052386 / 0.043533 (0.008853) | 0.267099 / 0.255139 (0.011960) | 0.283193 / 0.283200 (-0.000007) | 0.018368 / 0.141683 (-0.123315) | 1.136207 / 1.452155 (-0.315948) | 1.178418 / 1.492716 (-0.314298) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.089270 / 0.018006 (0.071264) | 0.301087 / 0.000490 (0.300598) | 0.000208 / 0.000200 (0.000008) | 0.000050 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021991 / 0.037411 (-0.015421) | 0.075357 / 0.014526 (0.060831) | 0.087781 / 0.176557 (-0.088775) | 0.126923 / 0.737135 (-0.610212) | 0.088491 / 0.296338 (-0.207847) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.293653 / 0.215209 (0.078444) | 2.872156 / 2.077655 (0.794501) | 1.559229 / 1.504120 (0.055109) | 1.441201 / 1.541195 (-0.099993) | 1.472642 / 1.468490 (0.004152) | 0.588463 / 4.584777 (-3.996314) | 2.447685 / 3.745712 (-1.298028) | 2.755752 / 5.269862 (-2.514110) | 1.796591 / 4.565676 (-2.769086) | 0.068024 / 0.424275 (-0.356252) | 0.005148 / 0.007607 (-0.002459) | 0.343572 / 0.226044 (0.117528) | 3.347856 / 2.268929 (1.078927) | 1.945977 / 55.444624 (-53.498647) | 1.648953 / 6.876477 (-5.227524) | 1.804468 / 2.142072 (-0.337604) | 0.651034 / 4.805227 (-4.154193) | 0.118130 / 6.500664 (-6.382534) | 0.041019 / 0.075469 (-0.034450) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.020461 / 1.841788 (-0.821327) | 12.514237 / 8.074308 (4.439929) | 10.696276 / 10.191392 (0.504884) | 0.154549 / 0.680424 (-0.525874) | 0.015964 / 0.534201 (-0.518237) | 0.290392 / 0.579283 (-0.288891) | 0.276074 / 0.434364 (-0.158290) | 0.326253 / 0.540337 (-0.214085) | 0.440383 / 1.386936 (-0.946553) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#29ffc270da34de70cf8e28b2ebeadba1c06d8730 \"CML watermark\")\n" ]
2024-03-12T15:12:13Z
2024-03-12T17:42:33Z
2024-03-12T17:36:24Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6730.diff", "html_url": "https://github.com/huggingface/datasets/pull/6730", "merged_at": "2024-03-12T17:36:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/6730.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6730" }
The Pandas packaged builder is undocumented and relies on `pickle` to read the data, making it **unsafe**. Moreover, I haven't seen a single instance of this builder being used (not even using the GH/Hub search), so we should deprecate it.
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6730/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6729
6,729
Support zipfiles that span multiple disks?
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "d876e3", "default": true, "description": "Further information is requested", "id": 1935892912, "name": "question", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question" } ]
closed
false
[ "@severo were you able to solve it?", "No. cc @albertvillanova @lhoestq @polinaeterna for an evaluation of what it would take to support this feature.", "The underlying issue issue is that the dataset repository has used split ZIP archive files: https://huggingface.co/datasets/PhilEO-community/PhilEO-downstream/tree/main/data\r\n```\r\ndownstream_dataset_patches_npzip.z01\r\ndownstream_dataset_patches_npzip.z02\r\n...\r\ndownstream_dataset_patches_npzip.zip\r\n```\r\nand these are not supported by the Python standard library package `zipfile`.", "It's a pretty bad way to share a dataset since one needs to download the full dataset to use it.\r\n\r\nWe likely won't support this format.", "I agree it is a format we maybe should not support: streaming is not possible.", "I opened a PR in the reported repo to disable the viewer: https://huggingface.co/datasets/PhilEO-community/PhilEO-downstream/discussions/1" ]
2024-03-11T21:07:41Z
2024-06-26T05:08:59Z
2024-06-26T05:05:28Z
COLLABORATOR
null
null
See https://huggingface.co/datasets/PhilEO-community/PhilEO-downstream The dataset viewer gives the following error: ``` Error code: ConfigNamesError Exception: BadZipFile Message: zipfiles that span multiple disks are not supported Traceback: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/dataset/config_names.py", line 67, in compute_config_names_response get_dataset_config_names( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/inspect.py", line 347, in get_dataset_config_names dataset_module = dataset_module_factory( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1871, in dataset_module_factory raise e1 from None File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1846, in dataset_module_factory return HubDatasetModuleFactoryWithoutScript( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1240, in get_module module_name, default_builder_kwargs = infer_module_for_data_files( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 584, in infer_module_for_data_files split_modules = { File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 585, in <dictcomp> split: infer_module_for_data_files_list(data_files_list, download_config=download_config) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 526, in infer_module_for_data_files_list return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 554, in infer_module_for_data_files_list_in_archives for f in xglob(extracted, recursive=True, download_config=download_config)[ File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 576, in xglob fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/core.py", line 622, in get_fs_token_paths fs = filesystem(protocol, **inkwargs) File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/registry.py", line 290, in filesystem return cls(**storage_options) File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/spec.py", line 79, in __call__ obj = super().__call__(*args, **kwargs) File "/src/services/worker/.venv/lib/python3.9/site-packages/fsspec/implementations/zip.py", line 57, in __init__ self.zip = zipfile.ZipFile( File "/usr/local/lib/python3.9/zipfile.py", line 1266, in __init__ self._RealGetContents() File "/usr/local/lib/python3.9/zipfile.py", line 1329, in _RealGetContents endrec = _EndRecData(fp) File "/usr/local/lib/python3.9/zipfile.py", line 286, in _EndRecData return _EndRecData64(fpin, -sizeEndCentDir, endrec) File "/usr/local/lib/python3.9/zipfile.py", line 232, in _EndRecData64 raise BadZipFile("zipfiles that span multiple disks are not supported") zipfile.BadZipFile: zipfiles that span multiple disks are not supported ``` The files (https://huggingface.co/datasets/PhilEO-community/PhilEO-downstream/tree/main/data) are: <img width="629" alt="Capture d’écran 2024-03-11 aΜ€ 22 07 30" src="https://github.com/huggingface/datasets/assets/1676121/0bb15a51-d54f-4d73-8572-e427ea644b36">
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6729/reactions" }
not_planned
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6728
6,728
Issue Downloading Certain Datasets After Setting Custom `HF_ENDPOINT`
{ "avatar_url": "https://avatars.githubusercontent.com/u/10057041?v=4", "events_url": "https://api.github.com/users/padeoe/events{/privacy}", "followers_url": "https://api.github.com/users/padeoe/followers", "following_url": "https://api.github.com/users/padeoe/following{/other_user}", "gists_url": "https://api.github.com/users/padeoe/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/padeoe", "id": 10057041, "login": "padeoe", "node_id": "MDQ6VXNlcjEwMDU3MDQx", "organizations_url": "https://api.github.com/users/padeoe/orgs", "received_events_url": "https://api.github.com/users/padeoe/received_events", "repos_url": "https://api.github.com/users/padeoe/repos", "site_admin": false, "starred_url": "https://api.github.com/users/padeoe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/padeoe/subscriptions", "type": "User", "url": "https://api.github.com/users/padeoe", "user_view_type": "public" }
[]
closed
false
[ "Through debugging, I found a potential solution is to modify the code in the error handling module of `huggingface_hub`: https://github.com/huggingface/huggingface_hub/commit/56d6c798c44e83d2a3167e74c022737d8fcbe822 ", "@Wauplin ", "Thanks for investigating and reporting the bug @padeoe! I've opened a PR in `huggingface_hub` with your suggested fix! :) https://github.com/huggingface/huggingface_hub/pull/2119" ]
2024-03-11T09:06:38Z
2024-03-15T14:52:07Z
2024-03-15T14:52:07Z
NONE
null
null
### Describe the bug This bug is triggered under the following conditions: - datasets repo ids without organization names trigger errors, such as `bookcorpus`, `gsm8k`, `wikipedia`, rather than in the form of `A/B`. - If `HF_ENDPOINT` is set and the hostname is not in the form of `(hub-ci.)?huggingface.co`. - This issue occurs with `datasets>2.15.0` or `huggingface-hub>0.19.4`. For example, using the latest versions: `datasets==2.18.0` and `huggingface-hub==0.21.4`, ### Steps to reproduce the bug the issue can be reproduced with the following code: 1. install specific datasets and huggingface_hub. ```bash pip install datasets==2.18.0 pip install huggingface_hub==0.21.4 ``` 2. execute python code. ```Python import os os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' from datasets import load_dataset bookcorpus = load_dataset('bookcorpus', split='train') ``` console output: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/padeoe/.local/lib/python3.10/site-packages/datasets/load.py", line 2556, in load_dataset builder_instance = load_dataset_builder( File "/home/padeoe/.local/lib/python3.10/site-packages/datasets/load.py", line 2228, in load_dataset_builder dataset_module = dataset_module_factory( File "/home/padeoe/.local/lib/python3.10/site-packages/datasets/load.py", line 1879, in dataset_module_factory raise e1 from None File "/home/padeoe/.local/lib/python3.10/site-packages/datasets/load.py", line 1830, in dataset_module_factory with fs.open(f"datasets/{path}/{filename}", "r", encoding="utf-8") as f: File "/home/padeoe/.local/lib/python3.10/site-packages/fsspec/spec.py", line 1295, in open self.open( File "/home/padeoe/.local/lib/python3.10/site-packages/fsspec/spec.py", line 1307, in open f = self._open( File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/hf_file_system.py", line 228, in _open return HfFileSystemFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs) File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/hf_file_system.py", line 615, in __init__ self.resolved_path = fs.resolve_path(path, revision=revision) File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/hf_file_system.py", line 180, in resolve_path repo_and_revision_exist, err = self._repo_and_revision_exist(repo_type, repo_id, revision) File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/hf_file_system.py", line 117, in _repo_and_revision_exist self._api.repo_info(repo_id, revision=revision, repo_type=repo_type) File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 118, in _inner_fn return fn(*args, **kwargs) File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/hf_api.py", line 2413, in repo_info return method( File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 118, in _inner_fn return fn(*args, **kwargs) File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/hf_api.py", line 2286, in dataset_info hf_raise_for_status(r) File "/home/padeoe/.local/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py", line 362, in hf_raise_for_status raise HfHubHTTPError(str(e), response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: 401 Client Error: Unauthorized for url: https://hf-mirror.com/api/datasets/bookcorpus/bookcorpus.py (Request ID: Root=1-65ee8659-5ab10eec5960c63e71f2bb58;b00bdbea-fd6e-4a74-8fe0-bc4682ae090e) ``` ### Expected behavior 
The dataset is downloaded correctly without any errors. ### Environment info datasets==2.18.0 huggingface-hub==0.21.4
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6728/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6727
6,727
Using a registry instead of calling globals for fetching feature types
{ "avatar_url": "https://avatars.githubusercontent.com/u/11325244?v=4", "events_url": "https://api.github.com/users/psmyth94/events{/privacy}", "followers_url": "https://api.github.com/users/psmyth94/followers", "following_url": "https://api.github.com/users/psmyth94/following{/other_user}", "gists_url": "https://api.github.com/users/psmyth94/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/psmyth94", "id": 11325244, "login": "psmyth94", "node_id": "MDQ6VXNlcjExMzI1MjQ0", "organizations_url": "https://api.github.com/users/psmyth94/orgs", "received_events_url": "https://api.github.com/users/psmyth94/received_events", "repos_url": "https://api.github.com/users/psmyth94/repos", "site_admin": false, "starred_url": "https://api.github.com/users/psmyth94/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/psmyth94/subscriptions", "type": "User", "url": "https://api.github.com/users/psmyth94", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6727). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "looks like some files are missing in your google storage", "cc @mariosasko is it related to https://github.com/huggingface/datasets/pull/6474 ? The files should ideally not move for backward compatibility anyway", "@lhoestq All the files are still there.\r\n\r\nThe problem is that the `natural_questions` is now a no-code dataset, so the test's paths are no longer correct (unless the revision is pinned to the previous version). \r\n\r\n@psmyth94 This has been fixed on `main`, so you can make the CI tests green with the following:\r\n```python\r\ngit remote add upstream https://github.com/huggingface/datasets.git\r\ngit pull upstream main\r\ngit push\r\n```", "Thank you @mariosasko ! I'm updating this branch if you don't mind @psmyth94 ", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004903 / 0.011353 (-0.006450) | 0.003105 / 0.011008 (-0.007903) | 0.061980 / 0.038508 (0.023471) | 0.029726 / 0.023109 (0.006617) | 0.243406 / 0.275898 (-0.032492) | 0.262530 / 0.323480 (-0.060950) | 0.003905 / 0.007986 (-0.004081) | 0.002617 / 0.004328 (-0.001712) | 0.047851 / 0.004250 (0.043601) | 0.040397 / 0.037052 (0.003345) | 0.259461 / 0.258489 (0.000972) | 0.285059 / 0.293841 (-0.008782) | 0.027321 / 0.128546 (-0.101225) | 0.009876 / 0.075646 (-0.065770) | 0.206999 / 0.419271 (-0.212273) | 0.034906 / 0.043533 (-0.008626) | 0.245120 / 0.255139 (-0.010019) | 0.270490 / 0.283200 (-0.012710) | 0.017341 / 0.141683 (-0.124342) | 1.128182 / 1.452155 (-0.323973) | 1.173024 / 1.492716 (-0.319693) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.089337 / 0.018006 (0.071331) | 0.298256 / 0.000490 (0.297767) | 0.000216 / 0.000200 (0.000016) | 0.000047 / 0.000054 (-0.000007) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018179 / 0.037411 (-0.019233) | 0.061275 / 0.014526 (0.046749) 
| 0.073137 / 0.176557 (-0.103419) | 0.119603 / 0.737135 (-0.617532) | 0.073969 / 0.296338 (-0.222370) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.283109 / 0.215209 (0.067900) | 2.765441 / 2.077655 (0.687787) | 1.471276 / 1.504120 (-0.032844) | 1.346365 / 1.541195 (-0.194830) | 1.360668 / 1.468490 (-0.107822) | 0.549947 / 4.584777 (-4.034830) | 2.344213 / 3.745712 (-1.401499) | 2.700905 / 5.269862 (-2.568956) | 1.689936 / 4.565676 (-2.875741) | 0.061985 / 0.424275 (-0.362290) | 0.004923 / 0.007607 (-0.002684) | 0.329833 / 0.226044 (0.103788) | 3.277580 / 2.268929 (1.008652) | 1.833987 / 55.444624 (-53.610638) | 1.571023 / 6.876477 (-5.305454) | 1.573259 / 2.142072 (-0.568813) | 0.627504 / 4.805227 (-4.177723) | 0.114106 / 6.500664 (-6.386558) | 0.041197 / 0.075469 (-0.034272) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.967400 / 1.841788 (-0.874388) | 11.046527 / 8.074308 (2.972219) | 9.542214 / 10.191392 (-0.649178) | 0.140745 / 0.680424 (-0.539679) | 0.013627 / 0.534201 (-0.520574) | 0.288429 / 0.579283 (-0.290855) | 0.260509 / 0.434364 (-0.173855) | 0.324704 / 0.540337 (-0.215633) | 0.419366 / 1.386936 (-0.967570) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005123 / 0.011353 (-0.006230) | 0.003119 / 0.011008 (-0.007890) | 0.048931 / 0.038508 (0.010423) | 0.032067 / 0.023109 (0.008958) | 0.276825 / 0.275898 (0.000927) | 0.297589 / 
0.323480 (-0.025890) | 0.004075 / 0.007986 (-0.003911) | 0.002579 / 0.004328 (-0.001750) | 0.047862 / 0.004250 (0.043612) | 0.044032 / 0.037052 (0.006980) | 0.289469 / 0.258489 (0.030980) | 0.327269 / 0.293841 (0.033428) | 0.029369 / 0.128546 (-0.099177) | 0.010180 / 0.075646 (-0.065466) | 0.057111 / 0.419271 (-0.362161) | 0.051046 / 0.043533 (0.007513) | 0.276758 / 0.255139 (0.021619) | 0.296084 / 0.283200 (0.012884) | 0.017376 / 0.141683 (-0.124306) | 1.154486 / 1.452155 (-0.297669) | 1.192699 / 1.492716 (-0.300018) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.085981 / 0.018006 (0.067974) | 0.296956 / 0.000490 (0.296466) | 0.000211 / 0.000200 (0.000011) | 0.000050 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021239 / 0.037411 (-0.016172) | 0.074851 / 0.014526 (0.060326) | 0.085676 / 0.176557 (-0.090881) | 0.125876 / 0.737135 (-0.611259) | 0.087573 / 0.296338 (-0.208765) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289220 / 0.215209 (0.074011) | 2.812342 / 2.077655 (0.734688) | 1.572886 / 1.504120 (0.068766) | 1.446442 / 1.541195 (-0.094752) | 1.458737 / 1.468490 (-0.009753) | 0.562010 / 4.584777 (-4.022767) | 2.422896 / 3.745712 (-1.322816) | 2.578408 / 5.269862 (-2.691454) | 1.689998 / 4.565676 (-2.875678) | 0.064782 / 0.424275 (-0.359493) | 0.005051 / 0.007607 (-0.002556) | 0.339982 / 0.226044 (0.113938) | 3.309882 / 2.268929 (1.040953) | 1.910273 / 55.444624 (-53.534351) | 1.649723 / 6.876477 (-5.226753) | 1.744073 / 2.142072 (-0.397999) | 0.651905 / 4.805227 (-4.153323) | 0.114606 / 6.500664 (-6.386058) | 0.040030 / 0.075469 (-0.035439) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.008374 / 1.841788 (-0.833414) | 11.547300 / 8.074308 (3.472992) | 9.966061 / 10.191392 (-0.225331) | 0.144874 / 0.680424 (-0.535550) | 0.014400 / 0.534201 (-0.519801) | 0.285435 / 0.579283 (-0.293848) | 0.274755 / 0.434364 (-0.159609) | 0.323105 / 0.540337 (-0.217232) | 0.439172 / 1.386936 (-0.947764) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4591ac120e9d6c082b2479d2005c04b9c36f539c \"CML watermark\")\n" ]
2024-03-10T17:47:51Z
2024-03-13T12:08:49Z
2024-03-13T10:46:02Z
CONTRIBUTOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6727.diff", "html_url": "https://github.com/huggingface/datasets/pull/6727", "merged_at": "2024-03-13T10:46:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/6727.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6727" }
Hello, When working with bio-data, each feature often has metadata associated with it (e.g. species, lineage, snp position, etc). To store this, I like to use the feature classes with the added `metadata` attribute. However, when saving or loading with custom features, you get an error since that class doesn't exist in the global namespace in `datasets.features.features`. Take for example, ```python from dataclasses import dataclass, field from datasets import Dataset from datasets.features.features import Value, Features @dataclass class FeatureA(Value): metadata: dict = field(default=dict) _type: str = field(default="FeatureA", init=False, repr=False) @dataclass class FeatureB(Value): metadata: dict = field(default_factory=dict) _type: str = field(default="FeatureB", init=False, repr=False) test_data = { "a": [1, 2, 3], "b": [4, 5, 6], } test_data = Dataset.from_dict( test_data, features=Features({ "a": FeatureA("int32", metadata={"species": "lactobacillus acetotolerans"}), "b": FeatureB("int32", metadata={"species": "lactobacillus iners"}), }) ) # returns an error since FeatureA and FeatureB are not in the global namespace test_data.save_to_disk('./test_data') ``` Saving the dataset (0/1 shards): 0%| | 0/3 [00:00<?, ? examples/s] --------------------------------------------------------------------------- KeyError Traceback (most recent call last) Cell In[2], line 28 19 test_data = Dataset.from_dict( 20 test_data, 21 features=Features({ (...) 24 }) 25 ) 27 # returns an error since FeatureA and FeatureB are not in the global namespace ---> 28 test_data.save_to_disk('./test_data') ... File ~\Documents\datasets\src\datasets\features\features.py:1361, in generate_from_dict(obj) 1359 return {key: generate_from_dict(value) for key, value in obj.items()} 1360 obj = dict(obj) -> 1361 class_type = globals()[obj.pop("_type")] 1363 if class_type == Sequence: 1364 return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1)) KeyError: 'FeatureA' We can avoid this by having a registry (like formatters) and doing ```python from datasets.features.features import register_feature register_feature(FeatureA, "FeatureA") register_feature(FeatureB, "FeatureB") test_data.save_to_disk('./test_data') ``` Saving the dataset (1/1 shards): 100%|------| 3/3 [00:00<00:00, 211.13 examples/s] and loading from disk returns with all metadata information ```python from datasets import load_from_disk test_data = load_from_disk('./test_data') test_data.features ``` {'a': FeatureA(dtype='int32', id=None, metadata={'species': 'lactobacillus acetotolerans'}), 'b': FeatureB(dtype='int32', id=None, metadata={'species': 'lactobacillus iners'})}
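A minimal sketch of the registry idea described above, assuming a module-level dict keyed by the `_type` string. Only `register_feature` is the name proposed in this PR; the other names are illustrative, not the library's actual API.

```python
# Illustrative sketch only: look feature classes up in an explicit registry
# instead of the module's globals(), so user-defined classes can opt in.
_FEATURE_TYPES = {}

def register_feature(feature_cls, feature_type: str):
    """Map the string stored in a feature's `_type` field to its class."""
    _FEATURE_TYPES[feature_type] = feature_cls

def lookup_feature_type(type_name: str):
    # generate_from_dict would call this instead of globals()[obj.pop("_type")].
    try:
        return _FEATURE_TYPES[type_name]
    except KeyError:
        raise ValueError(f"Feature type '{type_name}' is not registered") from None
```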
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6727/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6726
6,726
Profiling for HF Filesystem shows there are easy performance gains to be made
{ "avatar_url": "https://avatars.githubusercontent.com/u/159512661?v=4", "events_url": "https://api.github.com/users/awgr/events{/privacy}", "followers_url": "https://api.github.com/users/awgr/followers", "following_url": "https://api.github.com/users/awgr/following{/other_user}", "gists_url": "https://api.github.com/users/awgr/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/awgr", "id": 159512661, "login": "awgr", "node_id": "U_kgDOCYH4VQ", "organizations_url": "https://api.github.com/users/awgr/orgs", "received_events_url": "https://api.github.com/users/awgr/received_events", "repos_url": "https://api.github.com/users/awgr/repos", "site_admin": false, "starred_url": "https://api.github.com/users/awgr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/awgr/subscriptions", "type": "User", "url": "https://api.github.com/users/awgr", "user_view_type": "public" }
[]
open
false
[ "FWIW I debugged this while waiting for it to go", "Oh I forgot to mention you can also cache resolve_pattern, and that seemed to also substantially improves things, if you want to load a dataset twice for whatever reason." ]
2024-03-09T07:08:45Z
2024-03-09T07:11:08Z
null
NONE
null
null
### Describe the bug # Let's make it faster First, an evidence... ![image](https://github.com/huggingface/datasets/assets/159512661/a703a82c-43a0-426c-9d99-24c563d70965) Figure 1: CProfile for loading 3 files from cerebras/SlimPajama-627B train split, and 3 files from test split using streaming=True. X axis is 1106 seconds long. See? It's pretty slow. What is resolve pattern doing? ``` resolve_pattern called with **/train/** and hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543 resolve_pattern took 20.815081119537354 seconds ``` Makes sense. How to improve it? ## Bigger project, biggest payoff Databricks (and consequently, spark) store a compressed manifest file of the files contained in the remote filesystem. Then, you download one tiny file, decompress it, and all the operations are local instead of this shenanigans. It seems pretty straightforward to make dataset uploads compute a manifest and upload it alongside their data. This would make resolution time so fast that nobody would ever think about it again. It also means you either need to have the uploader compute it _every time_, or have a hook that computes it. ## Smaller project, immediate payoff: Be diligent in avoiding deepcopy Revise the _ls_tree method to avoid deepcopy: ``` def _ls_tree( self, path: str, recursive: bool = False, refresh: bool = False, revision: Optional[str] = None, expand_info: bool = True, ): ..... omitted ..... for path_info in tree: if isinstance(path_info, RepoFile): cache_path_info = { "name": root_path + "/" + path_info.path, "size": path_info.size, "type": "file", "blob_id": path_info.blob_id, "lfs": path_info.lfs, "last_commit": path_info.last_commit, "security": path_info.security, } else: cache_path_info = { "name": root_path + "/" + path_info.path, "size": 0, "type": "directory", "tree_id": path_info.tree_id, "last_commit": path_info.last_commit, } parent_path = self._parent(cache_path_info["name"]) self.dircache.setdefault(parent_path, []).append(cache_path_info) out.append(cache_path_info) return copy.deepcopy(out) # copy to not let users modify the dircache ``` Observe this deepcopy at the end. It is making a copy of a very simple data structure. We do not need to copy. We can simply generate the data structure twice instead. It will be much faster. ``` def _ls_tree( self, path: str, recursive: bool = False, refresh: bool = False, revision: Optional[str] = None, expand_info: bool = True, ): ..... omitted ..... def make_cache_path_info(path_info): if isinstance(path_info, RepoFile): return { "name": root_path + "/" + path_info.path, "size": path_info.size, "type": "file", "blob_id": path_info.blob_id, "lfs": path_info.lfs, "last_commit": path_info.last_commit, "security": path_info.security, } else: return { "name": root_path + "/" + path_info.path, "size": 0, "type": "directory", "tree_id": path_info.tree_id, "last_commit": path_info.last_commit, } for path_info in tree: cache_path_info = make_cache_path_info(path_info) out_cache_path_info = make_cache_path_info(path_info) # copy to not let users modify the dircache parent_path = self._parent(cache_path_info["name"]) self.dircache.setdefault(parent_path, []).append(cache_path_info) out.append(out_cache_path_info) return out ``` Note there is no longer a deepcopy in this method. We have replaced it with generating the output twice. This is substantially faster. For me, the entire resolution went from 1100s to 360s. 
## Medium project, medium payoff After the above change, we have this profile: ![image](https://github.com/huggingface/datasets/assets/159512661/db7b83da-2dfc-4c2e-abab-0ede9477876c) Figure 2: x-axis is 355 seconds. Note that globbing and _ls_tree deep copy is gone. No surprise there. It's much faster now, but we still spend ~187seconds in get_fs_token_paths. Well get_fs_token_paths is part of fsspec. We don't need to fix that because we can trust their developers to write high performance code. Probably the caller has misconfigured something. Let's take a look at the storage_options being provided to the filesystem that is constructed during this call. Ah yes, streaming_download_manager::_prepare_single_hop_path_and_storage_options. We know streaming download manager is not compatible with async right now, but we really need this specific part of the code to be async. We're spending so much time checking isDir on the remote filesystem, it's a huge waste. We can make the call easily 20-30x faster by using async, removing this performance bottleneck almost entirely (and reducing the total time of this part of the code to <30s. There is no reason to block async isDir calls for streaming. I'm not going to mess w/ this one myself; I didn't write the streaming impl, and I don't know how it works, but I know the isDir check can be async. ### Steps to reproduce the bug ``` with cProfile.Profile() as pr: pr.enable() # Begin Data if not os.path.exists(data_cache_dir): os.makedirs(data_cache_dir, exist_ok=True) training_dataset = load_dataset(training_dataset_name, split=training_split, cache_dir=data_cache_dir, streaming=True).take(training_slice) eval_dataset = load_dataset(eval_dataset_name, split=eval_split, cache_dir=data_cache_dir, streaming=True).take(eval_slice) # End Data pr.disable() pr.create_stats() if not os.path.exists(profiling_path): os.makedirs(profiling_path, exist_ok=True) pr.dump_stats(os.path.join(profiling_path, "cprofile.prof")) ``` run this code for "cerebras/SlimPajama-627B" and whatever other params ### Expected behavior Something better. ### Environment info - `datasets` version: 2.18.0 - Platform: Linux-5.15.146.1-microsoft-standard-WSL2-x86_64-with-glibc2.35 - Python version: 3.10.13 - `huggingface_hub` version: 0.21.3 - PyArrow version: 15.0.0 - Pandas version: 2.2.1 - `fsspec` version: 2024.2.0
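As a hedged illustration of the "cache resolve_pattern" idea raised in the comments: memoizing the resolution step means a second `load_dataset` call in the same process reuses the first remote listing. `list_remote_files` below is a hypothetical stand-in for the slow fsspec glob/ls call being profiled; the real `resolve_pattern` has a different signature.

```python
from functools import lru_cache
from typing import List, Tuple

def list_remote_files(pattern: str, base_path: str) -> List[str]:
    # Hypothetical stand-in for the remote listing that dominates the profile above.
    return []

@lru_cache(maxsize=128)
def cached_resolve_pattern(pattern: str, base_path: str) -> Tuple[str, ...]:
    # Hashable arguments and a hashable return value make lru_cache applicable here.
    return tuple(sorted(list_remote_files(pattern, base_path)))
```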
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6726/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6725
6,725
Request for a comparison of Hugging Face Datasets with other data formats, especially WebDataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/20135317?v=4", "events_url": "https://api.github.com/users/Luciennnnnnn/events{/privacy}", "followers_url": "https://api.github.com/users/Luciennnnnnn/followers", "following_url": "https://api.github.com/users/Luciennnnnnn/following{/other_user}", "gists_url": "https://api.github.com/users/Luciennnnnnn/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Luciennnnnnn", "id": 20135317, "login": "Luciennnnnnn", "node_id": "MDQ6VXNlcjIwMTM1MzE3", "organizations_url": "https://api.github.com/users/Luciennnnnnn/orgs", "received_events_url": "https://api.github.com/users/Luciennnnnnn/received_events", "repos_url": "https://api.github.com/users/Luciennnnnnn/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Luciennnnnnn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Luciennnnnnn/subscriptions", "type": "User", "url": "https://api.github.com/users/Luciennnnnnn", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
[]
2024-03-08T08:23:01Z
2024-03-08T08:23:01Z
null
NONE
null
null
### Feature request Request for a comparison of Hugging Face Datasets with other data formats, especially WebDataset. ### Motivation I see that Hugging Face Datasets uses Apache Arrow as its backend, which seems great, but I'm curious how it compares with other dataset formats such as WebDataset: what are the pros and cons of each? ### Your contribution More information
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6725/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6724
6,724
Dataset with loading script does not work in renamed repos
{ "avatar_url": "https://avatars.githubusercontent.com/u/2779410?v=4", "events_url": "https://api.github.com/users/BramVanroy/events{/privacy}", "followers_url": "https://api.github.com/users/BramVanroy/followers", "following_url": "https://api.github.com/users/BramVanroy/following{/other_user}", "gists_url": "https://api.github.com/users/BramVanroy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/BramVanroy", "id": 2779410, "login": "BramVanroy", "node_id": "MDQ6VXNlcjI3Nzk0MTA=", "organizations_url": "https://api.github.com/users/BramVanroy/orgs", "received_events_url": "https://api.github.com/users/BramVanroy/received_events", "repos_url": "https://api.github.com/users/BramVanroy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/BramVanroy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BramVanroy/subscriptions", "type": "User", "url": "https://api.github.com/users/BramVanroy", "user_view_type": "public" }
[]
open
false
[]
2024-03-07T17:38:38Z
2024-03-07T20:06:25Z
null
CONTRIBUTOR
null
null
### Describe the bug My data repository was first called `BramVanroy/hplt-mono-v1-2` but I then renamed to use underscores instead of dashes. However, it seems that `datasets` retrieves the old repo name when it checks whether the repo contains data loading scripts in this line. https://github.com/huggingface/datasets/blob/6fb6c834f008996c994b0a86c3808d0a33d44525/src/datasets/load.py#L1845 When I print `filename` it returns `hplt-mono-v1-2.py` but the files in the repo are of course `['.gitattributes', 'README.md', 'hplt_mono_v1_2.py']`. So the `filename` is the original reponame instead of the renamed one. I am not sure if this is a caching issue or not or how I can resolve it. ### Steps to reproduce the bug ``` from datasets import load_dataset ds = load_dataset( "BramVanroy/hplt-mono-v1-2", "ky", trust_remote_code=True ) ``` ### Expected behavior That the most recent repo name is used when `filename` is generated. ### Environment info - `datasets` version: 2.16.1 - Platform: Linux-5.14.0-284.25.1.el9_2.x86_64-x86_64-with-glibc2.34 - Python version: 3.10.13 - `huggingface_hub` version: 0.20.2 - PyArrow version: 14.0.1 - Pandas version: 2.1.3 - `fsspec` version: 2023.10.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6724/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6723
6,723
get_dataset_default_config_name docstring
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6723). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005658 / 0.011353 (-0.005694) | 0.003883 / 0.011008 (-0.007125) | 0.064007 / 0.038508 (0.025499) | 0.030370 / 0.023109 (0.007261) | 0.246677 / 0.275898 (-0.029221) | 0.270846 / 0.323480 (-0.052634) | 0.003102 / 0.007986 (-0.004884) | 0.002931 / 0.004328 (-0.001397) | 0.049446 / 0.004250 (0.045196) | 0.043555 / 0.037052 (0.006503) | 0.261810 / 0.258489 (0.003321) | 0.289705 / 0.293841 (-0.004136) | 0.028676 / 0.128546 (-0.099870) | 0.010778 / 0.075646 (-0.064868) | 0.210604 / 0.419271 (-0.208667) | 0.035987 / 0.043533 (-0.007546) | 0.248034 / 0.255139 (-0.007105) | 0.265019 / 0.283200 (-0.018181) | 0.018522 / 0.141683 (-0.123161) | 1.096364 / 1.452155 (-0.355791) | 1.152750 / 1.492716 (-0.339966) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093987 / 0.018006 (0.075981) | 0.306143 / 0.000490 (0.305653) | 0.000218 / 0.000200 (0.000018) | 0.000045 / 0.000054 (-0.000009) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018727 / 0.037411 (-0.018685) | 0.061983 / 0.014526 (0.047457) | 0.074254 / 0.176557 (-0.102303) | 0.121256 / 0.737135 (-0.615880) | 0.076756 / 0.296338 (-0.219582) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.278824 / 0.215209 (0.063615) | 2.815960 / 2.077655 (0.738305) | 1.472946 / 1.504120 (-0.031174) | 1.349722 / 1.541195 (-0.191473) | 1.327844 / 1.468490 (-0.140646) | 0.574964 / 4.584777 (-4.009813) | 2.403458 / 3.745712 (-1.342254) | 2.769293 / 5.269862 (-2.500569) | 1.736970 / 4.565676 (-2.828706) | 0.063144 / 0.424275 (-0.361131) | 0.004983 / 0.007607 (-0.002625) | 0.331212 / 0.226044 (0.105168) | 3.231496 / 2.268929 (0.962567) | 1.798487 / 55.444624 (-53.646138) | 1.523010 / 6.876477 (-5.353467) | 1.559973 / 2.142072 (-0.582099) | 0.657036 / 4.805227 (-4.148191) | 0.119084 / 6.500664 (-6.381580) | 0.042982 / 0.075469 (-0.032487) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.976433 / 1.841788 (-0.865355) | 11.475946 / 8.074308 (3.401638) | 9.339369 / 10.191392 (-0.852023) | 0.141761 / 0.680424 (-0.538662) | 0.014506 / 0.534201 (-0.519695) | 0.289944 / 0.579283 (-0.289340) | 0.273667 / 0.434364 (-0.160697) | 0.326682 / 0.540337 (-0.213655) | 0.458946 / 1.386936 (-0.927990) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005194 / 0.011353 (-0.006159) | 0.003713 / 0.011008 (-0.007295) | 0.049297 / 0.038508 (0.010789) | 0.029723 / 0.023109 (0.006614) | 0.278664 / 0.275898 (0.002766) | 0.296387 / 0.323480 (-0.027093) | 0.004215 / 0.007986 (-0.003771) | 0.002680 / 0.004328 (-0.001648) | 0.048276 / 0.004250 (0.044025) | 0.044454 / 0.037052 (0.007402) | 0.290510 / 0.258489 (0.032021) | 0.319028 / 0.293841 (0.025187) | 0.029177 / 0.128546 (-0.099369) | 0.010361 / 0.075646 (-0.065285) | 0.056993 / 0.419271 (-0.362279) | 0.050765 / 0.043533 (0.007232) | 0.278234 / 0.255139 (0.023095) | 0.295848 / 0.283200 (0.012649) | 0.018776 / 0.141683 (-0.122906) | 1.134866 / 1.452155 (-0.317288) | 1.204083 / 1.492716 (-0.288634) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.094397 / 0.018006 (0.076391) | 0.304693 / 0.000490 (0.304203) | 0.000207 / 0.000200 (0.000007) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021322 / 0.037411 (-0.016090) | 0.075384 / 0.014526 (0.060859) | 0.086961 / 0.176557 (-0.089596) | 0.124424 / 0.737135 (-0.612711) | 0.087802 / 0.296338 (-0.208536) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.305542 / 0.215209 (0.090333) | 2.980678 / 2.077655 (0.903023) | 1.632348 / 1.504120 (0.128228) | 1.501466 / 1.541195 (-0.039728) | 1.517681 / 1.468490 (0.049191) | 0.579318 / 4.584777 (-4.005459) | 2.460734 / 3.745712 (-1.284978) | 2.650164 / 5.269862 (-2.619697) | 1.752061 / 4.565676 (-2.813615) | 0.064561 / 0.424275 (-0.359714) | 0.005097 / 0.007607 (-0.002510) | 0.359613 / 0.226044 (0.133569) | 3.518549 / 2.268929 (1.249620) | 1.962575 / 55.444624 (-53.482050) | 1.686108 / 6.876477 (-5.190369) | 1.787873 / 2.142072 (-0.354199) | 0.653715 / 4.805227 (-4.151512) | 0.117617 / 6.500664 (-6.383048) | 0.040359 / 0.075469 (-0.035110) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.021533 / 1.841788 (-0.820255) | 11.974817 / 8.074308 (3.900509) | 10.073530 / 10.191392 (-0.117862) | 0.141477 / 0.680424 (-0.538947) | 0.015081 / 0.534201 (-0.519120) | 0.292622 / 0.579283 (-0.286661) | 0.291043 / 0.434364 (-0.143321) | 0.347822 / 0.540337 (-0.192516) | 0.443647 / 1.386936 (-0.943289) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#6fb6c834f008996c994b0a86c3808d0a33d44525 \"CML watermark\")\n" ]
2024-03-07T17:09:29Z
2024-03-07T17:27:29Z
2024-03-07T17:21:20Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6723.diff", "html_url": "https://github.com/huggingface/datasets/pull/6723", "merged_at": "2024-03-07T17:21:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/6723.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6723" }
fix https://github.com/huggingface/datasets/pull/6722
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6723/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6722
6,722
Add details in docstring
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6722). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update." ]
2024-03-07T17:02:07Z
2024-03-07T17:21:10Z
2024-03-07T17:21:08Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6722.diff", "html_url": "https://github.com/huggingface/datasets/pull/6722", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6722.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6722" }
see https://github.com/huggingface/datasets-server/pull/2554#discussion_r1516516867
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6722/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6721
6,721
Hi, do you know how to load a dataset from a local file now?
{ "avatar_url": "https://avatars.githubusercontent.com/u/50232044?v=4", "events_url": "https://api.github.com/users/Gera001/events{/privacy}", "followers_url": "https://api.github.com/users/Gera001/followers", "following_url": "https://api.github.com/users/Gera001/following{/other_user}", "gists_url": "https://api.github.com/users/Gera001/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Gera001", "id": 50232044, "login": "Gera001", "node_id": "MDQ6VXNlcjUwMjMyMDQ0", "organizations_url": "https://api.github.com/users/Gera001/orgs", "received_events_url": "https://api.github.com/users/Gera001/received_events", "repos_url": "https://api.github.com/users/Gera001/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Gera001/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gera001/subscriptions", "type": "User", "url": "https://api.github.com/users/Gera001", "user_view_type": "public" }
[]
open
false
[ "\r\n@Gera001\r\n# Loading Dataset from Local Files Using πŸ€—Hugging Face.\r\n\r\nTo load a dataset from local files using the Hugging Face datasets library, you can use the `load_dataset` function.\r\n\r\n```\r\nfrom datasets import load_dataset\r\ndataset = load_dataset('csv', data_files={'train': 'path/to/train.csv',\r\n 'test': 'path/to/test.csv'})\r\n```\r\n\r\nReference to [HF Datasets docs for loading from local](https://huggingface.co/docs/datasets/en/loading#csv). \r\n\r\n@albertvillanova\r\nthis issue can be closed here.", "like this: from datasets import load_from_disk\r\ndataset = load_from_disk(data_path)\r\n", "@ge00009 \r\n> like this: from datasets import load_from_disk dataset = load_from_disk(data_path)\r\n\r\nLoads a dataset that was previously saved using `save_to_disk()`.\r\n\r\nReference link:\r\nhttps://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_from_disk.example" ]
2024-03-07T13:58:40Z
2024-03-31T08:09:25Z
null
NONE
null
null
Hi, if I want to load a dataset from a local file, how do I specify the configuration name? _Originally posted by @WHU-gentle in https://github.com/huggingface/datasets/issues/2976#issuecomment-1333455222_
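For reference, a short sketch of how a configuration name is usually passed when loading locally; the paths and the config name are placeholders and assume the local directory (or loading script) actually defines configurations.

```python
from datasets import load_dataset, load_from_disk

# Local dataset directory or loading script that defines named configurations:
ds = load_dataset("./path/to/local_dataset", name="my_config", split="train")

# A dataset saved with save_to_disk() carries no configuration name at all:
ds2 = load_from_disk("./path/to/saved_dataset")
```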
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6721/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6720
6,720
TypeError: 'str' object is not callable
{ "avatar_url": "https://avatars.githubusercontent.com/u/2779410?v=4", "events_url": "https://api.github.com/users/BramVanroy/events{/privacy}", "followers_url": "https://api.github.com/users/BramVanroy/followers", "following_url": "https://api.github.com/users/BramVanroy/following{/other_user}", "gists_url": "https://api.github.com/users/BramVanroy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/BramVanroy", "id": 2779410, "login": "BramVanroy", "node_id": "MDQ6VXNlcjI3Nzk0MTA=", "organizations_url": "https://api.github.com/users/BramVanroy/orgs", "received_events_url": "https://api.github.com/users/BramVanroy/received_events", "repos_url": "https://api.github.com/users/BramVanroy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/BramVanroy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BramVanroy/subscriptions", "type": "User", "url": "https://api.github.com/users/BramVanroy", "user_view_type": "public" }
[]
closed
false
[ "Hi ! I opened a PR to fix an issue in the Features defined in your code\r\n\r\nBasically changing\r\n```python\r\nSequence(\"float32\")\r\n```\r\n\r\nto\r\n```python\r\nSequence(Value(\"float32\"))\r\n```\r\n\r\n\r\nhttps://huggingface.co/datasets/BramVanroy/hplt_mono_v1_2/discussions/1", "D'oh! Was wondering why the `str() is not callable` was in there. Glad the error is my end though, and not related to zstandard (which I had not used in the past).\r\n\r\nThanks a lot!" ]
2024-03-07T11:07:09Z
2024-03-08T07:34:53Z
2024-03-07T15:13:58Z
CONTRIBUTOR
null
null
### Describe the bug I am trying to get the HPLT datasets on the hub. Downloading/re-uploading would be too time- and resource consuming so I wrote [a dataset loader script](https://huggingface.co/datasets/BramVanroy/hplt_mono_v1_2/blob/main/hplt_mono_v1_2.py). I think I am very close but for some reason I always get the error below. It happens during the clean-up phase where the directory cannot be removed because it is not empty. My only guess would be that this may have to do with zstandard ``` Traceback (most recent call last): File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 1744, in _prepare_split_single writer.write(example, key) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/arrow_writer.py", line 492, in write self.write_examples_on_file() File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/arrow_writer.py", line 434, in write_examples_on_file if self.schema File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/arrow_writer.py", line 409, in schema else (pa.schema(self._features.type) if self._features is not None else None) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1643, in type return get_nested_type(self) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1209, in get_nested_type {key: get_nested_type(schema[key]) for key in schema} File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1209, in <dictcomp> {key: get_nested_type(schema[key]) for key in schema} File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1221, in get_nested_type value_type = get_nested_type(schema.feature) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1228, in get_nested_type return schema() TypeError: 'str' object is not callable During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 1753, in _prepare_split_single num_examples, num_bytes = writer.finalize() File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/arrow_writer.py", line 588, in finalize self.write_examples_on_file() File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/arrow_writer.py", line 434, in write_examples_on_file if self.schema File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/arrow_writer.py", line 409, in schema else (pa.schema(self._features.type) if self._features is not None else None) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1643, in type return get_nested_type(self) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1209, in get_nested_type {key: get_nested_type(schema[key]) for key in schema} File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1209, in 
<dictcomp> {key: get_nested_type(schema[key]) for key in schema} File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1221, in get_nested_type value_type = get_nested_type(schema.feature) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/features/features.py", line 1228, in get_nested_type return schema() TypeError: 'str' object is not callable The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 959, in incomplete_dir yield tmp_dir File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 1005, in download_and_prepare self._download_and_prepare( File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 1767, in _download_and_prepare super()._download_and_prepare( File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 1100, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 1605, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 1762, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the dataset") from e datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/pricie/vanroy/.config/JetBrains/PyCharm2023.3/scratches/scratch_5.py", line 4, in <module> ds = load_dataset( File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/load.py", line 2549, in load_dataset builder_instance.download_and_prepare( File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 985, in download_and_prepare with incomplete_dir(self._output_dir) as tmp_output_dir: File "/home/pricie/vanroy/.pyenv/versions/3.10.13/lib/python3.10/contextlib.py", line 153, in __exit__ self.gen.throw(typ, value, traceback) File "/home/local/vanroy/dutch-instruction-datasets/.venv/lib/python3.10/site-packages/datasets/builder.py", line 966, in incomplete_dir shutil.rmtree(tmp_dir) File "/home/pricie/vanroy/.pyenv/versions/3.10.13/lib/python3.10/shutil.py", line 731, in rmtree onerror(os.rmdir, path, sys.exc_info()) File "/home/pricie/vanroy/.pyenv/versions/3.10.13/lib/python3.10/shutil.py", line 729, in rmtree os.rmdir(path) OSError: [Errno 39] Directory not empty: '/home/pricie/vanroy/.cache/huggingface/datasets/BramVanroy___hplt_mono_v1_2/ky/1.2.0/7ab138629fe7e9e29fe93ce63d809d5ef9d963273b829f61ab538e012dc9cc47.incomplete' ``` Interestingly, though, this directory _does_ appear to be empty: ```shell > cd /home/pricie/vanroy/.cache/huggingface/datasets/BramVanroy___hplt_mono_v1_2/ky/1.2.0/7ab138629fe7e9e29fe93ce63d809d5ef9d963273b829f61ab538e012dc9cc47.incomplete > ls -lah total 0 drwxr-xr-x. 1 vanroy vanroy 0 Mar 7 12:01 . drwxr-xr-x. 1 vanroy vanroy 304 Mar 7 11:52 .. > cd .. 
> ls 7ab138629fe7e9e29fe93ce63d809d5ef9d963273b829f61ab538e012dc9cc47_builder.lock 7ab138629fe7e9e29fe93ce63d809d5ef9d963273b829f61ab538e012dc9cc47.incomplete ``` ### Steps to reproduce the bug ```python from datasets import load_dataset ds = load_dataset( "BramVanroy/hplt_mono_v1_2", "ky", trust_remote_code=True ) ``` ### Expected behavior No error. ### Environment info - `datasets` version: 2.16.1 - Platform: Linux-5.14.0-284.25.1.el9_2.x86_64-x86_64-with-glibc2.34 - Python version: 3.10.13 - `huggingface_hub` version: 0.20.2 - PyArrow version: 14.0.1 - Pandas version: 2.1.3 - `fsspec` version: 2023.10.0
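The fix referenced in the comments boils down to wrapping the dtype string in `Value`; a minimal before/after sketch (the feature name is a placeholder, not from the original loading script):

```python
from datasets import Features, Sequence, Value

# Before: the bare dtype string later gets "called" as if it were a feature class,
# which is what raises "'str' object is not callable" in get_nested_type above.
# broken = Features({"scores": Sequence("float32")})

# After: wrap the dtype in Value so the nested feature is a real feature object.
features = Features({"scores": Sequence(Value("float32"))})
```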
{ "avatar_url": "https://avatars.githubusercontent.com/u/2779410?v=4", "events_url": "https://api.github.com/users/BramVanroy/events{/privacy}", "followers_url": "https://api.github.com/users/BramVanroy/followers", "following_url": "https://api.github.com/users/BramVanroy/following{/other_user}", "gists_url": "https://api.github.com/users/BramVanroy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/BramVanroy", "id": 2779410, "login": "BramVanroy", "node_id": "MDQ6VXNlcjI3Nzk0MTA=", "organizations_url": "https://api.github.com/users/BramVanroy/orgs", "received_events_url": "https://api.github.com/users/BramVanroy/received_events", "repos_url": "https://api.github.com/users/BramVanroy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/BramVanroy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BramVanroy/subscriptions", "type": "User", "url": "https://api.github.com/users/BramVanroy", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6720/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6719
6,719
Is there any way to solve hanging of IterableDataset using split by node + filtering during inference
{ "avatar_url": "https://avatars.githubusercontent.com/u/8136905?v=4", "events_url": "https://api.github.com/users/ssharpe42/events{/privacy}", "followers_url": "https://api.github.com/users/ssharpe42/followers", "following_url": "https://api.github.com/users/ssharpe42/following{/other_user}", "gists_url": "https://api.github.com/users/ssharpe42/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ssharpe42", "id": 8136905, "login": "ssharpe42", "node_id": "MDQ6VXNlcjgxMzY5MDU=", "organizations_url": "https://api.github.com/users/ssharpe42/orgs", "received_events_url": "https://api.github.com/users/ssharpe42/received_events", "repos_url": "https://api.github.com/users/ssharpe42/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ssharpe42/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ssharpe42/subscriptions", "type": "User", "url": "https://api.github.com/users/ssharpe42", "user_view_type": "public" }
[]
open
false
[]
2024-03-05T15:55:13Z
2024-03-05T15:55:13Z
null
NONE
null
null
### Describe the bug I am using an iterable dataset in a multi-node setup, trying to do training/inference while filtering the data on the fly. I usually do not use `split_dataset_by_node` but it is very slow using the IterableDatasetShard in `accelerate` and `transformers`. When I filter after applying `split_dataset_by_node`, it results in shards that are not equal sizes due to unequal samples filtered from each one. The distributed process hangs when trying to accomplish this. Is there any way to resolve this or is it impossible to implement? ### Steps to reproduce the bug Here is a toy example of what I am trying to do that reproduces the behavior ``` # torchrun --nproc-per-node 2 file.py import os import pandas as pd import torch from accelerate import Accelerator from datasets import Features, Value, load_dataset from datasets.distributed import split_dataset_by_node from torch.utils.data import DataLoader accelerator = Accelerator(device_placement=True, dispatch_batches=False) if accelerator.is_main_process: if not os.path.exists("scratch_data"): os.mkdir("scratch_data") n_shards = 4 for i in range(n_shards): df = pd.DataFrame({"id": list(range(10 * i, 10 * (i + 1)))}) df.to_parquet(f"scratch_data/shard_{i}.parquet") world_size = accelerator.num_processes local_rank = accelerator.process_index def collate_fn(examples): input_ids = [] for example in examples: input_ids.append(example["id"]) return torch.LongTensor(input_ids) dataset = load_dataset( "parquet", data_dir="scratch_data", split="train", streaming=True ) dataset = ( split_dataset_by_node(dataset, rank=local_rank, world_size=world_size) .filter(lambda x: x["id"] < 35) .shuffle(seed=42, buffer_size=100) ) batch_size = 2 train_dataloader = DataLoader( dataset, batch_size=batch_size, collate_fn=collate_fn, num_workers=2 ) for x in train_dataloader: x = x.to(accelerator.device) print({"rank": local_rank, "id": x}) y = accelerator.gather_for_metrics(x) if accelerator.is_main_process: print("gathered", y) ``` ### Expected behavior Is there any way to continue training/inference on the GPUs that have remaining data left without waiting for the others? Is it impossible to filter when ### Environment info - `datasets` version: 2.18.0 - Platform: Linux-5.10.209-198.812.amzn2.x86_64-x86_64-with-glibc2.31 - Python version: 3.10.13 - `huggingface_hub` version: 0.21.3 - PyArrow version: 15.0.0 - Pandas version: 2.2.1 - `fsspec` version: 2023.6.0
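A small diagnostic sketch (my own addition, reusing the toy parquet shards written above) that makes the suspected cause visible: after filtering, the per-rank shards yield different numbers of examples, so the ranks reach the collective `gather_for_metrics` call a different number of times and the shorter rank leaves the others waiting.

```python
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

# Diagnostic only, not a fix: count surviving examples per rank to show the
# shards become uneven once the filter is applied after split_dataset_by_node.
streaming_ds = load_dataset("parquet", data_dir="scratch_data", split="train", streaming=True)
counts = []
for rank in range(2):
    shard = split_dataset_by_node(streaming_ds, rank=rank, world_size=2)
    counts.append(sum(1 for _ in shard.filter(lambda x: x["id"] < 35)))
print(counts)  # e.g. [20, 15]: a different number of batches per rank
```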
null
{ "+1": 4, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 4, "url": "https://api.github.com/repos/huggingface/datasets/issues/6719/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6718
6,718
Fix concurrent script loading with force_redownload
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6718). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005074 / 0.011353 (-0.006279) | 0.003505 / 0.011008 (-0.007503) | 0.063683 / 0.038508 (0.025175) | 0.029308 / 0.023109 (0.006199) | 0.246648 / 0.275898 (-0.029250) | 0.265546 / 0.323480 (-0.057933) | 0.004108 / 0.007986 (-0.003878) | 0.002683 / 0.004328 (-0.001646) | 0.048634 / 0.004250 (0.044383) | 0.043786 / 0.037052 (0.006733) | 0.262197 / 0.258489 (0.003708) | 0.291582 / 0.293841 (-0.002259) | 0.027472 / 0.128546 (-0.101074) | 0.010213 / 0.075646 (-0.065434) | 0.206744 / 0.419271 (-0.212527) | 0.036195 / 0.043533 (-0.007337) | 0.249090 / 0.255139 (-0.006049) | 0.280002 / 0.283200 (-0.003198) | 0.018568 / 0.141683 (-0.123115) | 1.124844 / 1.452155 (-0.327311) | 1.159358 / 1.492716 (-0.333359) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093186 / 0.018006 (0.075180) | 0.302331 / 0.000490 (0.301842) | 0.000217 / 0.000200 (0.000017) | 0.000046 / 0.000054 (-0.000008) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018727 / 0.037411 (-0.018684) | 0.061730 / 0.014526 (0.047204) | 0.074330 / 0.176557 (-0.102226) | 0.119769 / 0.737135 (-0.617366) | 0.075611 / 0.296338 (-0.220727) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285063 / 0.215209 (0.069854) | 2.824809 / 2.077655 (0.747155) | 1.481858 / 1.504120 (-0.022262) | 1.350193 / 1.541195 (-0.191002) | 1.358012 / 1.468490 (-0.110478) | 0.557842 / 4.584777 (-4.026935) | 2.380729 / 3.745712 (-1.364983) | 2.798891 / 5.269862 (-2.470970) | 1.719288 / 4.565676 (-2.846388) | 0.061705 / 0.424275 (-0.362570) | 0.005431 / 0.007607 (-0.002176) | 0.343233 / 0.226044 (0.117189) | 3.375223 / 2.268929 (1.106295) | 1.838188 / 55.444624 (-53.606436) | 1.570015 / 6.876477 (-5.306461) | 1.573157 / 2.142072 (-0.568915) | 0.650678 / 4.805227 (-4.154549) | 0.116412 / 6.500664 (-6.384252) | 0.041754 / 0.075469 (-0.033715) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.970431 / 1.841788 (-0.871357) | 11.317128 / 8.074308 (3.242819) | 9.691240 / 10.191392 (-0.500152) | 0.142260 / 0.680424 (-0.538164) | 0.014131 / 0.534201 (-0.520070) | 0.289910 / 0.579283 (-0.289373) | 0.265648 / 0.434364 (-0.168715) | 0.323130 / 0.540337 (-0.217208) | 0.447005 / 1.386936 (-0.939931) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005322 / 0.011353 (-0.006031) | 0.003755 / 0.011008 (-0.007253) | 0.049646 / 0.038508 (0.011138) | 0.029669 / 0.023109 (0.006560) | 0.284151 / 0.275898 (0.008253) | 0.298351 / 0.323480 (-0.025128) | 0.004183 / 0.007986 (-0.003803) | 0.002683 / 0.004328 (-0.001645) | 0.048814 / 0.004250 (0.044563) | 0.045017 / 0.037052 (0.007965) | 0.287358 / 0.258489 (0.028869) | 0.317394 / 0.293841 (0.023553) | 0.030025 / 0.128546 (-0.098521) | 0.010854 / 0.075646 (-0.064793) | 0.058694 / 0.419271 (-0.360578) | 0.052287 / 0.043533 (0.008754) | 0.279038 / 0.255139 (0.023899) | 0.295442 / 0.283200 (0.012242) | 0.019413 / 0.141683 (-0.122270) | 1.146106 / 1.452155 (-0.306048) | 1.197777 / 1.492716 (-0.294939) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.092191 / 0.018006 (0.074184) | 0.302672 / 0.000490 (0.302182) | 0.000623 / 0.000200 (0.000423) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022067 / 0.037411 (-0.015345) | 0.081760 / 0.014526 (0.067235) | 0.087548 / 0.176557 (-0.089009) | 0.126405 / 0.737135 (-0.610730) | 0.089331 / 0.296338 (-0.207008) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295821 / 0.215209 (0.080612) | 2.897930 / 2.077655 (0.820276) | 1.604500 / 1.504120 (0.100380) | 1.471502 / 1.541195 (-0.069692) | 1.497918 / 1.468490 (0.029428) | 0.576179 / 4.584777 (-4.008598) | 2.452103 / 3.745712 (-1.293609) | 2.668043 / 5.269862 (-2.601818) | 1.753544 / 4.565676 (-2.812133) | 0.064410 / 0.424275 (-0.359865) | 0.005027 / 0.007607 (-0.002580) | 0.351509 / 0.226044 (0.125465) | 3.479208 / 2.268929 (1.210280) | 1.990356 / 55.444624 (-53.454269) | 1.684920 / 6.876477 (-5.191556) | 1.794251 / 2.142072 (-0.347821) | 0.662692 / 4.805227 (-4.142535) | 0.118589 / 6.500664 (-6.382076) | 0.040813 / 0.075469 (-0.034656) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.002390 / 1.841788 (-0.839398) | 12.004617 / 8.074308 (3.930309) | 10.216005 / 10.191392 (0.024613) | 0.154354 / 0.680424 (-0.526070) | 0.015554 / 0.534201 (-0.518647) | 0.288741 / 0.579283 (-0.290542) | 0.276774 / 0.434364 (-0.157590) | 0.327055 / 0.540337 (-0.213282) | 0.435121 / 1.386936 (-0.951815) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f45bc6caa25115a04c41b278671a5a89457eb66c \"CML watermark\")\n" ]
2024-03-05T15:04:20Z
2024-03-07T14:05:53Z
2024-03-07T13:58:04Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6718.diff", "html_url": "https://github.com/huggingface/datasets/pull/6718", "merged_at": "2024-03-07T13:58:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/6718.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6718" }
I added `lock_importable_file` in `get_dataset_builder_class` and `extend_dataset_builder_for_streaming` to fix the issue, and I also added a test. cc @clefourrier
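For context, a rough sketch of the locking pattern (my own illustration of the idea, not the actual code in this PR): guard both writing and importing the dynamically created module file with a file lock, so a concurrent `force_redownload` cannot rewrite the script while another process is importing it.

```python
import importlib
from contextlib import contextmanager

from filelock import FileLock  # filelock is already a dependency of `datasets`

@contextmanager
def lock_importable_file(importable_file_path: str):
    # One lock file per importable script; hold it while writing or importing.
    with FileLock(importable_file_path + ".lock"):
        yield

def import_builder_module(importable_file_path: str, module_name: str):
    # Importing under the lock avoids racing with a concurrent
    # force_redownload that deletes/rewrites the same file.
    with lock_importable_file(importable_file_path):
        return importlib.import_module(module_name)
```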
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 2, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/6718/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/issues/6717
6,717
`remove_columns` method used with a streaming-enabled dataset produces a LibsndfileError on multichannel audio
{ "avatar_url": "https://avatars.githubusercontent.com/u/53187038?v=4", "events_url": "https://api.github.com/users/jhauret/events{/privacy}", "followers_url": "https://api.github.com/users/jhauret/followers", "following_url": "https://api.github.com/users/jhauret/following{/other_user}", "gists_url": "https://api.github.com/users/jhauret/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jhauret", "id": 53187038, "login": "jhauret", "node_id": "MDQ6VXNlcjUzMTg3MDM4", "organizations_url": "https://api.github.com/users/jhauret/orgs", "received_events_url": "https://api.github.com/users/jhauret/received_events", "repos_url": "https://api.github.com/users/jhauret/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jhauret/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jhauret/subscriptions", "type": "User", "url": "https://api.github.com/users/jhauret", "user_view_type": "public" }
[]
open
false
[ "And it also works well with `dataset = dataset.select_columns([\"audio\"])`", "Same issue here, disable stream=True fix the problem" ]
2024-03-05T09:33:26Z
2024-08-14T17:54:20Z
null
NONE
null
null
### Describe the bug When loading a HF dataset in streaming mode and removing some columns, it is impossible to load a sample if the audio contains more than one channel. I have the impression that the time axis and channels are swapped or concatenated. ### Steps to reproduce the bug Minimal error code: ```python from datasets import load_dataset dataset_name = "zinc75/Vibravox_dummy" config_name = "BWE_Larynx_microphone" # if we use "ASR_Larynx_microphone" subset which is a monochannel audio, no error is thrown. dataset = load_dataset( path=dataset_name, name=config_name, split="train", streaming=True ) dataset = dataset.remove_columns(["sensor_id"]) # dataset = dataset.map(lambda x:x, remove_columns=["sensor_id"]) # The commented version does not produce an error, but loses the dataset features. sample = next(iter(dataset)) ``` Error: ``` Traceback (most recent call last): File "/home/julien/Bureau/github/vibravox/tmp.py", line 15, in <module> sample = next(iter(dataset)) ^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1392, in __iter__ example = _apply_feature_types_on_example( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1080, in _apply_feature_types_on_example encoded_example = features.encode_example(example) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/datasets/features/features.py", line 1889, in encode_example return encode_nested_example(self, example) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/datasets/features/features.py", line 1244, in encode_nested_example {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema} File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/datasets/features/features.py", line 1244, in <dictcomp> {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema} ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/datasets/features/features.py", line 1300, in encode_nested_example return schema.encode_example(obj) if obj is not None else None ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/datasets/features/audio.py", line 98, in encode_example sf.write(buffer, value["array"], value["sampling_rate"], format="wav") File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/soundfile.py", line 343, in write with SoundFile(file, 'w', samplerate, channels, ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/soundfile.py", line 658, in __init__ self._file = self._open(file, mode_int, closefd) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/julien/.pyenv/versions/vibravox/lib/python3.11/site-packages/soundfile.py", line 1216, in _open raise LibsndfileError(err, prefix="Error opening {0!r}: ".format(self.name)) soundfile.LibsndfileError: Error opening <_io.BytesIO object at 0x7fd795d24680>: Format not recognised. Process finished with exit code 1 ``` ### Expected behavior I would expect this code to run without error. 
### Environment info - `datasets` version: 2.18.0 - Platform: Linux-6.5.0-21-generic-x86_64-with-glibc2.35 - Python version: 3.11.0 - `huggingface_hub` version: 0.21.3 - PyArrow version: 15.0.0 - Pandas version: 2.2.1 - `fsspec` version: 2023.10.0
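As mentioned in the comments, a workaround that avoids the error (sketch of the same repro with the workaround applied) is to select the columns to keep instead of removing the unwanted one:

```python
from datasets import load_dataset

# Workaround from the issue comments: select_columns instead of remove_columns
# does not trigger the libsndfile "Format not recognised" error here.
dataset = load_dataset(
    path="zinc75/Vibravox_dummy",
    name="BWE_Larynx_microphone",
    split="train",
    streaming=True,
)
dataset = dataset.select_columns(["audio"])
sample = next(iter(dataset))
```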
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6717/reactions" }
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/issues/6716
6,716
Non-deterministic `Dataset.builder_name` value
{ "avatar_url": "https://avatars.githubusercontent.com/u/17039389?v=4", "events_url": "https://api.github.com/users/harupy/events{/privacy}", "followers_url": "https://api.github.com/users/harupy/followers", "following_url": "https://api.github.com/users/harupy/following{/other_user}", "gists_url": "https://api.github.com/users/harupy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/harupy", "id": 17039389, "login": "harupy", "node_id": "MDQ6VXNlcjE3MDM5Mzg5", "organizations_url": "https://api.github.com/users/harupy/orgs", "received_events_url": "https://api.github.com/users/harupy/received_events", "repos_url": "https://api.github.com/users/harupy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/harupy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/harupy/subscriptions", "type": "User", "url": "https://api.github.com/users/harupy", "user_view_type": "public" }
[]
closed
false
[ "When `rotten_tomatoes` is printed out, the following warning message is also printed out:\r\n\r\n```\r\nYou can avoid this message in future by passing the argument `trust_remote_code=True`.\r\nPassing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.\r\n```", "Hi ! This behavior happens because the dataset was originakky created using a dataset script [rotten_tomatoes.py](https://huggingface.co/datasets/rotten_tomatoes/blob/26f40d324d7b281d8b3fb1c47f30f8b9957f206b/rotten_tomatoes.py) and because we added features recently allowing to download the dataset directly from Parquet files (parquet builder) without running the dataset script (rotten_tomatoes). The flakiness must come from the availability of the Parquet files (we automatically export them in the refs/convert/parquet branch and we recently had to move some files).\r\n\r\nAnyway the easy fix on our side is to remove the dataset script completely, let me open a PR at https://huggingface.co/datasets/rotten_tomatoes\r\n\r\nEDIT: opened https://huggingface.co/datasets/rotten_tomatoes/discussions/6, feel free to comment there if you're ok with that change", "@lhoestq Thanks for the comment, explanation, and patch!", "> we automatically export them in the refs/convert/parquet branch\r\n\r\nWhen this operation is in progress, the parquet files become temporarily unavailable?", "> When this operation is in progress, the parquet files become temporarily unavailable?\r\n\r\nYes correct. I just merged the patch btw :)", "@lhoestq Thanks for merging the PR! I think this issue can be closed." ]
2024-03-05T09:23:21Z
2024-03-19T07:58:14Z
2024-03-19T07:58:14Z
NONE
null
null
### Describe the bug I'm not sure if this is a bug, but `print(ds.builder_name)` in the following code sometimes prints out `rotten_tomatoes` instead of `parquet`: ```python import datasets for _ in range(100): ds = datasets.load_dataset("rotten_tomatoes", split="train") print(ds.builder_name) # prints out "rotten_tomatoes" sometimes instead of "parquet" ``` Output: ``` ... parquet parquet parquet rotten_tomatoes parquet parquet parquet ... ``` Here's a reproduction using GitHub Actions: https://github.com/mlflow/mlflow/actions/runs/8153247984/job/22284263613?pr=11329#step:12:241 One of our tests is flaky because `builder_name` is not deterministic. ### Steps to reproduce the bug 1. Run the code above. ### Expected behavior Always prints out `parquet`? ### Environment info ``` Copy-and-paste the text below in your GitHub issue. - `datasets` version: 2.18.0 - Platform: Linux-6.5.0-1015-azure-x86_64-with-glibc2.34 - Python version: 3.8.18 - `huggingface_hub` version: 0.21.3 - PyArrow version: 15.0.0 - Pandas version: 2.0.3 - `fsspec` version: 2024.2.0 ```
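Until the loading script is removed from the dataset repository (see the maintainer's explanation in the comments above), one way to de-flake such a test — my own suggestion, not taken from the issue — is to accept both possible builder names:

```python
import datasets

# Suggested de-flaking approach (not from the issue): only assert that the
# dataset loads, accepting either builder, since the auto-exported Parquet
# files can be temporarily unavailable while they are being moved.
ds = datasets.load_dataset("rotten_tomatoes", split="train")
assert ds.builder_name in {"parquet", "rotten_tomatoes"}
```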
{ "avatar_url": "https://avatars.githubusercontent.com/u/17039389?v=4", "events_url": "https://api.github.com/users/harupy/events{/privacy}", "followers_url": "https://api.github.com/users/harupy/followers", "following_url": "https://api.github.com/users/harupy/following{/other_user}", "gists_url": "https://api.github.com/users/harupy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/harupy", "id": 17039389, "login": "harupy", "node_id": "MDQ6VXNlcjE3MDM5Mzg5", "organizations_url": "https://api.github.com/users/harupy/orgs", "received_events_url": "https://api.github.com/users/harupy/received_events", "repos_url": "https://api.github.com/users/harupy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/harupy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/harupy/subscriptions", "type": "User", "url": "https://api.github.com/users/harupy", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6716/reactions" }
completed
{ "completed": 0, "percent_completed": 0, "total": 0 }
{ "blocked_by": 0, "blocking": 0, "total_blocked_by": 0, "total_blocking": 0 }
false
https://github.com/huggingface/datasets/pull/6715
6,715
Fix sliced ConcatenationTable pickling with mixed schemas vertically
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6715). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005294 / 0.011353 (-0.006059) | 0.003598 / 0.011008 (-0.007411) | 0.062798 / 0.038508 (0.024290) | 0.027479 / 0.023109 (0.004370) | 0.247146 / 0.275898 (-0.028752) | 0.272103 / 0.323480 (-0.051377) | 0.002979 / 0.007986 (-0.005007) | 0.002701 / 0.004328 (-0.001628) | 0.049384 / 0.004250 (0.045134) | 0.041562 / 0.037052 (0.004510) | 0.269924 / 0.258489 (0.011435) | 0.290749 / 0.293841 (-0.003092) | 0.028285 / 0.128546 (-0.100261) | 0.010464 / 0.075646 (-0.065183) | 0.207000 / 0.419271 (-0.212272) | 0.036186 / 0.043533 (-0.007347) | 0.254524 / 0.255139 (-0.000615) | 0.274843 / 0.283200 (-0.008356) | 0.020044 / 0.141683 (-0.121638) | 1.119223 / 1.452155 (-0.332931) | 1.156557 / 1.492716 (-0.336159) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092014 / 0.018006 (0.074008) | 0.297349 / 0.000490 (0.296859) | 0.000205 / 0.000200 (0.000005) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018617 / 0.037411 (-0.018794) | 0.061879 / 0.014526 (0.047354) | 0.072877 / 0.176557 (-0.103680) | 0.121850 / 0.737135 (-0.615286) | 0.074686 / 0.296338 (-0.221653) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.281204 / 0.215209 (0.065995) | 2.728688 / 2.077655 (0.651033) | 1.469659 / 1.504120 (-0.034461) | 1.355306 / 1.541195 (-0.185889) | 1.350598 / 1.468490 (-0.117892) | 0.563669 / 4.584777 (-4.021108) | 2.377177 / 3.745712 (-1.368535) | 2.767402 / 5.269862 (-2.502460) | 1.720188 / 4.565676 (-2.845489) | 0.062594 / 0.424275 (-0.361681) | 0.005004 / 0.007607 (-0.002603) | 0.333017 / 0.226044 (0.106972) | 3.354543 / 2.268929 (1.085615) | 1.840031 / 55.444624 (-53.604593) | 1.545548 / 6.876477 (-5.330929) | 1.569858 / 2.142072 (-0.572214) | 0.642680 / 4.805227 (-4.162547) | 0.117463 / 6.500664 (-6.383201) | 0.042472 / 0.075469 (-0.032997) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.977436 / 1.841788 (-0.864351) | 11.285982 / 8.074308 (3.211673) | 9.441848 / 10.191392 (-0.749544) | 0.140773 / 0.680424 (-0.539650) | 0.013783 / 0.534201 (-0.520418) | 0.292304 / 0.579283 (-0.286979) | 0.275011 / 0.434364 (-0.159353) | 0.339094 / 0.540337 (-0.201244) | 0.447593 / 1.386936 (-0.939343) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005258 / 0.011353 (-0.006095) | 0.003539 / 0.011008 (-0.007469) | 0.049920 / 0.038508 (0.011412) | 0.029789 / 0.023109 (0.006680) | 0.277187 / 0.275898 (0.001288) | 0.296817 / 0.323480 (-0.026663) | 0.004133 / 0.007986 (-0.003852) | 0.002679 / 0.004328 (-0.001649) | 0.048999 / 0.004250 (0.044749) | 0.044087 / 0.037052 (0.007034) | 0.290359 / 0.258489 (0.031870) | 0.319572 / 0.293841 (0.025731) | 0.030248 / 0.128546 (-0.098298) | 0.010453 / 0.075646 (-0.065194) | 0.058734 / 0.419271 (-0.360537) | 0.051216 / 0.043533 (0.007683) | 0.278667 / 0.255139 (0.023528) | 0.298792 / 0.283200 (0.015592) | 0.019131 / 0.141683 (-0.122552) | 1.131814 / 1.452155 (-0.320340) | 1.167208 / 1.492716 (-0.325508) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.088316 / 0.018006 (0.070309) | 0.297143 / 0.000490 (0.296653) | 0.000207 / 0.000200 (0.000007) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022457 / 0.037411 (-0.014954) | 0.075251 / 0.014526 (0.060726) | 0.086747 / 0.176557 (-0.089809) | 0.124975 / 0.737135 (-0.612161) | 0.087320 / 0.296338 (-0.209019) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.292339 / 0.215209 (0.077130) | 2.860196 / 2.077655 (0.782541) | 1.599058 / 1.504120 (0.094938) | 1.476104 / 1.541195 (-0.065091) | 1.509109 / 1.468490 (0.040619) | 0.564056 / 4.584777 (-4.020721) | 2.388870 / 3.745712 (-1.356842) | 2.582356 / 5.269862 (-2.687506) | 1.726033 / 4.565676 (-2.839644) | 0.061788 / 0.424275 (-0.362487) | 0.005021 / 0.007607 (-0.002586) | 0.345644 / 0.226044 (0.119600) | 3.384000 / 2.268929 (1.115071) | 1.946591 / 55.444624 (-53.498033) | 1.693485 / 6.876477 (-5.182992) | 1.790300 / 2.142072 (-0.351773) | 0.654637 / 4.805227 (-4.150590) | 0.116271 / 6.500664 (-6.384393) | 0.040710 / 0.075469 (-0.034759) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.007367 / 1.841788 (-0.834421) | 11.868065 / 8.074308 (3.793757) | 10.146212 / 10.191392 (-0.045180) | 0.128902 / 0.680424 (-0.551522) | 0.015259 / 0.534201 (-0.518942) | 0.288087 / 0.579283 (-0.291196) | 0.281516 / 0.434364 (-0.152848) | 0.325755 / 0.540337 (-0.214583) | 0.424814 / 1.386936 (-0.962122) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#8247202a7ed1c3164c88f8f183513c5f003aa2af \"CML watermark\")\n" ]
2024-03-04T21:02:07Z
2024-03-05T11:23:05Z
2024-03-05T11:17:04Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6715.diff", "html_url": "https://github.com/huggingface/datasets/pull/6715", "merged_at": "2024-03-05T11:17:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/6715.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6715" }
A sliced + pickled ConcatenationTable could end up with a different schema than the original one if the slice only contains blocks covering a subset of the columns. This can lead to issues when saving datasets built from a concatenation of datasets with mixed schemas. Reported in https://discuss.huggingface.co/t/datasetdict-save-to-disk-with-num-proc-1-seems-to-hang-with-error/75595
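A rough repro sketch of the scenario (my reading of the description, not the PR's actual test; the table helpers and their behavior on mixed schemas are assumed):

```python
import pickle

import pyarrow as pa
from datasets.table import InMemoryTable, concat_tables

# Assumed repro: two blocks with different columns concatenated vertically
# ("mixed schemas"), then a slice that only covers the first block is pickled.
# Before this fix the unpickled table could come back without column "b";
# with the fix the schemas should match.
t1 = InMemoryTable(pa.table({"a": [1, 2, 3]}))
t2 = InMemoryTable(pa.table({"a": [4, 5, 6], "b": ["x", "y", "z"]}))
combined = concat_tables([t1, t2])
sliced = combined.slice(0, 2)  # touches only the first block
restored = pickle.loads(pickle.dumps(sliced))
print(restored.schema == combined.schema)
```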
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6715/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6714
6,714
Expand no-code dataset info with datasets-server info
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6714). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005237 / 0.011353 (-0.006116) | 0.003614 / 0.011008 (-0.007394) | 0.063349 / 0.038508 (0.024841) | 0.027297 / 0.023109 (0.004187) | 0.236203 / 0.275898 (-0.039695) | 0.260029 / 0.323480 (-0.063451) | 0.003096 / 0.007986 (-0.004889) | 0.003342 / 0.004328 (-0.000987) | 0.048703 / 0.004250 (0.044453) | 0.043121 / 0.037052 (0.006069) | 0.257491 / 0.258489 (-0.000998) | 0.282861 / 0.293841 (-0.010980) | 0.027701 / 0.128546 (-0.100845) | 0.010634 / 0.075646 (-0.065012) | 0.207369 / 0.419271 (-0.211903) | 0.035799 / 0.043533 (-0.007734) | 0.240445 / 0.255139 (-0.014694) | 0.261977 / 0.283200 (-0.021223) | 0.018175 / 0.141683 (-0.123508) | 1.143964 / 1.452155 (-0.308191) | 1.230057 / 1.492716 (-0.262659) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.096656 / 0.018006 (0.078650) | 0.303434 / 0.000490 (0.302944) | 0.000225 / 0.000200 (0.000025) | 0.000051 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018454 / 0.037411 (-0.018957) | 0.061792 / 0.014526 (0.047266) | 0.073384 / 0.176557 (-0.103172) | 0.120148 / 0.737135 (-0.616988) | 0.074221 / 0.296338 (-0.222118) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290291 / 0.215209 (0.075082) | 2.822908 / 2.077655 (0.745254) | 1.483139 / 1.504120 (-0.020981) | 1.349619 / 1.541195 (-0.191576) | 1.356588 / 1.468490 (-0.111902) | 0.571723 / 4.584777 (-4.013054) | 2.402696 / 3.745712 (-1.343016) | 2.832215 / 5.269862 (-2.437647) | 1.794962 / 4.565676 (-2.770714) | 0.062707 / 0.424275 (-0.361568) | 0.004997 / 0.007607 (-0.002610) | 0.343093 / 0.226044 (0.117049) | 3.383028 / 2.268929 (1.114100) | 1.818624 / 55.444624 (-53.626000) | 1.549859 / 6.876477 (-5.326618) | 1.667838 / 2.142072 (-0.474235) | 0.648574 / 4.805227 (-4.156653) | 0.119181 / 6.500664 (-6.381484) | 0.042074 / 0.075469 (-0.033395) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.982039 / 1.841788 (-0.859748) | 11.411759 / 8.074308 (3.337451) | 9.783405 / 10.191392 (-0.407987) | 0.129577 / 0.680424 (-0.550847) | 0.014091 / 0.534201 (-0.520110) | 0.297925 / 0.579283 (-0.281358) | 0.263884 / 0.434364 (-0.170480) | 0.346032 / 0.540337 (-0.194305) | 0.444806 / 1.386936 (-0.942130) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005527 / 0.011353 (-0.005826) | 0.003677 / 0.011008 (-0.007332) | 0.050245 / 0.038508 (0.011737) | 0.030070 / 0.023109 (0.006961) | 0.272640 / 0.275898 (-0.003258) | 0.296555 / 0.323480 (-0.026925) | 0.004247 / 0.007986 (-0.003738) | 0.003833 / 0.004328 (-0.000495) | 0.049341 / 0.004250 (0.045091) | 0.046604 / 0.037052 (0.009552) | 0.282765 / 0.258489 (0.024276) | 0.314924 / 0.293841 (0.021084) | 0.029749 / 0.128546 (-0.098797) | 0.010524 / 0.075646 (-0.065122) | 0.057859 / 0.419271 (-0.361412) | 0.053172 / 0.043533 (0.009640) | 0.274906 / 0.255139 (0.019767) | 0.290566 / 0.283200 (0.007366) | 0.019299 / 0.141683 (-0.122384) | 1.164092 / 1.452155 (-0.288062) | 1.205074 / 1.492716 (-0.287642) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.093943 / 0.018006 (0.075936) | 0.298746 / 0.000490 (0.298256) | 0.000232 / 0.000200 (0.000032) | 0.000054 / 0.000054 (-0.000000) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022098 / 0.037411 (-0.015313) | 0.075523 / 0.014526 (0.060997) | 0.086784 / 0.176557 (-0.089773) | 0.124610 / 0.737135 (-0.612525) | 0.087743 / 0.296338 (-0.208595) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.298555 / 0.215209 (0.083346) | 2.951493 / 2.077655 (0.873838) | 1.611448 / 1.504120 (0.107328) | 1.481503 / 1.541195 (-0.059692) | 1.497937 / 1.468490 (0.029447) | 0.580402 / 4.584777 (-4.004375) | 2.433308 / 3.745712 (-1.312404) | 2.712717 / 5.269862 (-2.557145) | 1.766286 / 4.565676 (-2.799391) | 0.063973 / 0.424275 (-0.360303) | 0.005006 / 0.007607 (-0.002601) | 0.354541 / 0.226044 (0.128497) | 3.486448 / 2.268929 (1.217519) | 1.972779 / 55.444624 (-53.471846) | 1.709018 / 6.876477 (-5.167458) | 1.864242 / 2.142072 (-0.277831) | 0.678213 / 4.805227 (-4.127014) | 0.119525 / 6.500664 (-6.381140) | 0.041387 / 0.075469 (-0.034082) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.021337 / 1.841788 (-0.820451) | 12.049563 / 8.074308 (3.975255) | 10.424701 / 10.191392 (0.233309) | 0.131444 / 0.680424 (-0.548980) | 0.015644 / 0.534201 (-0.518557) | 0.293712 / 0.579283 (-0.285571) | 0.279160 / 0.434364 (-0.155204) | 0.327991 / 0.540337 (-0.212346) | 0.435455 / 1.386936 (-0.951481) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#1fe9483acc1ccaf19f3c199b99391921a8526215 \"CML watermark\")\n" ]
2024-03-04T19:18:10Z
2024-03-04T20:28:30Z
2024-03-04T20:22:15Z
COLLABORATOR
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6714.diff", "html_url": "https://github.com/huggingface/datasets/pull/6714", "merged_at": "2024-03-04T20:22:15Z", "patch_url": "https://github.com/huggingface/datasets/pull/6714.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6714" }
E.g., to have info about a dataset's number of examples for more informative TQDM bars.
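For illustration, this is the kind of metadata the datasets-server already exposes over HTTP (endpoint and response shape assumed here; check the current API docs):

```python
import requests

# Assumed endpoint/response shape: per-split row counts from the
# datasets-server, which is what lets progress bars display a total
# number of examples for datasets without a loading script.
resp = requests.get(
    "https://datasets-server.huggingface.co/size",
    params={"dataset": "rotten_tomatoes"},
    timeout=30,
)
resp.raise_for_status()
for split in resp.json()["size"]["splits"]:
    print(split["config"], split["split"], split["num_rows"])
```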
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6714/reactions" }
null
null
null
true
https://github.com/huggingface/datasets/pull/6713
6,713
Bump huggingface-hub lower version to 0.21.2
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6713). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "@lhoestq if you agree, I could make a patch release tomorrow morning.", "sure :)", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005086 / 0.011353 (-0.006267) | 0.003695 / 0.011008 (-0.007313) | 0.063430 / 0.038508 (0.024922) | 0.026798 / 0.023109 (0.003689) | 0.253761 / 0.275898 (-0.022138) | 0.301301 / 0.323480 (-0.022179) | 0.004160 / 0.007986 (-0.003825) | 0.002783 / 0.004328 (-0.001545) | 0.050698 / 0.004250 (0.046448) | 0.040899 / 0.037052 (0.003846) | 0.269024 / 0.258489 (0.010535) | 0.323467 / 0.293841 (0.029626) | 0.027756 / 0.128546 (-0.100791) | 0.010684 / 0.075646 (-0.064963) | 0.207128 / 0.419271 (-0.212144) | 0.035874 / 0.043533 (-0.007659) | 0.251620 / 0.255139 (-0.003519) | 0.268668 / 0.283200 (-0.014532) | 0.017387 / 0.141683 (-0.124296) | 1.139230 / 1.452155 (-0.312925) | 1.183613 / 1.492716 (-0.309103) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.096337 / 0.018006 (0.078331) | 0.305014 / 0.000490 (0.304524) | 0.000219 / 0.000200 (0.000019) | 0.000050 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018086 / 0.037411 (-0.019325) | 0.061626 / 0.014526 (0.047100) | 0.072598 / 0.176557 (-0.103959) | 0.119944 / 0.737135 (-0.617192) | 0.074549 / 0.296338 (-0.221789) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 
5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.282661 / 0.215209 (0.067452) | 2.804473 / 2.077655 (0.726818) | 1.444602 / 1.504120 (-0.059517) | 1.313977 / 1.541195 (-0.227217) | 1.319426 / 1.468490 (-0.149064) | 0.570176 / 4.584777 (-4.014601) | 2.397895 / 3.745712 (-1.347818) | 2.760208 / 5.269862 (-2.509654) | 1.732457 / 4.565676 (-2.833220) | 0.062743 / 0.424275 (-0.361533) | 0.004950 / 0.007607 (-0.002657) | 0.338500 / 0.226044 (0.112456) | 3.287249 / 2.268929 (1.018320) | 1.777495 / 55.444624 (-53.667130) | 1.521255 / 6.876477 (-5.355222) | 1.517317 / 2.142072 (-0.624756) | 0.642202 / 4.805227 (-4.163025) | 0.116501 / 6.500664 (-6.384163) | 0.042418 / 0.075469 (-0.033052) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.968966 / 1.841788 (-0.872822) | 11.490531 / 8.074308 (3.416223) | 9.507803 / 10.191392 (-0.683589) | 0.141570 / 0.680424 (-0.538854) | 0.014000 / 0.534201 (-0.520201) | 0.284237 / 0.579283 (-0.295046) | 0.269341 / 0.434364 (-0.165022) | 0.321654 / 0.540337 (-0.218683) | 0.446914 / 1.386936 (-0.940022) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005280 / 0.011353 (-0.006072) | 0.003794 / 0.011008 (-0.007214) | 0.050328 / 0.038508 (0.011820) | 0.029756 / 0.023109 (0.006647) | 0.273403 / 0.275898 (-0.002495) | 0.297346 / 0.323480 (-0.026133) | 0.004310 / 0.007986 (-0.003676) | 0.002858 / 0.004328 (-0.001470) | 0.048833 / 0.004250 (0.044583) | 0.045696 / 0.037052 (0.008644) | 0.291034 / 0.258489 (0.032545) | 0.318899 / 0.293841 (0.025058) | 0.029809 / 0.128546 (-0.098737) | 0.010710 / 0.075646 (-0.064936) | 0.058183 / 0.419271 (-0.361089) | 0.051761 / 0.043533 (0.008228) | 0.275022 / 0.255139 (0.019883) | 0.291614 / 0.283200 (0.008414) | 0.017975 / 0.141683 (-0.123708) | 1.148489 / 1.452155 (-0.303666) | 1.218111 / 1.492716 (-0.274605) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | 
get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091806 / 0.018006 (0.073799) | 0.299413 / 0.000490 (0.298923) | 0.000219 / 0.000200 (0.000019) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021506 / 0.037411 (-0.015905) | 0.075537 / 0.014526 (0.061011) | 0.087020 / 0.176557 (-0.089536) | 0.125270 / 0.737135 (-0.611865) | 0.088038 / 0.296338 (-0.208300) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.300401 / 0.215209 (0.085192) | 2.932571 / 2.077655 (0.854916) | 1.609502 / 1.504120 (0.105383) | 1.480078 / 1.541195 (-0.061117) | 1.514902 / 1.468490 (0.046412) | 0.575591 / 4.584777 (-4.009186) | 2.461873 / 3.745712 (-1.283839) | 2.728099 / 5.269862 (-2.541762) | 1.760054 / 4.565676 (-2.805622) | 0.064371 / 0.424275 (-0.359904) | 0.004990 / 0.007607 (-0.002617) | 0.350134 / 0.226044 (0.124090) | 3.453249 / 2.268929 (1.184321) | 1.979760 / 55.444624 (-53.464865) | 1.741128 / 6.876477 (-5.135348) | 1.825734 / 2.142072 (-0.316339) | 0.654902 / 4.805227 (-4.150325) | 0.116989 / 6.500664 (-6.383676) | 0.040800 / 0.075469 (-0.034669) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.033352 / 1.841788 (-0.808436) | 12.196711 / 8.074308 (4.122403) | 10.315114 / 10.191392 (0.123722) | 0.132541 / 0.680424 (-0.547882) | 0.016455 / 0.534201 (-0.517746) | 0.289025 / 0.579283 (-0.290258) | 0.281464 / 0.434364 (-0.152900) | 0.325302 / 0.540337 (-0.215036) | 0.428469 / 1.386936 (-0.958467) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#7093b4b1a69f413e452119c87669af9e8ceaf749 \"CML watermark\")\n" ]
2024-03-04T13:00:52Z
2024-03-04T18:14:03Z
2024-03-04T18:06:05Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6713.diff", "html_url": "https://github.com/huggingface/datasets/pull/6713", "merged_at": "2024-03-04T18:06:05Z", "patch_url": "https://github.com/huggingface/datasets/pull/6713.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6713" }
This should fix the version compatibility issue when using `huggingface_hub` < 0.21.2 and latest fsspec (>=2023.12.0). See my comment: https://github.com/huggingface/datasets/pull/6687#issuecomment-1976493336 >> EDIT: the fix has been released in `huggingface_hub` 0.21.2 - I removed my commits that were using `huggingface_hub@main` > >Please note that people using `huggingface_hub` < 0.21.2 and latest `fsspec` will have issues when using `datasets`: >- https://github.com/huggingface/lighteval/actions/runs/8139147047/job/22241658122?pr=86 >- https://github.com/huggingface/lighteval/pull/84 CC: @clefourrier
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6713/reactions" }
null
null
null
true
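The record above attributes the breakage to `huggingface_hub` < 0.21.2 combined with `fsspec` >= 2023.12.0 and notes that the upstream fix shipped in `huggingface_hub` 0.21.2. As a hedged illustration (not part of the PR; the version bounds are taken only from the PR body), an environment can be checked for the affected combination like this:

```python
# Illustrative only: flag the huggingface_hub / fsspec combination described in the
# PR body above. The 0.21.2 and 2023.12.0 bounds come from that text, not from code.
from importlib.metadata import version

from packaging.version import Version

hub = Version(version("huggingface_hub"))
fsspec_version = Version(version("fsspec"))

if fsspec_version >= Version("2023.12.0") and hub < Version("0.21.2"):
    print("Affected: upgrade huggingface_hub to >= 0.21.2, where the upstream fix was released.")
else:
    print("Not affected by the incompatibility described above.")
```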
https://github.com/huggingface/datasets/pull/6712
6,712
fix CastError pickling
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6712). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005151 / 0.011353 (-0.006202) | 0.003813 / 0.011008 (-0.007196) | 0.062957 / 0.038508 (0.024449) | 0.028282 / 0.023109 (0.005173) | 0.246036 / 0.275898 (-0.029862) | 0.290024 / 0.323480 (-0.033456) | 0.004009 / 0.007986 (-0.003977) | 0.002749 / 0.004328 (-0.001580) | 0.049351 / 0.004250 (0.045101) | 0.041143 / 0.037052 (0.004090) | 0.264782 / 0.258489 (0.006293) | 0.290711 / 0.293841 (-0.003130) | 0.027248 / 0.128546 (-0.101298) | 0.010691 / 0.075646 (-0.064955) | 0.205926 / 0.419271 (-0.213345) | 0.035652 / 0.043533 (-0.007880) | 0.246357 / 0.255139 (-0.008782) | 0.267851 / 0.283200 (-0.015348) | 0.018498 / 0.141683 (-0.123185) | 1.135996 / 1.452155 (-0.316159) | 1.181841 / 1.492716 (-0.310875) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094054 / 0.018006 (0.076048) | 0.305470 / 0.000490 (0.304980) | 0.000225 / 0.000200 (0.000025) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018842 / 0.037411 (-0.018569) | 0.061532 / 0.014526 (0.047006) | 0.073483 / 0.176557 (-0.103073) | 0.119426 / 0.737135 (-0.617709) | 0.075385 / 0.296338 (-0.220954) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285544 / 0.215209 (0.070335) | 2.774256 / 2.077655 (0.696601) | 1.475719 / 1.504120 (-0.028401) | 1.353841 / 1.541195 (-0.187353) | 1.381891 / 1.468490 (-0.086599) | 0.570619 / 4.584777 (-4.014158) | 2.380300 / 3.745712 (-1.365412) | 2.788767 / 5.269862 (-2.481095) | 1.741790 / 4.565676 (-2.823886) | 0.061810 / 0.424275 (-0.362465) | 0.005004 / 0.007607 (-0.002603) | 0.334963 / 0.226044 (0.108918) | 3.286388 / 2.268929 (1.017459) | 1.831669 / 55.444624 (-53.612955) | 1.523372 / 6.876477 (-5.353105) | 1.581551 / 2.142072 (-0.560521) | 0.639642 / 4.805227 (-4.165585) | 0.117356 / 6.500664 (-6.383308) | 0.043277 / 0.075469 (-0.032192) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.973005 / 1.841788 (-0.868782) | 11.590148 / 8.074308 (3.515839) | 9.521262 / 10.191392 (-0.670130) | 0.143243 / 0.680424 (-0.537181) | 0.013529 / 0.534201 (-0.520672) | 0.285724 / 0.579283 (-0.293559) | 0.265642 / 0.434364 (-0.168721) | 0.366098 / 0.540337 (-0.174239) | 0.444410 / 1.386936 (-0.942526) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005347 / 0.011353 (-0.006006) | 0.003797 / 0.011008 (-0.007212) | 0.050441 / 0.038508 (0.011933) | 0.032812 / 0.023109 (0.009703) | 0.281278 / 0.275898 (0.005379) | 0.304524 / 0.323480 (-0.018956) | 0.005039 / 0.007986 (-0.002946) | 0.002735 / 0.004328 (-0.001594) | 0.049184 / 0.004250 (0.044933) | 0.046751 / 0.037052 (0.009698) | 0.292093 / 0.258489 (0.033604) | 0.322087 / 0.293841 (0.028246) | 0.029775 / 0.128546 (-0.098771) | 0.010540 / 0.075646 (-0.065106) | 0.057927 / 0.419271 (-0.361345) | 0.054240 / 0.043533 (0.010707) | 0.281537 / 0.255139 (0.026398) | 0.298386 / 0.283200 (0.015186) | 0.019773 / 0.141683 (-0.121910) | 1.157161 / 1.452155 (-0.294994) | 1.210395 / 1.492716 (-0.282321) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.095098 / 0.018006 (0.077091) | 0.306952 / 0.000490 (0.306462) | 0.000211 / 0.000200 (0.000011) | 0.000050 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022602 / 0.037411 (-0.014809) | 0.075242 / 0.014526 (0.060716) | 0.087134 / 0.176557 (-0.089422) | 0.127923 / 0.737135 (-0.609212) | 0.088645 / 0.296338 (-0.207693) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.304187 / 0.215209 (0.088978) | 2.977120 / 2.077655 (0.899465) | 1.663592 / 1.504120 (0.159473) | 1.527601 / 1.541195 (-0.013594) | 1.540121 / 1.468490 (0.071631) | 0.562492 / 4.584777 (-4.022285) | 2.473836 / 3.745712 (-1.271876) | 2.656782 / 5.269862 (-2.613080) | 1.754212 / 4.565676 (-2.811464) | 0.062330 / 0.424275 (-0.361945) | 0.005149 / 0.007607 (-0.002459) | 0.354905 / 0.226044 (0.128860) | 3.503587 / 2.268929 (1.234659) | 2.015682 / 55.444624 (-53.428943) | 1.744421 / 6.876477 (-5.132056) | 1.923120 / 2.142072 (-0.218952) | 0.652209 / 4.805227 (-4.153018) | 0.119406 / 6.500664 (-6.381258) | 0.042840 / 0.075469 (-0.032630) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.009164 / 1.841788 (-0.832624) | 12.379654 / 8.074308 (4.305346) | 10.408696 / 10.191392 (0.217304) | 0.141674 / 0.680424 (-0.538750) | 0.016815 / 0.534201 (-0.517386) | 0.292453 / 0.579283 (-0.286830) | 0.277577 / 0.434364 (-0.156787) | 0.325024 / 0.540337 (-0.215313) | 0.433181 / 1.386936 (-0.953755) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b7a16a08c940e65397305aec5f1b484d91cee75a \"CML watermark\")\n" ]
2024-03-04T11:14:18Z
2024-03-04T20:23:47Z
2024-03-04T20:17:17Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6712.diff", "html_url": "https://github.com/huggingface/datasets/pull/6712", "merged_at": "2024-03-04T20:17:17Z", "patch_url": "https://github.com/huggingface/datasets/pull/6712.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6712" }
reported in https://discuss.huggingface.co/t/datasetdict-save-to-disk-with-num-proc-1-seems-to-hang-with-error/75595
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6712/reactions" }
null
null
null
true
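The title of the record above ("fix CastError pickling") and the linked forum thread (a `save_to_disk(num_proc=...)` hang) point at a general Python pitfall: an exception whose `__init__` requires keyword-only arguments cannot round-trip through pickle by default, which breaks error reporting across multiprocessing workers. The sketch below illustrates that pattern with hypothetical field names; it is not the actual `CastError` code from `datasets`.

```python
# Hypothetical illustration of the pickling pitfall, not datasets' actual CastError.
# BaseException's default __reduce__ recreates an exception as cls(*self.args), so
# required keyword-only __init__ arguments make unpickling fail inside worker processes.
import pickle
from functools import partial


class PicklableCastError(Exception):
    def __init__(self, *args, table_column_names, requested_column_names):
        super().__init__(*args)
        self.table_column_names = table_column_names
        self.requested_column_names = requested_column_names

    def __reduce__(self):
        # Rebuild the exception with both its positional and keyword arguments.
        return (
            partial(
                type(self),
                *self.args,
                table_column_names=self.table_column_names,
                requested_column_names=self.requested_column_names,
            ),
            (),
        )


err = PicklableCastError("couldn't cast", table_column_names=["a"], requested_column_names=["b"])
restored = pickle.loads(pickle.dumps(err))
assert restored.table_column_names == ["a"]  # works only because of __reduce__ above
```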
https://github.com/huggingface/datasets/pull/6711
6,711
3x Faster Text Preprocessing
{ "avatar_url": "https://avatars.githubusercontent.com/u/1983160?v=4", "events_url": "https://api.github.com/users/ashvardanian/events{/privacy}", "followers_url": "https://api.github.com/users/ashvardanian/followers", "following_url": "https://api.github.com/users/ashvardanian/following{/other_user}", "gists_url": "https://api.github.com/users/ashvardanian/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ashvardanian", "id": 1983160, "login": "ashvardanian", "node_id": "MDQ6VXNlcjE5ODMxNjA=", "organizations_url": "https://api.github.com/users/ashvardanian/orgs", "received_events_url": "https://api.github.com/users/ashvardanian/received_events", "repos_url": "https://api.github.com/users/ashvardanian/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ashvardanian/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ashvardanian/subscriptions", "type": "User", "url": "https://api.github.com/users/ashvardanian", "user_view_type": "public" }
[]
open
false
[ "Unfortunately, that won't improve the performance. StringZilla repository has extensive benchmarks comparing against different built-in functionality of several programming languages. Using `re.finditer` for tokenization is practically the slowest anti-pattern I've encountered in any language. The gap between that and a SIMD-accelerated kernel can be as big as 10 MB/s vs 10 GB/s.\n\nI understand the need to keep the dependencies minimal. It helps the package remain small and portable. At this point, StringZilla provides 105 binaries for different OS and hardware versions (more portable than NumPy) and the [binary size generally ranges from 50 KB to 250 KB](https://pypi.org/project/stringzilla/), smaller than a single JPEG. \n", "The `text` builder is not very popular, so I'm also not a fan of introducing a dependency for it.\r\n\r\nMoreover, I couldn't find any projects of this size/usage depending on StringZilla (with GitHub search), so we should at least wait for its greater adoption to merge this PR.\r\n", "> Moreover, I couldn't find any projects of this size/usage depending on StringZilla (with GitHub search), so we should at least wait for its greater adoption to merge this PR.\r\n\r\nMeanwhile I understand that you want to wait for a greater adoption - if things change in the future you would be stuck with an unsupported dependency (although I think, that really applies to everything) - the performance improvement is really significant!\r\nI wonder if it's worth, perhaps, to provide an additional 'datasets.extras' library by huggingface which support these 3rd party improvements.\r\nIt would reduce the risk on the core components and, at the same time, it would definitely help on the performance side!", "Hi @lhoestq & @mariosasko! Hope you both are doing well!\r\n\r\nI've just pushed another year's worth of work on string algorithms, including both CPU- and GPU-accelerated processing, and I'm curious if they can be of bigger help this time πŸ€— \r\n\r\n## Tokenization\r\n\r\n> Maybe we can improve speed and memory usage using built-in tools though, like regex `finditer`?\r\n\r\nRevisiting this question, I've just benchmarked that exact use case, byte-set search for line breaks and whitespaces between words. The results on Intel Ice Lake+ CPUs installed in most DGX servers look like this:\r\n\r\n| Library | Shorter Words | Longer Lines |\r\n| --------------------------- | -------------: | -------------: |\r\n| `re.finditer` 🐍 | 0.04 GiB/s | 0.19 GiB/s |\r\n| `sz.Str.find_first_of` 🐍 | __0.11 GiB/s__ | __8.79 GiB/s__ |\r\n\r\nMoreover, on tokenization, with new Apache Arrow-compatible containers, one may easily end up with a 10x lower memory consumption, making much larger datasets feasible for in-memory processing.\r\n\r\n## Hashing\r\n\r\nI've also noticed that `xxhash` is used for non-cryptographic hashes. It's indeed a very good choice, but I believe it may be further improved:\r\n\r\n| Library | Languages | Shorter Words | Longer Lines |\r\n| -------------------- | ----------------- | -------------: | --------------: |\r\n| `hash` 🐍 | Py | 0.13 GiB/s | 4.27 GiB/s |\r\n| `xxhash.xxh3_64` 🐍 | C, C++, Rs, Py... | 0.04 GiB/s | 6.38 GiB/s |\r\n| `sz.hash` 🐍 | C, C++, Rs, Py... | __0.14 GiB/s__ | __9.19 GiB/s__ |\r\n\r\n---\r\n\r\nThere is a lot of other functionality for bulk-computing of MinHashes or Levenshtein distances on both CPUs and GPUs, but I'm not sure if it might be of interest to the `datasets` team. 
Happy to help if it is πŸ€— \r\n\r\n> All the benchmarks can be found in the [StringWa.rs repo](https://github.com/ashvardanian/StringWa.rs/).", "Could you also compare with PyArrow string functions ? They offer good speed iirc and return arrow arrays directly (hence no need for an extra data copy)", "Hi @lhoestq! I've been researching the PyArrow functionality over the last couple of days and not entirely sure if there are many obvious functionality overlaps. The functionality that I've already compared in [StringWa.rs, is sorting](https://github.com/ashvardanian/StringWa.rs/?tab=readme-ov-file#sequence-operations):\r\n\r\n| Library | Short Words | Long Lines |\r\n| ------------------------------------------- | ---------------------------: | -------------------------: |\r\n| `list.sort` on 1x CPU | 47.06 M comparisons/s | 22.36 M comparisons/s |\r\n| `pandas.Series.sort_values` on 1x CPU | 9.39 M comparisons/s | 11.93 M comparisons/s |\r\n| `pyarrow.compute.sort_indices` on 1x CPU | 62.17 M comparisons/s | 5.53 M comparisons/s |\r\n| `polars.Series.sort` on 1x CPU | 223.38 M comparisons/s | __181.60 M comparisons/s__ |\r\n| `cudf.Series.sort_values` on 1x GPU | __9'463.59 M comparisons/s__ | 66.44 M comparisons/s |\r\n| `stringzilla.Strs.sorted` on 1x CPU | 171.13 M comparisons/s | 77.88 M comparisons/s |\r\n\r\nPlease let me know if there's other functionality I should compare or add to the library πŸ€— \r\n\r\n---\r\n\r\nPS: StringZilla's `Strs` class and PyArrow arrays are directly convertible without extra memory allocations:\r\n\r\n```py\r\nfrom pyarrow import foreign_buffer\r\nfrom stringzilla import Strs\r\n\r\nstrs = Strs([\"alpha\", \"beta\", \"gamma\"])\r\narrow = foreign_buffer(strs.address, strs.nbytes, strs)\r\n```\r\n\r\nAnd only slightly harder to convert in reverse direction:\r\n\r\n```py\r\narr = pa.Array.from_buffers(\r\n pa.large_string() if strs.offsets_are_large else pa.string(),\r\n len(strs),\r\n [None,\r\n pa.foreign_buffer(strs.offsets_address, strs.offsets_nbytes, strs),\r\n pa.foreign_buffer(strs.tape_address, strs.tape_nbytes, strs)],\r\n)\r\n```", "Following up, here's a [success story](https://ashvardanian.com/posts/image-processing-with-strings) from integrating StringZilla into a package with 100M+ PyPI downloads lat year, replacing OpenCV for a 4x images processing speedup.", "Interesting comparisons for sorting, what about the string functions that would be useful here ?" ]
2024-03-03T19:03:04Z
2025-09-22T09:20:22Z
null
NONE
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6711.diff", "html_url": "https://github.com/huggingface/datasets/pull/6711", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6711.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6711" }
I was preparing some datasets for AI training and noticed that `datasets` by HuggingFace uses the conventional `open` mechanism to read the file and split it into chunks. I thought it could be significantly accelerated, and [started with a benchmark](https://gist.github.com/ashvardanian/55c2052e9f78b05b8d614aa90cb12347):

```sh
$ pip install --upgrade --force-reinstall datasets
$ python benchmark_huggingface_datasets.py xlsum.csv
Generating train split: 1004598 examples [00:47, 21116.16 examples/s]
Time taken to load the dataset: 48.66838526725769 seconds
Time taken to chunk the dataset into parts of size 10000: 0.11466407775878906 seconds
Total time taken: 48.78304934501648 seconds
```

For benchmarks I've used a [large CSV file with mixed UTF-8 content](https://github.com/ashvardanian/StringZilla/blob/main/CONTRIBUTING.md#benchmarking-datasets), most common in modern large-scale pre-training pipelines. I've later patched the `datasets` library to use `stringzilla`, which resulted in significantly lower memory consumption and a 2.9x throughput improvement on the AWS `r7iz` instances. That's using slow SSDs mounted over the network. Performance on local SSDs on something like a DGX-H100 should be even higher:

```sh
$ pip install -e .
$ python benchmark_huggingface_datasets.py xlsum.csv
Generating train split: 1004598 examples [00:15, 64529.90 examples/s]
Time taken to load the dataset: 16.45028805732727 seconds
Time taken to chunk the dataset into parts of size 10000: 0.1291060447692871 seconds
Total time taken: 16.579394102096558 seconds
```

I've already [pushed the patches to my fork](https://github.com/ashvardanian/datasets/tree/faster-text-parsers), and would love to contribute them to the upstream repository.

---

All the tests pass, but they leave a couple of important questions open. The default Python `open(..., newline=None)` uses universal newlines, where `\n`, `\r`, and `\r\n` are all converted to `\n` on the fly. I am not sure whether that is a good idea for a general-purpose dataset preparation pipeline. I can simulate the same behavior (which I don't yet do) for the `"line"` splitter. Adjusting it for the `"paragraph"` splitter would be harder. Should we stick exactly to the old Pythonic behavior or stay closer to how C and other programming languages do that?
null
{ "+1": 8, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 8, "url": "https://api.github.com/repos/huggingface/datasets/issues/6711/reactions" }
null
null
null
true
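The PR body above reports load-time and chunking-time numbers, but the benchmark script itself lives in the linked gist. A rough, assumed reconstruction of that methodology using public `datasets` APIs (the gist may differ, and `Dataset.select` is used here only as a stand-in for whatever chunking the author used) could look like this:

```python
# Assumed reconstruction of the timing harness described above; the actual gist may differ.
import sys
import time

from datasets import load_dataset

path = sys.argv[1]  # e.g. "xlsum.csv", the large mixed-UTF-8 file from the PR body
chunk_size = 10_000

t_total = time.time()

t0 = time.time()
ds = load_dataset("text", data_files=path, split="train")  # the builder this PR patches
print(f"Time taken to load the dataset: {time.time() - t0} seconds")

t0 = time.time()
chunks = [
    ds.select(range(start, min(start + chunk_size, len(ds))))
    for start in range(0, len(ds), chunk_size)
]
print(f"Time taken to chunk the dataset into parts of size {chunk_size}: {time.time() - t0} seconds")

print(f"Total time taken: {time.time() - t_total} seconds")
```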
https://github.com/huggingface/datasets/pull/6710
6,710
Persist IterableDataset epoch in workers
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6710). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005283 / 0.011353 (-0.006070) | 0.003866 / 0.011008 (-0.007142) | 0.063124 / 0.038508 (0.024616) | 0.030240 / 0.023109 (0.007131) | 0.232855 / 0.275898 (-0.043043) | 0.257538 / 0.323480 (-0.065942) | 0.004165 / 0.007986 (-0.003820) | 0.002826 / 0.004328 (-0.001502) | 0.049735 / 0.004250 (0.045485) | 0.045297 / 0.037052 (0.008244) | 0.251831 / 0.258489 (-0.006658) | 0.277812 / 0.293841 (-0.016029) | 0.030004 / 0.128546 (-0.098542) | 0.012319 / 0.075646 (-0.063328) | 0.206881 / 0.419271 (-0.212391) | 0.036561 / 0.043533 (-0.006972) | 0.234364 / 0.255139 (-0.020775) | 0.258316 / 0.283200 (-0.024884) | 0.017815 / 0.141683 (-0.123867) | 1.114111 / 1.452155 (-0.338043) | 1.165428 / 1.492716 (-0.327288) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.099302 / 0.018006 (0.081296) | 0.309195 / 0.000490 (0.308705) | 0.000261 / 0.000200 (0.000061) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018765 / 0.037411 (-0.018646) | 0.063123 / 0.014526 (0.048597) | 0.075437 / 0.176557 (-0.101119) | 0.122570 / 0.737135 (-0.614566) | 0.076637 / 0.296338 (-0.219702) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289965 / 0.215209 (0.074756) | 2.839053 / 2.077655 (0.761398) | 1.503463 / 1.504120 (-0.000657) | 1.390833 / 1.541195 (-0.150361) | 1.401918 / 1.468490 (-0.066572) | 0.711000 / 4.584777 (-3.873777) | 2.325513 / 3.745712 (-1.420199) | 2.831630 / 5.269862 (-2.438231) | 1.908370 / 4.565676 (-2.657307) | 0.077867 / 0.424275 (-0.346408) | 0.005509 / 0.007607 (-0.002098) | 0.336494 / 0.226044 (0.110450) | 3.358587 / 2.268929 (1.089658) | 1.901067 / 55.444624 (-53.543558) | 1.590130 / 6.876477 (-5.286347) | 1.753850 / 2.142072 (-0.388223) | 0.792458 / 4.805227 (-4.012769) | 0.135584 / 6.500664 (-6.365080) | 0.042028 / 0.075469 (-0.033441) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.966162 / 1.841788 (-0.875625) | 11.705310 / 8.074308 (3.631002) | 9.158842 / 10.191392 (-1.032550) | 0.128793 / 0.680424 (-0.551631) | 0.014422 / 0.534201 (-0.519779) | 0.299009 / 0.579283 (-0.280274) | 0.262873 / 0.434364 (-0.171491) | 0.340836 / 0.540337 (-0.199501) | 0.464440 / 1.386936 (-0.922496) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005951 / 0.011353 (-0.005402) | 0.003984 / 0.011008 (-0.007024) | 0.051432 / 0.038508 (0.012924) | 0.033223 / 0.023109 (0.010113) | 0.263972 / 0.275898 (-0.011926) | 0.289060 / 0.323480 (-0.034420) | 0.004446 / 0.007986 (-0.003540) | 0.002891 / 0.004328 (-0.001438) | 0.049347 / 0.004250 (0.045096) | 0.041191 / 0.037052 (0.004138) | 0.278334 / 0.258489 (0.019844) | 0.314065 / 0.293841 (0.020224) | 0.032020 / 0.128546 (-0.096526) | 0.012472 / 0.075646 (-0.063174) | 0.061288 / 0.419271 (-0.357984) | 0.033489 / 0.043533 (-0.010044) | 0.266831 / 0.255139 (0.011692) | 0.283008 / 0.283200 (-0.000192) | 0.018491 / 0.141683 (-0.123192) | 1.133634 / 1.452155 (-0.318521) | 1.154627 / 1.492716 (-0.338089) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.101831 / 0.018006 (0.083825) | 0.317942 / 0.000490 (0.317452) | 0.000217 / 0.000200 (0.000018) | 0.000056 / 0.000054 (0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022608 / 0.037411 (-0.014803) | 0.076776 / 0.014526 (0.062250) | 0.088686 / 0.176557 (-0.087870) | 0.129092 / 0.737135 (-0.608044) | 0.090780 / 0.296338 (-0.205558) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286762 / 0.215209 (0.071553) | 2.824307 / 2.077655 (0.746652) | 1.547215 / 1.504120 (0.043095) | 1.424522 / 1.541195 (-0.116673) | 1.446414 / 1.468490 (-0.022076) | 0.723683 / 4.584777 (-3.861094) | 0.974129 / 3.745712 (-2.771583) | 2.952552 / 5.269862 (-2.317309) | 1.903663 / 4.565676 (-2.662013) | 0.078786 / 0.424275 (-0.345489) | 0.005130 / 0.007607 (-0.002477) | 0.338925 / 0.226044 (0.112881) | 3.378557 / 2.268929 (1.109629) | 1.892951 / 55.444624 (-53.551674) | 1.599844 / 6.876477 (-5.276633) | 1.611963 / 2.142072 (-0.530109) | 0.793614 / 4.805227 (-4.011613) | 0.133795 / 6.500664 (-6.366869) | 0.040777 / 0.075469 (-0.034692) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.001391 / 1.841788 (-0.840397) | 12.166811 / 8.074308 (4.092503) | 10.588180 / 10.191392 (0.396788) | 0.141609 / 0.680424 (-0.538815) | 0.020941 / 0.534201 (-0.513260) | 0.340149 / 0.579283 (-0.239134) | 0.122988 / 0.434364 (-0.311376) | 0.339747 / 0.540337 (-0.200591) | 0.434338 / 1.386936 (-0.952598) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4ba47a35820069e7de9f374479d06b2a7935767e \"CML watermark\")\n" ]
2024-03-02T12:08:50Z
2024-07-01T17:51:25Z
2024-07-01T17:45:30Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6710.diff", "html_url": "https://github.com/huggingface/datasets/pull/6710", "merged_at": "2024-07-01T17:45:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/6710.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6710" }
Use shared memory for the IterableDataset epoch. This way, calling `ds.set_epoch()` in the main process will update the epoch in the DataLoader workers as well. This is especially useful because the epoch is used to compute the `effective_seed` used for shuffling. I used torch's shared memory in case users want to send dataset copies without shared memory using pickle. I also find it easier to use than `multiprocessing.shared_memory`, which requires unlinking only in the main process, or `mp.Value`, which is not picklable. close https://github.com/huggingface/datasets/issues/6673 cc @rwightman
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6710/reactions" }
null
null
null
true
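The description in the record above explains the idea (store the epoch in torch shared memory so that `set_epoch()` called in the main process is visible to DataLoader workers, and fold it into the effective shuffling seed) without showing code. A minimal self-contained sketch of that mechanism, built from plain PyTorch primitives rather than the actual `datasets` implementation, might look like this:

```python
# Minimal sketch of the shared-memory-epoch idea described above; this is not the
# datasets implementation, just an illustration with PyTorch primitives.
import torch
from torch.utils.data import DataLoader, IterableDataset


class EpochAwareIterable(IterableDataset):
    def __init__(self, seed: int = 42, n: int = 8):
        # A 0-dim int64 tensor in shared memory: worker processes see updates made
        # by the main process because the underlying storage is shared, not copied.
        self._epoch = torch.zeros((), dtype=torch.int64).share_memory_()
        self._seed = seed
        self._n = n

    def set_epoch(self, epoch: int) -> None:
        self._epoch.fill_(epoch)

    def __iter__(self):
        # The effective seed depends on the epoch, so each epoch shuffles differently.
        effective_seed = self._seed + int(self._epoch.item())
        generator = torch.Generator().manual_seed(effective_seed)
        yield from torch.randperm(self._n, generator=generator).tolist()


if __name__ == "__main__":
    ds = EpochAwareIterable()
    loader = DataLoader(ds, num_workers=2, persistent_workers=True, batch_size=None)
    for epoch in range(2):
        ds.set_epoch(epoch)  # also updates the copies living in the worker processes
        print(epoch, list(loader))
```

With `persistent_workers=True`, the workers keep their pickled copy of the dataset between epochs, which is exactly the situation where a plain Python attribute would go stale while the shared-memory tensor does not.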
https://github.com/huggingface/datasets/pull/6709
6,709
set dev version
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6709). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005081 / 0.011353 (-0.006272) | 0.004182 / 0.011008 (-0.006826) | 0.063377 / 0.038508 (0.024869) | 0.027880 / 0.023109 (0.004770) | 0.247260 / 0.275898 (-0.028638) | 0.273580 / 0.323480 (-0.049900) | 0.002995 / 0.007986 (-0.004991) | 0.002804 / 0.004328 (-0.001524) | 0.049669 / 0.004250 (0.045418) | 0.042469 / 0.037052 (0.005417) | 0.268606 / 0.258489 (0.010117) | 0.292867 / 0.293841 (-0.000973) | 0.028077 / 0.128546 (-0.100469) | 0.011031 / 0.075646 (-0.064615) | 0.210225 / 0.419271 (-0.209047) | 0.035723 / 0.043533 (-0.007810) | 0.252131 / 0.255139 (-0.003008) | 0.272895 / 0.283200 (-0.010304) | 0.019809 / 0.141683 (-0.121874) | 1.138500 / 1.452155 (-0.313655) | 1.167752 / 1.492716 (-0.324964) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094881 / 0.018006 (0.076875) | 0.300168 / 0.000490 (0.299678) | 0.000207 / 0.000200 (0.000007) | 0.000050 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017917 / 0.037411 (-0.019494) | 0.061854 / 0.014526 (0.047328) | 0.074481 / 0.176557 (-0.102075) | 0.120075 / 0.737135 (-0.617061) | 0.074627 / 0.296338 (-0.221711) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.287888 / 0.215209 (0.072679) | 2.770165 / 2.077655 (0.692510) | 1.500071 / 1.504120 (-0.004049) | 1.374857 / 1.541195 (-0.166338) | 1.427291 / 1.468490 (-0.041200) | 0.558431 / 4.584777 (-4.026346) | 2.439352 / 3.745712 (-1.306361) | 2.787471 / 5.269862 (-2.482391) | 1.742636 / 4.565676 (-2.823041) | 0.061716 / 0.424275 (-0.362559) | 0.004961 / 0.007607 (-0.002646) | 0.345209 / 0.226044 (0.119164) | 3.360253 / 2.268929 (1.091325) | 1.847945 / 55.444624 (-53.596680) | 1.595733 / 6.876477 (-5.280744) | 1.642350 / 2.142072 (-0.499723) | 0.638639 / 4.805227 (-4.166588) | 0.116918 / 6.500664 (-6.383746) | 0.042132 / 0.075469 (-0.033338) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.980602 / 1.841788 (-0.861185) | 11.545402 / 8.074308 (3.471094) | 9.452471 / 10.191392 (-0.738921) | 0.129930 / 0.680424 (-0.550494) | 0.014143 / 0.534201 (-0.520058) | 0.290302 / 0.579283 (-0.288981) | 0.263785 / 0.434364 (-0.170579) | 0.339580 / 0.540337 (-0.200758) | 0.450355 / 1.386936 (-0.936581) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005565 / 0.011353 (-0.005788) | 0.003764 / 0.011008 (-0.007244) | 0.050082 / 0.038508 (0.011574) | 0.030354 / 0.023109 (0.007245) | 0.250609 / 0.275898 (-0.025289) | 0.277200 / 0.323480 (-0.046280) | 0.004276 / 0.007986 (-0.003710) | 0.002805 / 0.004328 (-0.001523) | 0.048765 / 0.004250 (0.044514) | 0.045477 / 0.037052 (0.008425) | 0.267704 / 0.258489 (0.009215) | 0.303214 / 0.293841 (0.009373) | 0.029393 / 0.128546 (-0.099153) | 0.010623 / 0.075646 (-0.065023) | 0.058201 / 0.419271 (-0.361070) | 0.053131 / 0.043533 (0.009599) | 0.258682 / 0.255139 (0.003543) | 0.276069 / 0.283200 (-0.007131) | 0.018260 / 0.141683 (-0.123423) | 1.141542 / 1.452155 (-0.310613) | 1.185780 / 1.492716 (-0.306936) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.096857 / 0.018006 (0.078850) | 0.300656 / 0.000490 (0.300167) | 0.000450 / 0.000200 (0.000250) | 0.000059 / 0.000054 (0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022416 / 0.037411 (-0.014995) | 0.074781 / 0.014526 (0.060255) | 0.087299 / 0.176557 (-0.089257) | 0.127616 / 0.737135 (-0.609519) | 0.088382 / 0.296338 (-0.207957) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.298639 / 0.215209 (0.083430) | 2.940002 / 2.077655 (0.862347) | 1.709707 / 1.504120 (0.205587) | 1.556502 / 1.541195 (0.015307) | 1.592841 / 1.468490 (0.124351) | 0.570237 / 4.584777 (-4.014539) | 2.467576 / 3.745712 (-1.278137) | 2.741021 / 5.269862 (-2.528840) | 1.776526 / 4.565676 (-2.789151) | 0.063999 / 0.424275 (-0.360276) | 0.005068 / 0.007607 (-0.002539) | 0.360727 / 0.226044 (0.134682) | 3.535404 / 2.268929 (1.266476) | 2.035345 / 55.444624 (-53.409279) | 1.755916 / 6.876477 (-5.120561) | 1.889281 / 2.142072 (-0.252791) | 0.649025 / 4.805227 (-4.156202) | 0.118210 / 6.500664 (-6.382454) | 0.040815 / 0.075469 (-0.034654) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.005650 / 1.841788 (-0.836138) | 12.228314 / 8.074308 (4.154006) | 10.147363 / 10.191392 (-0.044029) | 0.159258 / 0.680424 (-0.521166) | 0.015288 / 0.534201 (-0.518913) | 0.288144 / 0.579283 (-0.291139) | 0.281319 / 0.434364 (-0.153045) | 0.323380 / 0.540337 (-0.216958) | 0.426887 / 1.386936 (-0.960049) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#8b04ccb486f3831b4b0d2474119823efa3815709 \"CML watermark\")\n" ]
2024-03-01T21:01:14Z
2024-03-01T21:07:35Z
2024-03-01T21:01:23Z
MEMBER
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/6709.diff", "html_url": "https://github.com/huggingface/datasets/pull/6709", "merged_at": "2024-03-01T21:01:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/6709.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6709" }
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6709/reactions" }
null
null
null
true