id
int64
599M
2.47B
url
stringlengths
58
61
repository_url
stringclasses
1 value
events_url
stringlengths
65
68
labels
listlengths
0
4
active_lock_reason
null
updated_at
stringlengths
20
20
assignees
listlengths
0
4
html_url
stringlengths
46
51
author_association
stringclasses
4 values
state_reason
stringclasses
3 values
draft
bool
2 classes
milestone
dict
comments
sequencelengths
0
30
title
stringlengths
1
290
reactions
dict
node_id
stringlengths
18
32
pull_request
dict
created_at
stringlengths
20
20
comments_url
stringlengths
67
70
body
stringlengths
0
228k
user
dict
labels_url
stringlengths
72
75
timeline_url
stringlengths
67
70
state
stringclasses
2 values
locked
bool
1 class
number
int64
1
7.11k
performed_via_github_app
null
closed_at
stringlengths
20
20
assignee
dict
is_pull_request
bool
2 classes
614,748,552
https://api.github.com/repos/huggingface/datasets/issues/66
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/66/events
[]
null
2020-05-08T13:39:23Z
[]
https://github.com/huggingface/datasets/pull/66
CONTRIBUTOR
null
false
null
[]
[Datasets] ReadME
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/66/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE1MjM5Njgy
{ "diff_url": "https://github.com/huggingface/datasets/pull/66.diff", "html_url": "https://github.com/huggingface/datasets/pull/66", "merged_at": "2020-05-08T13:39:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/66.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/66" }
2020-05-08T13:37:43Z
https://api.github.com/repos/huggingface/datasets/issues/66/comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/66/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/66/timeline
closed
false
66
null
2020-05-08T13:39:22Z
null
true
614,746,516
https://api.github.com/repos/huggingface/datasets/issues/65
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/65/events
[]
null
2020-05-08T13:35:41Z
[]
https://github.com/huggingface/datasets/pull/65
CONTRIBUTOR
null
false
null
[]
fix math dataset and xcopa
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/65/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE1MjM4MDEw
{ "diff_url": "https://github.com/huggingface/datasets/pull/65.diff", "html_url": "https://github.com/huggingface/datasets/pull/65", "merged_at": "2020-05-08T13:35:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/65.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/65" }
2020-05-08T13:33:55Z
https://api.github.com/repos/huggingface/datasets/issues/65/comments
- fixes math dataset and xcopa, uploaded both of the to S3
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/65/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/65/timeline
closed
false
65
null
2020-05-08T13:35:40Z
null
true
614,737,057
https://api.github.com/repos/huggingface/datasets/issues/64
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/64/events
[]
null
2020-05-08T13:17:31Z
[]
https://github.com/huggingface/datasets/pull/64
CONTRIBUTOR
null
false
null
[]
[Datasets] Make master ready for datasets adding
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/64/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE1MjMwMjYy
{ "diff_url": "https://github.com/huggingface/datasets/pull/64.diff", "html_url": "https://github.com/huggingface/datasets/pull/64", "merged_at": "2020-05-08T13:17:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/64.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/64" }
2020-05-08T13:17:00Z
https://api.github.com/repos/huggingface/datasets/issues/64/comments
Add all relevant files so that datasets can now be added on master
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/64/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/64/timeline
closed
false
64
null
2020-05-08T13:17:30Z
null
true
614,666,365
https://api.github.com/repos/huggingface/datasets/issues/63
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/63/events
[]
null
2020-05-08T17:39:22Z
[]
https://github.com/huggingface/datasets/pull/63
CONTRIBUTOR
null
false
null
[]
[Dataset scripts] add all datasets scripts
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/63/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE1MTczODU5
{ "diff_url": "https://github.com/huggingface/datasets/pull/63.diff", "html_url": "https://github.com/huggingface/datasets/pull/63", "merged_at": "2020-05-08T11:34:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/63.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/63" }
2020-05-08T10:50:15Z
https://api.github.com/repos/huggingface/datasets/issues/63/comments
As mentioned, we can have the canonical datasets in the master. For now I also want to include all the data as present on S3 to make the synchronization easier when uploading new datastes. @mariamabarham @lhoestq @thomwolf - what do you think? If this is ok for you, I can sync up the master with the `add_dataset` branch: https://github.com/huggingface/nlp/pull/37 so that master is up to date.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/63/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/63/timeline
closed
false
63
null
2020-05-08T11:34:00Z
null
true
614,630,830
https://api.github.com/repos/huggingface/datasets/issues/62
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/62/events
[]
null
2020-05-08T09:45:47Z
[]
https://github.com/huggingface/datasets/pull/62
CONTRIBUTOR
null
false
null
[]
[Cached Path] Better error message
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/62/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE1MTQ1NDAx
{ "diff_url": "https://github.com/huggingface/datasets/pull/62.diff", "html_url": "https://github.com/huggingface/datasets/pull/62", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/62.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/62" }
2020-05-08T09:39:47Z
https://api.github.com/repos/huggingface/datasets/issues/62/comments
IMO returning `None` in this function only leads to confusion and is never helpful.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/62/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/62/timeline
closed
false
62
null
2020-05-08T09:45:47Z
null
true
614,607,474
https://api.github.com/repos/huggingface/datasets/issues/61
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/61/events
[]
null
2020-05-08T08:56:32Z
[]
https://github.com/huggingface/datasets/pull/61
CONTRIBUTOR
null
false
null
[]
[Load] rename setup_module to prepare_module
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/61/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE1MTI3MTU4
{ "diff_url": "https://github.com/huggingface/datasets/pull/61.diff", "html_url": "https://github.com/huggingface/datasets/pull/61", "merged_at": "2020-05-08T08:56:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/61.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/61" }
2020-05-08T08:54:22Z
https://api.github.com/repos/huggingface/datasets/issues/61/comments
rename setup_module to prepare_module due to issues with pytests `setup_module` function. See: PR #59.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/61/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/61/timeline
closed
false
61
null
2020-05-08T08:56:16Z
null
true
614,372,553
https://api.github.com/repos/huggingface/datasets/issues/60
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/60/events
[]
null
2020-05-08T10:38:32Z
[]
https://github.com/huggingface/datasets/pull/60
MEMBER
null
false
null
[]
Update to simplify some datasets conversion
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/60/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0OTQyNjEy
{ "diff_url": "https://github.com/huggingface/datasets/pull/60.diff", "html_url": "https://github.com/huggingface/datasets/pull/60", "merged_at": "2020-05-08T10:18:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/60.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/60" }
2020-05-07T22:02:24Z
https://api.github.com/repos/huggingface/datasets/issues/60/comments
This PR updates the encoding of `Values` like `integers`, `boolean` and `float` to use python casting and avoid having to cast in the dataset scripts, as mentioned here: https://github.com/huggingface/nlp/pull/37#discussion_r420176626 We could also change (not included in this PR yet): - `supervized_keys` to make them a NamedTuple instead of a dataclass, and - handle specifically the `Translation` features. as mentioned here: https://github.com/huggingface/nlp/pull/37#discussion_r421740236 @patrickvonplaten @mariamabarham tell me if you want these two last changes as well.
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
https://api.github.com/repos/huggingface/datasets/issues/60/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/60/timeline
closed
false
60
null
2020-05-08T10:18:24Z
null
true
614,366,045
https://api.github.com/repos/huggingface/datasets/issues/59
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/59/events
[]
null
2020-05-08T10:57:57Z
[]
https://github.com/huggingface/datasets/pull/59
MEMBER
null
false
null
[]
Fix tests
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/59/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0OTM3NTgx
{ "diff_url": "https://github.com/huggingface/datasets/pull/59.diff", "html_url": "https://github.com/huggingface/datasets/pull/59", "merged_at": "2020-05-08T10:46:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/59.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/59" }
2020-05-07T21:48:09Z
https://api.github.com/repos/huggingface/datasets/issues/59/comments
@patrickvonplaten I've broken a bit the tests with #25 while simplifying and re-organizing the `load.py` and `download_manager.py` scripts. I'm trying to fix them here but I have a weird error, do you think you can have a look? ```bash (datasets) MacBook-Pro-de-Thomas:datasets thomwolf$ python -m pytest -sv ./tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ============================================================================= test session starts ============================================================================= platform darwin -- Python 3.7.7, pytest-5.4.1, py-1.8.1, pluggy-0.13.1 -- /Users/thomwolf/miniconda2/envs/datasets/bin/python cachedir: .pytest_cache rootdir: /Users/thomwolf/Documents/GitHub/datasets plugins: xdist-1.31.0, forked-1.1.3 collected 1 item tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ERROR =================================================================================== ERRORS ==================================================================================== ____________________________________________________________ ERROR at setup of DatasetTest.test_builder_class_snli ____________________________________________________________ file_path = <module 'tests.test_dataset_common' from '/Users/thomwolf/Documents/GitHub/datasets/tests/test_dataset_common.py'> download_config = DownloadConfig(cache_dir=None, force_download=False, resume_download=False, local_files_only=False, proxies=None, user_agent=None, extract_compressed_file=True, force_extract=True) download_kwargs = {} def setup_module(file_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs,) -> DatasetBuilder: r""" Download/extract/cache a dataset to add to the lib from a path or url which can be: - a path to a local directory containing the dataset processing python script - an url to a S3 directory with a dataset processing python script Dataset codes are cached inside the lib to allow easy import (avoid ugly sys.path tweaks) and using cloudpickle (among other things). Return: tuple of the unique id associated to the dataset the local path to the dataset """ if download_config is None: download_config = DownloadConfig(**download_kwargs) download_config.extract_compressed_file = True download_config.force_extract = True > name = list(filter(lambda x: x, file_path.split("/")))[-1] + ".py" E AttributeError: module 'tests.test_dataset_common' has no attribute 'split' src/nlp/load.py:169: AttributeError ============================================================================== warnings summary =============================================================================== /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15 /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses import imp -- Docs: https://docs.pytest.org/en/latest/warnings.html =========================================================================== short test summary info =========================================================================== ERROR tests/test_dataset_common.py::DatasetTest::test_builder_class_snli - AttributeError: module 'tests.test_dataset_common' has no attribute 'split' ========================================================================= 1 warning, 1 error in 3.63s ========================================================================= ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
https://api.github.com/repos/huggingface/datasets/issues/59/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/59/timeline
closed
false
59
null
2020-05-08T10:46:51Z
null
true
614,362,308
https://api.github.com/repos/huggingface/datasets/issues/58
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/58/events
[]
null
2020-05-07T21:48:01Z
[]
https://github.com/huggingface/datasets/pull/58
MEMBER
null
false
null
[]
Aborted PR - Fix tests
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/58/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0OTM0NTY4
{ "diff_url": "https://github.com/huggingface/datasets/pull/58.diff", "html_url": "https://github.com/huggingface/datasets/pull/58", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/58.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/58" }
2020-05-07T21:40:19Z
https://api.github.com/repos/huggingface/datasets/issues/58/comments
@patrickvonplaten I've broken a bit the tests with #25 while simplifying and re-organizing the `load.py` and `download_manager.py` scripts. I'm trying to fix them here but I have a weird error, do you think you can have a look? ```bash (datasets) MacBook-Pro-de-Thomas:datasets thomwolf$ python -m pytest -sv ./tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ============================================================================= test session starts ============================================================================= platform darwin -- Python 3.7.7, pytest-5.4.1, py-1.8.1, pluggy-0.13.1 -- /Users/thomwolf/miniconda2/envs/datasets/bin/python cachedir: .pytest_cache rootdir: /Users/thomwolf/Documents/GitHub/datasets plugins: xdist-1.31.0, forked-1.1.3 collected 1 item tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ERROR =================================================================================== ERRORS ==================================================================================== ____________________________________________________________ ERROR at setup of DatasetTest.test_builder_class_snli ____________________________________________________________ file_path = <module 'tests.test_dataset_common' from '/Users/thomwolf/Documents/GitHub/datasets/tests/test_dataset_common.py'> download_config = DownloadConfig(cache_dir=None, force_download=False, resume_download=False, local_files_only=False, proxies=None, user_agent=None, extract_compressed_file=True, force_extract=True) download_kwargs = {} def setup_module(file_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs,) -> DatasetBuilder: r""" Download/extract/cache a dataset to add to the lib from a path or url which can be: - a path to a local directory containing the dataset processing python script - an url to a S3 directory with a dataset processing python script Dataset codes are cached inside the lib to allow easy import (avoid ugly sys.path tweaks) and using cloudpickle (among other things). Return: tuple of the unique id associated to the dataset the local path to the dataset """ if download_config is None: download_config = DownloadConfig(**download_kwargs) download_config.extract_compressed_file = True download_config.force_extract = True > name = list(filter(lambda x: x, file_path.split("/")))[-1] + ".py" E AttributeError: module 'tests.test_dataset_common' has no attribute 'split' src/nlp/load.py:169: AttributeError ============================================================================== warnings summary =============================================================================== /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15 /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses import imp -- Docs: https://docs.pytest.org/en/latest/warnings.html =========================================================================== short test summary info =========================================================================== ERROR tests/test_dataset_common.py::DatasetTest::test_builder_class_snli - AttributeError: module 'tests.test_dataset_common' has no attribute 'split' ========================================================================= 1 warning, 1 error in 3.63s ========================================================================= ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
https://api.github.com/repos/huggingface/datasets/issues/58/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/58/timeline
closed
false
58
null
2020-05-07T21:41:27Z
null
true
614,261,638
https://api.github.com/repos/huggingface/datasets/issues/57
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/57/events
[]
null
2020-05-08T13:20:30Z
[]
https://github.com/huggingface/datasets/pull/57
MEMBER
null
false
null
[]
Better cached path
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/57/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0ODUzMDM5
{ "diff_url": "https://github.com/huggingface/datasets/pull/57.diff", "html_url": "https://github.com/huggingface/datasets/pull/57", "merged_at": "2020-05-08T13:20:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/57.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/57" }
2020-05-07T18:36:00Z
https://api.github.com/repos/huggingface/datasets/issues/57/comments
### Changes: - The `cached_path` no longer returns None if the file is missing/the url doesn't work. Instead, it can raise `FileNotFoundError` (missing file), `ConnectionError` (no cache and unreachable url) or `ValueError` (parsing error) - Fix requests to firebase API that doesn't handle HEAD requests... - Allow custom download in datasets script: it allows to use `tf.io.gfile.copy` for example, to download from google storage. I added an example: the `boolq` script
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/57/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/57/timeline
closed
false
57
null
2020-05-08T13:20:28Z
null
true
614,236,869
https://api.github.com/repos/huggingface/datasets/issues/56
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/56/events
[]
null
2020-05-07T17:52:51Z
[]
https://github.com/huggingface/datasets/pull/56
CONTRIBUTOR
null
false
null
[]
[Dataset] Tester add mock function
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/56/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0ODMyODY4
{ "diff_url": "https://github.com/huggingface/datasets/pull/56.diff", "html_url": "https://github.com/huggingface/datasets/pull/56", "merged_at": "2020-05-07T17:52:50Z", "patch_url": "https://github.com/huggingface/datasets/pull/56.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/56" }
2020-05-07T17:51:37Z
https://api.github.com/repos/huggingface/datasets/issues/56/comments
need to add an empty `extract()` function to make `hansard` dataset test work.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/56/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/56/timeline
closed
false
56
null
2020-05-07T17:52:50Z
null
true
613,968,072
https://api.github.com/repos/huggingface/datasets/issues/55
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/55/events
[]
null
2020-05-11T07:20:02Z
[]
https://github.com/huggingface/datasets/pull/55
MEMBER
null
false
null
[]
Beam datasets
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/55/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0NjE0MjE1
{ "diff_url": "https://github.com/huggingface/datasets/pull/55.diff", "html_url": "https://github.com/huggingface/datasets/pull/55", "merged_at": "2020-05-11T07:20:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/55.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/55" }
2020-05-07T11:04:32Z
https://api.github.com/repos/huggingface/datasets/issues/55/comments
# Beam datasets ## Intro Beam Datasets are using beam pipelines for preprocessing (basically lots of `.map` over objects called PCollections). The advantage of apache beam is that you can choose which type of runner you want to use to preprocess your data. The main runners are: - the `DirectRunner` to run the pipeline locally (default). However I encountered memory issues for big datasets (like the french or english wikipedia). Small dataset work fine - Google Dataflow. I didn't play with it. - Spark or Flink, two well known data processing frameworks. I tried to use the Spark/Flink local runners provided by apache beam for python and wasn't able to make them work properly though... ## From tfds beam datasets to our own beam datasets Tensorflow datasets used beam and a complicated pipeline to shard the TFRecords files. To allow users to download beam datasets and not having to preprocess them, they also allow to download the already preprocessed datasets from their google storage (the beam pipeline doesn't run in that case). On our side, we replace TFRecords by something else. Arrow or Parquet do the job but I chose Parquet as: 1) there is a builtin apache beam parquet writer that is quite convenient, and 2) reading parquet from the pyarrow library is also simple and effective (there is a mmap option !) Moreover we don't shard datasets in many many files like tfds (they were doing probably doing that mainly because of the limit of 2Gb per TFRecord file). Therefore we have a simpler pipeline that saves each split into one parquet file. We also removed the utilities to use their google storage (for now maybe ? we'll have to discuss it). ## Main changes - Added a BeamWriter to save the output of beam pipelines into parquet files and fill dataset infos - Create a ParquetReader and refactor a bit the arrow_reader.py \> **With this, we can now try to add beam datasets from tfds** I already added the wikipedia one, and I will also try to add the Wiki40b dataset ## Test the wikipedia script You can download and run the beam pipeline for wikipedia (using the `DirectRunner` by default) like this: ``` >>> import nlp >>> nlp.load("datasets/nlp/wikipedia", dataset_config="20200501.frr") ``` This wikipedia dataset (lang: frr, North Frisian) is a small one (~10Mb), but feel free to try bigger ones (and fill 20Gb of swap memory if you try the english one lol) ## Next Should we allow to download preprocessed datasets from the tfds google storage ? Should we try to optimize the beam pipelines to run locally without memory issues ? Should we try other data processing frameworks for big datasets, like spark ? ## About this PR It should be merged after #25 ----------------- I'd be happy to have your feedback and your ideas to improve the processing of big datasets like wikipedia :)
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/55/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/55/timeline
closed
false
55
null
2020-05-11T07:20:00Z
null
true
613,513,348
https://api.github.com/repos/huggingface/datasets/issues/54
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/54/events
[]
null
2020-05-06T18:13:00Z
[]
https://github.com/huggingface/datasets/pull/54
CONTRIBUTOR
null
false
null
[]
[Tests] Improved Error message for dummy folder structure
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/54/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0MjUyODkw
{ "diff_url": "https://github.com/huggingface/datasets/pull/54.diff", "html_url": "https://github.com/huggingface/datasets/pull/54", "merged_at": "2020-05-06T18:12:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/54.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/54" }
2020-05-06T18:11:48Z
https://api.github.com/repos/huggingface/datasets/issues/54/comments
Improved Error message
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/54/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/54/timeline
closed
false
54
null
2020-05-06T18:12:59Z
null
true
613,436,158
https://api.github.com/repos/huggingface/datasets/issues/53
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/53/events
[]
null
2020-05-07T15:28:46Z
[]
https://github.com/huggingface/datasets/pull/53
CONTRIBUTOR
null
false
null
[]
[Features] Typo in generate_from_dict
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/53/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0MTkwMzkz
{ "diff_url": "https://github.com/huggingface/datasets/pull/53.diff", "html_url": "https://github.com/huggingface/datasets/pull/53", "merged_at": "2020-05-07T15:28:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/53.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/53" }
2020-05-06T16:05:23Z
https://api.github.com/repos/huggingface/datasets/issues/53/comments
Change `isinstance` test in features when generating features from dict.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/53/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/53/timeline
closed
false
53
null
2020-05-07T15:28:45Z
null
true
613,339,071
https://api.github.com/repos/huggingface/datasets/issues/52
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/52/events
[]
null
2020-05-06T13:55:19Z
[]
https://github.com/huggingface/datasets/pull/52
CONTRIBUTOR
null
false
null
[]
allow dummy folder structure to handle dict of lists
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/52/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0MTEyMDAy
{ "diff_url": "https://github.com/huggingface/datasets/pull/52.diff", "html_url": "https://github.com/huggingface/datasets/pull/52", "merged_at": "2020-05-06T13:55:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/52.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/52" }
2020-05-06T13:54:35Z
https://api.github.com/repos/huggingface/datasets/issues/52/comments
`esnli.py` needs that extension of the dummy data testing.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/52/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/52/timeline
closed
false
52
null
2020-05-06T13:55:18Z
null
true
613,266,668
https://api.github.com/repos/huggingface/datasets/issues/51
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/51/events
[]
null
2020-05-07T22:07:19Z
[]
https://github.com/huggingface/datasets/pull/51
CONTRIBUTOR
null
false
null
[]
[Testing] Improved testing structure
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/51/reactions" }
MDExOlB1bGxSZXF1ZXN0NDE0MDUyOTYw
{ "diff_url": "https://github.com/huggingface/datasets/pull/51.diff", "html_url": "https://github.com/huggingface/datasets/pull/51", "merged_at": "2020-05-06T13:20:17Z", "patch_url": "https://github.com/huggingface/datasets/pull/51.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/51" }
2020-05-06T12:03:07Z
https://api.github.com/repos/huggingface/datasets/issues/51/comments
This PR refactors the test design a bit and puts the mock download manager in the `utils` files as it is just a test helper class. as @mariamabarham pointed out, creating a dummy folder structure can be quite hard to grasp. This PR tries to change that to some extent. It follows the following logic for the `dummy` folder structure now: 1.) The data bulider has no config -> the `dummy` folder structure is: `dummy/<version>/dummy_data.zip` 2) The data builder has >= 1 configs -> the `dummy` folder structure is: `dummy/<config_name_1>/<version>/dummy_data.zip` `dummy/<config_name_2>/<version>/dummy_data.zip` Now, the difficult part is how to create the `dummy_data.zip` file. There are two cases: A) The `data_urs` parameter inserted into the `download_and_extract` fn is a **string**: -> the `dummy_data.zip` file zips the folder: `dummy_data/<relative_path_of_folder_structure_of_url>` B) The `data_urs` parameter inserted into the `download_and_extract` fn is a **dict**: -> the `dummy_data.zip` file zips the folder: `dummy_data/<relative_path_of_folder_structure_of_url_behind _key_1>` `dummy_data/<relative_path_of_folder_structure_of_url_behind _key_2>` By relative folder structure I mean `url_path.split('./')[-1]`. As an example the dataset **xquad** by deepmind has the following url path behind the key `de`: `https://github.com/deepmind/xquad/blob/master/xquad.de.json` -> This means that the relative url path should be `xquad.de.json`. @mariamabarham B) is a change from how is was before and I think is makes more sense. While before the `dummy_data.zip` file for xquad with config `de` looked like: `dummy_data/de` it would now look like `dummy_data/xquad.de.json`. I think this is better and easier to understand. Therefore there are currently 6 tests that would have to have changed their dummy folder structure, but which can easily be done (30min). I also added a function: `print_dummy_data_folder_structure` that prints out the expected structures when testing which should be quite helpful.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/51/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/51/timeline
closed
false
51
null
2020-05-06T13:20:18Z
null
true
612,583,126
https://api.github.com/repos/huggingface/datasets/issues/50
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/50/events
[]
null
2020-05-05T13:02:18Z
[]
https://github.com/huggingface/datasets/pull/50
CONTRIBUTOR
null
false
null
[]
[Tests] test only for fast test as a default
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/50/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEzNTAwMjE0
{ "diff_url": "https://github.com/huggingface/datasets/pull/50.diff", "html_url": "https://github.com/huggingface/datasets/pull/50", "merged_at": "2020-05-05T13:02:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/50.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/50" }
2020-05-05T12:59:22Z
https://api.github.com/repos/huggingface/datasets/issues/50/comments
Test only for one config on circle ci to speed up testing. Add all config test as a slow test. @mariamabarham @thomwolf
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/50/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/50/timeline
closed
false
50
null
2020-05-05T13:02:16Z
null
true
612,545,483
https://api.github.com/repos/huggingface/datasets/issues/49
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/49/events
[]
null
2020-05-05T13:59:26Z
[]
https://github.com/huggingface/datasets/pull/49
MEMBER
null
false
null
[]
fix flatten nested
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/49/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEzNDY5ODg0
{ "diff_url": "https://github.com/huggingface/datasets/pull/49.diff", "html_url": "https://github.com/huggingface/datasets/pull/49", "merged_at": "2020-05-05T13:59:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/49.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/49" }
2020-05-05T11:55:13Z
https://api.github.com/repos/huggingface/datasets/issues/49/comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/49/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/49/timeline
closed
false
49
null
2020-05-05T13:59:25Z
null
true
612,504,687
https://api.github.com/repos/huggingface/datasets/issues/48
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/48/events
[]
null
2020-05-05T11:13:58Z
[]
https://github.com/huggingface/datasets/pull/48
CONTRIBUTOR
null
false
null
[]
[Command Convert] remove tensorflow import
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/48/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEzNDM2MTgz
{ "diff_url": "https://github.com/huggingface/datasets/pull/48.diff", "html_url": "https://github.com/huggingface/datasets/pull/48", "merged_at": "2020-05-05T11:13:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/48.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/48" }
2020-05-05T10:41:00Z
https://api.github.com/repos/huggingface/datasets/issues/48/comments
Remove all tensorflow import statements.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/48/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/48/timeline
closed
false
48
null
2020-05-05T11:13:56Z
null
true
612,446,493
https://api.github.com/repos/huggingface/datasets/issues/47
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/47/events
[]
null
2020-05-05T10:40:28Z
[]
https://github.com/huggingface/datasets/pull/47
CONTRIBUTOR
null
false
null
[]
[PyArrow Feature] fix py arrow bool
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/47/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEzMzg5MDc1
{ "diff_url": "https://github.com/huggingface/datasets/pull/47.diff", "html_url": "https://github.com/huggingface/datasets/pull/47", "merged_at": "2020-05-05T10:40:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/47.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/47" }
2020-05-05T08:56:28Z
https://api.github.com/repos/huggingface/datasets/issues/47/comments
To me it seems that `bool` can only be accessed with `bool_` when looking at the pyarrow types: https://arrow.apache.org/docs/python/api/datatypes.html.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/47/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/47/timeline
closed
false
47
null
2020-05-05T10:40:27Z
null
true
612,398,190
https://api.github.com/repos/huggingface/datasets/issues/46
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/46/events
[]
null
2020-05-05T08:37:45Z
[]
https://github.com/huggingface/datasets/pull/46
CONTRIBUTOR
null
false
null
[]
[Features] Strip str key before dict look-up
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/46/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEzMzUxNTY0
{ "diff_url": "https://github.com/huggingface/datasets/pull/46.diff", "html_url": "https://github.com/huggingface/datasets/pull/46", "merged_at": "2020-05-05T08:37:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/46.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/46" }
2020-05-05T07:31:45Z
https://api.github.com/repos/huggingface/datasets/issues/46/comments
The dataset `anli.py` currently fails because it tries to look up a key `1\n` in a dict that only has the key `1`. Added an if statement to strip key if it cannot be found in dict.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/46/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/46/timeline
closed
false
46
null
2020-05-05T08:37:44Z
null
true
612,386,583
https://api.github.com/repos/huggingface/datasets/issues/45
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/45/events
[]
null
2022-10-04T09:32:11Z
[]
https://github.com/huggingface/datasets/pull/45
CONTRIBUTOR
null
false
null
[]
[Load] Separate Module kwargs and builder kwargs.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/45/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEzMzQzMjAy
{ "diff_url": "https://github.com/huggingface/datasets/pull/45.diff", "html_url": "https://github.com/huggingface/datasets/pull/45", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/45.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/45" }
2020-05-05T07:09:54Z
https://api.github.com/repos/huggingface/datasets/issues/45/comments
Kwargs for the `load_module` fn should be passed with `module_xxxx` to `builder_kwargs` of `load` fn. This is a follow-up PR of: https://github.com/huggingface/nlp/pull/41
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/45/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/45/timeline
closed
false
45
null
2020-05-08T09:51:22Z
null
true
611,873,486
https://api.github.com/repos/huggingface/datasets/issues/44
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/44/events
[]
null
2020-05-04T13:28:04Z
[]
https://github.com/huggingface/datasets/pull/44
CONTRIBUTOR
null
false
null
[]
[Tests] Fix tests for datasets with no config
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/44/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyOTUwMzU1
{ "diff_url": "https://github.com/huggingface/datasets/pull/44.diff", "html_url": "https://github.com/huggingface/datasets/pull/44", "merged_at": "2020-05-04T13:28:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/44.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/44" }
2020-05-04T13:25:38Z
https://api.github.com/repos/huggingface/datasets/issues/44/comments
Forgot to fix `None` problem for datasets that have no config this in PR: https://github.com/huggingface/nlp/pull/42
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/44/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/44/timeline
closed
false
44
null
2020-05-04T13:28:03Z
null
true
611,773,279
https://api.github.com/repos/huggingface/datasets/issues/43
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/43/events
[]
null
2022-10-04T09:32:02Z
[]
https://github.com/huggingface/datasets/pull/43
CONTRIBUTOR
null
false
null
[]
[Checksums] If no configs exist prevent to run over empty list
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/43/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyODcxNTE5
{ "diff_url": "https://github.com/huggingface/datasets/pull/43.diff", "html_url": "https://github.com/huggingface/datasets/pull/43", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/43.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/43" }
2020-05-04T10:39:42Z
https://api.github.com/repos/huggingface/datasets/issues/43/comments
`movie_rationales` e.g. has no configs.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/43/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/43/timeline
closed
false
43
null
2020-05-04T13:18:03Z
null
true
611,754,343
https://api.github.com/repos/huggingface/datasets/issues/42
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/42/events
[]
null
2020-05-04T13:10:50Z
[]
https://github.com/huggingface/datasets/pull/42
CONTRIBUTOR
null
false
null
[]
[Tests] allow tests for builders without config
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/42/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyODU1OTE2
{ "diff_url": "https://github.com/huggingface/datasets/pull/42.diff", "html_url": "https://github.com/huggingface/datasets/pull/42", "merged_at": "2020-05-04T13:10:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/42.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/42" }
2020-05-04T10:06:22Z
https://api.github.com/repos/huggingface/datasets/issues/42/comments
Some dataset scripts have no configs - the tests have to be adapted for this case. In this case the dummy data will be saved as: - natural_questions -> dummy -> -> 1.0.0 (version num) -> -> -> dummy_data.zip
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/42/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/42/timeline
closed
false
42
null
2020-05-04T13:10:48Z
null
true
611,739,219
https://api.github.com/repos/huggingface/datasets/issues/41
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/41/events
[]
null
2020-05-04T19:39:07Z
[]
https://github.com/huggingface/datasets/pull/41
CONTRIBUTOR
null
false
null
[]
[Load module] allow kwargs into load module
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/41/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyODQzNDQy
{ "diff_url": "https://github.com/huggingface/datasets/pull/41.diff", "html_url": "https://github.com/huggingface/datasets/pull/41", "merged_at": "2020-05-04T19:39:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/41.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/41" }
2020-05-04T09:42:11Z
https://api.github.com/repos/huggingface/datasets/issues/41/comments
Currenly it is not possible to force a re-download of the dataset script. This simple change allows to pass ``force_reload=True`` as ``builder_kwargs`` in the ``load.py`` function.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/41/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/41/timeline
closed
false
41
null
2020-05-04T19:39:06Z
null
true
611,721,308
https://api.github.com/repos/huggingface/datasets/issues/40
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/40/events
[]
null
2020-05-04T11:51:51Z
[]
https://github.com/huggingface/datasets/pull/40
MEMBER
null
false
null
[]
Update remote checksums instead of overwrite
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/40/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyODI4NzU2
{ "diff_url": "https://github.com/huggingface/datasets/pull/40.diff", "html_url": "https://github.com/huggingface/datasets/pull/40", "merged_at": "2020-05-04T11:51:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/40.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/40" }
2020-05-04T09:13:14Z
https://api.github.com/repos/huggingface/datasets/issues/40/comments
When the user uploads a dataset on S3, checksums are also uploaded with the `--upload_checksums` parameter. If the user uploads the dataset in several steps, then the remote checksums file was previously overwritten. Now it's going to be updated with the new checksums.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/40/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/40/timeline
closed
false
40
null
2020-05-04T11:51:49Z
null
true
611,712,135
https://api.github.com/repos/huggingface/datasets/issues/39
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/39/events
[]
null
2020-05-04T08:59:50Z
[]
https://github.com/huggingface/datasets/pull/39
CONTRIBUTOR
null
false
null
[]
[Test] improve slow testing
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/39/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyODIxNTA4
{ "diff_url": "https://github.com/huggingface/datasets/pull/39.diff", "html_url": "https://github.com/huggingface/datasets/pull/39", "merged_at": "2020-05-04T08:59:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/39.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/39" }
2020-05-04T08:58:33Z
https://api.github.com/repos/huggingface/datasets/issues/39/comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/39/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/39/timeline
closed
false
39
null
2020-05-04T08:59:49Z
null
true
611,677,656
https://api.github.com/repos/huggingface/datasets/issues/38
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/38/events
[]
null
2020-05-04T09:48:20Z
[ { "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }, { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
https://github.com/huggingface/datasets/issues/38
CONTRIBUTOR
completed
null
null
[]
[Checksums] Error for some datasets
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/38/reactions" }
MDU6SXNzdWU2MTE2Nzc2NTY=
null
2020-05-04T08:00:16Z
https://api.github.com/repos/huggingface/datasets/issues/38/comments
The checksums command works very nicely for `squad`. But for `crime_and_punish` and `xnli`, the same bug happens: When running: ``` python nlp-cli nlp-cli test xnli --save_checksums ``` leads to: ``` File "nlp-cli", line 33, in <module> service.run() File "/home/patrick/python_bin/nlp/commands/test.py", line 61, in run ignore_checksums=self._ignore_checksums, File "/home/patrick/python_bin/nlp/builder.py", line 383, in download_and_prepare self._download_and_prepare(dl_manager=dl_manager, download_config=download_config) File "/home/patrick/python_bin/nlp/builder.py", line 627, in _download_and_prepare dl_manager=dl_manager, max_examples_per_split=download_config.max_examples_per_split, File "/home/patrick/python_bin/nlp/builder.py", line 431, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/patrick/python_bin/nlp/datasets/xnli/8bf4185a2da1ef2a523186dd660d9adcf0946189e7fa5942ea31c63c07b68a7f/xnli.py", line 95, in _split_generators dl_dir = dl_manager.download_and_extract(_DATA_URL) File "/home/patrick/python_bin/nlp/utils/download_manager.py", line 246, in download_and_extract return self.extract(self.download(url_or_urls)) File "/home/patrick/python_bin/nlp/utils/download_manager.py", line 186, in download self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) File "/home/patrick/python_bin/nlp/utils/download_manager.py", line 166, in _record_sizes_checksums self._recorded_sizes_checksums[url] = get_size_checksum(path) File "/home/patrick/python_bin/nlp/utils/checksums_utils.py", line 81, in get_size_checksum with open(path, "rb") as f: TypeError: expected str, bytes or os.PathLike object, not tuple ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/38/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/38/timeline
closed
false
38
null
2020-05-04T09:48:20Z
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
false
611,670,295
https://api.github.com/repos/huggingface/datasets/issues/37
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/37/events
[]
null
2022-10-04T09:32:17Z
[ { "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }, { "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" } ]
https://github.com/huggingface/datasets/pull/37
CONTRIBUTOR
null
true
null
[]
[Datasets ToDo-List] add datasets
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/37/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyNzg5MjQ4
{ "diff_url": "https://github.com/huggingface/datasets/pull/37.diff", "html_url": "https://github.com/huggingface/datasets/pull/37", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/37.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/37" }
2020-05-04T07:47:39Z
https://api.github.com/repos/huggingface/datasets/issues/37/comments
## Description This PR acts as a dashboard to see which datasets are added to the library and work. Cicle-ci should always be green so that we can be sure that newly added datasets are functional. This PR should not be merged. ## Progress **For the following datasets the test commands**: ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_real_dataset_<your-dataset-name> ``` and ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_dataset_all_configs_<your-dataset-name> ``` **passes**. - [x] Squad - [x] Sentiment140 - [x] XNLI - [x] Crime_and_Punish - [x] movie_rationales - [x] ai2_arc - [x] anli - [x] event2Mind - [x] Fquad - [x] blimp - [x] empathetic_dialogues - [x] cosmos_qa - [x] xquad - [x] blog_authorship_corpus - [x] SNLI - [x] break_data - [x] SQuAD v2 - [x] cfq - [x] eraser_multi_rc - [x] Glue - [x] Tydiqa - [x] wiki_qa - [x] wikitext - [x] winogrande - [x] wiqa - [x] esnli - [x] civil_comments - [x] commonsense_qa - [x] com_qa - [x] coqa - [x] wiki_split - [x] cos_e - [x] xcopa - [x] quarel - [x] quartz - [x] squad_it - [x] quoref - [x] squad_pt - [x] cornell_movie_dialog - [x] SciQ - [x] Scifact - [x] hellaswag - [x] ted_multi (in translate) - [x] Aeslc (summarization) - [x] drop - [x] gap - [x] hansard - [x] opinosis - [x] MLQA - [x] math_dataset ## How-To-Add a dataset **Before adding a dataset make sure that your branch is up to date**: 1. `git checkout add_datasets` 2. `git pull` **Add a dataset via the `convert_dataset.sh` bash script:** Running `bash convert_dataset.sh <file/to/tfds/datascript.py>` (*e.g.* `bash convert_dataset.sh ../tensorflow-datasets/tensorflow_datasets/text/movie_rationales.py`) will automatically run all the steps mentioned in **Add a dataset manually** below. Make sure that you run `convert_dataset.sh` from the root folder of `nlp`. The conversion script should work almost always for step 1): "convert dataset script from tfds to nlp format" and 2) "create checksum file" and step 3) "make style". It can also sometimes automatically run step 4) "create the correct dummy data from tfds", but this will only work if a) there is either no config name or only one config name and b) the `tfds testing/test_data/fake_example` is in the correct form. Nevertheless, the script should always be run in the beginning until an error occurs to be more efficient. If the conversion script does not work or fails at some step, then you can run the steps manually as follows: **Add a dataset manually** Make sure you run all of the following commands from the root of your `nlp` git clone. Also make sure that you changed to this branch: ``` git checkout add_datasets ``` 1) the tfds datascript file should be converted to `nlp` style: ``` python nlp-cli convert --tfds_path <path/to/tensorflow_datasets/text/your_dataset_name>.py --nlp_directory datasets/nlp ``` This will convert the tdfs script and create a folder with the correct name. 2) the checksum file should be added. Use the command: ``` python nlp-cli test datasets/nlp/<your-dataset-folder> --save_checksums --all_configs ``` A checksums.txt file should be created in your folder and the structure should look as follows: squad/ ├── squad.py/ └── urls_checksums/ ...........└── checksums.txt Delete the created `*.lock` file afterward - it should not be uploaded to AWS. 3) run black and isort on your newly added datascript files so that they look nice: ``` make style ``` 4) the dummy data should be added. For this it might be useful to take a look into the structure of other examples as shown in the PR here and at `<path/to/tensorflow_datasets/testing/test_data/test_data/fake_examples>` whether the same data can be used. 5) the data can be uploaded to AWS using the command ``` aws s3 cp datasets/nlp/<your-dataset-folder> s3://datasets.huggingface.co/nlp/<your-dataset-folder> --recursive ``` 6) check whether all works as expected using: ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_real_dataset_<your-dataset-name> ``` and ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_dataset_all_configs_<your-dataset-name> ``` 7) push to this PR and rerun the circle ci workflow to check whether circle ci stays green. 8) Edit this commend and tick off your newly added dataset :-) ## TODO-list Maybe we can add a TODO-list here for everybody that feels like adding new datasets so that we will not add the same datasets. Here a link to available datasets: https://docs.google.com/spreadsheets/d/1zOtEqOrnVQwdgkC4nJrTY6d-Av02u0XFzeKAtBM2fUI/edit#gid=0 Patrick: - [ ] boolq - *weird download link* - [ ] c4 - *beam dataset*
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/37/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/37/timeline
closed
false
37
null
2020-05-08T13:48:23Z
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
true
611,528,349
https://api.github.com/repos/huggingface/datasets/issues/36
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/36/events
[]
null
2020-05-11T08:16:02Z
[]
https://github.com/huggingface/datasets/pull/36
MEMBER
null
false
null
[]
Metrics - refactoring, adding support for download and distributed metrics
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/36/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyNjgwOTk1
{ "diff_url": "https://github.com/huggingface/datasets/pull/36.diff", "html_url": "https://github.com/huggingface/datasets/pull/36", "merged_at": "2020-05-11T08:16:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/36.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/36" }
2020-05-03T23:00:17Z
https://api.github.com/repos/huggingface/datasets/issues/36/comments
Refactoring metrics to have a similar loading API than the datasets and improving the import system. # Import system The import system has ben upgraded. There are now three types of imports allowed: 1. `library` imports (identified as "absolute imports") ```python import seqeval ``` => we'll test all the imports before running the scripts and if one cannot be imported we'll display an error message like this one: `ImportError: To be able to use this metric/dataset, you need to install the following dependencies ['seqeval'] using 'pip install seqeval' for instance'` 2. `internal` imports (identified as "relative imports") ```python import .c4_utils ``` => we'll assume this point to a file in the same directory/S3-directory as the main script and download this file. 2. `external` imports (identified as "relative imports" with a comment starting with `# From:`) ```python from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py ``` => we'll assume this point to the URL of a python script (if it's a link to a github file, we'll take the raw file automatically). => the script is downloaded and renamed to the import name (here above renamed from `bleu.py` to `nmt_bleu.py`). Renaming the file can be necessary if the distant file has the same name as the dataset/metric processing script. If you forgot to rename the distant script and it has the same name as the dataset/metric, you'll have an explicit error message asking to rename the import anyway. # Hosting metrics Metrics are hosted on a S3 bucket like the dataset processing scripts. # Metrics scripts Metrics scripts have a lot in common with datasets processing scripts. They also have a `metric.info` including citations, descriptions and links to relevant pages. Metrics have more documentation to supply to ensure they are used well. Four examples are already included for reference in [./metrics](./metrics): BLEU, ROUGE, SacreBLEU and SeqEVAL. # Automatic support for distributed/multi-processing metric computation We've also added support for automatic distributed/multi-processing metric computation (e.g. when using DistributedDataParallel). We leverage our own dataset format for smart caching in this case. Here is a quick gist of a standard use of metrics (the simplest usage): ```python import nlp bleu_metric = nlp.load_metric('bleu') # If you only have a single iteration, you can easily compute the score like this predictions = model(inputs) score = bleu_metric.compute(predictions, references) # If you have a loop, you can "add" your predictions and references at each iteration instead of having to save them yourself (the metric object store them efficiently for you) for batch in dataloader: model_input, targets = batch predictions = model(model_inputs) bleu.add(predictions, targets) score = bleu_metric.compute() # Compute the score from all the stored predictions/references ``` Here is a quick gist of a use in a distributed torch setup (should work for any python multi-process setup actually). It's pretty much identical to the second example above: ```python import nlp # You need to give the total number of parallel python processes (num_process) and the id of each process (process_id) bleu = nlp.load_metric('bleu', process_id=torch.distributed.get_rank(),b num_process=torch.distributed.get_world_size()) for batch in dataloader: model_input, targets = batch predictions = model(model_inputs) bleu.add(predictions, targets) score = bleu_metric.compute() # Compute the score on the first node by default (can be set to compute on each node as well) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
https://api.github.com/repos/huggingface/datasets/issues/36/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/36/timeline
closed
false
36
null
2020-05-11T08:16:00Z
null
true
611,413,731
https://api.github.com/repos/huggingface/datasets/issues/35
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/35/events
[]
null
2020-05-03T13:24:21Z
[]
https://github.com/huggingface/datasets/pull/35
CONTRIBUTOR
null
false
null
[]
[Tests] fix typo
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/35/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyNjAyMTc0
{ "diff_url": "https://github.com/huggingface/datasets/pull/35.diff", "html_url": "https://github.com/huggingface/datasets/pull/35", "merged_at": "2020-05-03T13:24:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/35.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/35" }
2020-05-03T13:23:49Z
https://api.github.com/repos/huggingface/datasets/issues/35/comments
@lhoestq - currently the slow test fail with: ``` _____________________________________________________________________________________ DatasetTest.test_load_real_dataset_xnli _____________________________________________________________________________________ self = <tests.test_dataset_common.DatasetTest testMethod=test_load_real_dataset_xnli>, dataset_name = 'xnli' @slow def test_load_real_dataset(self, dataset_name): with tempfile.TemporaryDirectory() as temp_data_dir: > dataset = load(dataset_name, data_dir=temp_data_dir) tests/test_dataset_common.py:153: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../python_bin/nlp/load.py:497: in load dbuilder.download_and_prepare(**download_and_prepare_kwargs) ../../python_bin/nlp/builder.py:383: in download_and_prepare self._download_and_prepare(dl_manager=dl_manager, download_config=download_config) ../../python_bin/nlp/builder.py:627: in _download_and_prepare dl_manager=dl_manager, max_examples_per_split=download_config.max_examples_per_split, ../../python_bin/nlp/builder.py:431: in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) ../../python_bin/nlp/datasets/xnli/8bf4185a2da1ef2a523186dd660d9adcf0946189e7fa5942ea31c63c07b68a7f/xnli.py:95: in _split_generators dl_dir = dl_manager.download_and_extract(_DATA_URL) ../../python_bin/nlp/utils/download_manager.py:246: in download_and_extract return self.extract(self.download(url_or_urls)) ../../python_bin/nlp/utils/download_manager.py:186: in download self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) ../../python_bin/nlp/utils/download_manager.py:166: in _record_sizes_checksums self._recorded_sizes_checksums[url] = get_size_checksum(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ path = ('', '/tmp/tmpkajlg9yc/downloads/c0f7773c480a3f2d85639d777e0e17e65527460310d80760fd3fc2b2f2960556.c952a63cb17d3d46e412ceb7dbcd656ce2b15cc9ef17f50c28f81c48a7c853b5') def get_size_checksum(path: str) -> Tuple[int, str]: """Compute the file size and the sha256 checksum of a file""" m = sha256() > with open(path, "rb") as f: E TypeError: expected str, bytes or os.PathLike object, not tuple ../../python_bin/nlp/utils/checksums_utils.py:81: TypeError ``` - the checksums probably need to be updated no? And we should also think about how to write a test for the checksums.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/35/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/35/timeline
closed
false
35
null
2020-05-03T13:24:20Z
null
true
611,385,516
https://api.github.com/repos/huggingface/datasets/issues/34
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/34/events
[]
null
2020-05-03T12:18:30Z
[]
https://github.com/huggingface/datasets/pull/34
CONTRIBUTOR
null
false
null
[]
[Tests] add slow tests
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/34/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyNTg0OTM0
{ "diff_url": "https://github.com/huggingface/datasets/pull/34.diff", "html_url": "https://github.com/huggingface/datasets/pull/34", "merged_at": "2020-05-03T12:18:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/34.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/34" }
2020-05-03T11:01:22Z
https://api.github.com/repos/huggingface/datasets/issues/34/comments
This PR adds a slow test that downloads the "real" dataset. The test is decorated as "slow" so that it will not automatically run on circle ci. Before uploading a dataset, one should test that this test passes, manually by running ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_real_dataset_<your-dataset-script-name> ``` This PR should be merged after PR: #33
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/34/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/34/timeline
closed
false
34
null
2020-05-03T12:18:29Z
null
true
611,052,081
https://api.github.com/repos/huggingface/datasets/issues/33
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/33/events
[]
null
2020-05-03T12:17:34Z
[]
https://github.com/huggingface/datasets/pull/33
MEMBER
null
false
null
[]
Big cleanup/refactoring for clean serialization
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/33/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyMzU1ODE0
{ "diff_url": "https://github.com/huggingface/datasets/pull/33.diff", "html_url": "https://github.com/huggingface/datasets/pull/33", "merged_at": "2020-05-03T12:17:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/33.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/33" }
2020-05-01T23:45:57Z
https://api.github.com/repos/huggingface/datasets/issues/33/comments
This PR cleans many base classes to re-build them as `dataclasses`. We can thus use a simple serialization workflow for `DatasetInfo`, including it's `Features` and `SplitDict` based on `dataclasses` `asdict()`. The resulting code is a lot shorter, can be easily serialized/deserialized, dataset info are human-readable and we can get rid of the `dataclass_json` dependency. The scripts have breaking changes and the conversion tool is updated. Example of dataset info in SQuAD script now: ```python def _info(self): return nlp.DatasetInfo( description=_DESCRIPTION, features=nlp.Features({ "id": nlp.Value('string'), "title": nlp.Value('string'), "context": nlp.Value('string'), "question": nlp.Value('string'), "answers": nlp.Sequence({ "text": nlp.Value('string'), "answer_start": nlp.Value('int32'), }), }), # No default supervised_keys (as we have to pass both question # and context as input). supervised_keys=None, homepage="https://rajpurkar.github.io/SQuAD-explorer/", citation=_CITATION, ) ``` Example of serialized dataset info: ```bash { "description": "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.\n", "citation": "@article{2016arXiv160605250R,\n author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},\n Konstantin and {Liang}, Percy},\n title = \"{SQuAD: 100,000+ Questions for Machine Comprehension of Text}\",\n journal = {arXiv e-prints},\n year = 2016,\n eid = {arXiv:1606.05250},\n pages = {arXiv:1606.05250},\narchivePrefix = {arXiv},\n eprint = {1606.05250},\n}\n", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": { "id": { "dtype": "string", "_type": "Value" }, "title": { "dtype": "string", "_type": "Value" }, "context": { "dtype": "string", "_type": "Value" }, "question": { "dtype": "string", "_type": "Value" }, "answers": { "feature": { "text": { "dtype": "string", "_type": "Value" }, "answer_start": { "dtype": "int32", "_type": "Value" } }, "length": -1, "_type": "Sequence" } }, "supervised_keys": null, "name": "squad", "version": { "version_str": "1.0.0", "description": "New split API (https://tensorflow.org/datasets/splits)", "nlp_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0 }, "splits": { "train": { "name": "train", "num_bytes": 79426386, "num_examples": 87599, "dataset_name": "squad" }, "validation": { "name": "validation", "num_bytes": 10491883, "num_examples": 10570, "dataset_name": "squad" } }, "size_in_bytes": 0, "download_size": 35142551, "download_checksums": [] } ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
https://api.github.com/repos/huggingface/datasets/issues/33/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/33/timeline
closed
false
33
null
2020-05-03T12:17:33Z
null
true
610,715,580
https://api.github.com/repos/huggingface/datasets/issues/32
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/32/events
[]
null
2020-05-03T12:15:58Z
[]
https://github.com/huggingface/datasets/pull/32
MEMBER
null
false
null
[]
Fix map caching notebooks
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/32/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyMTAzMzIx
{ "diff_url": "https://github.com/huggingface/datasets/pull/32.diff", "html_url": "https://github.com/huggingface/datasets/pull/32", "merged_at": "2020-05-03T12:15:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/32.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/32" }
2020-05-01T11:55:26Z
https://api.github.com/repos/huggingface/datasets/issues/32/comments
Previously, caching results with `.map()` didn't work in notebooks. To reuse a result, `.map()` serializes the functions with `dill.dumps` and then it hashes it. The problem is that when using `dill.dumps` to serialize a function, it also saves its origin (filename + line no.) and the origin of all the `globals` this function needs. However for notebooks and shells, the filename looks like \<ipython-input-13-9ed2afe61d25\> and the line no. changes often. To fix the problem, I added a new dispatch function for code objects that ignore the origin of the code if it comes from a notebook or a python shell. I tested these cases in a notebook: - lambda functions - named functions - methods - classmethods - staticmethods - classes that implement `__call__` The caching now works as expected for all of them :) I also tested the caching in the demo notebook and it works fine !
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/32/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/32/timeline
closed
false
32
null
2020-05-03T12:15:57Z
null
true
610,677,641
https://api.github.com/repos/huggingface/datasets/issues/31
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/31/events
[]
null
2020-05-01T22:06:16Z
[]
https://github.com/huggingface/datasets/pull/31
CONTRIBUTOR
null
false
null
[]
[Circle ci] Install a virtual env before running tests
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/31/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEyMDczNDE4
{ "diff_url": "https://github.com/huggingface/datasets/pull/31.diff", "html_url": "https://github.com/huggingface/datasets/pull/31", "merged_at": "2020-05-01T22:06:15Z", "patch_url": "https://github.com/huggingface/datasets/pull/31.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/31" }
2020-05-01T10:11:17Z
https://api.github.com/repos/huggingface/datasets/issues/31/comments
Install a virtual env before running tests to not running into sudo issues when dynamically downloading files. Same number of tests now pass / fail as on my local computer: ![Screenshot from 2020-05-01 12-14-44](https://user-images.githubusercontent.com/23423619/80798814-8a0a0a80-8ba5-11ea-8db8-599d33bbfccd.png)
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/31/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/31/timeline
closed
false
31
null
2020-05-01T22:06:15Z
null
true
610,549,072
https://api.github.com/repos/huggingface/datasets/issues/30
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/30/events
[]
null
2022-10-04T09:31:58Z
[]
https://github.com/huggingface/datasets/pull/30
CONTRIBUTOR
null
false
null
[]
add metrics which require download files from github
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/30/reactions" }
MDExOlB1bGxSZXF1ZXN0NDExOTY4Mzk3
{ "diff_url": "https://github.com/huggingface/datasets/pull/30.diff", "html_url": "https://github.com/huggingface/datasets/pull/30", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/30.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/30" }
2020-05-01T04:13:22Z
https://api.github.com/repos/huggingface/datasets/issues/30/comments
To download files from github, I copied the `load_dataset_module` and its dependencies (without the builder) in `load.py` to `metrics/metric_utils.py`. I made the following changes: - copy the needed files in a folder`metric_name` - delete all other files that are not needed For metrics that require an external import, I first create a `<metric_name>_imports.py` file which contains all external urls. Then I create a `<metric_name>.py` in which I will load the external files using `<metric_name>_imports.py`
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
https://api.github.com/repos/huggingface/datasets/issues/30/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/30/timeline
closed
false
30
null
2020-05-11T08:19:54Z
null
true
610,243,997
https://api.github.com/repos/huggingface/datasets/issues/29
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/29/events
[]
null
2020-04-30T19:51:45Z
[]
https://github.com/huggingface/datasets/pull/29
MEMBER
null
false
null
[]
Hf_api small changes
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/29/reactions" }
MDExOlB1bGxSZXF1ZXN0NDExNzIwODMx
{ "diff_url": "https://github.com/huggingface/datasets/pull/29.diff", "html_url": "https://github.com/huggingface/datasets/pull/29", "merged_at": "2020-04-30T19:51:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/29.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/29" }
2020-04-30T17:06:43Z
https://api.github.com/repos/huggingface/datasets/issues/29/comments
From Patrick: ```python from nlp import hf_api api = hf_api.HfApi() api.dataset_list() ``` works :-)
{ "avatar_url": "https://avatars.githubusercontent.com/u/326577?v=4", "events_url": "https://api.github.com/users/julien-c/events{/privacy}", "followers_url": "https://api.github.com/users/julien-c/followers", "following_url": "https://api.github.com/users/julien-c/following{/other_user}", "gists_url": "https://api.github.com/users/julien-c/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/julien-c", "id": 326577, "login": "julien-c", "node_id": "MDQ6VXNlcjMyNjU3Nw==", "organizations_url": "https://api.github.com/users/julien-c/orgs", "received_events_url": "https://api.github.com/users/julien-c/received_events", "repos_url": "https://api.github.com/users/julien-c/repos", "site_admin": false, "starred_url": "https://api.github.com/users/julien-c/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/julien-c/subscriptions", "type": "User", "url": "https://api.github.com/users/julien-c" }
https://api.github.com/repos/huggingface/datasets/issues/29/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/29/timeline
closed
false
29
null
2020-04-30T19:51:44Z
null
true
610,241,907
https://api.github.com/repos/huggingface/datasets/issues/28
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/28/events
[]
null
2020-04-30T19:51:09Z
[]
https://github.com/huggingface/datasets/pull/28
CONTRIBUTOR
null
false
null
[]
[Circle ci] Adds circle ci config
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/28/reactions" }
MDExOlB1bGxSZXF1ZXN0NDExNzE5MTQy
{ "diff_url": "https://github.com/huggingface/datasets/pull/28.diff", "html_url": "https://github.com/huggingface/datasets/pull/28", "merged_at": "2020-04-30T19:51:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/28.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/28" }
2020-04-30T17:03:35Z
https://api.github.com/repos/huggingface/datasets/issues/28/comments
@thomwolf can you take a look and set up circle ci on: https://app.circleci.com/projects/project-dashboard/github/huggingface I think for `nlp` only admins can set it up, which I guess is you :-)
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/28/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/28/timeline
closed
false
28
null
2020-04-30T19:51:08Z
null
true
610,230,476
https://api.github.com/repos/huggingface/datasets/issues/27
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/27/events
[]
null
2020-04-30T17:39:25Z
[]
https://github.com/huggingface/datasets/pull/27
CONTRIBUTOR
null
false
null
[]
[Cleanup] Removes all files in testing except test_dataset_common
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/27/reactions" }
MDExOlB1bGxSZXF1ZXN0NDExNzA5OTc0
{ "diff_url": "https://github.com/huggingface/datasets/pull/27.diff", "html_url": "https://github.com/huggingface/datasets/pull/27", "merged_at": "2020-04-30T17:39:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/27.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/27" }
2020-04-30T16:45:21Z
https://api.github.com/repos/huggingface/datasets/issues/27/comments
As far as I know, all files in `tests` were old `tfds test files` so I removed them. We can still look them up on the other library.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/27/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/27/timeline
closed
false
27
null
2020-04-30T17:39:23Z
null
true
610,226,047
https://api.github.com/repos/huggingface/datasets/issues/26
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/26/events
[]
null
2020-04-30T20:12:04Z
[]
https://github.com/huggingface/datasets/pull/26
CONTRIBUTOR
null
false
null
[]
[Tests] Clean tests
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/26/reactions" }
MDExOlB1bGxSZXF1ZXN0NDExNzA2NjA2
{ "diff_url": "https://github.com/huggingface/datasets/pull/26.diff", "html_url": "https://github.com/huggingface/datasets/pull/26", "merged_at": "2020-04-30T20:12:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/26.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/26" }
2020-04-30T16:38:29Z
https://api.github.com/repos/huggingface/datasets/issues/26/comments
the abseil testing library (https://abseil.io/docs/python/quickstart.html) is better than the one I had before, so I decided to switch to that and changed the `setup.py` config file. Abseil has more support and a cleaner API for parametrized testing I think. I added a list of all dataset scripts that are currently on AWS, but will replace that once the API is integrated into this lib. One can now easily test for just a single function for a single dataset with: `tests/test_dataset_common.py::DatasetTest::test_load_dataset_wikipedia` NOTE: This PR is rebased on PR #29 so should be merged after.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/26/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/26/timeline
closed
false
26
null
2020-04-30T20:12:03Z
null
true
609,708,863
https://api.github.com/repos/huggingface/datasets/issues/25
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/25/events
[]
null
2022-10-04T09:32:13Z
[]
https://github.com/huggingface/datasets/pull/25
CONTRIBUTOR
null
false
null
[]
Add script csv datasets
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/25/reactions" }
MDExOlB1bGxSZXF1ZXN0NDExMjQ4Nzg2
{ "diff_url": "https://github.com/huggingface/datasets/pull/25.diff", "html_url": "https://github.com/huggingface/datasets/pull/25", "merged_at": "2020-05-07T21:14:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/25.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/25" }
2020-04-30T08:28:08Z
https://api.github.com/repos/huggingface/datasets/issues/25/comments
This is a PR allowing to create datasets from local CSV files. A usage might be: ```python import nlp ds = nlp.load( path="csv", name="bbc", dataset_files={ nlp.Split.TRAIN: ["datasets/dummy_data/csv/train.csv"], nlp.Split.TEST: [""datasets/dummy_data/csv/test.csv""] }, csv_kwargs={ "skip_rows": 0, "delimiter": ",", "quote_char": "\"", "header_as_column_names": True } ) ``` ``` Downloading and preparing dataset bbc/1.0.0 (download: Unknown size, generated: Unknown size, total: Unknown size) to /home/jplu/.cache/huggingface/datasets/bbc/1.0.0... Dataset bbc downloaded and prepared to /home/jplu/.cache/huggingface/datasets/bbc/1.0.0. Subsequent calls will reuse this data. {'test': Dataset(schema: {'category': 'string', 'text': 'string'}, num_rows: 49), 'train': Dataset(schema: {'category': 'string', 'text': 'string'}, num_rows: 99), 'validation': Dataset(schema: {'category': 'string', 'text': 'string'}, num_rows: 0)} ``` How it is read: - `path`: the `csv` word means "I want to create a CSV dataset" - `name`: the name of this dataset is `bbc` - `dataset_files`: this is a dictionary where each key is the list of files corresponding to the key split. - `csv_kwargs`: this is the keywords arguments to "explain" how to read the CSV files * `skip_rows`: number of rows have to be skipped, starting from the beginning of the file * `delimiter`: which delimiter is used to separate the columns * `quote_char`: which quote char is used to represent a column where the delimiter appears in one of them * `header_as_column_names`: will use the first row (header) of the file as name for the features. Otherwise the names will be automatically generated as `f1`, `f2`, etc... Will be applied after the `skip_rows` parameter. **TODO**: for now the `csv.py` is copied each time we create a new dataset as `ds_name.py`, this behavior will be modified to have only the `csv.py` script copied only once and not for all the CSV datasets.
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/25/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/25/timeline
closed
false
25
null
2020-05-07T21:14:49Z
null
true
609,064,987
https://api.github.com/repos/huggingface/datasets/issues/24
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/24/events
[]
null
2020-04-30T19:52:50Z
[]
https://github.com/huggingface/datasets/pull/24
MEMBER
null
false
null
[]
Add checksums
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/24/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEwNzE5MTU0
{ "diff_url": "https://github.com/huggingface/datasets/pull/24.diff", "html_url": "https://github.com/huggingface/datasets/pull/24", "merged_at": "2020-04-30T19:52:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/24.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/24" }
2020-04-29T13:37:29Z
https://api.github.com/repos/huggingface/datasets/issues/24/comments
### Checksums files They are stored next to the dataset script in urls_checksums/checksums.txt. They are used to check the integrity of the datasets downloaded files. I kept the same format as tensorflow-datasets. There is one checksums file for all configs. ### Load a dataset When you do `load("squad")`, it will also download the checksums file and put it next to the script in nlp/datasets/hash/urls_checksums/checksums.txt. It also verifies that the downloaded files checksums match the expected ones. You can ignore checksum tests with `load("squad", ignore_checksums=True)` (under the hood it just adds `ignore_checksums=True` in the `DownloadConfig`) ### Test a dataset There is a new command `nlp-cli test squad` that runs `download_and_prepare` to see if it runs ok, and that verifies that all the checksums match. Allowed arguments are `--name`, `--all_configs`, `--ignore_checksums` and `--register_checksums`. ### Register checksums 1. If the dataset has external dataset files The command `nlp-cli test squad --register_checksums --all_configs` runs `download_and_prepare` on all configs to see if it runs ok, and it creates the checksums file. You can also register one config at a time using `--name` instead ; the checksums file will be completed and not overwritten. If the script is a local script, the checksum file is moved to urls_checksums/checksums.txt next to the local script, to enable the user to upload both the script and the checksums file afterwards with `nlp-cli upload squad`. 2. If the dataset files are all inside the directory of the dataset script The user can directly do `nlp-cli upload squad --register_checksums`, as there is no need to download anything. In this case however, all the dataset must be uploaded at once. -- PS : it doesn't allow to register checksums for canonical datasets, the file has to be added manually on S3 for now (I guess ?) Also I feel like we must be sure that this processes would not constrain too much any user from uploading its dataset. Let me know what you think :)
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/24/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/24/timeline
closed
false
24
null
2020-04-30T19:52:49Z
null
true
608,508,706
https://api.github.com/repos/huggingface/datasets/issues/23
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/23/events
[]
null
2022-10-04T09:31:56Z
[]
https://github.com/huggingface/datasets/pull/23
CONTRIBUTOR
null
false
null
[]
Add metrics
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/23/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEwMjczOTU2
{ "diff_url": "https://github.com/huggingface/datasets/pull/23.diff", "html_url": "https://github.com/huggingface/datasets/pull/23", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/23.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/23" }
2020-04-28T18:02:05Z
https://api.github.com/repos/huggingface/datasets/issues/23/comments
This PR is a draft for adding metrics (sacrebleu and seqeval are added) use case examples: `import nlp` **sacrebleu:** ``` refs = [['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'], ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.']] sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.'] sacrebleu = nlp.load_metrics('sacrebleu') print(sacrebleu.score) ``` **seqeval:** ``` y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] seqeval = nlp.load_metrics('seqeval') print(seqeval.accuracy_score(y_true, y_pred) print(seqeval.f1_score(y_true, y_pred) ``` _examples are taken from the corresponding web page_ your comments and suggestions are more than welcomed
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
https://api.github.com/repos/huggingface/datasets/issues/23/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/23/timeline
closed
false
23
null
2020-05-11T08:19:38Z
null
true
608,298,586
https://api.github.com/repos/huggingface/datasets/issues/22
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/22/events
[]
null
2020-04-28T17:48:20Z
[]
https://github.com/huggingface/datasets/pull/22
CONTRIBUTOR
null
false
null
[]
adding bleu score code
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/22/reactions" }
MDExOlB1bGxSZXF1ZXN0NDEwMTAyMjU3
{ "diff_url": "https://github.com/huggingface/datasets/pull/22.diff", "html_url": "https://github.com/huggingface/datasets/pull/22", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/22.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/22" }
2020-04-28T13:00:50Z
https://api.github.com/repos/huggingface/datasets/issues/22/comments
this PR add the BLEU score metric to the lib. It can be tested by running the following code. ` from nlp.metrics import bleu hyp1 = "It is a guide to action which ensures that the military always obeys the commands of the party" ref1a = "It is a guide to action that ensures that the military forces always being under the commands of the party " ref1b = "It is the guiding principle which guarantees the military force always being under the command of the Party" ref1c = "It is the practical guide for the army always to heed the directions of the party" list_of_references = [[ref1a, ref1b, ref1c]] hypotheses = [hyp1] bleu = bleu.bleu_score(list_of_references, hypotheses,4, smooth=True) print(bleu) `
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
https://api.github.com/repos/huggingface/datasets/issues/22/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/22/timeline
closed
false
22
null
2020-04-28T17:48:08Z
null
true
607,914,185
https://api.github.com/repos/huggingface/datasets/issues/21
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/21/events
[]
null
2020-05-01T09:29:47Z
[]
https://github.com/huggingface/datasets/pull/21
MEMBER
null
false
null
[]
Cleanup Features - Updating convert command - Fix Download manager
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/21/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA5Nzk2MTM4
{ "diff_url": "https://github.com/huggingface/datasets/pull/21.diff", "html_url": "https://github.com/huggingface/datasets/pull/21", "merged_at": "2020-05-01T09:29:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/21.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/21" }
2020-04-27T23:16:55Z
https://api.github.com/repos/huggingface/datasets/issues/21/comments
This PR makes a number of changes: # Updating `Features` Features are a complex mechanism provided in `tfds` to be able to modify a dataset on-the-fly when serializing to disk and when loading from disk. We don't really need this because (1) it hides too much from the user and (2) our datatype can be directly mapped to Arrow tables on drive so we usually don't need to change the format before/after serialization. This PR extracts and refactors these features in a single `features.py` files. It still keep a number of features classes for easy compatibility with tfds, namely the `Sequence`, `Tensor`, `ClassLabel` and `Translation` features. Some more complex features involving a pre-processing on-the-fly during serialization are kept: - `ClassLabel` which are able to convert from label strings to integers, - `Translation`which does some check on the languages. # Updating the `convert` command We do a few updates here - following the simplification of the `features` (cf above), conversion are updated - we also makes it simpler to convert a single file - some code need to be fixed manually after conversion (e.g. to remove some encoding processing in former tfds `Text` features. We highlight this code with a "git merge conflict" style syntax for easy manual fixing. # Fix download manager iterator You kept me up quite late on Tuesday night with this `os.scandir` change @lhoestq ;-)
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
https://api.github.com/repos/huggingface/datasets/issues/21/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/21/timeline
closed
false
21
null
2020-05-01T09:29:46Z
null
true
607,313,557
https://api.github.com/repos/huggingface/datasets/issues/20
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/20/events
[]
null
2020-04-27T16:04:17Z
[]
https://github.com/huggingface/datasets/pull/20
MEMBER
null
false
null
[]
remove boto3 and promise dependencies
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/20/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA5MzEyMDI1
{ "diff_url": "https://github.com/huggingface/datasets/pull/20.diff", "html_url": "https://github.com/huggingface/datasets/pull/20", "merged_at": "2020-04-27T14:15:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/20.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/20" }
2020-04-27T07:39:45Z
https://api.github.com/repos/huggingface/datasets/issues/20/comments
With the new download manager, we don't need `promise` anymore. I also removed `boto3` as in [this pr](https://github.com/huggingface/transformers/pull/3968)
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/20/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/20/timeline
closed
false
20
null
2020-04-27T14:15:45Z
null
true
606,400,645
https://api.github.com/repos/huggingface/datasets/issues/19
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/19/events
[]
null
2020-04-29T09:27:08Z
[]
https://github.com/huggingface/datasets/pull/19
CONTRIBUTOR
null
false
null
[]
Replace tf.constant for TF
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/19/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA4NjIwMjUw
{ "diff_url": "https://github.com/huggingface/datasets/pull/19.diff", "html_url": "https://github.com/huggingface/datasets/pull/19", "merged_at": "2020-04-25T21:18:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/19.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/19" }
2020-04-24T15:32:06Z
https://api.github.com/repos/huggingface/datasets/issues/19/comments
Replace simple tf.constant type of Tensor to tf.ragged.constant which allows to have examples of different size in a tf.data.Dataset. Now the training works with TF. Here the same example than for the PT in collab: ```python import tensorflow as tf import nlp from transformers import BertTokenizerFast, TFBertForQuestionAnswering # Load our training dataset and tokenizer train_dataset = nlp.load('squad', split="train[:1%]") tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') def get_correct_alignement(context, answer): start_idx = answer['answer_start'][0] text = answer['text'][0] end_idx = start_idx + len(text) if context[start_idx:end_idx] == text: return start_idx, end_idx # When the gold label position is good elif context[start_idx-1:end_idx-1] == text: return start_idx-1, end_idx-1 # When the gold label is off by one character elif context[start_idx-2:end_idx-2] == text: return start_idx-2, end_idx-2 # When the gold label is off by two character else: raise ValueError() # Tokenize our training dataset def convert_to_features(example_batch): # Tokenize contexts and questions (as pairs of inputs) input_pairs = list(zip(example_batch['context'], example_batch['question'])) encodings = tokenizer.batch_encode_plus(input_pairs, pad_to_max_length=True) # Compute start and end tokens for labels using Transformers's fast tokenizers alignement methods. start_positions, end_positions = [], [] for i, (context, answer) in enumerate(zip(example_batch['context'], example_batch['answers'])): start_idx, end_idx = get_correct_alignement(context, answer) start_positions.append([encodings.char_to_token(i, start_idx)]) end_positions.append([encodings.char_to_token(i, end_idx-1)]) if start_positions and end_positions: encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings train_dataset = train_dataset.map(convert_to_features, batched=True) columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] train_dataset.set_format(type='tensorflow', columns=columns) features = {x: train_dataset[x] for x in columns[:3]} labels = {"output_1": train_dataset["start_positions"]} labels["output_2"] = train_dataset["end_positions"] tfdataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(8) model = TFBertForQuestionAnswering.from_pretrained("bert-base-cased") loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True) opt = tf.keras.optimizers.Adam(learning_rate=3e-5) model.compile(optimizer=opt, loss={'output_1': loss_fn, 'output_2': loss_fn}, loss_weights={'output_1': 1., 'output_2': 1.}, metrics=['accuracy']) model.fit(tfdataset, epochs=1, steps_per_epoch=3) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/19/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/19/timeline
closed
false
19
null
2020-04-25T21:18:45Z
null
true
606,109,196
https://api.github.com/repos/huggingface/datasets/issues/18
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/18/events
[]
null
2020-04-29T15:27:28Z
[]
https://github.com/huggingface/datasets/pull/18
MEMBER
null
false
null
[]
Updating caching mechanism - Allow dependency in dataset processing scripts - Fix style and quality in the repo
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/18/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA4Mzg0MTc3
{ "diff_url": "https://github.com/huggingface/datasets/pull/18.diff", "html_url": "https://github.com/huggingface/datasets/pull/18", "merged_at": "2020-04-28T16:06:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/18.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/18" }
2020-04-24T07:39:48Z
https://api.github.com/repos/huggingface/datasets/issues/18/comments
This PR has a lot of content (might be hard to review, sorry, in particular because I fixed the style in the repo at the same time). # Style & quality: You can now install the style and quality tools with `pip install -e .[quality]`. This will install black, the compatible version of sort and flake8. You can then clean the style and check the quality before merging your PR with: ```bash make style make quality ``` # Allow dependencies in dataset processing scripts We can now allow (some level) of imports in dataset processing scripts (in addition to PyPi imports). Namely, you can do the two following things: Import from a relative path to a file in the same folder as the dataset processing script: ```python import .c4_utils ``` Or import from a relative path to a file in a folder/archive/github repo to which you provide an URL after the import state with `# From: [URL]`: ```python import .clicr.dataset_code.build_json_dataset # From: https://github.com/clips/clicr ``` In both these cases, after downloading the main dataset processing script, we will identify the location of these dependencies, download them and copy them in the dataset processing script folder. Note that only direct import in the dataset processing script will be handled. We don't recursively explore the additional import to download further files. Also, when we download from an additional directory (in the second case above), we recursively add `__init__.py` to all the sub-folder so you can import from them. This part is still tested for now. If you've seen datasets which required external utilities, tell me and I can test it. # Update the cache to have a better local structure The local structure in the `src/datasets` folder is now: `src/datasets/DATASET_NAME/DATASET_HASH/*` The hash is computed from the full code of the dataset processing script as well as all the local and downloaded dependencies as mentioned above. This way if you change some code in a utility related to your dataset, a new hash should be computed.
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
https://api.github.com/repos/huggingface/datasets/issues/18/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/18/timeline
closed
false
18
null
2020-04-28T16:06:28Z
null
true
605,753,027
https://api.github.com/repos/huggingface/datasets/issues/17
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/17/events
[]
null
2020-04-27T18:07:50Z
[]
https://github.com/huggingface/datasets/pull/17
CONTRIBUTOR
null
false
null
[]
Add Pandas as format type
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/17/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA4MDk3NjM0
{ "diff_url": "https://github.com/huggingface/datasets/pull/17.diff", "html_url": "https://github.com/huggingface/datasets/pull/17", "merged_at": "2020-04-27T18:07:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/17.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/17" }
2020-04-23T18:20:14Z
https://api.github.com/repos/huggingface/datasets/issues/17/comments
As detailed in the title ^^
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/17/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/17/timeline
closed
false
17
null
2020-04-27T18:07:48Z
null
true
605,661,462
https://api.github.com/repos/huggingface/datasets/issues/16
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/16/events
[]
null
2021-05-05T18:25:24Z
[]
https://github.com/huggingface/datasets/pull/16
MEMBER
null
false
null
[]
create our own DownloadManager
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/16/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA4MDIyMTUz
{ "diff_url": "https://github.com/huggingface/datasets/pull/16.diff", "html_url": "https://github.com/huggingface/datasets/pull/16", "merged_at": "2020-04-25T21:25:10Z", "patch_url": "https://github.com/huggingface/datasets/pull/16.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/16" }
2020-04-23T16:08:07Z
https://api.github.com/repos/huggingface/datasets/issues/16/comments
I tried to create our own - and way simpler - download manager, by replacing all the complicated stuff with our own `cached_path` solution. With this implementation, I tried `dataset = nlp.load('squad')` and it seems to work fine. For the implementation, what I did exactly: - I copied the old download manager - I removed all the dependences to the old `download` files - I replaced all the download + extract calls by calls to `cached_path` - I removed unused parameters (extract_dir, compute_stats) (maybe compute_stats could be re-added later if we want to compute stats...) - I left some functions unimplemented for now. We will probably have to implement them because they are used by some datasets scripts (download_kaggle_data, iter_archive) or because we may need them at some point (download_checksums, _record_sizes_checksums) Let me know if you think that this is going the right direction or if you have remarks. Note: I didn't write any test yet as I wanted to read your remarks first
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
https://api.github.com/repos/huggingface/datasets/issues/16/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/16/timeline
closed
false
16
null
2020-04-25T21:25:10Z
null
true
604,906,708
https://api.github.com/repos/huggingface/datasets/issues/15
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/15/events
[]
null
2022-10-04T09:31:54Z
[]
https://github.com/huggingface/datasets/pull/15
CONTRIBUTOR
null
false
null
[]
[Tests] General Test Design for all dataset scripts
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/15/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA3NDEwOTk3
{ "diff_url": "https://github.com/huggingface/datasets/pull/15.diff", "html_url": "https://github.com/huggingface/datasets/pull/15", "merged_at": "2020-04-27T14:48:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/15.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/15" }
2020-04-22T16:46:01Z
https://api.github.com/repos/huggingface/datasets/issues/15/comments
The general idea is similar to how testing is done in `transformers`. There is one general `test_dataset_common.py` file which has a `DatasetTesterMixin` class. This class implements all of the logic that can be used in a generic way for all dataset classes. The idea is to keep each individual dataset test file as minimal as possible. In order to test whether the specific data set class can download the data and generate the examples **without** downloading the actual data all the time, a MockDataLoaderManager class is used which receives a `mock_folder_structure_fn` function from each individual dataset test file that create "fake" data and which returns the same folder structure that would have been created when using the real data downloader.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/15/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/15/timeline
closed
false
15
null
2020-04-27T14:48:02Z
null
true
604,761,315
https://api.github.com/repos/huggingface/datasets/issues/14
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/14/events
[]
null
2022-10-04T09:31:50Z
[]
https://github.com/huggingface/datasets/pull/14
CONTRIBUTOR
null
false
null
[]
[Download] Only create dir if not already exist
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/14/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA3MjkzNjU5
{ "diff_url": "https://github.com/huggingface/datasets/pull/14.diff", "html_url": "https://github.com/huggingface/datasets/pull/14", "merged_at": "2020-04-23T08:27:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/14.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/14" }
2020-04-22T13:32:51Z
https://api.github.com/repos/huggingface/datasets/issues/14/comments
This was quite annoying to find out :D. Some datasets have save in the same directory. So we should only create a new directory if it doesn't already exist.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/14/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/14/timeline
closed
false
14
null
2020-04-23T08:27:33Z
null
true
604,547,951
https://api.github.com/repos/huggingface/datasets/issues/13
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/13/events
[]
null
2022-10-04T09:31:51Z
[]
https://github.com/huggingface/datasets/pull/13
CONTRIBUTOR
null
false
null
[]
[Make style]
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/13/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA3MTIxMjkw
{ "diff_url": "https://github.com/huggingface/datasets/pull/13.diff", "html_url": "https://github.com/huggingface/datasets/pull/13", "merged_at": "2020-04-23T13:02:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/13.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/13" }
2020-04-22T08:10:06Z
https://api.github.com/repos/huggingface/datasets/issues/13/comments
Added Makefile and applied make style to all. make style runs the following code: ``` style: black --line-length 119 --target-version py35 src isort --recursive src ``` It's the same code that is run in `transformers`.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/13/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/13/timeline
closed
false
13
null
2020-04-23T13:02:22Z
null
true
604,518,583
https://api.github.com/repos/huggingface/datasets/issues/12
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/12/events
[]
null
2022-10-04T09:31:53Z
[]
https://github.com/huggingface/datasets/pull/12
CONTRIBUTOR
null
false
null
[]
[Map Function] add assert statement if map function does not return dict or None
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/12/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA3MDk3MzA4
{ "diff_url": "https://github.com/huggingface/datasets/pull/12.diff", "html_url": "https://github.com/huggingface/datasets/pull/12", "merged_at": "2020-04-24T06:29:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/12.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/12" }
2020-04-22T07:21:24Z
https://api.github.com/repos/huggingface/datasets/issues/12/comments
IMO, if a function is provided that is not a print statement (-> returns variable of type `None`) or a function that updates the datasets (-> returns variable of type `dict`), then a `TypeError` should be raised. Not sure whether you had cases in mind where the user should do something else @thomwolf , but I think a lot of silent errors can be avoided with this assert statement.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/12/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/12/timeline
closed
false
12
null
2020-04-24T06:29:03Z
null
true
603,921,624
https://api.github.com/repos/huggingface/datasets/issues/11
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/11/events
[]
null
2022-10-04T09:31:46Z
[]
https://github.com/huggingface/datasets/pull/11
CONTRIBUTOR
null
false
null
[]
[Convert TFDS to HFDS] Extend script to also allow just converting a single file
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/11/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA2NjExODk2
{ "diff_url": "https://github.com/huggingface/datasets/pull/11.diff", "html_url": "https://github.com/huggingface/datasets/pull/11", "merged_at": "2020-04-21T20:47:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/11.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/11" }
2020-04-21T11:25:33Z
https://api.github.com/repos/huggingface/datasets/issues/11/comments
Adds another argument to be able to convert only a single file
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/11/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/11/timeline
closed
false
11
null
2020-04-21T20:47:00Z
null
true
603,909,327
https://api.github.com/repos/huggingface/datasets/issues/10
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/10/events
[]
null
2022-10-04T09:31:44Z
[]
https://github.com/huggingface/datasets/pull/10
CONTRIBUTOR
null
false
null
[]
Name json file "squad.json" instead of "squad.py.json"
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/10/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA2NjAxNzQ2
{ "diff_url": "https://github.com/huggingface/datasets/pull/10.diff", "html_url": "https://github.com/huggingface/datasets/pull/10", "merged_at": "2020-04-21T20:48:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/10.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/10" }
2020-04-21T11:04:28Z
https://api.github.com/repos/huggingface/datasets/issues/10/comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/10/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/10/timeline
closed
false
10
null
2020-04-21T20:48:06Z
null
true
603,894,874
https://api.github.com/repos/huggingface/datasets/issues/9
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/9/events
[]
null
2022-10-04T09:31:42Z
[]
https://github.com/huggingface/datasets/pull/9
CONTRIBUTOR
null
false
null
[]
[Clean up] Datasets
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/9/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA2NTkwMDQw
{ "diff_url": "https://github.com/huggingface/datasets/pull/9.diff", "html_url": "https://github.com/huggingface/datasets/pull/9", "merged_at": "2020-04-21T20:49:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/9.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/9" }
2020-04-21T10:39:56Z
https://api.github.com/repos/huggingface/datasets/issues/9/comments
Clean up `nlp/datasets` folder. As I understood, eventually the `nlp/datasets` shall not exist anymore at all. The folder `nlp/datasets/nlp` is kept for the moment, but won't be needed in the future, since it will live on S3 (actually it already does) at: `https://s3.console.aws.amazon.com/s3/buckets/datasets.huggingface.co/nlp/?region=us-east-1` and the different `dataset downloader scripts will be added to `nlp/src/nlp` when downloaded by the user. The folder `nlp/datasets/checksums` is kept for now, but won't be needed anymore in the future. The remaining folders/ files are leftovers from tensorflow-datasets and are not needed. The can be looked up in the private tensorflow-dataset repo.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
https://api.github.com/repos/huggingface/datasets/issues/9/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/9/timeline
closed
false
9
null
2020-04-21T20:49:58Z
null
true
601,783,243
https://api.github.com/repos/huggingface/datasets/issues/8
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/8/events
[]
null
2020-04-29T09:27:11Z
[]
https://github.com/huggingface/datasets/pull/8
CONTRIBUTOR
null
false
null
[]
Fix issue 6: error when the citation is missing in the DatasetInfo
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/8/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA0OTg0NDUz
{ "diff_url": "https://github.com/huggingface/datasets/pull/8.diff", "html_url": "https://github.com/huggingface/datasets/pull/8", "merged_at": "2020-04-20T13:24:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/8.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/8" }
2020-04-17T08:04:26Z
https://api.github.com/repos/huggingface/datasets/issues/8/comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/8/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/8/timeline
closed
false
8
null
2020-04-20T13:24:12Z
null
true
601,780,534
https://api.github.com/repos/huggingface/datasets/issues/7
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7/events
[]
null
2020-04-29T09:27:13Z
[]
https://github.com/huggingface/datasets/pull/7
CONTRIBUTOR
null
false
null
[]
Fix issue 5: allow empty datasets
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7/reactions" }
MDExOlB1bGxSZXF1ZXN0NDA0OTgyMzA2
{ "diff_url": "https://github.com/huggingface/datasets/pull/7.diff", "html_url": "https://github.com/huggingface/datasets/pull/7", "merged_at": "2020-04-20T13:23:47Z", "patch_url": "https://github.com/huggingface/datasets/pull/7.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7" }
2020-04-17T07:59:56Z
https://api.github.com/repos/huggingface/datasets/issues/7/comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/7/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7/timeline
closed
false
7
null
2020-04-20T13:23:48Z
null
true
600,330,836
https://api.github.com/repos/huggingface/datasets/issues/6
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6/events
[]
null
2020-04-29T09:23:22Z
[]
https://github.com/huggingface/datasets/issues/6
CONTRIBUTOR
completed
null
null
[]
Error when citation is not given in the DatasetInfo
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6/reactions" }
MDU6SXNzdWU2MDAzMzA4MzY=
null
2020-04-15T14:14:54Z
https://api.github.com/repos/huggingface/datasets/issues/6/comments
The following error is raised when the `citation` parameter is missing when we instantiate a `DatasetInfo`: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jplu/dev/jplu/datasets/src/nlp/info.py", line 338, in __repr__ citation_pprint = _indent('"""{}"""'.format(self.citation.strip())) AttributeError: 'NoneType' object has no attribute 'strip' ``` I propose to do the following change in the `info.py` file. The method: ```python def __repr__(self): splits_pprint = _indent("\n".join(["{"] + [ " '{}': {},".format(k, split.num_examples) for k, split in sorted(self.splits.items()) ] + ["}"])) features_pprint = _indent(repr(self.features)) citation_pprint = _indent('"""{}"""'.format(self.citation.strip())) return INFO_STR.format( name=self.name, version=self.version, description=self.description, total_num_examples=self.splits.total_num_examples, features=features_pprint, splits=splits_pprint, citation=citation_pprint, homepage=self.homepage, supervised_keys=self.supervised_keys, # Proto add a \n that we strip. license=str(self.license).strip()) ``` Becomes: ```python def __repr__(self): splits_pprint = _indent("\n".join(["{"] + [ " '{}': {},".format(k, split.num_examples) for k, split in sorted(self.splits.items()) ] + ["}"])) features_pprint = _indent(repr(self.features)) ## the strip is done only is the citation is given citation_pprint = self.citation if self.citation: citation_pprint = _indent('"""{}"""'.format(self.citation.strip())) return INFO_STR.format( name=self.name, version=self.version, description=self.description, total_num_examples=self.splits.total_num_examples, features=features_pprint, splits=splits_pprint, citation=citation_pprint, homepage=self.homepage, supervised_keys=self.supervised_keys, # Proto add a \n that we strip. license=str(self.license).strip()) ``` And now it is ok. @thomwolf are you ok with this fix?
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/6/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6/timeline
closed
false
6
null
2020-04-29T09:23:22Z
null
false
600,295,889
https://api.github.com/repos/huggingface/datasets/issues/5
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5/events
[]
null
2020-04-29T09:23:05Z
[]
https://github.com/huggingface/datasets/issues/5
CONTRIBUTOR
completed
null
null
[]
ValueError when a split is empty
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5/reactions" }
MDU6SXNzdWU2MDAyOTU4ODk=
null
2020-04-15T13:25:13Z
https://api.github.com/repos/huggingface/datasets/issues/5/comments
When a split is empty either TEST, VALIDATION or TRAIN I get the following error: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jplu/dev/jplu/datasets/src/nlp/load.py", line 295, in load ds = dbuilder.as_dataset(**as_dataset_kwargs) File "/home/jplu/dev/jplu/datasets/src/nlp/builder.py", line 587, in as_dataset datasets = utils.map_nested(build_single_dataset, split, map_tuple=True) File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 158, in map_nested for k, v in data_struct.items() File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 158, in <dictcomp> for k, v in data_struct.items() File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 172, in map_nested return function(data_struct) File "/home/jplu/dev/jplu/datasets/src/nlp/builder.py", line 601, in _build_single_dataset split=split, File "/home/jplu/dev/jplu/datasets/src/nlp/builder.py", line 625, in _as_dataset split_infos=self.info.splits.values(), File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 200, in read return py_utils.map_nested(_read_instruction_to_ds, instructions) File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 172, in map_nested return function(data_struct) File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 191, in _read_instruction_to_ds file_instructions = make_file_instructions(name, split_infos, instruction) File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 104, in make_file_instructions absolute_instructions=absolute_instructions, File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 122, in _make_file_instructions_from_absolutes 'Split empty. This might means that dataset hasn\'t been generated ' ValueError: Split empty. This might means that dataset hasn't been generated yet and info not restored from GCS, or that legacy dataset is used. ``` How to reproduce: ```python import csv import nlp class Bbc(nlp.GeneratorBasedBuilder): VERSION = nlp.Version("1.0.0") def __init__(self, **config): self.train = config.pop("train", None) self.validation = config.pop("validation", None) super(Bbc, self).__init__(**config) def _info(self): return nlp.DatasetInfo(builder=self, description="bla", features=nlp.features.FeaturesDict({"id": nlp.int32, "text": nlp.string, "label": nlp.string})) def _split_generators(self, dl_manager): return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": self.train}), nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": self.validation}), nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": None})] def _generate_examples(self, filepath): if not filepath: return None, {} with open(filepath) as f: reader = csv.reader(f, delimiter=',', quotechar="\"") lines = list(reader)[1:] for idx, line in enumerate(lines): yield idx, {"id": idx, "text": line[1], "label": line[0]} ``` ```python import nlp dataset = nlp.load("bbc", builder_kwargs={"train": "bbc/data/train.csv", "validation": "bbc/data/test.csv"}) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/5/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5/timeline
closed
false
5
null
2020-04-29T09:23:05Z
null
false
600,185,417
https://api.github.com/repos/huggingface/datasets/issues/4
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4/events
[]
null
2020-07-08T16:59:46Z
[]
https://github.com/huggingface/datasets/issues/4
CONTRIBUTOR
completed
null
null
[]
[Feature] Keep the list of labels of a dataset as metadata
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4/reactions" }
MDU6SXNzdWU2MDAxODU0MTc=
null
2020-04-15T10:17:10Z
https://api.github.com/repos/huggingface/datasets/issues/4/comments
It would be useful to keep the list of the labels of a dataset as metadata. Either directly in the `DatasetInfo` or in the Arrow metadata.
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/4/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4/timeline
closed
false
4
null
2020-05-04T06:11:57Z
null
false
600,180,050
https://api.github.com/repos/huggingface/datasets/issues/3
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3/events
[]
null
2020-05-04T06:12:27Z
[]
https://github.com/huggingface/datasets/issues/3
CONTRIBUTOR
completed
null
null
[]
[Feature] More dataset outputs
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3/reactions" }
MDU6SXNzdWU2MDAxODAwNTA=
null
2020-04-15T10:08:14Z
https://api.github.com/repos/huggingface/datasets/issues/3/comments
Add the following dataset outputs: - Spark - Pandas
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/3/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3/timeline
closed
false
3
null
2020-05-04T06:12:27Z
null
false
599,767,671
https://api.github.com/repos/huggingface/datasets/issues/2
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2/events
[]
null
2020-05-11T18:55:23Z
[]
https://github.com/huggingface/datasets/issues/2
CONTRIBUTOR
completed
null
null
[]
Issue to read a local dataset
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2/reactions" }
MDU6SXNzdWU1OTk3Njc2NzE=
null
2020-04-14T18:18:51Z
https://api.github.com/repos/huggingface/datasets/issues/2/comments
Hello, As proposed by @thomwolf, I open an issue to explain what I'm trying to do without success. What I want to do is to create and load a local dataset, the script I have done is the following: ```python import os import csv import nlp class BbcConfig(nlp.BuilderConfig): def __init__(self, **kwargs): super(BbcConfig, self).__init__(**kwargs) class Bbc(nlp.GeneratorBasedBuilder): _DIR = "./data" _DEV_FILE = "test.csv" _TRAINING_FILE = "train.csv" BUILDER_CONFIGS = [BbcConfig(name="bbc", version=nlp.Version("1.0.0"))] def _info(self): return nlp.DatasetInfo(builder=self, features=nlp.features.FeaturesDict({"id": nlp.string, "text": nlp.string, "label": nlp.string})) def _split_generators(self, dl_manager): files = {"train": os.path.join(self._DIR, self._TRAINING_FILE), "dev": os.path.join(self._DIR, self._DEV_FILE)} return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": files["train"]}), nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": files["dev"]})] def _generate_examples(self, filepath): with open(filepath) as f: reader = csv.reader(f, delimiter=',', quotechar="\"") lines = list(reader)[1:] for idx, line in enumerate(lines): yield idx, {"idx": idx, "text": line[1], "label": line[0]} ``` The dataset is attached to this issue as well: [data.zip](https://github.com/huggingface/datasets/files/4476928/data.zip) Now the steps to reproduce what I would like to do: 1. unzip data locally (I know the nlp lib can detect and extract archives but I want to reduce and facilitate the reproduction as much as possible) 2. create the `bbc.py` script as above at the same location than the unziped `data` folder. Now I try to load the dataset in three different ways and none works, the first one with the name of the dataset like I would do with TFDS: ```python import nlp from bbc import Bbc dataset = nlp.load("bbc") ``` I get: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 280, in load dbuilder: DatasetBuilder = builder(path, name, data_dir=data_dir, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 166, in builder builder_cls = load_dataset(path, name=name, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 88, in load_dataset local_files_only=local_files_only, File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/utils/file_utils.py", line 214, in cached_path if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): File "/opt/anaconda3/envs/transformers/lib/python3.7/zipfile.py", line 203, in is_zipfile with open(filename, "rb") as fp: TypeError: expected str, bytes or os.PathLike object, not NoneType ``` But @thomwolf told me that no need to import the script, just put the path of it, then I tried three different way to do: ```python import nlp dataset = nlp.load("bbc.py") ``` And ```python import nlp dataset = nlp.load("./bbc.py") ``` And ```python import nlp dataset = nlp.load("/absolute/path/to/bbc.py") ``` These three ways gives me: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 280, in load dbuilder: DatasetBuilder = builder(path, name, data_dir=data_dir, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 166, in builder builder_cls = load_dataset(path, name=name, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 124, in load_dataset dataset_module = importlib.import_module(module_path) File "/opt/anaconda3/envs/transformers/lib/python3.7/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1006, in _gcd_import File "<frozen importlib._bootstrap>", line 983, in _find_and_load File "<frozen importlib._bootstrap>", line 965, in _find_and_load_unlocked ModuleNotFoundError: No module named 'nlp.datasets.2fd72627d92c328b3e9c4a3bf7ec932c48083caca09230cebe4c618da6e93688.bbc' ``` Any idea of what I'm missing? or I might have spot a bug :)
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
https://api.github.com/repos/huggingface/datasets/issues/2/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2/timeline
closed
false
2
null
2020-05-11T18:55:22Z
null
false
599,457,467
https://api.github.com/repos/huggingface/datasets/issues/1
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1/events
[]
null
2022-10-04T09:31:40Z
[]
https://github.com/huggingface/datasets/pull/1
CONTRIBUTOR
null
false
null
[]
changing nlp.bool to nlp.bool_
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1/reactions" }
MDExOlB1bGxSZXF1ZXN0NDAzMDk1NDYw
{ "diff_url": "https://github.com/huggingface/datasets/pull/1.diff", "html_url": "https://github.com/huggingface/datasets/pull/1", "merged_at": "2020-04-14T12:01:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/1.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1" }
2020-04-14T10:18:02Z
https://api.github.com/repos/huggingface/datasets/issues/1/comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
https://api.github.com/repos/huggingface/datasets/issues/1/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1/timeline
closed
false
1
null
2020-04-14T12:01:40Z
null
true