Upload regulatory_comments.py
regulatory_comments.py  +4 -2
@@ -35,7 +35,9 @@ _HOMEPAGE = "https://www.regulations.gov/"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/temp.csv"
+_URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/temp.csv"
+
+}
 
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -98,7 +100,7 @@ class RegComments(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS
+        urls = _URLS["url"]
         data_dir = dl_manager.download_and_extract(urls)
         print("urls accessed")
         print("File path:", os.path.join(data_dir, "train.csv"))
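For context, below is a minimal, hypothetical sketch of how a `_split_generators` method consumes the new `_URLS` dict; it is not the committed file. The builder name `RegComments` and the URL come from the diff above, while the `_info` placeholder, version number, and split wiring are assumptions based on the standard `datasets` loading-script template. The key behavior it illustrates: `dl_manager.download_and_extract` mirrors the structure of its argument, so passing the whole dict returns a dict of local paths, while passing `_URLS["url"]` (as the commit does) returns a single cached file path.

import datasets

# URL taken from the diff above; the single-key dict mirrors the commit.
_URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/temp.csv"}


class RegComments(datasets.GeneratorBasedBuilder):
    """Hypothetical sketch of the download step, not the committed script."""

    VERSION = datasets.Version("1.0.0")  # assumed version, not in the diff

    def _info(self):
        # Minimal placeholder so the builder is instantiable; the real script
        # would define features for the regulations.gov comment data.
        return datasets.DatasetInfo(description="Regulatory comments (sketch)")

    def _split_generators(self, dl_manager):
        # download_and_extract gives back the same structure it was given:
        # a dict in -> a dict of local paths out; a single URL in -> one path out.
        filepath = dl_manager.download_and_extract(_URLS["url"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            )
        ]

One caveat worth noting about the second hunk: for a single non-archive file such as temp.csv, `download_and_extract` returns the path of the cached file itself rather than a directory, so the debug line `os.path.join(data_dir, "train.csv")` would build a path that does not exist.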