ro-h committed on
Commit a1eaf57
1 Parent(s): c0459e9

changed citation

Files changed (1)
  1. regulatory_comments.py +122 -0
regulatory_comments.py ADDED
@@ -0,0 +1,122 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import datasets
+
+ # Description of the dataset
+ _DESCRIPTION = """\
+ United States governmental agencies often make proposed regulations open to the public for comment.
+ Proposed regulations are organized into "dockets". This project uses the Regulation.gov public API
+ to aggregate and clean public comments for dockets that mention opioid use.
+
+ Each example consists of one docket and includes metadata such as docket id, docket title, etc.
+ Each docket entry also includes information about the top 10 comments, including comment metadata
+ and comment text.
+ """
+
+ # Homepage URL of the dataset
+ _HOMEPAGE = "https://www.regulations.gov/"
+
+ # URL to download the dataset
+ _URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/docket_comments_v4.json"}
+ _CITATION = """@misc{ro_huang_regulatory_2023-1,
+   author = {{Ro Huang}},
+   date = {2023-03-19},
+   publisher = {Hugging Face},
+   title = {Regulatory Comments},
+   url = {https://huggingface.co/datasets/ro-h/regulatory_comments},
+   version = {1.1.4},
+   bdsk-url-1 = {https://huggingface.co/datasets/ro-h/regulatory_comments}}
+ """
+
+ # Class definition for handling the dataset
+ class RegComments(datasets.GeneratorBasedBuilder):
+
+     # Version of the dataset
+     VERSION = datasets.Version("1.1.4")
+
+     # Method to define the structure of the dataset
+     def _info(self):
+         # Defining the structure of the dataset
+         features = datasets.Features({
+             "id": datasets.Value("string"),
+             "agency": datasets.Value("string"),  # Added in
+             "title": datasets.Value("string"),
+             "update_date": datasets.Value("string"),  # Added in
+             "update_time": datasets.Value("string"),  # Added in
+             "purpose": datasets.Value("string"),
+             "keywords": datasets.Sequence(datasets.Value("string")),
+             "comments": datasets.Sequence({
+                 "text": datasets.Value("string"),
+                 "comment_id": datasets.Value("string"),
+                 "comment_url": datasets.Value("string"),
+                 "comment_date": datasets.Value("string"),
+                 "comment_time": datasets.Value("string"),
+                 "commenter_fname": datasets.Value("string"),
+                 "commenter_lname": datasets.Value("string"),
+                 "comment_length": datasets.Value("int32")
+             })
+         })
+
+         # Returning the dataset structure
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE
+         )
+
+     # Method to handle dataset splitting (e.g., train/test)
+     def _split_generators(self, dl_manager):
+         urls = _URLS["url"]
+         data_dir = dl_manager.download_and_extract(urls)
+         # Defining the split (here, only a train split is defined)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                 },
+             ),
+         ]
+
+     # Method to generate examples from the dataset
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         key = 0
+         with open(filepath, 'r', encoding='utf-8') as f:
+             data = json.load(f)
+             for docket in data:
+                 # Extracting data fields from each docket
+                 docket_id = docket["id"]
+                 docket_agency = docket["agency"]
+                 docket_title = docket["title"]
+                 docket_update_date = docket["update_date"]
+                 docket_update_time = docket["update_time"]
+                 docket_purpose = docket.get("purpose", "unspecified")
+                 docket_keywords = docket.get("keywords", [])
+                 comments = docket["comments"]
+
+                 # Yielding each docket with its information
+                 yield key, {
+                     "id": docket_id,
+                     "agency": docket_agency,
+                     "title": docket_title,
+                     "update_date": docket_update_date,
+                     "update_time": docket_update_time,
+                     "purpose": docket_purpose,
+                     "keywords": docket_keywords,
+                     "comments": comments
+                 }
+                 key += 1