|
import requests |
|
import pandas as pd |
|
|
|
class RegulationsDataFetcher:
    """Fetch docket metadata and public comments from the Regulations.gov v4 API.

    Usage: construct with a docket id, call ``collect_data()``, then read the
    accumulated rows from ``self.dataset`` (a list of flat dicts suitable for
    ``pandas.DataFrame``).
    """

    # SECURITY NOTE(review): hard-coded API key checked into source. Move to an
    # environment variable or secrets store before sharing/deploying this code.
    API_KEY = '4T29l93SvmnyNCVFZUFzSfUqTq6k7S0Wqn93sLcH'
    BASE_COMMENT_URL = 'https://api.regulations.gov/v4/comments'
    BASE_DOCKET_URL = 'https://api.regulations.gov/v4/dockets/'
    HEADERS = {
        'X-Api-Key': API_KEY,
        'Content-Type': 'application/json'
    }
    # Seconds before a request is abandoned; without this a hung connection
    # stalls the entire collection run indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self, docket_id):
        """Prepare a fetcher for one docket; no network I/O happens here.

        Args:
            docket_id: Regulations.gov docket identifier, e.g. 'SAMHSA-2016-0001'.
        """
        self.docket_id = docket_id
        self.docket_url = self.BASE_DOCKET_URL + docket_id
        self.dataset = []  # accumulated comment rows (list of dicts)

    def fetch_comments(self, page_number):
        """Fetch one page (up to 100 items) of comments for this docket.

        Args:
            page_number: 1-based page index.

        Returns:
            Parsed JSON dict on HTTP 200, otherwise None (after printing the
            status code).
        """
        url = (f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}'
               f'&page[number]={page_number}&page[size]=100')
        response = requests.get(url, headers=self.HEADERS,
                                timeout=self.REQUEST_TIMEOUT)

        if response.status_code == 200:
            return response.json()
        print(f'Failed to retrieve comments: {response.status_code}')
        return None

    def get_docket_info(self):
        """Fetch docket-level metadata.

        Returns:
            Tuple ``(agency_id, title, modify_date)`` on HTTP 200, otherwise
            None (after printing the status code).
        """
        response = requests.get(self.docket_url, headers=self.HEADERS,
                                timeout=self.REQUEST_TIMEOUT)

        if response.status_code == 200:
            attributes = response.json()['data']['attributes']
            return (attributes['agencyId'],
                    attributes['title'],
                    attributes['modifyDate'])
        print(f'Failed to retrieve docket info: {response.status_code}')
        return None

    def fetch_comment_details(self, comment_url):
        """Fetch the full record of a single comment.

        Args:
            comment_url: The comment's self link from the listing response.

        Returns:
            Parsed JSON dict on HTTP 200, otherwise None (after printing the
            status code).
        """
        response = requests.get(comment_url, headers=self.HEADERS,
                                timeout=self.REQUEST_TIMEOUT)
        if response.status_code == 200:
            return response.json()
        print(f'Failed to retrieve comment details: {response.status_code}')
        return None

    def collect_data(self, max_comments=10):
        """Collect up to *max_comments* comments into ``self.dataset``.

        Iterates every listing page, fetching full details per comment and
        appending one flat row per comment.

        Args:
            max_comments: Stop once the dataset reaches this many rows
                (default 10, matching the original hard-coded cap).
        """
        initial_data = self.fetch_comments(1)
        if not initial_data:
            return
        total_pages = initial_data['meta']['totalPages']

        docket_info = self.get_docket_info()
        if docket_info is None:
            # BUG FIX: the original indexed docket_info unconditionally, so a
            # failed docket lookup raised TypeError instead of skipping.
            print(f'Skipping docket {self.docket_id}: no docket info')
            return

        for page_number in range(1, total_pages + 1):
            data = self.fetch_comments(page_number)
            if not data:
                continue

            for comment in data['data']:
                # BUG FIX: the original checked `len(...) == 10` and `break`-ed
                # only the inner loop, so the cap was silently bypassed on
                # every page after the first; `>=` + `return` enforces it.
                if len(self.dataset) >= max_comments:
                    return

                comment_details = self.fetch_comment_details(comment['links']['self'])
                if not comment_details:
                    continue

                comment_data = comment_details['data']['attributes']
                # BUG FIX: `.get(key, '')` only covers *missing* keys; the API
                # can return explicit nulls, which made `None + " "` and
                # `len(None)` raise TypeError. `or ''` covers both cases.
                first_name = comment_data.get('firstName') or ''
                last_name = comment_data.get('lastName') or ''
                comment_text = comment_data.get('comment') or ''
                self.dataset.append({
                    'docket_agency': docket_info[0],
                    'docket_title': docket_info[1],
                    'docket_date': docket_info[2],
                    'comment_id': comment['id'],
                    'comment_url': comment['links']['self'],
                    'comment_date': comment['attributes']['postedDate'],
                    'comment_title': comment['attributes']['title'],
                    'commenter_name': first_name + " " + last_name,
                    'comment_length': len(comment_text),
                    'comment_text': comment_text
                })
|
|
|
|
|
|
|
def main():
    """Collect comment samples for each target docket and write them to temp.csv.

    IDIOM FIX: the original ran these statements at module top level, so
    importing this file triggered dozens of network calls and a file write;
    the __main__ guard confines the side effects to script execution.
    """
    docket_ids = ['SAMHSA-2016-0001', 'SAMHSA-2023-0001', 'DEA-2020-0031',
                  'CMS-2021-0167', 'DOD-2015-HA-0109']
    all_data = []

    for docket_id in docket_ids:
        fetcher = RegulationsDataFetcher(docket_id)
        fetcher.collect_data()
        all_data.extend(fetcher.dataset)

    combined_df = pd.DataFrame(all_data)
    combined_df.to_csv('temp.csv', index=False)


if __name__ == '__main__':
    main()