Commit 6e55cce
revanth7667 committed
Parent(s): 6a0acc8

updated

Files changed:
- .01_Data/Missing_vs_Population.png +0 -0
- .gitattributes +1 -1
- 02_Codes/05_master_eda.ipynb +0 -0
- 02_Codes/06_master_script.py +172 -0
- data.parquet +0 -0
.01_Data/Missing_vs_Population.png
ADDED
.gitattributes
CHANGED
@@ -47,7 +47,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Image files - uncompressed
 *.bmp filter=lfs diff=lfs merge=lfs -text
 *.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
+#*.png filter=lfs diff=lfs merge=lfs -text
 *.tiff filter=lfs diff=lfs merge=lfs -text
 # Image files - compressed
 *.jpg filter=lfs diff=lfs merge=lfs -text
02_Codes/05_master_eda.ipynb
ADDED
The diff for this file is too large to render.
02_Codes/06_master_script.py
ADDED
@@ -0,0 +1,172 @@
"""
Imputes the missing death data based on the state-level mortality.

Saves the final dataset as data.parquet.

Refer to 05_master_eda.ipynb for the EDA and other details, such as how the data was imputed.
"""
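
# Pipeline overview (a summary of the steps implemented below):
#   1. Filter the mortality data to unintentional drug overdoses and drop Alaska.
#   2. Merge with the county population data and compute county- and state-level mortality rates.
#   3. For county-years with missing death counts, impute Deaths from the state
#      mortality rate and the county population, capped at 9.
#   4. Save the result as data.parquet.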

# importing libraries and setting default options
import pandas as pd
import numpy as np

pd.set_option("mode.copy_on_write", True)

# reading the data files
df = pd.read_parquet(".01_Data/02_Processed/02_Mortality.parquet")
population = pd.read_parquet(".01_Data/02_Processed/01_Population.parquet")

# ------------------------------------------
# Initial cleaning
df = df[df["State"] != "AK"]  # dropping Alaska since it is Out of Scope (OOS)

# We will consider only unintentional drug-related deaths since the others have very few values
df = df[df["Cause"] == "Drug poisonings (overdose) Unintentional (X40-X44)"]

df = (
    df.dropna()
)  # dropping rows with missing values since they are very few and will be imputed later

# ------------------------------------------
# Merge with population data
combined = pd.merge(
    df,
    population,
    on=["State", "State_Code", "County", "County_Code", "Year"],
    how="left",
    validate="1:1",
    indicator=True,
)
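
# Note: validate="1:1" makes pd.merge raise a MergeError if the merge keys are not
# unique on either side, and indicator=True adds a categorical "_merge" column
# ("left_only", "right_only", or "both") showing whether each row found a match.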

# ------------------------------------------
# Clean the merged data
df2 = combined[
    [
        "State",
        "State_Code",
        "County",
        "County_Code",
        "Year",
        "Population",
        "Deaths",
    ]
]

# ------------------------------------------
# calculating the Mortality Rate (County Level)
df3 = df2.copy()
df3["Mortality_Rate"] = df3["Deaths"] / df3["Population"]

# ------------------------------------------
# Mortality Rate (State Level)

# aggregate at the state-year level
df4 = (
    df3.groupby(["State", "Year"])
    .agg({"Deaths": "sum", "Population": "sum"})
    .reset_index()
)

# calculating the mortality rate
df4["State_Mortality_Rate"] = df4["Deaths"] / df4["Population"]
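
# Note: the state-level totals above are summed over the county-years that have
# reported deaths (df3), so the state mortality rate reflects observed counties only.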

# ------------------------------------------
# Creating a list of State-Counties from the POPULATION dataset
st_county = population[
    ["State", "State_Code", "County", "County_Code", "Year"]
].drop_duplicates()

# ------------------------------------------
# Merging the State Mortality Rate with the State-County list
master = pd.merge(st_county, df4, on=["State", "Year"], how="left", indicator=True)

# dropping NA rows since we have no state-level data for them
master = master[master["_merge"] == "both"]

# Cleaning the merged data
master_2 = master[
    [
        "State",
        "State_Code",
        "County",
        "County_Code",
        "Year",
        "State_Mortality_Rate",
    ]
]

# ------------------------------------------
# merge with the county-level data
df5 = pd.merge(
    master_2,
    df3,
    on=["State", "State_Code", "County", "County_Code", "Year"],
    how="left",
    indicator=True,
    validate="1:1",
)

# adding a flag to identify whether a row comes from the original data or not
df5["Original"] = df5["_merge"] == "both"

# ------------------------------------------
# Remap with the population data to get the county population
df6 = pd.merge(
    df5,
    population[["County_Code", "Year", "Population"]],
    on=["County_Code", "Year"],
    how="left",
    validate="1:1",
    indicator="merge2",
)


# ------------------------------------------
def new_death(row):
    """Calculate the deaths in a county using the State Mortality Rate and the County Population
    if the deaths are missing in the original dataset.
    The max value is limited to 9 since we know that it can't be 10 or more."""

    if pd.isna(row["Deaths"]):
        return min(int(row["Population_y"] * row["State_Mortality_Rate"]), 9)
    else:
        return row["Deaths"]
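
# Example with illustrative (made-up) numbers: for a county-year with a missing death
# count, Population_y = 50_000 and State_Mortality_Rate = 0.0001, the imputed value is
# min(int(50_000 * 0.0001), 9) = min(5, 9) = 5.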


# ------------------------------------------
# calculating the final Deaths using the new_death function
df7 = df6.copy()
df7["Deaths_2"] = df7.apply(new_death, axis=1)

# ------------------------------------------
# Cleaning the dataset
df8 = df7[
    [
        "State",
        "State_Code",
        "County",
        "County_Code",
        "Year",
        "Population_y",
        "Deaths_2",
        "Original",
        "State_Mortality_Rate",
    ]
]

# Renaming columns
df8 = df8.rename(columns={"Population_y": "Population", "Deaths_2": "Deaths"})

# ------------------------------------------
# Calculating the Mortality Rate for each county; if the population is 0, the mortality rate is 0
df8["County_Mortality_Rate"] = np.where(
    df8["Population"] == 0, 0, df8["Deaths"] / df8["Population"]
)

# sorting the rows
df9 = df8.sort_values(by=["State", "County", "Year"]).reset_index(drop=True)


# ------------------------------------------
# Saving the Final Dataset
df9.to_parquet("data.parquet", index=False)
data.parquet
ADDED
Binary file (589 kB).
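
For reference, a minimal sketch of how the saved file can be loaded and inspected, assuming the column names produced by 06_master_script.py above:

import pandas as pd

# load the imputed county-level mortality dataset written by the script above
data = pd.read_parquet("data.parquet")

# expected columns, per the script: State, State_Code, County, County_Code, Year,
# Population, Deaths, Original, State_Mortality_Rate, County_Mortality_Rate
print(data.shape)
print(data["Original"].value_counts())  # True = reported deaths, False = imputed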