File size: 3,697 Bytes
6a0acc8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
# Importing required packages
import pandas as pd
import numpy as np
import zipfile

# Enable copy-on-write so chained indexing never mutates a parent frame
# silently; the inplace/subset operations below rely on this being set.
pd.set_option("mode.copy_on_write", True)

# ------------------------------------------
# open the raw inputs
# NOTE(review): the leading-dot ".01_Data" directory is used consistently
# for every path in this script, so it is assumed intentional.
z = zipfile.ZipFile(".01_Data/01_Raw/raw_mortality.zip")
fips = pd.read_csv(".01_Data/01_Raw/county_fips.csv")
abbreviations = pd.read_csv(".01_Data/01_Raw/state_abbreviations.csv")

# The archive also contains non-data (system) entries; only members whose
# name starts with "Underlying" are real data files. Sort for stable order.
file_list = sorted(filter(lambda name: name.startswith("Underlying"), z.namelist()))


# ------------------------------------------
# load every selected archive member, clean it, and collect the frames
df_list = []
for member in file_list:
    # tab-separated extract; drop the free-text "Notes" column and the
    # footer/summary rows, which have no "County" value
    cleaned = (
        pd.read_csv(z.open(member), sep="\t")
        .drop(columns=["Notes"])
        .dropna(subset=["County"])
    )
    df_list.append(cleaned)

# ------------------------------------------
# stack the per-file frames into a single dataframe with a fresh index
df = pd.concat(df_list, ignore_index=True)

# ------------------------------------------
# Normalise column dtypes on a working copy
df2 = df.copy()

# Five-digit zero-padded county FIPS keeps join keys consistent with the
# other data sets in the project
df2["County Code"] = df2["County Code"].map(lambda code: f"{int(code):05d}")

# pad the lookup table's key the same way
fips["countyfips"] = fips["countyfips"].map(lambda code: str(code).zfill(5))

# Year as plain int
df2["Year"] = df2["Year"].astype(int)

# "Missing" marks suppressed death counts in the raw extract; blank those
# out, then use the nullable Int64 dtype so the gaps survive as <NA> for
# later analysis
deaths = df2["Deaths"].mask(df2["Deaths"] == "Missing")
df2["Deaths"] = deaths.astype(float).astype("Int64")

# ------------------------------------------

# Causes of death treated as drug-related; edit this list to widen scope
required_causes = [
    "Drug poisonings (overdose) Unintentional (X40-X44)",
    "All other drug-induced causes",
    "Drug poisonings (overdose) Homicide (X85)",
    "Drug poisonings (overdose) Suicide (X60-X64)",
    "Drug poisonings (overdose) Undetermined (Y10-Y14)",
]

# ------------------------------------------------------
# keep only the drug-related rows, drop the redundant code columns, and
# shorten the surviving column names — all in one chain
df3 = (
    df2[df2["Drug/Alcohol Induced Cause"].isin(required_causes)]
    .drop(columns=["Year Code", "Drug/Alcohol Induced Cause Code"])
    .rename(
        columns={"Drug/Alcohol Induced Cause": "Cause", "County Code": "County_Code"}
    )
)

# ------------------------------------------------------
# mapping with fips for proper county names and state name
# Left join keeps every mortality row even when a county code has no fips
# match (those rows carry NaN names); validate="m:1" raises if the fips
# table ever holds duplicate county codes.
# NOTE: the previous `indicator=True` was removed — the `_merge` column it
# produced was never inspected and was dropped by the selection below.
df4 = pd.merge(
    df3,
    fips,
    how="left",
    left_on="County_Code",
    right_on="countyfips",
    validate="m:1",
)

# --------------------------------------------------------
# Prepare final DF for saving
# select required columns
df5 = df4[["BUYER_STATE", "BUYER_COUNTY", "County_Code", "Year", "Cause", "Deaths"]]

# rename columns to match the rest of the processed data sets
df5 = df5.rename(columns={"BUYER_COUNTY": "County", "BUYER_STATE": "State_Code"})

# normalise the abbreviation table's column names so the two-letter state
# code can act as the join key below
abbreviations = abbreviations.rename(columns={"state": "State", "code": "State_Code"})

# attach the full state name; validate="m:1" guarantees each state code
# appears at most once in the abbreviation table
df6 = pd.merge(
    df5,
    abbreviations[["State", "State_Code"]],
    how="left",
    on="State_Code",
    validate="m:1",
)

# column order mirrors the processed population data set
final_columns = ["State", "State_Code", "County", "County_Code", "Year", "Cause", "Deaths"]
df6 = df6.loc[:, final_columns]

# ------------------------------------------
# persist the processed mortality table
df6.to_parquet(".01_Data/02_Processed/02_Mortality.parquet", index=False)