# Datasets: raw population estimates, county FIPS lookup, state abbreviations
# (File size: 3,450 Bytes)
# Set up pandas and load the three raw input files.
import pandas as pd

# Copy-on-write avoids chained-assignment surprises in the cleaning steps below.
pd.set_option("mode.copy_on_write", True)

_RAW_DIR = ".01_Data/01_Raw"
# Population estimates are tab-separated; the two lookups are plain CSV.
df = pd.read_csv(f"{_RAW_DIR}/raw_population.txt", sep="\t")
fips = pd.read_csv(f"{_RAW_DIR}/county_fips.csv")
abbreviations = pd.read_csv(f"{_RAW_DIR}/state_abbreviations.csv")
# ------------------------------------------
# Row/column pruning: drop the Notes column, the footnote rows it produced,
# and Alaska (excluded from this analysis).
df1 = (
    df.drop(columns=["Notes"])
    # Footnote rows carry no State value, so State is the NA marker.
    .dropna(subset=["State"])
    # Remove Alaska entirely.
    .loc[lambda d: d["State"] != "Alaska"]
)
# ------------------------------------------
# Normalize column dtypes.


def _as_padded_code(series, width):
    """Cast a numeric code column to a zero-padded fixed-width string."""
    return series.astype(int).astype(str).str.zfill(width)


df2 = df1.copy()
# FIPS codes are kept as zero-padded strings (2-digit state, 5-digit county)
# so leading zeros survive and joins against the lookup are exact.
df2["State Code"] = _as_padded_code(df2["State Code"], 2)
df2["County Code"] = _as_padded_code(df2["County Code"], 5)
# Pad the lookup's key the same way for a consistent join key.
fips["countyfips"] = fips["countyfips"].astype(str).str.zfill(5)
# Year and Population become plain integers; "Missing" populations are
# replaced with 0 for now <-------------------Change this later if required
df2["Yearly July 1st Estimates"] = df2["Yearly July 1st Estimates"].astype(int)
df2["Population"] = df2["Population"].replace("Missing", 0).astype(int)
# ------------------------------------------
# Build the analysis subset: shorter column names in a fixed column order.
_RENAMES = {
    "Yearly July 1st Estimates": "Year",
    "State Code": "State_Code",
    "County Code": "County_Code",
}
df3 = df2.rename(columns=_RENAMES)[
    [
        "State",
        "State_Code",
        "County",
        "County_Code",
        "Year",
        "Population",
    ]
]
# ------------------------------------------
# Attach canonical county names (BUYER_COUNTY) from the FIPS lookup.
# indicator=True adds a _merge column so unmatched rows can be audited.
df4 = df3.merge(
    fips,
    how="left",
    left_on="County_Code",
    right_on="countyfips",
    validate="m:1",  # each county code must appear at most once in the lookup
    indicator=True,
)
# ------------------------------------------
# Patch the county names where the FIPS mapping found no match.
_MANUAL_COUNTY_FIXES = {
    "Montgomery County, AR": "MONTGOMERY",
    "Kalawao County, HI": "KALAWAO",
    "Oglala Lakota County, SD": "OGLALA LAKOTA",
}
for _county_label, _buyer_name in _MANUAL_COUNTY_FIXES.items():
    df4.loc[df4["County"] == _county_label, "BUYER_COUNTY"] = _buyer_name
# ------------------------------------------
# Creating the final dataframe
# ------------------------------------------
# Map full state names to their two-letter abbreviations; the abbreviation
# replaces the numeric State_Code in the output.
abbreviations = abbreviations.rename(
    columns={
        "state": "State",
        "code": "State_Code",
    }
)
# Merge on the shared State column, then promote the corrected county
# name (BUYER_COUNTY) to the plain "County" column.
df5 = (
    df4[["State", "BUYER_COUNTY", "County_Code", "Year", "Population"]]
    .merge(
        abbreviations[["State", "State_Code"]],
        how="left",
        on="State",
        validate="m:1",  # one abbreviation row per state
    )
    .rename(columns={"BUYER_COUNTY": "County"})
)
# Fix the output column order and persist the processed dataset.
_FINAL_COLUMNS = [
    "State",
    "State_Code",
    "County",
    "County_Code",
    "Year",
    "Population",
]
df5 = df5[_FINAL_COLUMNS]
# ------------------------------------------
# NOTE(review): the path starts with ".01_Data" (a hidden directory) — it is
# used consistently for all reads/writes in this script, but confirm it is
# not a typo for "./01_Data".
df5.to_parquet(".01_Data/02_processed/01_Population.parquet", index=False)