|
""" |
|
Convert the Amazon reviews dataset to parquet format. |
|
|
|
Usage: |
|
$ make download |
|
$ python convert.py |
|
""" |
|
|
|
import os |
|
import gzip |
|
|
|
from glob import glob |
|
|
|
import pandas as pd |
|
|
|
# Directory where the generated parquet chunk files are written.
OUTPUT_DIR = "amazon_reviews_2013"

# Number of review records buffered in memory per parquet chunk.
CHUNK_SIZE = 2000000

# Maps each downloaded gzip filename to the human-readable category
# label stored in the "product/category" column of every record.
CATEGORIES = {
    "Amazon_Instant_Video.txt.gz": "Amazon Instant Video",
    "Arts.txt.gz": "Arts",
    "Automotive.txt.gz": "Automotive",
    "Baby.txt.gz": "Baby",
    "Beauty.txt.gz": "Beauty",
    "Books.txt.gz": "Book",
    "Cell_Phones_&_Accessories.txt.gz": "Cell Phone",
    "Clothing_&_Accessories.txt.gz": "Clothing",
    "Electronics.txt.gz": "Electronics",
    "Gourmet_Foods.txt.gz": "Gourmet Food",
    "Health.txt.gz": "Health",
    "Home_&_Kitchen.txt.gz": "Home & Kitchen",
    "Industrial_&_Scientific.txt.gz": "Industrial & Scientific",
    "Jewelry.txt.gz": "Jewelry",
    "Kindle_Store.txt.gz": "Kindle Store",
    "Movies_&_TV.txt.gz": "Movie & TV",
    "Musical_Instruments.txt.gz": "Musical Instrument",
    "Music.txt.gz": "Music",
    "Office_Products.txt.gz": "Office",
    "Patio.txt.gz": "Patio",
    "Pet_Supplies.txt.gz": "Pet Supply",
    "Shoes.txt.gz": "Shoe",
    "Software.txt.gz": "Software",
    "Sports_&_Outdoors.txt.gz": "Sports & Outdoor",
    "Tools_&_Home_Improvement.txt.gz": "Tools & Home Improvement",
    "Toys_&_Games.txt.gz": "Toy & Game",
    "Video_Games.txt.gz": "Video Game",
    "Watches.txt.gz": "Watch",
}
|
|
|
|
|
def to_parquet():
    """
    Convert every category file to parquet chunks.

    Streams the records of all files listed in CATEGORIES, buffering
    CHUNK_SIZE records at a time, and writes each full buffer out as one
    parquet chunk via save_parquet().

    Returns:
        int: the total number of chunks written, including the final
        partial chunk, so the value is usable as the "-of-NNNN" count.
    """
    n_chunks = 0
    train_data = []

    for filename in CATEGORIES:
        for entry in parse_file(filename):
            train_data.append(entry)

            if len(train_data) == CHUNK_SIZE:
                save_parquet(train_data, "train", n_chunks)
                train_data = []
                n_chunks += 1

    # Flush the final partial chunk AND count it — otherwise the total
    # reported to run()/rename_chunks() is one less than the number of
    # files actually written, and every "-of-NNNN" suffix is wrong.
    if train_data:
        save_parquet(train_data, "train", n_chunks)
        n_chunks += 1

    return n_chunks
|
|
|
|
|
def save_parquet(data, split, chunk):
    """
    Write one chunk of records to a parquet file inside OUTPUT_DIR.

    The filename embeds the split name and a zero-padded chunk index;
    the literal "nchunks" placeholder in the name is replaced with the
    real total later, by rename_chunks().
    """
    out_name = f"{split}-{chunk:04d}-of-nchunks.parquet"
    out_path = os.path.join(OUTPUT_DIR, out_name)
    pd.DataFrame(data).to_parquet(out_path)
|
|
|
|
|
def parse_file(filename):
    """
    Parse one gzipped category file, yielding one dict per review.

    The file format is runs of "key: value" lines, with records
    separated by blank lines. Each yielded dict additionally carries a
    "product/category" entry looked up in CATEGORIES by filename.

    Fixes over the naive parse:
      * the file handle is closed via a context manager;
      * a final record not followed by a blank line is still yielded
        WITH its "product/category" field (previously it was yielded
        without it);
      * trailing or consecutive blank lines no longer produce spurious
        category-only records.
    """
    entry = {}
    with gzip.open(filename, "r") as f:
        for line in f:
            line = line.decode().strip()
            colon_pos = line.find(":")
            if colon_pos == -1:
                # Blank separator line: emit the completed record, but
                # only if we actually collected any fields.
                if entry:
                    entry["product/category"] = CATEGORIES[filename]
                    yield entry
                    entry = {}
                continue
            e_name = line[:colon_pos]
            # +2 skips the ": " separator after the key.
            rest = line[colon_pos + 2 :]
            entry[e_name] = rest

    # File ended without a trailing blank line: emit the last record.
    if entry:
        entry["product/category"] = CATEGORIES[filename]
        yield entry
|
|
|
|
|
def rename_chunks(n_chunks):
    """
    Substitute the real chunk count for the "nchunks" placeholder.

    Every train-*.parquet file produced by save_parquet() is renamed so
    that its "-of-nchunks" suffix becomes "-of-<n_chunks, zero-padded>".
    """
    pattern = os.path.join(OUTPUT_DIR, "train-*-of-nchunks.parquet")
    suffix = f"-{n_chunks:04d}"
    for old_path in glob(pattern):
        os.rename(old_path, old_path.replace("-nchunks", suffix))
|
|
|
|
|
def run():
    """
    Convert all files to parquet.

    Ensures the output directory exists, writes every chunk, then
    renames the files so each name carries the total chunk count.
    """
    # exist_ok=True makes the call a no-op when the directory exists,
    # equivalent to the explicit os.path.exists() guard.
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    total = to_parquet()
    print(f"{total} chunks saved")

    rename_chunks(total)
|
|
|
|
|
# Run the conversion only when executed as a script, not on import.
if __name__ == "__main__":
    run()
|
|