1T Conte committed on
Commit
e120dd0
1 Parent(s): d0676a1

first commit

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
# Compressed SNAP review dumps fetched by `make download` — raw inputs, not tracked.
*.txt.gz
Makefile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Fetch every category of the 2013 SNAP Amazon reviews dataset as gzipped text.
# `wget -c` resumes interrupted downloads, so the target is safe to re-run.
download:
	wget -c "https://snap.stanford.edu/data/amazon/Amazon_Instant_Video.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Arts.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Automotive.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Baby.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Beauty.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Books.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Cell_Phones_&_Accessories.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Clothing_&_Accessories.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Electronics.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Gourmet_Foods.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Health.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Home_&_Kitchen.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Industrial_&_Scientific.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Jewelry.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Kindle_Store.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Movies_&_TV.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Musical_Instruments.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Music.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Office_Products.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Patio.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Pet_Supplies.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Shoes.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Software.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Sports_&_Outdoors.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Tools_&_Home_Improvement.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Toys_&_Games.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Video_Games.txt.gz"
	wget -c "https://snap.stanford.edu/data/amazon/Watches.txt.gz"
amazon_reviews_2013/train-0000-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f0d1d457074fcd30e332b12278c520977fadd96f418add7860484a578d16c1d
3
+ size 939927613
amazon_reviews_2013/train-0001-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d47af7ac2b3417c6791e913654b995d0fbdd595372bfcaf7d6edd524dc0f955d
3
+ size 1105515091
amazon_reviews_2013/train-0002-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a366801b5d7253a28b142c0791083abceb48258dbf3b3e772c012d936085dd2
3
+ size 1104040669
amazon_reviews_2013/train-0003-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e45572b024a0d999c9695f280534872ddd498a4fb3d7054a6deb02157d78421
3
+ size 1103357354
amazon_reviews_2013/train-0004-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:854a98360b8eb4b7f16351122c9f412b296d93c227b79780df0d66353dc8d519
3
+ size 1088789014
amazon_reviews_2013/train-0005-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d811c9290cf7436dfae47ef2d608b38eeefa34d04391b2a6b271349fc0bfd1ef
3
+ size 1108575489
amazon_reviews_2013/train-0006-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:296e3d316f5458c7a78bdf8d73918fbe6909e221c096337c0c47880b42d4ce6e
3
+ size 1104722517
amazon_reviews_2013/train-0007-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29a867a3b53da6398a4fbaa4578407ebebd4892ecb898376ae519564e545b149
3
+ size 712914822
amazon_reviews_2013/train-0008-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a21797891c3eab3e9503cb03ac4167825e5227eb6d150c3330650faf8f20f2c3
3
+ size 636022186
amazon_reviews_2013/train-0009-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e972b5ffd01f410a0ebdfaf0f1f7314a7a2c64ce39ed9d676536c8b39258c5ce
3
+ size 1116785087
amazon_reviews_2013/train-0010-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1eefee5e93c5858bed9713ce7d1ea22ff971ac3e86459db8d39991ac0cc9282c
3
+ size 1119456878
amazon_reviews_2013/train-0011-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc10130cab8cc0f93986b1a9ef01232cfa797b259108f8c9799782bd588baaa3
3
+ size 1112711889
amazon_reviews_2013/train-0012-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4601e11d9724c29adf50a84af9f212c08241ac88f5166a32554b3c2000dd3f3
3
+ size 1099536245
amazon_reviews_2013/train-0013-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6eac3e6ca90a39c4ee8aed9888996fe74074b6d5777c76351eb350f620de6e6
3
+ size 999966538
amazon_reviews_2013/train-0014-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d264d263cfe1a1a70b8b14a17721611c40d00db9bf658465686cdc9f0dcaedf
3
+ size 1005727136
amazon_reviews_2013/train-0015-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:780ec688809aafb40c018c66c2a2629701464ea4c9c3fdeaee9ea570bc0c19cc
3
+ size 1000589188
amazon_reviews_2013/train-0016-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4566de9a4be1308bf546b30a6ee5a92e54f04b82a20f79d2ce508b985b789b64
3
+ size 666796901
amazon_reviews_2013/train-0017-of-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11e225680cf9fee37a0969a2d1c70caaa6d3e845dc3ca64f16e50b7a57e2b909
3
+ size 528189664
convert.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Convert the Amazon reviews dataset to parquet format.

Usage:
$ make download
$ python convert.py
"""

import os
import gzip

from glob import glob

import pandas as pd

# Directory the generated parquet chunk files are written into.
OUTPUT_DIR = "amazon_reviews_2013"
# Number of review entries accumulated before a chunk file is flushed.
CHUNK_SIZE = 2000000

# Maps each downloaded SNAP dump (see Makefile `download` target) to the
# label stored in the "product/category" column of every review it contains.
CATEGORIES = {
    "Amazon_Instant_Video.txt.gz": "Amazon Instant Video",  # 717,651 reviews
    "Arts.txt.gz": "Arts",  # 27,980 reviews
    "Automotive.txt.gz": "Automotive",  # 188,728 reviews
    "Baby.txt.gz": "Baby",  # 184,887 reviews
    "Beauty.txt.gz": "Beauty",  # 252,056 reviews
    "Books.txt.gz": "Book",  # 12,886,488 reviews
    "Cell_Phones_&_Accessories.txt.gz": "Cell Phone",  # 78,930 reviews
    "Clothing_&_Accessories.txt.gz": "Clothing",  # 581,933 reviews
    "Electronics.txt.gz": "Electronics",  # 1,241,778 reviews
    "Gourmet_Foods.txt.gz": "Gourmet Food",  # 154,635 reviews
    "Health.txt.gz": "Health",  # 428,781 reviews
    "Home_&_Kitchen.txt.gz": "Home & Kitchen",  # 991,794 reviews
    "Industrial_&_Scientific.txt.gz": "Industrial & Scientific",  # 137,042 reviews
    "Jewelry.txt.gz": "Jewelry",  # 58,621 reviews
    "Kindle_Store.txt.gz": "Kindle Store",  # 160,793 reviews
    "Movies_&_TV.txt.gz": "Movie & TV",  # 7,850,072 reviews
    "Musical_Instruments.txt.gz": "Musical Instrument",  # 85,405 reviews
    "Music.txt.gz": "Music",  # 6,396,350 reviews
    "Office_Products.txt.gz": "Office",  # 138,084 reviews
    "Patio.txt.gz": "Patio",  # 206,250 reviews
    "Pet_Supplies.txt.gz": "Pet Supply",  # 217,170 reviews
    "Shoes.txt.gz": "Shoe",  # 389,877 reviews
    "Software.txt.gz": "Software",  # 95,084 reviews
    "Sports_&_Outdoors.txt.gz": "Sports & Outdoor",  # 510,991 reviews
    "Tools_&_Home_Improvement.txt.gz": "Tools & Home Improvement",  # 409,499 reviews
    "Toys_&_Games.txt.gz": "Toy & Game",  # 435,996 reviews
    "Video_Games.txt.gz": "Video Game",  # 463,669 reviews
    "Watches.txt.gz": "Watch",  # 68,356 reviews
}
def to_parquet():
    """
    Stream every category file and write the reviews out as parquet chunks.

    Entries are accumulated across all category files and flushed to a
    numbered "train" chunk every CHUNK_SIZE entries; any remainder is
    written as a final partial chunk.

    Returns:
        int: the total number of chunk files saved.

    Fix over the original: the final partial chunk is now counted, so the
    returned total matches the number of files on disk (the original
    under-reported by one whenever a partial chunk was written, which also
    made the "-of-NNNN" suffix produced by rename_chunks off by one).
    """
    n_chunks = 0
    train_data = []

    for filename in CATEGORIES:

        for entry in parse_file(filename):
            train_data.append(entry)

            if len(train_data) == CHUNK_SIZE:
                save_parquet(train_data, "train", n_chunks)
                train_data = []
                n_chunks += 1

    if train_data:
        save_parquet(train_data, "train", n_chunks)
        n_chunks += 1  # count the trailing partial chunk as well

    return n_chunks
def save_parquet(data, split, chunk):
    """
    Write one chunk of review entries to a parquet file in OUTPUT_DIR.

    The file is named "{split}-{chunk:04d}-of-nchunks.parquet"; the literal
    "nchunks" placeholder is substituted later by rename_chunks().
    """
    chunk_path = os.path.join(OUTPUT_DIR, f"{split}-{chunk:04d}-of-nchunks.parquet")
    pd.DataFrame(data).to_parquet(chunk_path)
def parse_file(filename):
    """
    Parse one gzipped SNAP review file, yielding one dict per review.

    The format is blocks of "key: value" lines; any line without a colon
    (normally a blank line) terminates the current entry. Each yielded dict
    additionally carries a "product/category" field looked up in CATEGORIES.

    Fixes over the original:
    - the gzip handle is closed via a context manager (it was leaked)
    - consecutive separator lines no longer yield spurious entries that
      contained only the category field
    - a final entry not followed by a blank line now also gets its
      "product/category" field, and no empty trailing dict is yielded
    """
    entry = {}
    with gzip.open(filename, "r") as f:
        for line in f:
            line = line.decode().strip()
            colon_pos = line.find(":")
            if colon_pos == -1:
                if entry:  # ignore repeated separator lines
                    entry["product/category"] = CATEGORIES[filename]
                    yield entry
                    entry = {}
                continue
            e_name = line[:colon_pos]
            # value starts after ": " (colon plus one space)
            rest = line[colon_pos + 2 :]
            entry[e_name] = rest

    if entry:
        # file did not end with a separator line: flush the last entry
        entry["product/category"] = CATEGORIES[filename]
        yield entry
def rename_chunks(n_chunks):
    """
    Replace the "nchunks" placeholder in every saved chunk filename with the
    actual chunk count, zero-padded to four digits.
    """
    pattern = os.path.join(OUTPUT_DIR, "train-*-of-nchunks.parquet")
    for old_name in glob(pattern):
        os.rename(old_name, old_name.replace("-nchunks", f"-{n_chunks:04d}"))
def run():
    """
    Convert all downloaded review files to parquet.

    Ensures OUTPUT_DIR exists, writes the chunk files, reports how many
    were saved, then renames them to embed the final chunk count.
    """
    # equivalent to the exists()/makedirs() pair: create only if missing
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    n_chunks = to_parquet()
    print(f"{n_chunks} chunks saved")

    rename_chunks(n_chunks)
# Script entry point: run the full conversion when executed directly.
if __name__ == "__main__":
    run()