T Conte committed on
Commit 5ea2e73
1 Parent(s): 275a10f

fix: split train and test set

Files changed (38)
  1. .vscode/settings.json +19 -0
  2. amazon_reviews_2013/{test-0000-of-nchunks.parquet → test-0000-of-0018.parquet} +0 -0
  3. amazon_reviews_2013/{test-0001-of-nchunks.parquet → test-0001-of-0018.parquet} +0 -0
  4. amazon_reviews_2013/{test-0002-of-nchunks.parquet → test-0002-of-0018.parquet} +0 -0
  5. amazon_reviews_2013/{test-0003-of-nchunks.parquet → test-0003-of-0018.parquet} +0 -0
  6. amazon_reviews_2013/{test-0004-of-nchunks.parquet → test-0004-of-0018.parquet} +0 -0
  7. amazon_reviews_2013/{test-0005-of-nchunks.parquet → test-0005-of-0018.parquet} +0 -0
  8. amazon_reviews_2013/{test-0006-of-nchunks.parquet → test-0006-of-0018.parquet} +0 -0
  9. amazon_reviews_2013/{test-0007-of-nchunks.parquet → test-0007-of-0018.parquet} +0 -0
  10. amazon_reviews_2013/{test-0008-of-nchunks.parquet → test-0008-of-0018.parquet} +0 -0
  11. amazon_reviews_2013/{test-0009-of-nchunks.parquet → test-0009-of-0018.parquet} +0 -0
  12. amazon_reviews_2013/{test-0010-of-nchunks.parquet → test-0010-of-0018.parquet} +0 -0
  13. amazon_reviews_2013/{test-0011-of-nchunks.parquet → test-0011-of-0018.parquet} +0 -0
  14. amazon_reviews_2013/{test-0012-of-nchunks.parquet → test-0012-of-0018.parquet} +0 -0
  15. amazon_reviews_2013/{test-0013-of-nchunks.parquet → test-0013-of-0018.parquet} +0 -0
  16. amazon_reviews_2013/{test-0014-of-nchunks.parquet → test-0014-of-0018.parquet} +0 -0
  17. amazon_reviews_2013/{test-0015-of-nchunks.parquet → test-0015-of-0018.parquet} +0 -0
  18. amazon_reviews_2013/{test-0016-of-nchunks.parquet → test-0016-of-0018.parquet} +0 -0
  19. amazon_reviews_2013/{test-0017-of-nchunks.parquet → test-0017-of-0018.parquet} +0 -0
  20. amazon_reviews_2013/train-0000-of-0017.parquet +0 -3
  21. amazon_reviews_2013/train-0001-of-0017.parquet +0 -3
  22. amazon_reviews_2013/train-0002-of-0017.parquet +0 -3
  23. amazon_reviews_2013/train-0003-of-0017.parquet +0 -3
  24. amazon_reviews_2013/train-0004-of-0017.parquet +0 -3
  25. amazon_reviews_2013/train-0005-of-0017.parquet +0 -3
  26. amazon_reviews_2013/train-0006-of-0017.parquet +0 -3
  27. amazon_reviews_2013/train-0007-of-0017.parquet +0 -3
  28. amazon_reviews_2013/train-0008-of-0017.parquet +0 -3
  29. amazon_reviews_2013/train-0009-of-0017.parquet +0 -3
  30. amazon_reviews_2013/train-0010-of-0017.parquet +0 -3
  31. amazon_reviews_2013/train-0011-of-0017.parquet +0 -3
  32. amazon_reviews_2013/train-0012-of-0017.parquet +0 -3
  33. amazon_reviews_2013/train-0013-of-0017.parquet +0 -3
  34. amazon_reviews_2013/train-0014-of-0017.parquet +0 -3
  35. amazon_reviews_2013/train-0015-of-0017.parquet +0 -3
  36. amazon_reviews_2013/train-0016-of-0017.parquet +0 -3
  37. amazon_reviews_2013/train-0017-of-0017.parquet +0 -3
  38. convert.py +57 -11
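
Taken together, the changes mean convert.py no longer writes train-only shards: each 2,000,000-review chunk is split 80/20 (TEST_SIZE = 0.2) into a paired train/test parquet file, the final partial chunk is now counted as well, which is presumably why the shard names move from -of-0017 to -of-0018. A minimal, hypothetical sketch of reading the resulting shards back with pandas (the load_split helper and its glob pattern are illustrative; only the directory name and shard naming scheme come from this repository):

import os
from glob import glob

import pandas as pd

DATA_DIR = "amazon_reviews_2013"


def load_split(split: str) -> pd.DataFrame:
    """Concatenate every parquet shard of one split ("train" or "test")."""
    shards = sorted(glob(os.path.join(DATA_DIR, f"{split}-*-of-*.parquet")))
    return pd.concat((pd.read_parquet(s) for s in shards), ignore_index=True)


train_df = load_split("train")
test_df = load_split("test")
print(len(train_df), len(test_df))
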
.vscode/settings.json ADDED
@@ -0,0 +1,19 @@
+{
+    "python.analysis.autoFormatStrings": true,
+    "[python]": {
+        "diffEditor.ignoreTrimWhitespace": false,
+        "editor.formatOnType": true,
+        "editor.wordBasedSuggestions": "off",
+        "editor.tabSize": 4,
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "ms-python.black-formatter",
+        "editor.rulers": [
+            88
+        ],
+        "editor.renderWhitespace": "all",
+    },
+    "files.watcherExclude": {
+        "**/.git/objects/**": true,
+        "**/.git/subtree-cache/**": true,
+    },
+}
amazon_reviews_2013/{test-0000-of-nchunks.parquet → test-0000-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0001-of-nchunks.parquet → test-0001-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0002-of-nchunks.parquet → test-0002-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0003-of-nchunks.parquet → test-0003-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0004-of-nchunks.parquet → test-0004-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0005-of-nchunks.parquet → test-0005-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0006-of-nchunks.parquet → test-0006-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0007-of-nchunks.parquet → test-0007-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0008-of-nchunks.parquet → test-0008-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0009-of-nchunks.parquet → test-0009-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0010-of-nchunks.parquet → test-0010-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0011-of-nchunks.parquet → test-0011-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0012-of-nchunks.parquet → test-0012-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0013-of-nchunks.parquet → test-0013-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0014-of-nchunks.parquet → test-0014-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0015-of-nchunks.parquet → test-0015-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0016-of-nchunks.parquet → test-0016-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/{test-0017-of-nchunks.parquet → test-0017-of-0018.parquet} RENAMED
File without changes
amazon_reviews_2013/train-0000-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6f0d1d457074fcd30e332b12278c520977fadd96f418add7860484a578d16c1d
-size 939927613
amazon_reviews_2013/train-0001-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d47af7ac2b3417c6791e913654b995d0fbdd595372bfcaf7d6edd524dc0f955d
-size 1105515091
amazon_reviews_2013/train-0002-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9a366801b5d7253a28b142c0791083abceb48258dbf3b3e772c012d936085dd2
-size 1104040669
amazon_reviews_2013/train-0003-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7e45572b024a0d999c9695f280534872ddd498a4fb3d7054a6deb02157d78421
-size 1103357354
amazon_reviews_2013/train-0004-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:854a98360b8eb4b7f16351122c9f412b296d93c227b79780df0d66353dc8d519
-size 1088789014
amazon_reviews_2013/train-0005-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d811c9290cf7436dfae47ef2d608b38eeefa34d04391b2a6b271349fc0bfd1ef
-size 1108575489
amazon_reviews_2013/train-0006-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:296e3d316f5458c7a78bdf8d73918fbe6909e221c096337c0c47880b42d4ce6e
-size 1104722517
amazon_reviews_2013/train-0007-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:29a867a3b53da6398a4fbaa4578407ebebd4892ecb898376ae519564e545b149
-size 712914822
amazon_reviews_2013/train-0008-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a21797891c3eab3e9503cb03ac4167825e5227eb6d150c3330650faf8f20f2c3
-size 636022186
amazon_reviews_2013/train-0009-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e972b5ffd01f410a0ebdfaf0f1f7314a7a2c64ce39ed9d676536c8b39258c5ce
-size 1116785087
amazon_reviews_2013/train-0010-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1eefee5e93c5858bed9713ce7d1ea22ff971ac3e86459db8d39991ac0cc9282c
-size 1119456878
amazon_reviews_2013/train-0011-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fc10130cab8cc0f93986b1a9ef01232cfa797b259108f8c9799782bd588baaa3
-size 1112711889
amazon_reviews_2013/train-0012-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d4601e11d9724c29adf50a84af9f212c08241ac88f5166a32554b3c2000dd3f3
-size 1099536245
amazon_reviews_2013/train-0013-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a6eac3e6ca90a39c4ee8aed9888996fe74074b6d5777c76351eb350f620de6e6
-size 999966538
amazon_reviews_2013/train-0014-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9d264d263cfe1a1a70b8b14a17721611c40d00db9bf658465686cdc9f0dcaedf
-size 1005727136
amazon_reviews_2013/train-0015-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:780ec688809aafb40c018c66c2a2629701464ea4c9c3fdeaee9ea570bc0c19cc
-size 1000589188
amazon_reviews_2013/train-0016-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4566de9a4be1308bf546b30a6ee5a92e54f04b82a20f79d2ce508b985b789b64
-size 666796901
amazon_reviews_2013/train-0017-of-0017.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:11e225680cf9fee37a0969a2d1c70caaa6d3e845dc3ca64f16e50b7a57e2b909
-size 528189664
convert.py CHANGED
@@ -13,8 +13,12 @@ from glob import glob
 
 import pandas as pd
 
+from sklearn.model_selection import train_test_split
+
+
 OUTPUT_DIR = "amazon_reviews_2013"
 CHUNK_SIZE = 2000000
+TEST_SIZE = 0.2
 
 CATEGORIES = {
     "Amazon_Instant_Video.txt.gz": "Amazon Instant Video",  # 717,651 reviews
@@ -47,37 +51,46 @@ CATEGORIES = {
     "Watches.txt.gz": "Watch",  # 68,356 reviews
 }
 
+CATEGORIES_LIST = list(CATEGORIES.values())
+
 
 def to_parquet():
     """
     Convert a single file to parquet
     """
     n_chunks = 0
-    train_data = []
+    data = []
 
     for filename in CATEGORIES:
 
         for entry in parse_file(filename):
-            train_data.append(entry)
+            data.append(entry)
 
-            if len(train_data) == CHUNK_SIZE:
-                save_parquet(train_data, "train", n_chunks)
-                train_data = []
+            if len(data) == CHUNK_SIZE:
+                save_parquet(data, n_chunks)
+                data = []
                 n_chunks += 1
 
-    if train_data:
-        save_parquet(train_data, "train", n_chunks)
+    if data:
+        save_parquet(data, n_chunks)
+        n_chunks += 1
 
     return n_chunks
 
 
-def save_parquet(data, split, chunk):
+def save_parquet(data, chunk):
    """
    Save data to parquet
    """
-    fname = os.path.join(OUTPUT_DIR, f"{split}-{chunk:04d}-of-nchunks.parquet")
+    fname_train = os.path.join(OUTPUT_DIR, f"train-{chunk:04d}-of-nchunks.parquet")
+    fname_test = os.path.join(OUTPUT_DIR, f"test-{chunk:04d}-of-nchunks.parquet")
+
     df = pd.DataFrame(data)
-    df.to_parquet(fname)
+
+    df_train, df_test = train_test_split(df, test_size=TEST_SIZE, random_state=42)
+
+    df_train.to_parquet(fname_train)
+    df_test.to_parquet(fname_test)
 
 
 def parse_file(filename):
@@ -101,11 +114,44 @@ def parse_file(filename):
            yield entry
 
 
+def clean(entry):
+    """
+    Clean the entry
+    """
+
+    if entry["product/price"] == "unknown":
+        entry["product/price"] = None
+    else:
+        entry["product/price"] = float(entry["product/price"])
+
+    entry["review/score"] = int(entry["review/score"])
+    entry["review/time"] = int(entry["review/time"])
+    entry["product/category"] = CATEGORIES_LIST.index(entry["product/category"])
+
+    numerator, denominator = entry["review/helpfulness"].split("/")
+    numerator = int(numerator)
+    denominator = int(denominator)
+
+    if denominator == 0:
+        entry["review/helpfulness_ratio"] = 0
+    else:
+        entry["review/helpfulness_ratio"] = numerator / denominator
+
+    entry["review/helpfulness_total_votes"] = denominator
+
+    # Remove identifying fields
+    del entry["review/userId"]
+    del entry["review/profileName"]
+    del entry["product/productId"]
+
+    return entry
+
+
 def rename_chunks(n_chunks):
    """
    Replace nchunks in filename by the actual number of chunks
    """
-    for fname in glob(os.path.join(OUTPUT_DIR, "train-*-of-nchunks.parquet")):
+    for fname in glob(os.path.join(OUTPUT_DIR, "*-of-nchunks.parquet")):
        new_fname = fname.replace("-nchunks", f"-{n_chunks:04d}")
        os.rename(fname, new_fname)
 
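
For reference, a short hypothetical driver for the updated script; this __main__ block is not part of the commit and only chains the two functions shown in the diff above:

from convert import rename_chunks, to_parquet

if __name__ == "__main__":
    # Write paired train-/test-XXXX-of-nchunks.parquet shards, one pair per
    # CHUNK_SIZE reviews, each split 80/20 by sklearn's train_test_split.
    n_chunks = to_parquet()
    # Rename the "-nchunks" placeholder to the real chunk count, e.g. "-0018".
    rename_chunks(n_chunks)
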