Kurt Stolle committed · Commit 4849086 · 1 Parent(s): e9e7f5e

Updated build script to include more example data

Makefile CHANGED
@@ -1,15 +1,14 @@
-.PHONY: prepare webdataset parquet clean
+.PHONY: prepare build clean
+
+all: prepare build
 
 prepare:
 	mkdir -p downloads
-	python scripts/prepare.py downloads data manifest.csv
+	python scripts/prepare.py manifest.csv downloads data
 
-webdataset:
+build:
 	mkdir -p shards
-	python scripts/build_webdataset.py data shards
-
-parquet:
-	python scripts/build_parquet.py
+	python scripts/build.py manifest.csv data shards
 
 clean:
-	rm -vr downloads
+	rm -vr downloads shards
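
For quick reference, the updated targets chain together as follows. This is a sketch of a typical invocation, assuming `manifest.csv` and the scripts are present in the repository root, as the new recipes expect:

```bash
# Download and extract the raw packages listed in manifest.csv into downloads/ and data/
make prepare

# Pack the extracted data into WebDataset tar shards under shards/
make build

# Or run both steps through the new default target
make all

# Remove the downloaded archives and the generated shards
make clean
```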
README.md CHANGED
@@ -50,26 +50,25 @@ data
 000000.depth.tiff
 000000.vehicle.json
 000000.timestamp.txt
-000000.camera.json
 000001.image.png
 000001.panoptic.png
 000001.depth.tiff
 000001.vehicle.json
 000001.timestamp.txt
-000001.camera.json
-...
+000000.camera.json
 000001
 ...
+000001.camera.json
+...
 val
 000000
 ...
-000001
-....
+000000.camera.json
+...
 test
 000000
 ...
-000001
-....
+000000.camera.json
 
 ```
@@ -86,7 +85,7 @@ git clone https://huggingface.co/datasets/khwstolle/csvps && cd csvps
 
 1. Install the [Cityscapes developer kit](https://github.com/mcordts/cityscapesScripts) using `pip`.
 ```bash
-python -m pip install cityscapesscripts
+python -m pip install -r requirements.txt
 ```
 
 2. Run the preparation script provided in this repository.
@@ -102,7 +101,6 @@ make clean
 
 ## Usage
 
-Due to the structure, the dataset can be easily loaded using the `webdataset` library.
 To convert the `train`, `val` and `test` directories into a `tar` archive, run the following command:
 
 ```bash
examples.ipynb CHANGED
@@ -14,44 +14,340 @@
14
  },
15
  {
16
  "cell_type": "code",
17
- "execution_count": 23,
18
  "metadata": {},
19
  "outputs": [
20
  {
21
  "name": "stdout",
22
  "output_type": "stream",
23
  "text": [
24
- "000000/000000: image.png timestamp.txt vehicle.json \n",
25
- "000000/000001: image.png timestamp.txt vehicle.json \n",
26
- "000000/000002: image.png timestamp.txt vehicle.json \n",
27
- "000000/000003: image.png timestamp.txt vehicle.json \n",
28
- "000000/000004: depth.tiff image.png panoptic.png timestamp.txt vehicle.json \n",
29
- "000000/000005: image.png timestamp.txt vehicle.json \n",
30
- "000000/000006: image.png timestamp.txt vehicle.json \n",
31
- "000000/000007: image.png timestamp.txt vehicle.json \n",
32
- "000000/000008: image.png timestamp.txt vehicle.json \n",
33
- "000000/000009: depth.tiff image.png panoptic.png timestamp.txt vehicle.json \n",
34
- "000000/000010: image.png timestamp.txt vehicle.json \n"
35
  ]
36
  }
37
  ],
38
  "source": [
39
  "import webdataset as wds\n",
40
  "\n",
41
  "# Create iterable dataset\n",
42
- "ds = wds.WebDataset(\"shards/csvps-val.tar\", shardshuffle=False, verbose=True)\n",
 
43
  "\n",
44
  "# Iterate over the dataset and print the keys and the first few samples\n",
45
  "for i, sample in enumerate(ds):\n",
46
- " if i > 10: \n",
47
  " break\n",
48
- " print(sample[\"__key__\"], end=\": \")\n",
49
- " for k in sample:\n",
50
- " if not k.startswith(\"__\"):\n",
51
- " print(k, end=\" \")\n",
52
- " print(flush=True)\n",
53
  " "
54
  ]
55
  }
56
  ],
57
  "metadata": {
 
14
  },
15
  {
16
  "cell_type": "code",
17
+ "execution_count": 4,
18
  "metadata": {},
19
  "outputs": [
20
  {
21
  "name": "stdout",
22
  "output_type": "stream",
23
  "text": [
24
+ "Sample csvps-val/000030\n",
25
+ "{'frames': {'is_annotated': [False, False, False, False, True, False, False,\n",
26
+ " False, False, True, False, False, False, False,\n",
27
+ " True, False, False, False, False, True, False,\n",
28
+ " False, False, False, True, False, False, False,\n",
29
+ " False, True],\n",
30
+ " 'number': ['000000', '000001', '000002', '000003', '000004',\n",
31
+ " '000005', '000006', '000007', '000008', '000009',\n",
32
+ " '000010', '000011', '000012', '000013', '000014',\n",
33
+ " '000015', '000016', '000017', '000018', '000019',\n",
34
+ " '000020', '000021', '000022', '000023', '000024',\n",
35
+ " '000025', '000026', '000027', '000028', '000029'],\n",
36
+ " 'primary_key': ['lindau_000037_000000', 'lindau_000037_000001',\n",
37
+ " 'lindau_000037_000002', 'lindau_000037_000003',\n",
38
+ " 'lindau_000037_000004', 'lindau_000037_000005',\n",
39
+ " 'lindau_000037_000006', 'lindau_000037_000007',\n",
40
+ " 'lindau_000037_000008', 'lindau_000037_000009',\n",
41
+ " 'lindau_000037_000010', 'lindau_000037_000011',\n",
42
+ " 'lindau_000037_000012', 'lindau_000037_000013',\n",
43
+ " 'lindau_000037_000014', 'lindau_000037_000015',\n",
44
+ " 'lindau_000037_000016', 'lindau_000037_000017',\n",
45
+ " 'lindau_000037_000018', 'lindau_000037_000019',\n",
46
+ " 'lindau_000037_000020', 'lindau_000037_000021',\n",
47
+ " 'lindau_000037_000022', 'lindau_000037_000023',\n",
48
+ " 'lindau_000037_000024', 'lindau_000037_000025',\n",
49
+ " 'lindau_000037_000026', 'lindau_000037_000027',\n",
50
+ " 'lindau_000037_000028', 'lindau_000037_000029'],\n",
51
+ " 'timestamps': [0, 58925424, 117850824, 176776224, 235701592,\n",
52
+ " 294627064, 353552488, 412478000, 471403416,\n",
53
+ " 530328888, 589254288, 648179664, 707105072,\n",
54
+ " 766030408, 824955840, 883881184, 942806640,\n",
55
+ " 1001732056, 1060657528, 1119582984, 1178508320,\n",
56
+ " 1237433744, 1296359088, 1355284520, 1414209856,\n",
57
+ " 1473135304, 1532060720, 1590986120, 1649911576,\n",
58
+ " 1708836952]}}\n",
59
+ "Sample csvps-val/000031\n",
60
+ "{'frames': {'is_annotated': [False, False, False, False, True, False, False,\n",
61
+ " False, False, True, False, False, False, False,\n",
62
+ " True, False, False, False, False, True, False,\n",
63
+ " False, False, False, True, False, False, False,\n",
64
+ " False, True],\n",
65
+ " 'number': ['000000', '000001', '000002', '000003', '000004',\n",
66
+ " '000005', '000006', '000007', '000008', '000009',\n",
67
+ " '000010', '000011', '000012', '000013', '000014',\n",
68
+ " '000015', '000016', '000017', '000018', '000019',\n",
69
+ " '000020', '000021', '000022', '000023', '000024',\n",
70
+ " '000025', '000026', '000027', '000028', '000029'],\n",
71
+ " 'primary_key': ['lindau_000047_000000', 'lindau_000047_000001',\n",
72
+ " 'lindau_000047_000002', 'lindau_000047_000003',\n",
73
+ " 'lindau_000047_000004', 'lindau_000047_000005',\n",
74
+ " 'lindau_000047_000006', 'lindau_000047_000007',\n",
75
+ " 'lindau_000047_000008', 'lindau_000047_000009',\n",
76
+ " 'lindau_000047_000010', 'lindau_000047_000011',\n",
77
+ " 'lindau_000047_000012', 'lindau_000047_000013',\n",
78
+ " 'lindau_000047_000014', 'lindau_000047_000015',\n",
79
+ " 'lindau_000047_000016', 'lindau_000047_000017',\n",
80
+ " 'lindau_000047_000018', 'lindau_000047_000019',\n",
81
+ " 'lindau_000047_000020', 'lindau_000047_000021',\n",
82
+ " 'lindau_000047_000022', 'lindau_000047_000023',\n",
83
+ " 'lindau_000047_000024', 'lindau_000047_000025',\n",
84
+ " 'lindau_000047_000026', 'lindau_000047_000027',\n",
85
+ " 'lindau_000047_000028', 'lindau_000047_000029'],\n",
86
+ " 'timestamps': [0, 58925416, 117850784, 176776216, 235701560,\n",
87
+ " 294627000, 353552352, 412477776, 471403168,\n",
88
+ " 530328552, 589253984, 648179336, 707104784,\n",
89
+ " 766030160, 824955608, 883880992, 942806400,\n",
90
+ " 1001731832, 1060657200, 1119582648, 1178508008,\n",
91
+ " 1237433456, 1296358824, 1355284248, 1414209648,\n",
92
+ " 1473135024, 1532060464, 1590985824, 1649911272,\n",
93
+ " 1708836624]}}\n",
94
+ "Sample csvps-val/000032\n",
95
+ "{'frames': {'is_annotated': [False, False, False, False, True, False, False,\n",
96
+ " False, False, True, False, False, False, False,\n",
97
+ " True, False, False, False, False, True, False,\n",
98
+ " False, False, False, True, False, False, False,\n",
99
+ " False, True],\n",
100
+ " 'number': ['000000', '000001', '000002', '000003', '000004',\n",
101
+ " '000005', '000006', '000007', '000008', '000009',\n",
102
+ " '000010', '000011', '000012', '000013', '000014',\n",
103
+ " '000015', '000016', '000017', '000018', '000019',\n",
104
+ " '000020', '000021', '000022', '000023', '000024',\n",
105
+ " '000025', '000026', '000027', '000028', '000029'],\n",
106
+ " 'primary_key': ['lindau_000057_000000', 'lindau_000057_000001',\n",
107
+ " 'lindau_000057_000002', 'lindau_000057_000003',\n",
108
+ " 'lindau_000057_000004', 'lindau_000057_000005',\n",
109
+ " 'lindau_000057_000006', 'lindau_000057_000007',\n",
110
+ " 'lindau_000057_000008', 'lindau_000057_000009',\n",
111
+ " 'lindau_000057_000010', 'lindau_000057_000011',\n",
112
+ " 'lindau_000057_000012', 'lindau_000057_000013',\n",
113
+ " 'lindau_000057_000014', 'lindau_000057_000015',\n",
114
+ " 'lindau_000057_000016', 'lindau_000057_000017',\n",
115
+ " 'lindau_000057_000018', 'lindau_000057_000019',\n",
116
+ " 'lindau_000057_000020', 'lindau_000057_000021',\n",
117
+ " 'lindau_000057_000022', 'lindau_000057_000023',\n",
118
+ " 'lindau_000057_000024', 'lindau_000057_000025',\n",
119
+ " 'lindau_000057_000026', 'lindau_000057_000027',\n",
120
+ " 'lindau_000057_000028', 'lindau_000057_000029'],\n",
121
+ " 'timestamps': [0, 58925896, 117851752, 176777576, 235703424,\n",
122
+ " 294629232, 353555104, 412480888, 471406760,\n",
123
+ " 530332656, 589258512, 648184360, 707110176,\n",
124
+ " 766036096, 824961936, 883887864, 942813704,\n",
125
+ " 1001739592, 1060665480, 1119591288, 1178517152,\n",
126
+ " 1237442936, 1296368816, 1355294616, 1414220496,\n",
127
+ " 1473146352, 1532072200, 1590998064, 1649923840,\n",
128
+ " 1708849704]}}\n"
129
  ]
130
  }
131
  ],
132
  "source": [
133
  "import webdataset as wds\n",
134
+ "import json\n",
135
+ "\n",
136
+ "from pprint import pformat\n",
137
+ "import os\n",
138
  "\n",
139
  "# Create iterable dataset\n",
140
+ "shard_dir = \"shards/val\"\n",
141
+ "ds = wds.WebDataset([os.path.join(shard_dir, shard_file) for shard_file in os.listdir(shard_dir)], shardshuffle=False, verbose=True)\n",
142
  "\n",
143
  "# Iterate over the dataset and print the keys and the first few samples\n",
144
  "for i, sample in enumerate(ds):\n",
145
+ " if i > 2: \n",
146
  " break\n",
147
+ " meta_data = json.loads(sample[\"json\"].decode())\n",
148
+ " print(\"Sample \" + sample[\"__key__\"])\n",
149
+ " print(pformat(meta_data, compact=True))\n",
 
 
150
  " "
151
  ]
152
+ },
153
+ {
154
+ "cell_type": "markdown",
155
+ "metadata": {},
156
+ "source": [
157
+ "### Sample frames\n",
158
+ "\n",
159
+ "We can use `webdataset`'s compose helper to split the sequences into individual (pairs of) frames."
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "code",
164
+ "execution_count": 13,
165
+ "metadata": {},
166
+ "outputs": [
167
+ {
168
+ "name": "stdout",
169
+ "output_type": "stream",
170
+ "text": [
171
+ "__key__ (<class 'str'>) csvps-val/000030/000000:000001\n",
172
+ "camera.json (<class 'bytes'>) ...\n",
173
+ "json (<class 'bytes'>) ...\n",
174
+ "panoptic.png (<class 'list'>) 2\n",
175
+ "depth.tiff (<class 'list'>) 2\n",
176
+ "image.png (<class 'list'>) 2\n",
177
+ "vehicle.json (<class 'list'>) 2\n"
178
+ ]
179
+ }
180
+ ],
181
+ "source": [
182
+ "import collections\n",
183
+ "import itertools\n",
184
+ "\n",
185
+ "def find_frame_keys(keys):\n",
186
+ " r\"\"\"\n",
187
+ " Returns a mapping from frame number to the keys of the sample that correspond to \n",
188
+ " that frame.\n",
189
+ " \"\"\"\n",
190
+ " meta_keys = set()\n",
191
+ " frame_keys = collections.defaultdict(list)\n",
192
+ " for key in keys:\n",
193
+ " if key.startswith(\"__\"):\n",
194
+ " continue\n",
195
+ " if \".\" not in key:\n",
196
+ " meta_keys.add(key)\n",
197
+ " continue \n",
198
+ " stem, other = key.split(\".\", 1)\n",
199
+ " if stem.isdigit():\n",
200
+ " frame_keys[stem].append(other)\n",
201
+ " else:\n",
202
+ " meta_keys.add(key)\n",
203
+ " return dict(frame_keys), meta_keys\n",
204
+ "\n",
205
+ "\n",
206
+ "def generate_range(src, length: int = 2, *, missing_ok: bool =True):\n",
207
+ " for sample in src:\n",
208
+ " key = sample[\"__key__\"]\n",
209
+ " frame_keys, meta_keys = find_frame_keys(sample.keys()) \n",
210
+ " \n",
211
+ " pair_keys = set(itertools.chain.from_iterable(frame_keys.values()))\n",
212
+ " meta_data = {key: sample[key] for key in meta_keys}\n",
213
+ "\n",
214
+ " frame_ids = list(frame_keys.keys())\n",
215
+ "\n",
216
+ " for i in range(0, len(frame_keys) - length):\n",
217
+ " ids = frame_ids[i:i + length]\n",
218
+ "\n",
219
+ " pair_data = {\n",
220
+ " \"__key__\": f\"{key}/{ids[0]}:{ids[-1]}\" if len(ids) > 1 else f\"{key}/{ids[0]}\",\n",
221
+ " **meta_data,\n",
222
+ " **{\n",
223
+ " source_key: [sample.get(f\"{frame}.{source_key}\", None) for frame in ids]\n",
224
+ " for source_key in pair_keys\n",
225
+ " }\n",
226
+ " }\n",
227
+ "\n",
228
+ " yield pair_data\n",
229
+ "\n",
230
+ "ds_per_frame = ds.compose(generate_range)\n",
231
+ "\n",
232
+ "sample = next(iter(ds_per_frame))\n",
233
+ "\n",
234
+ "for key, value in sample.items():\n",
235
+ " print(f\"{key} ({type(value)})\", end=\" \")\n",
236
+ " if isinstance(value, list):\n",
237
+ " print(len(value))\n",
238
+ " elif isinstance(value, bytes):\n",
239
+ " print(\"...\")\n",
240
+ " else:\n",
241
+ " print(value)\n"
242
+ ]
243
+ },
244
+ {
245
+ "cell_type": "markdown",
246
+ "metadata": {},
247
+ "source": [
248
+ "## Hugging Face Datasets\n",
249
+ "\n",
250
+ "The WebDataset can be used directly in Hugging Face Datasets."
251
+ ]
252
+ },
253
+ {
254
+ "cell_type": "code",
255
+ "execution_count": 14,
256
+ "metadata": {},
257
+ "outputs": [
258
+ {
259
+ "data": {
260
+ "application/vnd.jupyter.widget-view+json": {
261
+ "model_id": "86f83a11f89e4f37b4acc752f5316585",
262
+ "version_major": 2,
263
+ "version_minor": 0
264
+ },
265
+ "text/plain": [
266
+ "Resolving data files: 0%| | 0/40 [00:00<?, ?it/s]"
267
+ ]
268
+ },
269
+ "metadata": {},
270
+ "output_type": "display_data"
271
+ },
272
+ {
273
+ "data": {
274
+ "application/vnd.jupyter.widget-view+json": {
275
+ "model_id": "c2914d517b4e436388dbf345bbb856c5",
276
+ "version_major": 2,
277
+ "version_minor": 0
278
+ },
279
+ "text/plain": [
280
+ "Downloading data: 0%| | 0/40 [00:00<?, ?files/s]"
281
+ ]
282
+ },
283
+ "metadata": {},
284
+ "output_type": "display_data"
285
+ },
286
+ {
287
+ "data": {
288
+ "application/vnd.jupyter.widget-view+json": {
289
+ "model_id": "0d2ca2ef894e49beae715e904168e870",
290
+ "version_major": 2,
291
+ "version_minor": 0
292
+ },
293
+ "text/plain": [
294
+ "Generating train split: 0 examples [00:00, ? examples/s]"
295
+ ]
296
+ },
297
+ "metadata": {},
298
+ "output_type": "display_data"
299
+ },
300
+ {
301
+ "ename": "DatasetGenerationError",
302
+ "evalue": "An error occurred while generating the dataset",
303
+ "output_type": "error",
304
+ "traceback": [
305
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
306
+ "\u001b[0;31mOSError\u001b[0m Traceback (most recent call last)",
307
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/builder.py:1625\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split_single\u001b[0;34m(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\u001b[0m\n\u001b[1;32m 1624\u001b[0m example \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mfeatures\u001b[38;5;241m.\u001b[39mencode_example(record) \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mfeatures \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m record\n\u001b[0;32m-> 1625\u001b[0m \u001b[43mwriter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite\u001b[49m\u001b[43m(\u001b[49m\u001b[43mexample\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1626\u001b[0m num_examples_progress_update \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n",
308
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:537\u001b[0m, in \u001b[0;36mArrowWriter.write\u001b[0;34m(self, example, key, writer_batch_size)\u001b[0m\n\u001b[1;32m 535\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhkey_record \u001b[38;5;241m=\u001b[39m []\n\u001b[0;32m--> 537\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_examples_on_file\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
309
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:495\u001b[0m, in \u001b[0;36mArrowWriter.write_examples_on_file\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 491\u001b[0m batch_examples[col] \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 492\u001b[0m row[\u001b[38;5;241m0\u001b[39m][col]\u001b[38;5;241m.\u001b[39mto_pylist()[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(row[\u001b[38;5;241m0\u001b[39m][col], (pa\u001b[38;5;241m.\u001b[39mArray, pa\u001b[38;5;241m.\u001b[39mChunkedArray)) \u001b[38;5;28;01melse\u001b[39;00m row[\u001b[38;5;241m0\u001b[39m][col]\n\u001b[1;32m 493\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m row \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcurrent_examples\n\u001b[1;32m 494\u001b[0m ]\n\u001b[0;32m--> 495\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_batch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbatch_examples\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbatch_examples\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 496\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcurrent_examples \u001b[38;5;241m=\u001b[39m []\n",
310
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:609\u001b[0m, in \u001b[0;36mArrowWriter.write_batch\u001b[0;34m(self, batch_examples, writer_batch_size)\u001b[0m\n\u001b[1;32m 608\u001b[0m pa_table \u001b[38;5;241m=\u001b[39m pa\u001b[38;5;241m.\u001b[39mTable\u001b[38;5;241m.\u001b[39mfrom_arrays(arrays, schema\u001b[38;5;241m=\u001b[39mschema)\n\u001b[0;32m--> 609\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_table\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mwriter_batch_size\u001b[49m\u001b[43m)\u001b[49m\n",
311
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:627\u001b[0m, in \u001b[0;36mArrowWriter.write_table\u001b[0;34m(self, pa_table, writer_batch_size)\u001b[0m\n\u001b[1;32m 626\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_examples \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m pa_table\u001b[38;5;241m.\u001b[39mnum_rows\n\u001b[0;32m--> 627\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpa_writer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_table\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mwriter_batch_size\u001b[49m\u001b[43m)\u001b[49m\n",
312
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/pyarrow/ipc.pxi:529\u001b[0m, in \u001b[0;36mpyarrow.lib._CRecordBatchWriter.write_table\u001b[0;34m()\u001b[0m\n",
313
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/pyarrow/error.pxi:89\u001b[0m, in \u001b[0;36mpyarrow.lib.check_status\u001b[0;34m()\u001b[0m\n",
314
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/fsspec/implementations/local.py:422\u001b[0m, in \u001b[0;36mLocalFileOpener.write\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 421\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mwrite\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 422\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
315
+ "\u001b[0;31mOSError\u001b[0m: [Errno 122] Disk quota exceeded",
316
+ "\nDuring handling of the above exception, another exception occurred:\n",
317
+ "\u001b[0;31mOSError\u001b[0m Traceback (most recent call last)",
318
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/builder.py:1634\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split_single\u001b[0;34m(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\u001b[0m\n\u001b[1;32m 1633\u001b[0m num_shards \u001b[38;5;241m=\u001b[39m shard_id \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m-> 1634\u001b[0m num_examples, num_bytes \u001b[38;5;241m=\u001b[39m \u001b[43mwriter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfinalize\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1635\u001b[0m writer\u001b[38;5;241m.\u001b[39mclose()\n",
319
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:636\u001b[0m, in \u001b[0;36mArrowWriter.finalize\u001b[0;34m(self, close_stream)\u001b[0m\n\u001b[1;32m 635\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhkey_record \u001b[38;5;241m=\u001b[39m []\n\u001b[0;32m--> 636\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_examples_on_file\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 637\u001b[0m \u001b[38;5;66;03m# If schema is known, infer features even if no examples were written\u001b[39;00m\n",
320
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:495\u001b[0m, in \u001b[0;36mArrowWriter.write_examples_on_file\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 491\u001b[0m batch_examples[col] \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 492\u001b[0m row[\u001b[38;5;241m0\u001b[39m][col]\u001b[38;5;241m.\u001b[39mto_pylist()[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(row[\u001b[38;5;241m0\u001b[39m][col], (pa\u001b[38;5;241m.\u001b[39mArray, pa\u001b[38;5;241m.\u001b[39mChunkedArray)) \u001b[38;5;28;01melse\u001b[39;00m row[\u001b[38;5;241m0\u001b[39m][col]\n\u001b[1;32m 493\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m row \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcurrent_examples\n\u001b[1;32m 494\u001b[0m ]\n\u001b[0;32m--> 495\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_batch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbatch_examples\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbatch_examples\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 496\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcurrent_examples \u001b[38;5;241m=\u001b[39m []\n",
321
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:609\u001b[0m, in \u001b[0;36mArrowWriter.write_batch\u001b[0;34m(self, batch_examples, writer_batch_size)\u001b[0m\n\u001b[1;32m 608\u001b[0m pa_table \u001b[38;5;241m=\u001b[39m pa\u001b[38;5;241m.\u001b[39mTable\u001b[38;5;241m.\u001b[39mfrom_arrays(arrays, schema\u001b[38;5;241m=\u001b[39mschema)\n\u001b[0;32m--> 609\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_table\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mwriter_batch_size\u001b[49m\u001b[43m)\u001b[49m\n",
322
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/arrow_writer.py:627\u001b[0m, in \u001b[0;36mArrowWriter.write_table\u001b[0;34m(self, pa_table, writer_batch_size)\u001b[0m\n\u001b[1;32m 626\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_examples \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m pa_table\u001b[38;5;241m.\u001b[39mnum_rows\n\u001b[0;32m--> 627\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpa_writer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_table\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mwriter_batch_size\u001b[49m\u001b[43m)\u001b[49m\n",
323
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/pyarrow/ipc.pxi:529\u001b[0m, in \u001b[0;36mpyarrow.lib._CRecordBatchWriter.write_table\u001b[0;34m()\u001b[0m\n",
324
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/pyarrow/error.pxi:89\u001b[0m, in \u001b[0;36mpyarrow.lib.check_status\u001b[0;34m()\u001b[0m\n",
325
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/fsspec/implementations/local.py:422\u001b[0m, in \u001b[0;36mLocalFileOpener.write\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 421\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mwrite\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 422\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
326
+ "\u001b[0;31mOSError\u001b[0m: [Errno 122] Disk quota exceeded",
327
+ "\nThe above exception was the direct cause of the following exception:\n",
328
+ "\u001b[0;31mDatasetGenerationError\u001b[0m Traceback (most recent call last)",
329
+ "Cell \u001b[0;32mIn[14], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mdatasets\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m dataset \u001b[38;5;241m=\u001b[39m \u001b[43mdatasets\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mwebdataset\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdata_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mshards\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msplit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(dataset)\n",
330
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/load.py:2151\u001b[0m, in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, keep_in_memory, save_infos, revision, token, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)\u001b[0m\n\u001b[1;32m 2148\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m builder_instance\u001b[38;5;241m.\u001b[39mas_streaming_dataset(split\u001b[38;5;241m=\u001b[39msplit)\n\u001b[1;32m 2150\u001b[0m \u001b[38;5;66;03m# Download and prepare data\u001b[39;00m\n\u001b[0;32m-> 2151\u001b[0m \u001b[43mbuilder_instance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2152\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2153\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2154\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2155\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2156\u001b[0m \u001b[43m \u001b[49m\u001b[43mstorage_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstorage_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2157\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2159\u001b[0m \u001b[38;5;66;03m# Build dataset for splits\u001b[39;00m\n\u001b[1;32m 2160\u001b[0m keep_in_memory \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 2161\u001b[0m keep_in_memory \u001b[38;5;28;01mif\u001b[39;00m keep_in_memory \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m is_small_dataset(builder_instance\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size)\n\u001b[1;32m 2162\u001b[0m )\n",
331
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/builder.py:924\u001b[0m, in \u001b[0;36mDatasetBuilder.download_and_prepare\u001b[0;34m(self, output_dir, download_config, download_mode, verification_mode, dl_manager, base_path, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\u001b[0m\n\u001b[1;32m 922\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 923\u001b[0m prepare_split_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_proc\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_proc\n\u001b[0;32m--> 924\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 925\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 926\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 927\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 928\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mdownload_and_prepare_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 929\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 930\u001b[0m \u001b[38;5;66;03m# Sync info\u001b[39;00m\n\u001b[1;32m 931\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(split\u001b[38;5;241m.\u001b[39mnum_bytes \u001b[38;5;28;01mfor\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39msplits\u001b[38;5;241m.\u001b[39mvalues())\n",
332
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/builder.py:1648\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verification_mode, **prepare_splits_kwargs)\u001b[0m\n\u001b[1;32m 1647\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_download_and_prepare\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager, verification_mode, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mprepare_splits_kwargs):\n\u001b[0;32m-> 1648\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1649\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1650\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1651\u001b[0m \u001b[43m \u001b[49m\u001b[43mcheck_duplicate_keys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mBASIC_CHECKS\u001b[49m\n\u001b[1;32m 1652\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mALL_CHECKS\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1653\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_splits_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1654\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
333
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/builder.py:1000\u001b[0m, in \u001b[0;36mDatasetBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verification_mode, **prepare_split_kwargs)\u001b[0m\n\u001b[1;32m 996\u001b[0m split_dict\u001b[38;5;241m.\u001b[39madd(split_generator\u001b[38;5;241m.\u001b[39msplit_info)\n\u001b[1;32m 998\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 999\u001b[0m \u001b[38;5;66;03m# Prepare split will record examples associated to the split\u001b[39;00m\n\u001b[0;32m-> 1000\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_prepare_split\u001b[49m\u001b[43m(\u001b[49m\u001b[43msplit_generator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1001\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 1002\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m(\n\u001b[1;32m 1003\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot find data file. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1004\u001b[0m \u001b[38;5;241m+\u001b[39m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmanual_download_instructions \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 1005\u001b[0m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mOriginal error:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1006\u001b[0m \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mstr\u001b[39m(e)\n\u001b[1;32m 1007\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n",
334
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/builder.py:1486\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split\u001b[0;34m(self, split_generator, check_duplicate_keys, file_format, num_proc, max_shard_size)\u001b[0m\n\u001b[1;32m 1484\u001b[0m job_id \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 1485\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m pbar:\n\u001b[0;32m-> 1486\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mjob_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdone\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_prepare_split_single\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1487\u001b[0m \u001b[43m \u001b[49m\u001b[43mgen_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgen_kwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mjob_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mjob_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43m_prepare_split_args\u001b[49m\n\u001b[1;32m 1488\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 1489\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mdone\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 1490\u001b[0m \u001b[43m \u001b[49m\u001b[43mresult\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\n",
335
+ "File \u001b[0;32m/gpfs/home3/kstolle/.local/opt/miniconda3/envs/multidvps-py312/lib/python3.12/site-packages/datasets/builder.py:1643\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split_single\u001b[0;34m(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\u001b[0m\n\u001b[1;32m 1641\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(e, SchemaInferenceError) \u001b[38;5;129;01mand\u001b[39;00m e\u001b[38;5;241m.\u001b[39m__context__ \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 1642\u001b[0m e \u001b[38;5;241m=\u001b[39m e\u001b[38;5;241m.\u001b[39m__context__\n\u001b[0;32m-> 1643\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m DatasetGenerationError(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAn error occurred while generating the dataset\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[1;32m 1645\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m job_id, \u001b[38;5;28;01mTrue\u001b[39;00m, (total_num_examples, total_num_bytes, writer\u001b[38;5;241m.\u001b[39m_features, num_shards, shard_lengths)\n",
336
+ "\u001b[0;31mDatasetGenerationError\u001b[0m: An error occurred while generating the dataset"
337
+ ]
338
+ }
339
+ ],
340
+ "source": [
341
+ "import datasets\n",
342
+ "\n",
343
+ "dataset = datasets.load_dataset(\"webdataset\", data_dir=\"shards\", split=\"train\")\n",
344
+ "print(dataset)\n"
345
+ ]
346
+ },
347
+ {
348
+ "cell_type": "markdown",
349
+ "metadata": {},
350
+ "source": []
351
  }
352
  ],
353
  "metadata": {
scripts/build.py ADDED
@@ -0,0 +1,363 @@
1
+ #!/usr/bin/env python
2
+ r"""
3
+ Builds a WebDataset from the Cityscapes Video dataset.
4
+
5
+ Adapted from the `WebDataset documentation<https://github.com/webdataset/webdataset/>`_.
6
+ """
7
+
8
+ import itertools
9
+ import collections
10
+ import typing as T
11
+ from pprint import pformat
12
+ import argparse
13
+ import multiprocessing as mp
14
+ import tarfile
15
+ import pandas as pd
16
+ from io import BytesIO
17
+ import json
18
+
19
+ from pathlib import Path
20
+ from tqdm import tqdm
21
+
22
+
23
+ def parse_args():
24
+ ap = argparse.ArgumentParser(
25
+ description="Build a WebDataset from the Cityscapes Video dataset."
26
+ )
27
+
28
+ # Flags and optional
29
+ ap.add_argument(
30
+ "--shard-size",
31
+ "-s",
32
+ type=int,
33
+ default=10,
34
+ help=("Number of sequences per shard."),
35
+ )
36
+ ap.add_argument(
37
+ "--name",
38
+ "-n",
39
+ type=str,
40
+ default="csvps",
41
+ help=(
42
+ "Name of the dataset. This will be used as the prefix for the tar files."
43
+ ),
44
+ )
45
+ ap.add_argument(
46
+ "--variant",
47
+ type=str,
48
+ default="",
49
+ help=(
50
+ "When passing different manifest variants, this will be used to postfix "
51
+ "each split such that the resulting dataset name is unique."
52
+ ),
53
+ )
54
+ ap.add_argument(
55
+ "--force", "-f", action="store_true", help="Overwrite existing data."
56
+ )
57
+ ap.add_argument(
58
+ "--splits", nargs="+", default=["train", "val", "test"], help="Splits to build."
59
+ )
60
+
61
+ # Positional
62
+ ap.add_argument("manifest", type=Path, help="Path to the manifest CSV file.")
63
+ ap.add_argument("data", type=Path, help="Path to the Cityscapes Video dataset.")
64
+ ap.add_argument("output", type=Path, help="Path to the output directory.")
65
+
66
+ rt = ap.parse_args()
67
+
68
+ # Validation
69
+ if rt.shard_size < 1:
70
+ ap.error("Shard size must be a positive integer.")
71
+ if rt.name == "":
72
+ ap.error("Name must be a non-empty string.")
73
+ if not (rt.name.isalnum() and rt.name.islower()):
74
+ ap.error("Name must be a lowercase alpha-numeric string.")
75
+ if rt.variant != "" and not (rt.variant.isalnum() and rt.variant.islower()):
76
+ ap.error("Variant must be a lowercase alpha-numeric string.")
77
+ if not rt.manifest.exists():
78
+ ap.error(f"Manifest file not found: {rt.manifest}")
79
+ if not rt.data.exists():
80
+ ap.error(f"Data directory not found: {rt.data}")
81
+ if not rt.output.exists():
82
+ rt.output.mkdir(parents=True)
83
+ print(f"Created output directory: {rt.output}")
84
+
85
+ return rt
86
+
87
+
88
+ PAD_TO: T.Final[int] = 6 # 06-padding is given by the dataset and should not be changed
89
+
90
+
91
+ def pad_number(n: int) -> str:
92
+ r"""
93
+ For sorting, numbers are padded with zeros to a fixed width.
94
+ """
95
+ if not isinstance(n, int):
96
+ msg = f"Expected an integer, got {n} of type {type(n)}"
97
+ raise TypeError(msg)
98
+ return f"{n:0{PAD_TO}d}"
99
+
100
+
101
+ def read_timestamp(path: Path) -> int:
102
+ with path.open("r") as f:
103
+ ts = f.read().strip()
104
+ if not ts.isdigit():
105
+ msg = f"Expected a timestamp, got {ts} from {path}"
106
+ raise ValueError(msg)
107
+ return int(ts)
108
+
109
+
110
+ def write_bytes(tar: tarfile.TarFile, bt: bytes, arc: str):
111
+ r"""
112
+ Simple utility to write the bytes (e.g. metadata json) directly from memory to
113
+ the tarfile, since these do not exist as a file.
114
+ """
115
+ with BytesIO() as buf:
116
+ buf.write(bt)
117
+
118
+ # The TarInfo object must be created manually since the meta-data
119
+ # JSON is written to a buffer (BytesIO) and not a file.
120
+ tar_info = tarfile.TarInfo(arc)
121
+ tar_info.size = buf.tell() # number of bytes written
122
+
123
+ # Reset the buffer to the beginning before adding it to the tarfile
124
+ buf.seek(0)
125
+
126
+ tar.addfile(tar_info, buf)
127
+
128
+
129
+ def find_sequence_files(
130
+ seq: int,
131
+ group: pd.DataFrame,
132
+ *,
133
+ data_dir: Path,
134
+ dataset_name: str,
135
+ missing_ok: bool = False,
136
+ frame_inputs: T.Sequence[str] = ("image.png", "vehicle.json"),
137
+ frame_annotations: T.Sequence[str] = ("panoptic.png", "depth.tiff"),
138
+ sequence_data: T.Sequence[str] = ("camera.json",),
139
+ separator: str = "/",
140
+ ) -> T.Iterator[tuple[Path | bytes, str]]:
141
+ seq_pad = pad_number(seq)
142
+ seq_dir = data_dir / seq_pad
143
+
144
+ group = group.sort_values("frame")
145
+
146
+ # Add frame-wise data
147
+ primary_keys = group.index.tolist()
148
+ frame_numbers = list(map(pad_number, group["frame"].tolist()))
149
+
150
+ for i, meta in enumerate(
151
+ group.drop(columns=["sequence", "frame", "split"]).to_dict(
152
+ orient="records", index=True
153
+ )
154
+ ):
155
+ frame_06 = frame_numbers[i]
156
+ is_ann = meta["is_annotated"]
157
+
158
+ # Write primary key
159
+ meta["primary_key"] = primary_keys[i]
160
+
161
+ # Add files to the tarfile
162
+ for var in frame_inputs + frame_annotations:
163
+ path_file = seq_dir / f"{frame_06}.{var}"
164
+ if not path_file.exists():
165
+ if missing_ok or (var in frame_annotations and not is_ann):
166
+ continue # missing annotation OK
167
+ msg = f"File not found: {path_file}"
168
+ raise FileNotFoundError(msg)
169
+
170
+ yield (
171
+ path_file,
172
+ separator.join(
173
+ (
174
+ dataset_name,
175
+ # {seq}.{frame}.{var}.{ext}
176
+ path_file.relative_to(data_dir).as_posix().replace("/", "."),
177
+ )
178
+ ),
179
+ )
180
+
181
+ # Add the timestamp to the meta-data if it exists
182
+ path_ts = seq_dir / f"{frame_06}.timestamp.txt"
183
+ if not path_ts.exists():
184
+ if not missing_ok:
185
+ msg = f"Timestamp file not found: {path_ts}"
186
+ raise FileNotFoundError(msg)
187
+ meta["timestamp"] = None
188
+ else:
189
+ meta["timestamp"] = read_timestamp(path_ts)
190
+
191
+ # Write frame metadata
192
+ yield (
193
+ json.dumps(meta).encode("utf-8"),
194
+ f"{dataset_name}/{seq_pad}.{frame_06}.metadata.json",
195
+ )
196
+
197
+ # Add sequence-wise files {seq}.{var}.{ext}, e.g. 000000.camera.json
198
+ for var in sequence_data:
199
+ path_file = seq_dir.with_suffix("." + var)
200
+ if not path_file.exists():
201
+ if missing_ok:
202
+ continue
203
+ msg = f"File not found: {path_file}"
204
+ raise FileNotFoundError(msg)
205
+
206
+ yield (
207
+ path_file,
208
+ separator.join(
209
+ (
210
+ dataset_name,
211
+ # {seq}.{var}.{ext}
212
+ path_file.relative_to(data_dir).as_posix(),
213
+ )
214
+ ),
215
+ )
216
+
217
+ # Write frames array
218
+ yield (
219
+ json.dumps(frame_numbers).encode("utf-8"),
220
+ f"{dataset_name}/{seq_pad}.frames.json",
221
+ )
222
+
223
+
224
+ def run_collector(
225
+ seq: int, group: pd.DataFrame, kwargs: dict
226
+ ) -> tuple[int, list[tuple[Path | bytes, str]]]:
227
+ r"""
228
+ Worker that collects the files for a single sequence.
229
+ """
230
+ return (seq, list(find_sequence_files(seq, group, **kwargs)))
231
+
232
+
233
+ def run_writer(
234
+ tar_path: Path, items: list[list[tuple[Path | bytes, str]]], compression: str = "gz"
235
+ ) -> None:
236
+ r"""
237
+ Worker that writes the files to a tar archive.
238
+ """
239
+ if compression != "":
240
+ tar_path = tar_path.with_suffix(f".tar.{compression}")
241
+ write_mode = f"w:{compression}"
242
+ else:
243
+ tar_path = tar_path.with_suffix(".tar")
244
+ write_mode = "w"
245
+
246
+ with tarfile.open(tar_path, write_mode) as tar:
247
+ for item in itertools.chain.from_iterable(items):
248
+ try:
249
+ path, arc = item
250
+ except ValueError:
251
+ msg = f"Expected a tuple of length 2, got {item}"
252
+ raise ValueError(msg)
253
+
254
+ if isinstance(path, Path):
255
+ tar.add(path, arcname=arc)
256
+ else:
257
+ write_bytes(tar, path, arc)
258
+
259
+
260
+ def build_shard(
261
+ mfst: pd.DataFrame,
262
+ *,
263
+ tar_dir: Path,
264
+ shard_size: int,
265
+ **kwargs,
266
+ ):
267
+ # Make dirs
268
+ tar_dir.mkdir(exist_ok=True, parents=True)
269
+
270
+ write_log = collections.defaultdict(list)
271
+
272
+ # Create a list of all sequences
273
+ # groups = [(seq, group) for seq, group in mfst.groupby("sequence")]
274
+ # shards = [groups[i : i + shard_size] for i in range(0, len(groups), shard_size)
275
+ n_groups = len(mfst["sequence"].unique())
276
+ n_shards = n_groups // shard_size
277
+
278
+ targets = [None] * n_groups
279
+
280
+ # Start a multiprocessing pool
281
+ n_proc = min(mp.cpu_count(), 16)
282
+ with mp.Pool(n_proc) as pool:
283
+ write_jobs: list[mp.AsyncResult] = []
284
+
285
+ # Data collection
286
+ with tqdm(total=n_groups, desc="Collecting data") as pbar_group:
287
+ for seq, files in pool.starmap(
288
+ run_collector,
289
+ [(seq, group, kwargs) for seq, group in mfst.groupby("sequence")],
290
+ chunksize=min(8, shard_size),
291
+ ):
292
+ assert targets[seq] is None, f"Duplicate sequence: {seq}"
293
+
294
+ pbar_group.update()
295
+
296
+ # Write to the file specs list
297
+ targets[seq] = files
298
+
299
+ # Get a view of only the current shards's files
300
+ shard_index = seq // shard_size
301
+ shard_offset = shard_index * shard_size
302
+ shard_specs = targets[shard_offset : shard_offset + shard_size]
303
+
304
+ # Pad the shard index
305
+ shard_06 = pad_number(shard_index)
306
+
307
+ write_log[shard_06].append(pad_number(seq))
308
+
309
+ # If the shard is fully populated, write it to a tar file in another process
310
+ if all(s is not None for s in shard_specs):
311
+ tar_path = tar_dir / shard_06
312
+
313
+ write_jobs.append(
314
+ pool.apply_async(
315
+ run_writer,
316
+ (tar_path, shard_specs, ""),
317
+ )
318
+ )
319
+
320
+ # Wait for write-workers to finish generating the TAR files
321
+ with tqdm(total=n_shards, desc="Writing shards") as pbar_shard:
322
+ for j in write_jobs:
323
+ j.get()
324
+ pbar_shard.update()
325
+
326
+ pool.close()
327
+ pool.join()
328
+
329
+ print("Created shard files:\n" + pformat(dict(write_log)))
330
+
331
+
332
+ def main():
333
+ args = parse_args()
334
+ manifest = pd.read_csv(args.manifest, index_col="primary_key")
335
+
336
+ # For each split, build a tar archive containing the sorted files
337
+ for split in args.splits:
338
+ split_out = "-".join([s for s in (split, args.variant) if len(s) > 0])
339
+ tar_dir = args.output / split_out
340
+
341
+ if tar_dir.exists():
342
+ if args.force:
343
+ print(f"Removing existing dataset: {tar_dir}")
344
+ for f in tar_dir.glob("*.tar"):
345
+ f.unlink()
346
+ else:
347
+ msg = f"Dataset already exists: {tar_dir}"
348
+ raise FileExistsError(msg)
349
+
350
+ print(f"Generating {split_out} split...")
351
+
352
+ build_shard(
353
+ manifest[manifest["split"] == split],
354
+ tar_dir=tar_dir,
355
+ data_dir=args.data / split,
356
+ shard_size=args.shard_size,
357
+ dataset_name=f"{args.name}-{split_out}",
358
+ missing_ok=True,
359
+ )
360
+
361
+
362
+ if __name__ == "__main__":
363
+ main()
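
The new `scripts/build.py` replaces both deleted scripts below. Two hedged invocation examples matching the argparse interface defined above; the second command is purely illustrative:

```bash
# Default invocation, exactly as wired up in the Makefile
python scripts/build.py manifest.csv data shards

# Illustrative run: 5 sequences per shard, only the val split, overwriting existing shards
python scripts/build.py --shard-size 5 --splits val --force manifest.csv data shards
```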
scripts/build_parquet.py DELETED
@@ -1,2 +0,0 @@
1
- print("Building a Parquet dataset is not (yet) implemented. Use WebDataset instead.")
2
- exit(1)
scripts/build_webdataset.py DELETED
@@ -1,49 +0,0 @@
1
- #!/usr/bin/env python
2
- r"""
3
- Builds a WebDataset from the Cityscapes Video dataset.
4
- """
5
-
6
- import argparse
7
- import tarfile
8
-
9
- from pathlib import Path
10
- from tqdm import tqdm
11
-
12
-
13
- def parse_args():
14
- p = argparse.ArgumentParser(
15
- description="Build a WebDataset from the Cityscapes Video dataset."
16
- )
17
- p.add_argument("--prefix", default="csvps", help="Prefix for the tar files.")
18
- p.add_argument("data", type=Path, help="Path to the Cityscapes Video dataset.")
19
- p.add_argument("output", type=Path, help="Path to the output directory.")
20
-
21
- return p.parse_args()
22
-
23
-
24
- def build_dataset(split: str, data_dir: Path, out_dir: Path, *, prefix: str = ""):
25
- data_dir = data_dir / split
26
- name = f"{split}.tar"
27
- if prefix and prefix != "":
28
- name = f"{prefix}-{name}"
29
- tar_path = out_dir / name
30
-
31
- if tar_path.exists():
32
- print(f"Error: Tar archive already exists: {tar_path}")
33
-
34
- with tarfile.open(tar_path, "w") as tar:
35
- # Add the files to the tar archive
36
- for file in tqdm(sorted(data_dir.glob("**/*")), desc=f"Building {tar_path}"):
37
- tar.add(file, arcname=file.relative_to(data_dir))
38
-
39
-
40
- def main():
41
- args = parse_args()
42
-
43
- # For each split, build a tar archive containing the sorted files
44
- for split in ("train", "val", "test"):
45
- build_dataset(split, args.data, args.output, prefix=args.prefix)
46
-
47
-
48
- if __name__ == "__main__":
49
- main()
scripts/prepare.py CHANGED
@@ -28,7 +28,11 @@ def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser(
         description="Download and extract the Cityscapes dataset."
     )
-
+    parser.add_argument(
+        "manifest_file",
+        type=Path,
+        help="Path to the manifest file (e.g., 'manifest.csv').",
+    )
     parser.add_argument(
         "downloads_dir",
         type=Path,
@@ -39,11 +43,6 @@ def parse_args() -> argparse.Namespace:
         type=Path,
         help="Path to the directory where extracted files should be moved (e.g., 'data').",
     )
-    parser.add_argument(
-        "manifest_file",
-        type=Path,
-        help="Path to the manifest file (e.g., 'manifest.csv').",
-    )
     return parser.parse_args()
 
 
@@ -110,13 +109,22 @@ def unzip_and_move(
         primary_key = re_name.sub(r"\1", res_path.stem)
         sample = manifest.loc[primary_key]
 
-        # New name is: split/<sequence>/<frame>.<type>.<ext>
-        new_path = (
-            data_dir
-            / sample["split"]
-            / "{:06d}".format(sample["sequence"])
-            / "{:06d}.{:s}{:s}".format(sample["frame"], pkg_type, res_path.suffix)
-        )
+        # Build the new path
+        new_path = data_dir / sample["split"]
+
+        if pkg_type in {"camera"}:
+            # New name is: split/<sequence>.<type>.<ext>
+            new_path /= "{:06d}.{:s}{:s}".format(
+                sample["sequence"], pkg_type, res_path.suffix
+            )
+        else:
+            # New name is: split/<sequence>/<frame>.<type>.<ext>
+            new_path /= "{:06d}".format(sample["sequence"])
+            new_path /= "{:06d}.{:s}{:s}".format(
+                sample["frame"], pkg_type, res_path.suffix
+            )
+        if new_path.is_file():
+            continue
         new_path.parent.mkdir(parents=True, exist_ok=True)
 
         shutil.move(res_path, new_path)
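
With this change, `prepare.py` takes the manifest as its first positional argument (matching the updated Makefile) and places the per-sequence camera calibration at `split/<sequence>.camera.json` instead of next to the per-frame files. A minimal invocation, mirroring the `prepare` target:

```bash
# New argument order: manifest first, then the downloads and data directories
python scripts/prepare.py manifest.csv downloads data
```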