Commit aad0211 · Kurt Stolle
Parent(s): a8517b9

Updated examples and script organization
Files changed:
- Makefile (+15, -0)
- README.md (+4, -4)
- examples.ipynb (+78, -0)
- requirements.txt (+2, -0)
- scripts/build_parquet.py (+2, -0)
- scripts/build_webdataset.py (+49, -0)
- csvps.py → scripts/prepare.py (+0, -3)
Makefile
ADDED
@@ -0,0 +1,15 @@
+.PHONY: prepare webdataset parquet clean
+
+prepare:
+	mkdir -p downloads
+	python scripts/prepare.py downloads data manifest.csv
+
+webdataset:
+	mkdir -p shards
+	python scripts/build_webdataset.py data shards
+
+parquet:
+	python scripts/build_parquet.py
+
+clean:
+	rm -vr downloads
README.md
CHANGED
@@ -89,13 +89,13 @@ python -m pip install cityscapesscripts
 Note that this may prompt your [Cityscapes account](https://cityscapes-dataset.com/login/) login credentials.
 
 ```bash
-
+make prepare
 ```
 
 3. Remove the downloaded Cityscapes archive files to save disk space (optional).
 
 ```bash
-
+make clean
 ```
 
 ## Usage
@@ -104,7 +104,7 @@ Due to the structure, the dataset can be easily loaded using the `webdataset` li
 To convert the `train`, `val` and `test` directories into a `tar` archive, run the following command:
 
 ```bash
-
+make webdataset
 ```
 
 Subsequently, the dataset can be loaded using the `webdataset` library:
@@ -112,7 +112,7 @@ Subsequently, the dataset can be loaded using the `webdataset` library:
 ```python
 import webdataset as wds
 
-dataset = wds.WebDataset("shards/
+dataset = wds.WebDataset("shards/train.tar")
 ```
 
 
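The README snippet constructs the dataset but leaves every field as raw bytes. A minimal sketch of decoding the fields, assuming the per-frame keys shown in `examples.ipynb` (`image.png`, `vehicle.json`, `timestamp.txt`) and the `shards/train.tar` archive produced by `make webdataset`:

```python
import webdataset as wds

# Sketch only: field names follow the keys listed in examples.ipynb.
dataset = (
    wds.WebDataset("shards/train.tar", shardshuffle=False)
    .decode("pil")  # image.png -> PIL.Image; vehicle.json -> dict; timestamp.txt -> str
    .to_tuple("image.png", "vehicle.json", "timestamp.txt")
)

for image, vehicle, timestamp in dataset:
    print(image.size, sorted(vehicle), timestamp)
    break
```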
examples.ipynb
ADDED
@@ -0,0 +1,78 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## WebDataset\n",
+    "\n",
+    "The dataset can be exported to a `.tar` archive and iterated with the `webdataset`\n",
+    "package.\n",
+    "\n",
+    "After building the WebDataset-formatted archives using `make webdataset`, the dataset can be iterated as follows."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "000000/000000: image.png timestamp.txt vehicle.json \n",
+      "000000/000001: image.png timestamp.txt vehicle.json \n",
+      "000000/000002: image.png timestamp.txt vehicle.json \n",
+      "000000/000003: image.png timestamp.txt vehicle.json \n",
+      "000000/000004: depth.tiff image.png panoptic.png timestamp.txt vehicle.json \n",
+      "000000/000005: image.png timestamp.txt vehicle.json \n",
+      "000000/000006: image.png timestamp.txt vehicle.json \n",
+      "000000/000007: image.png timestamp.txt vehicle.json \n",
+      "000000/000008: image.png timestamp.txt vehicle.json \n",
+      "000000/000009: depth.tiff image.png panoptic.png timestamp.txt vehicle.json \n",
+      "000000/000010: image.png timestamp.txt vehicle.json \n"
+     ]
+    }
+   ],
+   "source": [
+    "import webdataset as wds\n",
+    "\n",
+    "# Create iterable dataset\n",
+    "ds = wds.WebDataset(\"shards/csvps-val.tar\", shardshuffle=False, verbose=True)\n",
+    "\n",
+    "# Iterate over the dataset and print the keys and the first few samples\n",
+    "for i, sample in enumerate(ds):\n",
+    "    if i > 10:\n",
+    "        break\n",
+    "    print(sample[\"__key__\"], end=\": \")\n",
+    "    for k in sample:\n",
+    "        if not k.startswith(\"__\"):\n",
+    "            print(k, end=\" \")\n",
+    "    print(flush=True)\n",
+    "    "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
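The notebook output shows that `depth.tiff` and `panoptic.png` are present only on a subset of frames (every fifth frame in this listing). A hedged sketch of restricting iteration to the annotated frames; the `select` stage and decoding choices are assumptions, not part of the committed notebook:

```python
import webdataset as wds

ds = (
    wds.WebDataset("shards/csvps-val.tar", shardshuffle=False)
    # Keep only samples that carry the panoptic annotation.
    .select(lambda sample: "panoptic.png" in sample)
    .decode("pil")
)

for sample in ds:
    print(sample["__key__"], sample["image.png"].size)
    break
```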
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+cityscapesscripts
+webdataset
scripts/build_parquet.py
ADDED
@@ -0,0 +1,2 @@
+print("Building a Parquet dataset is not (yet) implemented. Use WebDataset instead.")
+exit(1)
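The Parquet export is left as a stub by this commit. Purely as a hypothetical sketch of what such a build could look like, using `pyarrow` and assuming files are laid out as `<key>.<field>` under `data/<split>` (the column layout below is not a defined schema of this dataset):

```python
from pathlib import Path

import pyarrow as pa
import pyarrow.parquet as pq

split_dir = Path("data/val")  # assumed location of the extracted split
rows = []
for image_path in sorted(split_dir.glob("**/*.image.png")):
    key = str(image_path.relative_to(split_dir)).removesuffix(".image.png")
    rows.append({"key": key, "image": image_path.read_bytes()})

pq.write_table(pa.Table.from_pylist(rows), "csvps-val.parquet")
```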
scripts/build_webdataset.py
ADDED
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+r"""
+Builds a WebDataset from the Cityscapes Video dataset.
+"""
+
+import argparse
+import tarfile
+
+from pathlib import Path
+from tqdm import tqdm
+
+
+def parse_args():
+    p = argparse.ArgumentParser(
+        description="Build a WebDataset from the Cityscapes Video dataset."
+    )
+    p.add_argument("--prefix", default="csvps", help="Prefix for the tar files.")
+    p.add_argument("data", type=Path, help="Path to the Cityscapes Video dataset.")
+    p.add_argument("output", type=Path, help="Path to the output directory.")
+
+    return p.parse_args()
+
+
+def build_dataset(split: str, data_dir: Path, out_dir: Path, *, prefix: str = ""):
+    data_dir = data_dir / split
+    name = f"{split}.tar"
+    if prefix and prefix != "":
+        name = f"{prefix}-{name}"
+    tar_path = out_dir / name
+
+    if tar_path.exists():
+        print(f"Error: Tar archive already exists: {tar_path}")
+
+    with tarfile.open(tar_path, "w") as tar:
+        # Add the files to the tar archive
+        for file in tqdm(sorted(data_dir.glob("**/*")), desc=f"Building {tar_path}"):
+            tar.add(file, arcname=file.relative_to(data_dir))
+
+
+def main():
+    args = parse_args()
+
+    # For each split, build a tar archive containing the sorted files
+    for split in ("train", "val", "test"):
+        build_dataset(split, args.data, args.output, prefix=args.prefix)
+
+
+if __name__ == "__main__":
+    main()
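`build_webdataset.py` writes one tar per split; `webdataset` then derives each sample key from the member path up to the first dot in the file name, so all fields of a frame must share that prefix. If a single tar per split grows too large, a variant based on `wds.ShardWriter` could split the output into size-bounded shards. This is a hypothetical sketch, not the committed script, and it assumes the `<key>.<field>` file naming:

```python
from pathlib import Path

import webdataset as wds

data_dir = Path("data/val")  # assumed split directory

# Group files by sample key (the relative path up to the first dot).
samples: dict[str, dict[str, bytes]] = {}
for file in sorted(data_dir.glob("**/*")):
    if file.is_dir():
        continue
    rel = file.relative_to(data_dir)
    stem, _, field = rel.name.partition(".")
    key = str(rel.parent / stem)
    samples.setdefault(key, {})[field] = file.read_bytes()

# Write shards of at most 1000 samples each.
with wds.ShardWriter("shards/csvps-val-%06d.tar", maxcount=1000) as sink:
    for key, fields in samples.items():
        sink.write({"__key__": key, **fields})
```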
csvps.py → scripts/prepare.py
RENAMED
@@ -32,19 +32,16 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument(
         "downloads_dir",
         type=Path,
-        default=Path("downloads"),
         help="Path to the directory where ZIP files are/will be stored (e.g., 'downloads').",
     )
     parser.add_argument(
         "data_dir",
         type=Path,
-        default=Path("data"),
         help="Path to the directory where extracted files should be moved (e.g., 'data').",
     )
     parser.add_argument(
         "manifest_file",
         type=Path,
-        default=Path("manifest.csv"),
         help="Path to the manifest file (e.g., 'manifest.csv').",
     )
     return parser.parse_args()
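The removed `default=` values were inert: argparse only applies a default to a positional argument when `nargs="?"` is given, so the paths always had to be passed explicitly, which is what the new `make prepare` target does (`python scripts/prepare.py downloads data manifest.csv`). A small illustration of that argparse behavior (standalone snippet, not part of the script):

```python
import argparse
from pathlib import Path

parser = argparse.ArgumentParser()
# With nargs="?" the default is used when the argument is omitted;
# without it, the positional is required and the default is ignored.
parser.add_argument("downloads_dir", type=Path, nargs="?", default=Path("downloads"))

print(parser.parse_args([]).downloads_dir)            # downloads
print(parser.parse_args(["archives"]).downloads_dir)  # archives
```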