wangyi111 committed (verified)
Commit e2d8f44
1 Parent(s): 965cc69

Upload 3 files

Files changed (3)
  1. load_ssl4eo_s.ipynb +426 -0
  2. ssl4eo_s_dataset.py +234 -0
  3. ssl4eo_s_lmdb_dataset.py +200 -0
load_ssl4eo_s.ipynb ADDED
@@ -0,0 +1,426 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load from raw data (geotif)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import kornia\n",
+    "import numpy as np\n",
+    "from kornia.augmentation import AugmentationSequential\n",
+    "from torch.utils.data import Dataset, DataLoader\n",
+    "from ssl4eo_s_dataset import SSL4EO_S\n",
+    "import time\n",
+    "import os\n",
+    "from tqdm import tqdm"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fnames_path = '../data/example_100_grids/fnames_sampled_union.json.gz'\n",
+    "root_dir = '../data/example_100_grids/'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Grid ID: ('0913063_-154.25_68.50',)\n",
+      "dict_keys(['s1_grd', 's2_toa', 's3_olci', 's5p_co', 's5p_no2', 's5p_o3', 's5p_so2', 'dem'])\n",
+      "dict_keys(['s1_grd', 's2_toa', 's3_olci', 's5p_co', 's5p_no2', 's5p_o3', 's5p_so2', 'dem'])\n",
+      "### S1 GRD ###\n",
+      "Number of s1 local patches: 1 Number of time stamps for first local patch: 1\n",
+      "Example for one image: torch.Size([1, 2, 224, 224]) torch.float32 ('0913063_-154.25_68.50/1092252_-154.25_68.50/20210530',)\n",
+      "### S2 TOA ###\n",
+      "Number of s2 local patches: 1 Number of time stamps for first local patch: 4\n",
+      "Example for one image: torch.Size([1, 13, 224, 224]) torch.int16 ('0913063_-154.25_68.50/1092252_-154.25_68.50/20191002',)\n",
+      "### S3 OLCI ###\n",
+      "Number of s3 time stamps: 7\n",
+      "Example for one image: torch.Size([1, 21, 96, 96]) torch.float32 ('0913063_-154.25_68.50/20210411',)\n",
+      "### S5P ###\n",
+      "Number of s5p time stamps for CO/NO2/O3/SO2: 7 5 11 4\n",
+      "Example for one CO image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210401',)\n",
+      "Example for one NO2 image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210401',)\n",
+      "Example for one O3 image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210101',)\n",
+      "Example for one SO2 image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210501',)\n",
+      "### DEM ###\n",
+      "One DEM image for the grid: torch.Size([1, 1, 960, 960]) torch.float32 ('0913063_-154.25_68.50',)\n",
+      "Time: 94.70435643196106\n"
+     ]
+    }
+   ],
+   "source": [
+    "transform_s1 = AugmentationSequential(\n",
+    "    #kornia.augmentation.SmallestMaxSize(264),\n",
+    "    kornia.augmentation.CenterCrop(224),\n",
+    ")\n",
+    "transform_s2 = AugmentationSequential(\n",
+    "    #kornia.augmentation.SmallestMaxSize(264),\n",
+    "    kornia.augmentation.CenterCrop(224),\n",
+    ")\n",
+    "transform_s3 = AugmentationSequential(\n",
+    "    kornia.augmentation.SmallestMaxSize(96),\n",
+    "    kornia.augmentation.CenterCrop(96),\n",
+    ")\n",
+    "transform_s5p = AugmentationSequential(\n",
+    "    kornia.augmentation.SmallestMaxSize(28),\n",
+    "    kornia.augmentation.CenterCrop(28),\n",
+    ")\n",
+    "transform_dem = AugmentationSequential(\n",
+    "    kornia.augmentation.SmallestMaxSize(960),\n",
+    "    kornia.augmentation.CenterCrop(960),\n",
+    ")\n",
+    "\n",
+    "ssl4eo_s = SSL4EO_S(fnames_path, root_dir, transform_s1=transform_s1, transform_s2=transform_s2, transform_s3=transform_s3, transform_s5p=transform_s5p, transform_dem=transform_dem)\n",
+    "dataloader = DataLoader(ssl4eo_s, batch_size=1, shuffle=True, num_workers=4) # batch size can only be 1 because of varying number of images per grid\n",
+    "\n",
+    "start_time = time.time()\n",
+    "\n",
+    "for i, (sample, meta_data) in enumerate(dataloader):\n",
+    "    if i == 0:\n",
+    "        print('Grid ID:', meta_data['dem'][0])\n",
+    "        print(sample.keys())\n",
+    "        print(meta_data.keys())\n",
+    "\n",
+    "        print('### S1 GRD ###')\n",
+    "        print('Number of s1 local patches:', len(meta_data['s1_grd']), ' ', 'Number of time stamps for first local patch:', len(meta_data['s1_grd'][0]))\n",
+    "        print('Example for one image:', sample['s1_grd'][0][0].shape, sample['s1_grd'][0][0].dtype, meta_data['s1_grd'][0][0])\n",
+    "        print('### S2 TOA ###')\n",
+    "        print('Number of s2 local patches:', len(meta_data['s2_toa']), ' ', 'Number of time stamps for first local patch:', len(meta_data['s2_toa'][0]))\n",
+    "        print('Example for one image:', sample['s2_toa'][0][0].shape, sample['s2_toa'][0][0].dtype, meta_data['s2_toa'][0][0])\n",
+    "        print('### S3 OLCI ###')\n",
+    "        print('Number of s3 time stamps:', len(meta_data['s3_olci']))\n",
+    "        print('Example for one image:', sample['s3_olci'][0].shape, sample['s3_olci'][0].dtype, meta_data['s3_olci'][0])\n",
+    "        print('### S5P ###')\n",
+    "        print('Number of s5p time stamps for CO/NO2/O3/SO2:', len(meta_data['s5p_co']), len(meta_data['s5p_no2']), len(meta_data['s5p_o3']), len(meta_data['s5p_so2']))\n",
+    "        print('Example for one CO image:', sample['s5p_co'][0].shape, sample['s5p_co'][0].dtype, meta_data['s5p_co'][0])\n",
+    "        print('Example for one NO2 image:', sample['s5p_no2'][0].shape, sample['s5p_no2'][0].dtype, meta_data['s5p_no2'][0])\n",
+    "        print('Example for one O3 image:', sample['s5p_o3'][0].shape, sample['s5p_o3'][0].dtype, meta_data['s5p_o3'][0])\n",
+    "        print('Example for one SO2 image:', sample['s5p_so2'][0].shape, sample['s5p_so2'][0].dtype, meta_data['s5p_so2'][0])\n",
+    "        print('### DEM ###')\n",
+    "        print('One DEM image for the grid:', sample['dem'].shape, sample['dem'].dtype, meta_data['dem'][0])\n",
+    "    else:\n",
+    "        pass\n",
+    "\n",
+    "print('Time:', time.time()-start_time)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load from webdataset (npy)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import webdataset as wds\n",
+    "\n",
+    "## 85 grids version\n",
+    "webdataset_npy_dir = '../data/example_100_grids_cleaned/webdataset_obj_grid_pad_npy/'\n",
+    "# shards path\n",
+    "shards_path = os.path.join(webdataset_npy_dir, 'example-{000000..000008}.tar')\n",
+    "batch_size = 1\n",
+    "#shuffle = 8\n",
+    "train_transform = None\n",
+    "\n",
+    "# pytorch dataset\n",
+    "dataset = (\n",
+    "    wds.WebDataset(shards_path,shardshuffle=True)\n",
+    "    #.shuffle(shuffle)\n",
+    "    .decode()\n",
+    "    #.to_tuple(\"json\")\n",
+    "    .to_tuple(\"s1_grd.npy\", \n",
+    "              \"s2_toa.npy\", \n",
+    "              \"s3_olci.npy\", \n",
+    "              \"s5p.npy\", \n",
+    "              \"dem.npy\",\n",
+    "              \"json\") # also possible to only extract part of the data\n",
+    "    #.map(sample_one_grid)\n",
+    "    #.map_tuple(train_transform, identity)\n",
+    "    ).batched(batch_size, partial=False)\n",
+    "\n",
+    "# pytorch dataloader\n",
+    "dataloader = torch.utils.data.DataLoader(dataset, num_workers=4, batch_size=None)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "85it [00:36, 2.30it/s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Time: 36.96s\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "start_time = time.time()\n",
+    "for i, data in tqdm(enumerate(dataloader)):\n",
+    "    #key,img = data\n",
+    "    pass\n",
+    "print(f\"Time: {time.time()-start_time:.2f}s\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load from webdataset (pth)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "NameError",
+     "evalue": "name 'wds' is not defined",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[8], line 44\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m sample\n\u001b[1;32m 42\u001b[0m \u001b[38;5;66;03m# pytorch dataset\u001b[39;00m\n\u001b[1;32m 43\u001b[0m dataset \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m---> 44\u001b[0m \u001b[43mwds\u001b[49m\u001b[38;5;241m.\u001b[39mWebDataset(shards_path,shardshuffle\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 45\u001b[0m \u001b[38;5;66;03m#.shuffle(shuffle)\u001b[39;00m\n\u001b[1;32m 46\u001b[0m \u001b[38;5;241m.\u001b[39mdecode()\n\u001b[1;32m 47\u001b[0m \u001b[38;5;241m.\u001b[39mselect(has_all_modalities) \u001b[38;5;66;03m# only keep samples with all modalities\u001b[39;00m\n\u001b[1;32m 48\u001b[0m \u001b[38;5;241m.\u001b[39mmap(sample_one_local_patch) \u001b[38;5;66;03m# sample one local patch for S1 and S2\u001b[39;00m\n\u001b[1;32m 49\u001b[0m \u001b[38;5;66;03m#.to_tuple(\"json\")\u001b[39;00m\n\u001b[1;32m 50\u001b[0m \u001b[38;5;241m.\u001b[39mto_tuple(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms1_grd.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 51\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms2_toa.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 52\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms3_olci.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 53\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_co.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 54\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_no2.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 55\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_o3.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 56\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_so2.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 57\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdem.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 58\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mjson\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;66;03m# also possible to only extract part of the data\u001b[39;00m\n\u001b[1;32m 59\u001b[0m \u001b[38;5;66;03m#.map(sample_one_grid)\u001b[39;00m\n\u001b[1;32m 60\u001b[0m \u001b[38;5;66;03m#.map_tuple(train_transform, identity)\u001b[39;00m\n\u001b[1;32m 61\u001b[0m )\u001b[38;5;241m.\u001b[39mbatched(batch_size, partial\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[1;32m 63\u001b[0m \u001b[38;5;66;03m# pytorch dataloader\u001b[39;00m\n\u001b[1;32m 64\u001b[0m dataloader \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mutils\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mDataLoader(dataset, num_workers\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m4\u001b[39m, batch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m)\n",
+      "\u001b[0;31mNameError\u001b[0m: name 'wds' is not defined"
+     ]
+    }
+   ],
+   "source": [
+    "import random\n",
+    "\n",
+    "webdataset_pth_dir = '../data/example_100_grids/reformat_webdataset_pth/'\n",
+    "# shards path\n",
+    "shards_path = os.path.join(webdataset_pth_dir, 'example-{000000..000009}.tar')\n",
+    "batch_size = 1\n",
+    "#shuffle = 8\n",
+    "#train_transform = None\n",
+    "\n",
+    "def has_all_modalities(sample):\n",
+    "    required_keys = [\n",
+    "        \"s1_grd.pth\", \n",
+    "        \"s2_toa.pth\", \n",
+    "        \"s3_olci.pth\", \n",
+    "        \"s5p_co.pth\", \n",
+    "        \"s5p_no2.pth\",\n",
+    "        \"s5p_o3.pth\",\n",
+    "        \"s5p_so2.pth\",\n",
+    "        \"dem.pth\",\n",
+    "        \"json\"\n",
+    "    ]\n",
+    "    return all(key in sample for key in required_keys)\n",
+    "\n",
+    "def sample_one_local_patch(sample):\n",
+    "    s1 = sample[\"s1_grd.pth\"]\n",
+    "    s2 = sample[\"s2_toa.pth\"]\n",
+    "    meta_s1 = sample[\"json\"][\"s1_grd\"]\n",
+    "    meta_s2 = sample[\"json\"][\"s2_toa\"]\n",
+    "    #idx = torch.randint(0, s1.shape[0], (1,))\n",
+    "    idx = random.randint(0, s1.shape[0]-1)\n",
+    "    s1_new = s1[idx]\n",
+    "    s2_new = s2[idx]\n",
+    "    meta_s1_new = meta_s1[idx]\n",
+    "    meta_s2_new = meta_s2[idx]\n",
+    "    sample[\"s1_grd.pth\"] = s1_new\n",
+    "    sample[\"s2_toa.pth\"] = s2_new\n",
+    "    sample[\"json\"][\"s1_grd\"] = meta_s1_new\n",
+    "    sample[\"json\"][\"s2_toa\"] = meta_s2_new\n",
+    "    return sample\n",
+    "\n",
+    "\n",
+    "# pytorch dataset\n",
+    "dataset = (\n",
+    "    wds.WebDataset(shards_path,shardshuffle=True)\n",
+    "    #.shuffle(shuffle)\n",
+    "    .decode()\n",
+    "    .select(has_all_modalities) # only keep samples with all modalities\n",
+    "    .map(sample_one_local_patch) # sample one local patch for S1 and S2\n",
+    "    #.to_tuple(\"json\")\n",
+    "    .to_tuple(\"s1_grd.pth\", \n",
+    "              \"s2_toa.pth\", \n",
+    "              \"s3_olci.pth\", \n",
+    "              \"s5p_co.pth\", \n",
+    "              \"s5p_no2.pth\",\n",
+    "              \"s5p_o3.pth\",\n",
+    "              \"s5p_so2.pth\",\n",
+    "              \"dem.pth\",\n",
+    "              \"json\") # also possible to only extract part of the data\n",
+    "    #.map(sample_one_grid)\n",
+    "    #.map_tuple(train_transform, identity)\n",
+    "    ).batched(batch_size, partial=False)\n",
+    "\n",
+    "# pytorch dataloader\n",
+    "dataloader = torch.utils.data.DataLoader(dataset, num_workers=4, batch_size=None)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "0it [00:00, ?it/s]"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "100it [00:51, 1.94it/s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Time: 51.68s\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "start_time = time.time()\n",
+    "for i, data in tqdm(enumerate(dataloader)):\n",
+    "    #key,img = data\n",
+    "    pass\n",
+    "print(f\"Time: {time.time()-start_time:.2f}s\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load from lmdb"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from ssl4eo_s_lmdb_dataset import SSL4EO_S_lmdb\n",
+    "\n",
+    "root_dir = '../data/example_100_grids/'\n",
+    "lmdb_path = root_dir + 'reformat_lmdb/' + 'ssl4eo_s_data.lmdb'\n",
+    "key_path = root_dir + 'reformat_lmdb/' + 'ssl4eo_s_key.csv'\n",
+    "mode = ['s1_grd', 's2_toa', 's3_olci', 's5p_co', 's5p_no2', 's5p_so2', 's5p_o3', 'dem']\n",
+    "\n",
+    "dataset = SSL4EO_S_lmdb(lmdb_path=lmdb_path, key_path=key_path, mode=mode)\n",
+    "dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n",
+      " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n",
+      "0it [00:00, ?it/s]/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n",
+      " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n",
+      "/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n",
+      " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n",
+      "/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n",
+      " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n",
+      "100it [00:04, 23.99it/s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Time: 4.21s\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "start_time = time.time()\n",
+    "for i, data in tqdm(enumerate(dataloader)):\n",
+    "    #key,img = data\n",
+    "    pass\n",
+    "print(f\"Time: {time.time()-start_time:.2f}s\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "pytorch",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
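
The raw-GeoTIFF loader above pins `batch_size=1` because each grid yields a varying number of local patches and time stamps, which the default collate cannot stack. A minimal sketch of a list-based collate that lifts this restriction (`list_collate` is a hypothetical helper, not part of this upload):

```python
from torch.utils.data import DataLoader

def list_collate(batch):
    # batch is a list of (sample, meta_data) pairs; keep the ragged
    # per-grid modality lists as plain Python lists instead of stacking.
    samples = [item[0] for item in batch]
    metas = [item[1] for item in batch]
    return samples, metas

# hypothetical usage with the SSL4EO_S dataset from ssl4eo_s_dataset.py:
# dataloader = DataLoader(ssl4eo_s, batch_size=4, shuffle=True,
#                         num_workers=4, collate_fn=list_collate)
```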
ssl4eo_s_dataset.py ADDED
@@ -0,0 +1,234 @@
+import os
+import gzip
+import json
+import numpy as np
+import rasterio
+import re
+from torch.utils.data import Dataset, DataLoader
+import torch
+#from cvtorchvision import cvtransforms
+from kornia.augmentation import AugmentationSequential
+import kornia
+import argparse
+
+
+class SSL4EO_S(Dataset):
+    def __init__(self, fnames_path, root_dir, modality=['s1_grd', 's2_toa', 's3_olci', 's5p_co', 's5p_no2', 's5p_so2', 's5p_o3', 'dem'], transform_s1=None, transform_s2=None, transform_s3=None, transform_s5p=None, transform_dem=None):
+        with gzip.open(fnames_path, 'rt', encoding='utf-8') as gz_file:
+            self.fnames_json = json.load(gz_file)
+        self.grid_ids = list(self.fnames_json.keys())
+        self.root_dir = root_dir
+        self.transform_s1 = transform_s1
+        self.transform_s2 = transform_s2
+        self.transform_s3 = transform_s3
+        self.transform_s5p = transform_s5p
+        self.transform_dem = transform_dem
+        self.modality = modality
+
+    def __len__(self):
+        return len(self.grid_ids)
+
+    def get_s1_s2(self, grid_id, modality):
+        arrays = []
+        meta_data = []
+        local_grids = list(self.fnames_json[grid_id][modality].keys())
+        grid_id_coord = self.fnames_json[grid_id]['grid_id_coord']
+        for local_grid in local_grids:
+            local_fpaths = self.fnames_json[grid_id][modality][local_grid]
+            imgs = []
+            meta = []
+            for local_fpath in local_fpaths:
+                with rasterio.open(os.path.join(self.root_dir, local_fpath)) as src:
+                    img = src.read()
+                if modality == 's1_grd' and self.transform_s1:
+                    #img = self.transform_s1(np.transpose(img, (1, 2, 0)))
+                    img = torch.from_numpy(img).unsqueeze(0)
+                    img = self.transform_s1(img).squeeze(0)
+                elif modality == 's2_toa' and self.transform_s2:
+                    #img = self.transform_s2(np.transpose(img.astype(np.int16), (1, 2, 0)))
+                    img = torch.from_numpy(img.astype(np.int16)).unsqueeze(0)
+                    img = self.transform_s2(img.to(torch.float16)).squeeze(0)
+                imgs.append(img)
+                fname = local_fpath.split('/')[-1]
+                date = re.search(r'(\d{8})T', fname).group(1)
+                meta_info = f"{grid_id_coord}/{local_grid}/{date}"
+                meta.append(meta_info)
+            arrays.append(imgs)
+            meta_data.append(meta)
+        return arrays, meta_data
+
+    def get_s3(self, grid_id):
+        arrays = []
+        meta_data = []
+        fpaths = self.fnames_json[grid_id]['s3_olci']
+        grid_id_coord = self.fnames_json[grid_id]['grid_id_coord']
+        for fpath in fpaths:
+            with rasterio.open(os.path.join(self.root_dir, fpath)) as src:
+                img = src.read()
+            if self.transform_s3:
+                #img = self.transform_s3(np.transpose(img, (1, 2, 0)))
+                img = torch.from_numpy(img).unsqueeze(0)
+                img = self.transform_s3(img).squeeze(0)
+            arrays.append(img)
+            fname = fpath.split('/')[-1]
+            date = re.search(r'(\d{8})T', fname).group(1)
+            meta_info = f"{grid_id_coord}/{date}"
+            meta_data.append(meta_info)
+        return arrays, meta_data
+
+    def get_s5p(self, grid_id, modality):
+        arrays = []
+        meta_data = []
+        fpaths = self.fnames_json[grid_id][modality]
+        grid_id_coord = self.fnames_json[grid_id]['grid_id_coord']
+        for fpath in fpaths:
+            with rasterio.open(os.path.join(self.root_dir, fpath)) as src:
+                img = src.read()
+            if self.transform_s5p:
+                #img = self.transform_s5p(np.transpose(img, (1, 2, 0)))
+                img = torch.from_numpy(img).unsqueeze(0)
+                img = self.transform_s5p(img).squeeze(0)
+            arrays.append(img)
+            fname = fpath.split('/')[-1]
+            match = re.search(r'(\d{4})-(\d{2})-(\d{2})', fname)
+            date = f"{match.group(1)}{match.group(2)}{match.group(3)}"
+            meta_info = f"{grid_id_coord}/{date}"
+            meta_data.append(meta_info)
+        return arrays, meta_data
+
+    def get_dem(self, grid_id):
+        fpath = self.fnames_json[grid_id]['dem'][0]
+        with rasterio.open(os.path.join(self.root_dir, fpath)) as src:
+            img = src.read()
+        if self.transform_dem:
+            #img = self.transform_dem(np.transpose(img, (1, 2, 0)))
+            img = torch.from_numpy(img).unsqueeze(0)
+            img = self.transform_dem(img).squeeze(0)
+        return img
+
+    def __getitem__(self, idx):
+        grid_id = self.grid_ids[idx]
+        grid_id_coord = self.fnames_json[grid_id]['grid_id_coord']
+        sample = {}
+        meta_data = {}
+        # s1
+        if 's1_grd' in self.modality:
+            arr_s1, meta_s1 = self.get_s1_s2(grid_id, 's1_grd')
+            sample['s1_grd'] = arr_s1
+            meta_data['s1_grd'] = meta_s1
+        # s2
+        if 's2_toa' in self.modality:
+            arr_s2, meta_s2 = self.get_s1_s2(grid_id, 's2_toa')
+            sample['s2_toa'] = arr_s2
+            meta_data['s2_toa'] = meta_s2
+        # s3
+        if 's3_olci' in self.modality:
+            arr_s3, meta_s3 = self.get_s3(grid_id)
+            sample['s3_olci'] = arr_s3
+            meta_data['s3_olci'] = meta_s3
+        # s5p_co
+        if 's5p_co' in self.modality:
+            arr_s5p_co, meta_s5p_co = self.get_s5p(grid_id, 's5p_co')
+            sample['s5p_co'] = arr_s5p_co
+            meta_data['s5p_co'] = meta_s5p_co
+        # s5p_no2
+        if 's5p_no2' in self.modality:
+            arr_s5p_no2, meta_s5p_no2 = self.get_s5p(grid_id, 's5p_no2')
+            sample['s5p_no2'] = arr_s5p_no2
+            meta_data['s5p_no2'] = meta_s5p_no2
+        # s5p_o3
+        if 's5p_o3' in self.modality:
+            arr_s5p_o3, meta_s5p_o3 = self.get_s5p(grid_id, 's5p_o3')
+            sample['s5p_o3'] = arr_s5p_o3
+            meta_data['s5p_o3'] = meta_s5p_o3
+        # s5p_so2
+        if 's5p_so2' in self.modality:
+            arr_s5p_so2, meta_s5p_so2 = self.get_s5p(grid_id, 's5p_so2')
+            sample['s5p_so2'] = arr_s5p_so2
+            meta_data['s5p_so2'] = meta_s5p_so2
+        # dem
+        if 'dem' in self.modality:
+            arr_dem = self.get_dem(grid_id)
+            sample['dem'] = arr_dem
+            meta_data['dem'] = grid_id_coord
+
+        return sample, meta_data
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--fnames_path', type=str, default='data_loading/fnames.json.gz')
+    parser.add_argument('--root_dir', type=str, default='data_loading/data')
+    args = parser.parse_args()
+
+    # transform_s1 = cvtransforms.Compose([
+    #     cvtransforms.CenterCrop(224),
+    #     cvtransforms.ToTensor()
+    # ])
+    # transform_s2 = cvtransforms.Compose([
+    #     cvtransforms.CenterCrop(224),
+    #     cvtransforms.ToTensor()
+    # ])
+    # transform_s3 = cvtransforms.Compose([
+    #     cvtransforms.CenterCrop(96),
+    #     cvtransforms.ToTensor()
+    # ])
+    # transform_s5p = cvtransforms.Compose([
+    #     cvtransforms.CenterCrop(28),
+    #     cvtransforms.ToTensor()
+    # ])
+    # transform_dem = cvtransforms.Compose([
+    #     cvtransforms.CenterCrop(960),
+    #     cvtransforms.ToTensor()
+    # ])
+    transform_s1 = AugmentationSequential(
+        #kornia.augmentation.SmallestMaxSize(264),
+        kornia.augmentation.CenterCrop(224),
+    )
+    transform_s2 = AugmentationSequential(
+        #kornia.augmentation.SmallestMaxSize(264),
+        kornia.augmentation.CenterCrop(224),
+    )
+    transform_s3 = AugmentationSequential(
+        kornia.augmentation.SmallestMaxSize(96),
+        kornia.augmentation.CenterCrop(96),
+    )
+    transform_s5p = AugmentationSequential(
+        kornia.augmentation.SmallestMaxSize(28),
+        kornia.augmentation.CenterCrop(28),
+    )
+    transform_dem = AugmentationSequential(
+        kornia.augmentation.SmallestMaxSize(960),
+        kornia.augmentation.CenterCrop(960),
+    )
+
+    ssl4eo_s = SSL4EO_S(args.fnames_path, args.root_dir, transform_s1=transform_s1, transform_s2=transform_s2, transform_s3=transform_s3, transform_s5p=transform_s5p, transform_dem=transform_dem)
+    dataloader = DataLoader(ssl4eo_s, batch_size=1, shuffle=True, num_workers=0) # batch size can only be 1 because of varying number of images per grid
+
+    for i, (sample, meta_data) in enumerate(dataloader):
+        #print(i)
+        print('Grid ID:', meta_data['dem'][0])
+        print(sample.keys())
+        print(meta_data.keys())
+
+        print('### S1 GRD ###')
+        print('Number of s1 local patches:', len(meta_data['s1_grd']), ' ', 'Number of time stamps for first local patch:', len(meta_data['s1_grd'][0]))
+        print('Example for one image:', sample['s1_grd'][0][0].shape, meta_data['s1_grd'][0][0])
+        print('### S2 TOA ###')
+        print('Number of s2 local patches:', len(meta_data['s2_toa']), ' ', 'Number of time stamps for first local patch:', len(meta_data['s2_toa'][0]))
+        print('Example for one image:', sample['s2_toa'][0][0].shape, meta_data['s2_toa'][0][0])
+        print('### S3 OLCI ###')
+        print('Number of s3 time stamps:', len(meta_data['s3_olci']))
+        print('Example for one image:', sample['s3_olci'][0].shape, meta_data['s3_olci'][0])
+        print('### S5P ###')
+        print('Number of s5p time stamps for CO/NO2/O3/SO2:', len(meta_data['s5p_co']), len(meta_data['s5p_no2']), len(meta_data['s5p_o3']), len(meta_data['s5p_so2']))
+        print('Example for one CO image:', sample['s5p_co'][0].shape, meta_data['s5p_co'][0])
+        print('Example for one NO2 image:', sample['s5p_no2'][0].shape, meta_data['s5p_no2'][0])
+        print('Example for one O3 image:', sample['s5p_o3'][0].shape, meta_data['s5p_o3'][0])
+        print('Example for one SO2 image:', sample['s5p_so2'][0].shape, meta_data['s5p_so2'][0])
+        print('### DEM ###')
+        print('One DEM image for the grid:', sample['dem'].shape, meta_data['dem'][0])
+
+        break
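
A minimal usage sketch for SSL4EO_S, assuming the example paths from the notebook above; restricting `modality` avoids reading rasters you don't need:

```python
from torch.utils.data import DataLoader
from ssl4eo_s_dataset import SSL4EO_S

# load only S2 TOA and DEM; the keys mirror the constructor's default modality list
dataset = SSL4EO_S(
    fnames_path='../data/example_100_grids/fnames_sampled_union.json.gz',
    root_dir='../data/example_100_grids/',
    modality=['s2_toa', 'dem'],
)
loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)
sample, meta = next(iter(loader))
print(sample.keys())  # dict_keys(['s2_toa', 'dem'])
```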
ssl4eo_s_lmdb_dataset.py ADDED
@@ -0,0 +1,200 @@
+import lmdb
+import os
+import torch
+import numpy as np
+from torch.utils.data import Dataset, DataLoader
+import csv
+
+
+class SSL4EO_S_lmdb(Dataset):
+    def __init__(self, lmdb_path, key_path, slurm_job=False, mode=['s1_grd','s2_toa','s3_olci','s5p_co','s5p_no2','s5p_so2','s5p_o3','dem'], s1_transform=None, s2_transform=None, s3_transform=None, s5p_transform=None, dem_transform=None):
+        self.lmdb_path = lmdb_path
+        self.key_path = key_path
+        self.slurm_job = slurm_job
+        self.mode = mode
+        self.s1_transform = s1_transform
+        self.s2_transform = s2_transform
+        self.s3_transform = s3_transform
+        self.s5p_transform = s5p_transform
+        self.dem_transform = dem_transform
+        self.env = None # for slurm jobs, stays None until _init_db() runs in __getitem__
+        if not self.slurm_job:
+            self.env = lmdb.open(lmdb_path, readonly=True, lock=False, readahead=False, meminit=False)
+            #self.txn = self.env.begin(write=False) # Q: when to close the txn? #
+        self.keys = {}
+        with open(key_path, 'r') as f:
+            reader = csv.reader(f)
+            for row in reader:
+                modality, meta_info = row[0], row[1]
+                if modality == 's1_grd' or modality == 's2_toa':
+                    _, grid_id, local_grid_id, date = meta_info.split('/')
+                    if grid_id not in self.keys:
+                        self.keys[grid_id] = {}
+                    if modality not in self.keys[grid_id]:
+                        self.keys[grid_id][modality] = {}
+                    if local_grid_id not in self.keys[grid_id][modality]:
+                        self.keys[grid_id][modality][local_grid_id] = []
+                    self.keys[grid_id][modality][local_grid_id].append(meta_info)
+                elif modality == 's3_olci' or modality == 's5p_co' or modality == 's5p_no2' or modality == 's5p_so2' or modality == 's5p_o3':
+                    _, grid_id, date = meta_info.split('/')
+                    if grid_id not in self.keys:
+                        self.keys[grid_id] = {}
+                    if modality not in self.keys[grid_id]:
+                        self.keys[grid_id][modality] = []
+                    self.keys[grid_id][modality].append(meta_info)
+                elif modality == 'dem':
+                    _, grid_id = meta_info.split('/')
+                    if grid_id not in self.keys:
+                        self.keys[grid_id] = {}
+                    if modality not in self.keys[grid_id]:
+                        self.keys[grid_id][modality] = []
+                    self.keys[grid_id][modality].append(meta_info)
+        self.indices = list(self.keys.keys())
+
+    def __len__(self):
+        return len(self.indices)
+
+    def _init_db(self):
+        self.env = lmdb.open(self.lmdb_path, max_readers=1, readonly=True, lock=False, readahead=False, meminit=False)
+
+    def __getitem__(self, idx):
+        if self.slurm_job:
+            # Delay opening the LMDB environment until after worker initialization
+            if self.env is None:
+                self._init_db()
+        # get all images in a random local grid in one era5 grid (for batch loading)
+        grid_id = self.indices[idx]
+        grid_keys = self.keys[grid_id]
+        sample = {}
+        meta_info = {}
+
+        with self.env.begin(write=False) as txn:
+            # s1
+            if 's1_grd' in self.mode:
+                sample['s1_grd'] = []
+                meta_info['s1_grd'] = []
+                if 's1_grd' in grid_keys:
+                    local_grids = list(grid_keys['s1_grd'].keys()) # list of local grid ids
+                    for local_grid_id in local_grids:
+                        local_keys = grid_keys['s1_grd'][local_grid_id] # list of 4 keys
+                        local_meta_info = []
+                        local_imgs = []
+                        for key in local_keys:
+                            #print(key)
+                            img_bytes = txn.get(key.encode('utf-8'))
+                            img = np.frombuffer(img_bytes, dtype=np.float32).reshape(264, 264, 2)
+                            if self.s1_transform:
+                                img = self.s1_transform(img)
+                            local_meta_info.append(key)
+                            local_imgs.append(img)
+                        ## pad time stamps to 4
+                        #if len(s1_meta_info) < 4:
+                        #    s1_meta_info += [s1_meta_info[-1]] * (4 - len(s1_meta_info))
+                        #    s1_imgs += [s1_imgs[-1]] * (4 - len(s1_imgs))
+                        sample['s1_grd'].append(local_imgs)
+                        meta_info['s1_grd'].append(local_meta_info)
+
+            # s2
+            if 's2_toa' in self.mode:
+                sample['s2_toa'] = []
+                meta_info['s2_toa'] = []
+                if 's2_toa' in grid_keys:
+                    local_grids = list(grid_keys['s2_toa'].keys())
+                    for local_grid_id in local_grids:
+                        local_keys = grid_keys['s2_toa'][local_grid_id]
+                        local_meta_info = []
+                        local_imgs = []
+                        for key in local_keys:
+                            img_bytes = txn.get(key.encode('utf-8'))
+                            img = np.frombuffer(img_bytes, dtype=np.int16).reshape(264, 264, 13)
+                            if self.s2_transform:
+                                img = self.s2_transform(img)
+                            local_meta_info.append(key)
+                            local_imgs.append(img)
+                        sample['s2_toa'].append(local_imgs)
+                        meta_info['s2_toa'].append(local_meta_info)
+
+            # s3
+            if 's3_olci' in self.mode:
+                sample['s3_olci'] = []
+                meta_info['s3_olci'] = []
+                if 's3_olci' in grid_keys:
+                    local_keys = grid_keys['s3_olci']
+                    for key in local_keys:
+                        img_bytes = txn.get(key.encode('utf-8'))
+                        img = np.frombuffer(img_bytes, dtype=np.float32).reshape(96, 96, 21)
+                        if self.s3_transform:
+                            img = self.s3_transform(img)
+                        meta_info['s3_olci'].append(key)
+                        sample['s3_olci'].append(img)
+
+            # s5p
+            if 's5p_co' in self.mode:
+                sample['s5p_co'] = []
+                meta_info['s5p_co'] = []
+                if 's5p_co' in grid_keys:
+                    local_keys = grid_keys['s5p_co']
+                    for key in local_keys:
+                        img_bytes = txn.get(key.encode('utf-8'))
+                        img = np.frombuffer(img_bytes, dtype=np.float32).reshape(28, 28, 1)
+                        if self.s5p_transform:
+                            img = self.s5p_transform(img)
+                        meta_info['s5p_co'].append(key)
+                        sample['s5p_co'].append(img)
+
+            if 's5p_no2' in self.mode:
+                sample['s5p_no2'] = []
+                meta_info['s5p_no2'] = []
+                if 's5p_no2' in grid_keys:
+                    local_keys = grid_keys['s5p_no2']
+                    for key in local_keys:
+                        img_bytes = txn.get(key.encode('utf-8'))
+                        img = np.frombuffer(img_bytes, dtype=np.float32).reshape(28, 28, 1)
+                        if self.s5p_transform:
+                            img = self.s5p_transform(img)
+                        meta_info['s5p_no2'].append(key)
+                        sample['s5p_no2'].append(img)
+
+            if 's5p_so2' in self.mode:
+                sample['s5p_so2'] = []
+                meta_info['s5p_so2'] = []
+                if 's5p_so2' in grid_keys:
+                    local_keys = grid_keys['s5p_so2']
+                    for key in local_keys:
+                        img_bytes = txn.get(key.encode('utf-8'))
+                        img = np.frombuffer(img_bytes, dtype=np.float32).reshape(28, 28, 1)
+                        if self.s5p_transform:
+                            img = self.s5p_transform(img)
+                        meta_info['s5p_so2'].append(key)
+                        sample['s5p_so2'].append(img)
+
+            if 's5p_o3' in self.mode:
+                sample['s5p_o3'] = []
+                meta_info['s5p_o3'] = []
+                if 's5p_o3' in grid_keys:
+                    local_keys = grid_keys['s5p_o3']
+                    for key in local_keys:
+                        img_bytes = txn.get(key.encode('utf-8'))
+                        img = np.frombuffer(img_bytes, dtype=np.float32).reshape(28, 28, 1)
+                        if self.s5p_transform:
+                            img = self.s5p_transform(img)
+                        meta_info['s5p_o3'].append(key)
+                        sample['s5p_o3'].append(img)
+
+            # dem
+            if 'dem' in self.mode:
+                sample['dem'] = []
+                meta_info['dem'] = []
+                if 'dem' in grid_keys:
+                    local_keys = grid_keys['dem']
+                    for key in local_keys:
+                        img_bytes = txn.get(key.encode('utf-8'))
+                        img = np.frombuffer(img_bytes, dtype=np.float32).reshape(960, 960, 1)
+                        if self.dem_transform:
+                            img = self.dem_transform(img)
+                        meta_info['dem'].append(key)
+                        sample['dem'].append(img)
+
+        return sample, meta_info
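
The non-writable-tensor UserWarning recorded in the notebook's LMDB run comes from `np.frombuffer`, which returns a read-only view over the LMDB value buffer. A sketch of one way to silence it by copying before collation (`decode_value` is a hypothetical helper, not part of this upload):

```python
import numpy as np

def decode_value(img_bytes: bytes, dtype, shape):
    # .copy() detaches the array from the read-only LMDB buffer, so
    # torch's default collate receives a writable array and stops warning.
    return np.frombuffer(img_bytes, dtype=dtype).reshape(shape).copy()

# e.g. inside SSL4EO_S_lmdb.__getitem__ above:
# img = decode_value(txn.get(key.encode('utf-8')), np.float32, (264, 264, 2))
```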