dmytromishkin
commited on
Commit
·
22889e1
1
Parent(s):
2c41c9d
initial commit
Browse files- README.md +28 -0
- color_mappings.py +182 -0
- handcrafted_solution.py +242 -0
- hoho.py +261 -0
- notebooks/.ipynb_checkpoints/example-checkpoint.ipynb +166 -0
- notebooks/.ipynb_checkpoints/example_on_training-checkpoint.ipynb +0 -0
- notebooks/example_on_training.ipynb +0 -0
- read_write_colmap.py +489 -0
- script.py +145 -0
- viz3d.py +302 -0
README.md
CHANGED
@@ -1,3 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
license: apache-2.0
|
3 |
---
|
|
|
|
1 |
+
# Handcrafted solution example for the S23DR competition
|
2 |
+
|
3 |
+
This repo provides an example of a simple algorithm to reconstruct wireframe and submit to S23DR competition.
|
4 |
+
|
5 |
+
|
6 |
+
The repo consists of the following parts:
|
7 |
+
|
8 |
+
- `script.py` - the main file, which is run by the competition space. It should produce `submission.parquet` as the result of the run.
|
9 |
+
- `hoho.py` - the file for parsing the dataset at the inference time. Do NOT change it.
|
10 |
+
- `handcrafted_solution.py` - contains the actual implementation of the algorithm
|
11 |
+
- other `*.py` files - helper i/o and visualization utilities
|
12 |
+
- `packages/` - the directory to put python wheels for the custom packages you want to install and use.
|
13 |
+
|
14 |
+
## Solution description
|
15 |
+
|
16 |
+
The solution is simple.
|
17 |
+
|
18 |
+
1. Using provided (but noisy) semantic segmentation called `gestalt`, it takes the centroids of the vertex classes - `apex` and `eave_end_point` and projects them to 3D using provided (also noisy) monocular depth.
|
19 |
+
2. The vertices are connected using the same segmentation, by checking for edges classes to be present - `['eave', 'ridge', 'rake', 'valley']`.
|
20 |
+
3. All the "per-image" vertex predictions are merged in 3D space if their distance is less than threshold.
|
21 |
+
4. All vertices, which have zero connections, are removed.
|
22 |
+
|
23 |
+
|
24 |
+
## Example on the training set
|
25 |
+
|
26 |
+
See in [notebooks/example_on_training.ipynb](notebooks/example_on_training.ipynb)
|
27 |
+
|
28 |
---
|
29 |
license: apache-2.0
|
30 |
---
|
31 |
+
|
color_mappings.py
ADDED
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# RGB colour used to rasterise each gestalt segmentation class.
gestalt_color_mapping = {
    "unclassified": (215, 62, 138), "apex": (235, 88, 48),
    "eave_end_point": (248, 130, 228), "flashing_end_point": (71, 11, 161),
    "ridge": (214, 251, 248), "rake": (13, 94, 47), "eave": (54, 243, 63),
    "post": (187, 123, 236), "ground_line": (136, 206, 14),
    "flashing": (162, 162, 32), "step_flashing": (169, 255, 219),
    "hip": (8, 89, 52), "valley": (85, 27, 65), "roof": (215, 232, 179),
    "door": (110, 52, 23), "garage": (50, 233, 171), "window": (230, 249, 40),
    "shutter": (122, 4, 233), "fascia": (95, 230, 240), "soffit": (2, 102, 197),
    "horizontal_siding": (131, 88, 59), "vertical_siding": (110, 187, 198),
    "brick": (171, 252, 7), "concrete": (32, 47, 246),
    "other_wall": (112, 61, 240), "trim": (151, 206, 58),
    "unknown": (127, 127, 127),
}
|
30 |
+
|
31 |
+
# RGB colour for each ADE20K semantic class (class names keep the original
# semicolon-joined synonym lists from the ADE20K label set).
ade20k_color_mapping = {
    'wall': (120, 120, 120), 'building;edifice': (180, 120, 120),
    'sky': (6, 230, 230), 'floor;flooring': (80, 50, 50), 'tree': (4, 200, 3),
    'ceiling': (120, 120, 80), 'road;route': (140, 140, 140),
    'bed': (204, 5, 255), 'windowpane;window': (230, 230, 230),
    'grass': (4, 250, 7), 'cabinet': (224, 5, 255),
    'sidewalk;pavement': (235, 255, 7),
    'person;individual;someone;somebody;mortal;soul': (150, 5, 61),
    'earth;ground': (120, 120, 70), 'door;double;door': (8, 255, 51),
    'table': (255, 6, 82), 'mountain;mount': (143, 255, 140),
    'plant;flora;plant;life': (204, 255, 4),
    'curtain;drape;drapery;mantle;pall': (255, 51, 7), 'chair': (204, 70, 3),
    'car;auto;automobile;machine;motorcar': (0, 102, 200),
    'water': (61, 230, 250), 'painting;picture': (255, 6, 51),
    'sofa;couch;lounge': (11, 102, 255), 'shelf': (255, 7, 71),
    'house': (255, 9, 224), 'sea': (9, 7, 230), 'mirror': (220, 220, 220),
    'rug;carpet;carpeting': (255, 9, 92), 'field': (112, 9, 255),
    'armchair': (8, 255, 214), 'seat': (7, 255, 224),
    'fence;fencing': (255, 184, 6), 'desk': (10, 255, 71),
    'rock;stone': (255, 41, 10), 'wardrobe;closet;press': (7, 255, 255),
    'lamp': (224, 255, 8), 'bathtub;bathing;tub;bath;tub': (102, 8, 255),
    'railing;rail': (255, 61, 6), 'cushion': (255, 194, 7),
    'base;pedestal;stand': (255, 122, 8), 'box': (0, 255, 20),
    'column;pillar': (255, 8, 41), 'signboard;sign': (255, 5, 153),
    'chest;of;drawers;chest;bureau;dresser': (6, 51, 255),
    'counter': (235, 12, 255), 'sand': (160, 150, 20), 'sink': (0, 163, 255),
    'skyscraper': (140, 140, 140),
    'fireplace;hearth;open;fireplace': (250, 10, 15),
    'refrigerator;icebox': (20, 255, 0),
    'grandstand;covered;stand': (31, 255, 0), 'path': (255, 31, 0),
    'stairs;steps': (255, 224, 0), 'runway': (153, 255, 0),
    'case;display;case;showcase;vitrine': (0, 0, 255),
    'pool;table;billiard;table;snooker;table': (255, 71, 0),
    'pillow': (0, 235, 255), 'screen;door;screen': (0, 173, 255),
    'stairway;staircase': (31, 0, 255), 'river': (11, 200, 200),
    'bridge;span': (255, 82, 0), 'bookcase': (0, 255, 245),
    'blind;screen': (0, 61, 255),
    'coffee;table;cocktail;table': (0, 255, 112),
    'toilet;can;commode;crapper;pot;potty;stool;throne': (0, 255, 133),
    'flower': (255, 0, 0), 'book': (255, 163, 0), 'hill': (255, 102, 0),
    'bench': (194, 255, 0), 'countertop': (0, 143, 255),
    'stove;kitchen;stove;range;kitchen;range;cooking;stove': (51, 255, 0),
    'palm;palm;tree': (0, 82, 255), 'kitchen;island': (0, 255, 41),
    'computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system': (0, 255, 173),
    'swivel;chair': (10, 0, 255), 'boat': (173, 255, 0), 'bar': (0, 255, 153),
    'arcade;machine': (255, 92, 0),
    'hovel;hut;hutch;shack;shanty': (255, 0, 255),
    'bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle': (255, 0, 245),
    'towel': (255, 0, 102), 'light;light;source': (255, 173, 0),
    'truck;motortruck': (255, 0, 20), 'tower': (255, 184, 184),
    'chandelier;pendant;pendent': (0, 31, 255),
    'awning;sunshade;sunblind': (0, 255, 61),
    'streetlight;street;lamp': (0, 71, 255),
    'booth;cubicle;stall;kiosk': (255, 0, 204),
    'television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box': (0, 255, 194),
    'airplane;aeroplane;plane': (0, 255, 82), 'dirt;track': (0, 10, 255),
    'apparel;wearing;apparel;dress;clothes': (0, 112, 255),
    'pole': (51, 0, 255), 'land;ground;soil': (0, 194, 255),
    'bannister;banister;balustrade;balusters;handrail': (0, 122, 255),
    'escalator;moving;staircase;moving;stairway': (0, 255, 163),
    'ottoman;pouf;pouffe;puff;hassock': (255, 153, 0),
    'bottle': (0, 255, 10), 'buffet;counter;sideboard': (255, 112, 0),
    'poster;posting;placard;notice;bill;card': (143, 255, 0),
    'stage': (82, 0, 255), 'van': (163, 255, 0), 'ship': (255, 235, 0),
    'fountain': (8, 184, 170),
    'conveyer;belt;conveyor;belt;conveyer;conveyor;transporter': (133, 0, 255),
    'canopy': (0, 255, 92),
    'washer;automatic;washer;washing;machine': (184, 0, 255),
    'plaything;toy': (255, 0, 31),
    'swimming;pool;swimming;bath;natatorium': (0, 184, 255),
    'stool': (0, 214, 255), 'barrel;cask': (255, 0, 112),
    'basket;handbasket': (92, 255, 0), 'waterfall;falls': (0, 224, 255),
    'tent;collapsible;shelter': (112, 224, 255), 'bag': (70, 184, 160),
    'minibike;motorbike': (163, 0, 255), 'cradle': (153, 0, 255),
    'oven': (71, 255, 0), 'ball': (255, 0, 163),
    'food;solid;food': (255, 204, 0), 'step;stair': (255, 0, 143),
    'tank;storage;tank': (0, 255, 235),
    'trade;name;brand;name;brand;marque': (133, 255, 0),
    'microwave;microwave;oven': (255, 0, 235), 'pot;flowerpot': (245, 0, 255),
    'animal;animate;being;beast;brute;creature;fauna': (255, 0, 122),
    'bicycle;bike;wheel;cycle': (255, 245, 0), 'lake': (10, 190, 212),
    'dishwasher;dish;washer;dishwashing;machine': (214, 255, 0),
    'screen;silver;screen;projection;screen': (0, 204, 255),
    'blanket;cover': (20, 0, 255), 'sculpture': (255, 255, 0),
    'hood;exhaust;hood': (0, 153, 255), 'sconce': (0, 41, 255),
    'vase': (0, 255, 204),
    'traffic;light;traffic;signal;stoplight': (41, 0, 255),
    'tray': (41, 255, 0),
    'ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin': (173, 0, 255),
    'fan': (0, 245, 255), 'pier;wharf;wharfage;dock': (71, 0, 255),
    'crt;screen': (122, 0, 255), 'plate': (0, 255, 184),
    'monitor;monitoring;device': (0, 92, 255),
    'bulletin;board;notice;board': (184, 255, 0), 'shower': (0, 133, 255),
    'radiator': (255, 214, 0), 'glass;drinking;glass': (25, 194, 194),
    'clock': (102, 255, 0), 'flag': (92, 0, 255),
}
|
handcrafted_solution.py
ADDED
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Description: This file contains the handcrafted solution for the task of wireframe reconstruction
|
2 |
+
|
3 |
+
import io
|
4 |
+
from read_write_colmap import read_cameras_binary, read_images_binary, read_points3D_binary
|
5 |
+
from PIL import Image as PImage
|
6 |
+
import numpy as np
|
7 |
+
from color_mappings import gestalt_color_mapping, ade20k_color_mapping
|
8 |
+
from collections import defaultdict
|
9 |
+
import cv2
|
10 |
+
from typing import Tuple, List
|
11 |
+
from scipy.spatial.distance import cdist
|
12 |
+
|
13 |
+
|
14 |
+
def empty_solution():
    """Return a minimal valid solution: two vertices joined by one edge.

    Returns:
        tuple: ((2, 3) zero vertex array, edge list [(0, 1)], semantics [0]).
    """
    verts = np.zeros((2, 3))
    edges = [(0, 1)]
    semantics = [0]
    return verts, edges, semantics
|
17 |
+
|
18 |
+
|
19 |
+
def convert_entry_to_human_readable(entry):
    """Decode a raw dataset entry into parsed/loaded objects.

    Keys in the pass-through set are copied untouched; COLMAP binary blobs
    ('points3d', 'cameras', 'images') are parsed with the COLMAP readers, and
    image byte blobs ('ade20k', 'gestalt', 'depthcm') are opened with PIL.
    Any unrecognised key is dropped.
    """
    passthrough = {'__key__', 'wf_vertices', 'wf_edges', 'edge_semantics',
                   'mesh_vertices', 'mesh_faces', 'face_semantics',
                   'K', 'R', 't'}
    decoded = {}
    for key, value in entry.items():
        if key in passthrough:
            decoded[key] = value
        elif key == 'points3d':
            decoded[key] = read_points3D_binary(fid=io.BytesIO(value))
        elif key == 'cameras':
            decoded[key] = read_cameras_binary(fid=io.BytesIO(value))
        elif key == 'images':
            decoded[key] = read_images_binary(fid=io.BytesIO(value))
        elif key in ('ade20k', 'gestalt'):
            # Segmentation maps are stored as encoded images; force RGB.
            decoded[key] = [PImage.open(io.BytesIO(blob)).convert('RGB') for blob in value]
        elif key == 'depthcm':
            decoded[key] = [PImage.open(io.BytesIO(blob)) for blob in value]
    return decoded
|
37 |
+
|
38 |
+
|
39 |
+
def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
    """Extract 2D wireframe vertices and connections from a gestalt mask.

    Vertices are the connected-component centroids of the 'apex' and
    'eave_end_point' classes.  Two vertices are connected when both lie within
    *edge_th* pixels of an endpoint of the same edge-class component
    ('eave', 'ridge', 'rake' or 'valley').

    Args:
        gest_seg_np: (H, W, 3) uint8 gestalt segmentation image.
        edge_th: max pixel distance from a vertex to an edge endpoint.

    Returns:
        (vertices, connections): list of {'xy', 'type'} dicts and a list of
        (i, j) index pairs into that list.
    """
    vertices = []
    connections = []

    def collect_class_centroids(class_name):
        # Threshold the exact class colour, then take one vertex per
        # connected component (label 0 is background and is skipped).
        color = np.array(gestalt_color_mapping[class_name])
        mask = cv2.inRange(gest_seg_np, color - 0.5, color + 0.5)
        if mask.sum() > 0:
            n_labels, _, _, centroids = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
            for centroid in centroids[1:n_labels]:
                vertices.append({"xy": centroid, "type": class_name})

    collect_class_centroids("apex")
    collect_class_centroids("eave_end_point")

    apex_pts = np.array([v['xy'] for v in vertices])

    # Each edge component is summarised by its leftmost and rightmost pixel;
    # an edge "connects" every pair of vertices near either of its endpoints.
    for edge_class in ['eave', 'ridge', 'rake', 'valley']:
        edge_color = np.array(gestalt_color_mapping[edge_class])
        mask = cv2.morphologyEx(
            cv2.inRange(gest_seg_np, edge_color - 0.5, edge_color + 0.5),
            cv2.MORPH_DILATE, np.ones((11, 11)))
        line_img = np.copy(gest_seg_np) * 0
        if mask.sum() == 0:
            continue
        n_labels, labels, _, _ = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
        endpoint_rows = []
        for comp in range(1, n_labels):
            ys, xs = np.where(labels == comp)
            left, right = np.argmin(xs), np.argmax(xs)
            x0, y0 = xs[left], ys[left]
            x1, y1 = xs[right], ys[right]
            endpoint_rows.append((x0, y0, x1, y1))
            cv2.line(line_img, (x0, y0), (x1, y1), (255, 255, 255), 2)
        endpoint_rows = np.array(endpoint_rows)
        if len(apex_pts) < 2 or len(endpoint_rows) < 1:
            continue
        # Distance from each vertex to the nearer endpoint of each edge.
        pts_to_edges_dist = np.minimum(cdist(apex_pts, endpoint_rows[:, :2]),
                                       cdist(apex_pts, endpoint_rows[:, 2:]))
        near = pts_to_edges_dist <= edge_th
        for edge_idx, n_near in enumerate(near.sum(axis=0)):
            if n_near >= 2:
                linked = np.where(near[:, edge_idx])[0]
                for a_pos, a in enumerate(linked):
                    for b in linked[a_pos + 1:]:
                        connections.append((a, b))
    return vertices, connections
|
109 |
+
|
110 |
+
def get_uv_depth(vertices, depth):
    """Sample per-vertex depth values from a depth image.

    Args:
        vertices: list of dicts, each with an 'xy' pixel coordinate.
        depth: (H, W) depth image.

    Returns:
        (uv, vertex_depth): float (N, 2) pixel coordinates and the (N,) depth
        values sampled at the (clamped) integer pixel positions.
    """
    uv = np.array([v['xy'] for v in vertices])
    # Truncate to integer pixels and clamp inside the image bounds.
    pix = uv.astype(np.int32)
    height, width = depth.shape[:2]
    cols = np.clip(pix[:, 0], 0, width - 1)
    rows = np.clip(pix[:, 1], 0, height - 1)
    return uv, depth[rows, cols]
|
122 |
+
|
123 |
+
|
124 |
+
def merge_vertices_3d(vert_edge_per_image, th=0.1):
    """Merge per-image 3D vertices that are close in space and share a type.

    Args:
        vert_edge_per_image: dict image_idx -> (vertices, connections,
            vertices_3d), where connections index into that image's vertices.
        th: merge-distance threshold in the units of the 3D coordinates.

    Returns:
        (merged (M, 3) vertex array, deduplicated list of [i, j] connections).
    """
    all_3d_vertices = []
    connections_3d = []
    types = []
    offset = 0
    # Concatenate every image's vertices, shifting connection indices so they
    # address the global vertex list.
    for _, (vertices, connections, vertices_3d) in vert_edge_per_image.items():
        types.extend(int(v['type'] == 'apex') for v in vertices)
        all_3d_vertices.append(vertices_3d)
        connections_3d.extend((a + offset, b + offset) for (a, b) in connections)
        offset += len(vertices_3d)
    all_3d_vertices = np.concatenate(all_3d_vertices, axis=0)

    # Two vertices are merge candidates when within `th` AND of the same type.
    distmat = cdist(all_3d_vertices, all_3d_vertices)
    type_col = np.array(types).reshape(-1, 1)
    same_type = cdist(type_col, type_col) == 0
    mask_to_merge = (distmat <= th) & same_type

    # For each vertex, take the union of all candidate rows mentioning it.
    candidate_rows = sorted({tuple(row.nonzero()[0].tolist()) for row in mask_to_merge})
    group_of = defaultdict(list)
    for i in range(len(all_3d_vertices)):
        for row in candidate_rows:
            if i in row:
                group_of[i] += row
    for i, members in group_of.items():
        group_of[i] = list(set(members))

    # Keep one group per connected set of vertices.
    seen = set()
    merged_groups = []
    for i, members in group_of.items():
        if i in seen:
            continue
        merged_groups.append(members)
        seen.update(members)

    # Average each group into a single vertex and remap the old indices.
    new_vertices = []
    old_idx_to_new = {}
    for new_idx, members in enumerate(merged_groups):
        new_vertices.append(all_3d_vertices[members].mean(axis=0))
        for old_idx in members:
            old_idx_to_new[old_idx] = new_idx
    new_vertices = np.array(new_vertices)

    new_connections = []
    for a, b in connections_3d:
        remapped = sorted((old_idx_to_new[a], old_idx_to_new[b]))
        if remapped[0] == remapped[1]:
            continue          # edge collapsed into a single vertex
        if remapped not in new_connections:
            new_connections.append(remapped)
    return new_vertices, new_connections
|
178 |
+
|
179 |
+
def prune_not_connected(all_3d_vertices, connections_3d):
    """Drop vertices with no incident edge and reindex the remaining ones.

    Args:
        all_3d_vertices: (N, 3) vertex array.
        connections_3d: list of (i, j) index pairs into that array.

    Returns:
        (kept vertex array, deduplicated list of reindexed (i, j) pairs).
    """
    incident = defaultdict(list)
    for conn in connections_3d:
        incident[conn[0]].append(conn)
        incident[conn[1]].append(conn)

    # Only vertices that appear in some connection survive; coordinates are
    # deduplicated by exact equality.
    remap = {}
    kept = []
    for old_idx in incident:
        coords = tuple(all_3d_vertices[old_idx])
        if coords not in kept:
            kept.append(coords)
            remap[old_idx] = len(kept) - 1

    reindexed = []
    for conns in incident.values():
        for a, b in conns:
            reindexed.append((remap[a], remap[b]))
    reindexed = list(set(reindexed))

    return np.array(kept), reindexed
|
199 |
+
|
200 |
+
|
201 |
+
def predict(entry, visualize=False) -> Tuple[np.ndarray, List, List[int]]:
    """Reconstruct a 3D wireframe for one dataset entry.

    Pipeline: per image, detect 2D vertices/edges from the gestalt
    segmentation, lift the vertices to 3D using the monocular depth and the
    camera pose, then merge the per-image predictions in world space and
    prune unconnected vertices.

    Note: the return annotation previously advertised two values; the
    function returns three (vertices, edges, per-edge semantics).

    Args:
        entry: raw dataset entry (decoded via convert_entry_to_human_readable).
        visualize: if True, plot the estimate against the ground truth.

    Returns:
        (vertices (N, 3), list of edge index pairs, per-edge semantic labels,
        all 0).
    """
    good_entry = convert_entry_to_human_readable(entry)
    vert_edge_per_image = {}
    for i, (gest, depth, K, R, t) in enumerate(zip(good_entry['gestalt'],
                                                   good_entry['depthcm'],
                                                   good_entry['K'],
                                                   good_entry['R'],
                                                   good_entry['t'])):
        gest_seg = gest.resize(depth.size)
        gest_seg_np = np.array(gest_seg).astype(np.uint8)
        # Metric3D depth is rescaled by an empirical factor of 2.5.
        depth_np = np.array(depth) / 2.5
        vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=20.)
        if (len(vertices) < 2) or (len(connections) < 1):
            print(f'Not enough vertices or connections in image {i}')
            vert_edge_per_image[i] = np.empty((0, 2)), [], np.empty((0, 3))
            continue
        uv, depth_vert = get_uv_depth(vertices, depth_np)
        # Back-project pixels to rays through the camera intrinsics.
        xy_local = np.ones((len(uv), 3))
        xy_local[:, 0] = (uv[:, 0] - K[0, 2]) / K[0, 0]
        xy_local[:, 1] = (uv[:, 1] - K[1, 2]) / K[1, 1]
        # Scale each unit ray by the sampled depth -> camera-space points.
        vertices_3d_local = depth_vert[..., None] * (xy_local / np.linalg.norm(xy_local, axis=1)[..., None])
        # (R, t) is the world->camera transform; invert it to go to world space.
        world_to_cam = np.eye(4)
        world_to_cam[:3, :3] = R
        world_to_cam[:3, 3] = t.reshape(-1)
        cam_to_world = np.linalg.inv(world_to_cam)
        vertices_3d = cv2.transform(cv2.convertPointsToHomogeneous(vertices_3d_local), cam_to_world)
        vertices_3d = cv2.convertPointsFromHomogeneous(vertices_3d).reshape(-1, 3)
        vert_edge_per_image[i] = vertices, connections, vertices_3d
    all_3d_vertices, connections_3d = merge_vertices_3d(vert_edge_per_image, 3.0)
    all_3d_vertices_clean, connections_3d_clean = prune_not_connected(all_3d_vertices, connections_3d)
    if (len(all_3d_vertices_clean) < 2) or len(connections_3d_clean) < 1:
        print('Not enough vertices or connections in the 3D vertices')
        return empty_solution()
    if visualize:
        from viz3d import plot_estimate_and_gt
        plot_estimate_and_gt(all_3d_vertices_clean, connections_3d_clean,
                             good_entry['wf_vertices'], good_entry['wf_edges'])
    return all_3d_vertices_clean, connections_3d_clean, [0 for _ in range(len(connections_3d_clean))]
|
hoho.py
ADDED
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import shutil
|
4 |
+
from pathlib import Path
|
5 |
+
from typing import Dict
|
6 |
+
|
7 |
+
from PIL import ImageFile
|
8 |
+
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
9 |
+
|
10 |
+
LOCAL_DATADIR = None
|
11 |
+
|
12 |
+
def setup(local_dir='./data/usm-training-data/data'):
    """Resolve the data directory and set the module-global LOCAL_DATADIR.

    In the test environment (/tmp/data/data exists) the data is symlinked to
    ./data/usm-test-data-x/data; otherwise *local_dir* is used directly.

    Returns:
        Path: the resolved (and existing) data directory.
    """
    global LOCAL_DATADIR

    tmp_datadir = Path('/tmp/data/data')
    local_test_datadir = Path('./data/usm-test-data-x/data')
    local_val_datadir = Path(local_dir)

    # Debug output: show where we are running from.
    os.system('pwd')
    os.system('ls -lahtr .')

    if tmp_datadir.exists() and not local_test_datadir.exists():
        LOCAL_DATADIR = local_test_datadir
        # shutil.move(datadir, './usm-test-data-x/data')
        print(f"Linking {tmp_datadir} to {LOCAL_DATADIR} (we are in the test environment)")
        LOCAL_DATADIR.parent.mkdir(parents=True, exist_ok=True)
        LOCAL_DATADIR.symlink_to(tmp_datadir)
    else:
        LOCAL_DATADIR = local_val_datadir
        print(f"Using {LOCAL_DATADIR} as the data directory (we are running locally)")

    assert LOCAL_DATADIR.exists(), f"Data directory {LOCAL_DATADIR} does not exist"
    return LOCAL_DATADIR
|
37 |
+
|
38 |
+
|
39 |
+
|
40 |
+
|
41 |
+
import importlib
|
42 |
+
from pathlib import Path
|
43 |
+
import subprocess
|
44 |
+
|
45 |
+
def download_package(package_name, path_to_save='packages'):
    """
    Download a package's wheels with pip into ``path_to_save/<package_name>``.

    Wheels are fetched for the manylinux1_x86_64 platform and Python 3.8
    (binary only), matching the offline competition environment.

    Parameters:
        package_name (str): The name of the package to download.
        path_to_save (str): Directory under which per-package wheel
            directories are created.
    """
    try:
        # Equivalent CLI: pip download <pkg> -d packages/<pkg>
        #   --platform manylinux1_x86_64 --python-version 38 --only-binary=:all:
        subprocess.check_call([subprocess.sys.executable, "-m", "pip", "download", package_name,
                               "-d", str(Path(path_to_save) / package_name),
                               "--platform", "manylinux1_x86_64",
                               "--python-version", "38",
                               "--only-binary=:all:"])
        print(f'Package "{package_name}" downloaded successfully')
    except subprocess.CalledProcessError as e:
        # Fixed message grammar ("Failed to downloaded" -> "Failed to download").
        print(f'Failed to download package "{package_name}". Error: {e}')
|
63 |
+
|
64 |
+
|
65 |
+
def install_package_from_local_file(package_name, folder='packages'):
    """
    Install a package from pre-downloaded wheel files with pip, without
    touching the package index (offline install).

    Parameters:
        package_name (str): Name of the package to install.
        folder (str): Directory containing the per-package wheel directories
            (as produced by download_package).
    """
    try:
        pth = str(Path(folder) / package_name)
        subprocess.check_call([subprocess.sys.executable, "-m", "pip", "install",
                               "--no-index",          # offline: never hit PyPI
                               "--find-links", pth,   # resolve from the local wheel dir
                               package_name])
        print(f"Package installed successfully from {pth}")
    except subprocess.CalledProcessError as e:
        print(f"Failed to install package from {pth}. Error: {e}")
|
81 |
+
|
82 |
+
|
83 |
+
def importt(module_name, as_name=None):
    """
    Import a module by name, installing it from the local wheel cache on
    failure and retrying once.

    Parameters:
        module_name (str): The name of the module to import.
        as_name (str): Informational only — the name the caller intends to
            bind the module to (binding happens at the call site).

    Returns:
        The imported module, or None if the import failed on both attempts.
    """
    for _attempt in range(2):
        try:
            # NOTE: the second positional argument of importlib.import_module
            # is the anchor *package* for relative imports, not an alias, so
            # `as_name` must not be forwarded to it.
            module = importlib.import_module(module_name)
            if as_name is None:
                print(f'imported {module_name}')
            else:
                print(f'imported {module_name} as {as_name}')
            return module
        except ModuleNotFoundError as e:
            install_package_from_local_file(module_name)
            print(f"Failed to import module {module_name}. Error: {e}")
|
105 |
+
|
106 |
+
|
107 |
+
def prepare_submission():
    """Download every package listed in requirements.txt into ./packages so
    the submission can be installed offline in the competition environment."""
    if Path('requirements.txt').exists():
        print('downloading packages from requirements.txt')
        Path('packages').mkdir(exist_ok=True)
        with open('requirements.txt') as f:
            packages = f.readlines()
        for p in packages:
            download_package(p.strip())

        # Fixed typo in the user-facing message ("foget" -> "forget").
        print('all packages downloaded. Don\'t forget to include the packages in the submission by adding them with git lfs.')
|
119 |
+
|
120 |
+
|
121 |
+
def Rt_to_eye_target(im, K, R, t):
    """Convert an (R, t) camera pose into eye/target/up/fov for a 3D viewer.

    Args:
        im: image; only .height is used (for the vertical field of view).
        K:  3x3 intrinsics; K[0, 0] is the focal length in pixels.
        R:  3x3 world-to-camera rotation; rows are the camera axes.
        t:  translation of the world-to-camera transform.

    Returns:
        (eye, target, up, fov_degrees)
    """
    # Vertical field of view from the focal length, in degrees.
    fov = 2.0 * np.arctan2(0.5 * im.height, K[0, 0]) / (np.pi / 180.0)

    x_axis, y_axis, z_axis = R       # camera axes expressed in world frame

    eye = -(R.T @ t).squeeze()       # camera centre: C = -R^T t
    target = eye + z_axis.squeeze()  # look one unit along the viewing axis
    up = -y_axis                     # image y points down, so "up" is -y

    return eye, target, up, fov
|
134 |
+
|
135 |
+
|
136 |
+
########## general utilities ##########
|
137 |
+
import contextlib
|
138 |
+
import tempfile
|
139 |
+
from pathlib import Path
|
140 |
+
|
141 |
+
@contextlib.contextmanager
def working_directory(path):
    """Context manager: chdir into *path*, restore the previous cwd on exit."""
    previous = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
|
150 |
+
|
151 |
+
@contextlib.contextmanager
def temp_working_directory():
    """Run the enclosed block inside a throwaway directory created under '.';
    the directory (and cwd change) is cleaned up on exit."""
    with tempfile.TemporaryDirectory(dir='.') as tmp:
        with working_directory(tmp):
            yield
|
156 |
+
|
157 |
+
|
158 |
+
############# Dataset #############
|
159 |
+
def proc(row, split='train'):
    """Group a raw webdataset row into a Sample.

    Image-like columns ('ade20k', 'depthcm', 'gestalt') are accumulated into
    lists, 'wireframe'/'mesh' dict columns are flattened into the top level,
    'k'/'r' calibration columns are upper-cased, and everything else is
    copied through under its base column name (the part before the first '.').
    """
    # column_names_train = ['ade20k', 'depthcm', 'gestalt', 'colmap', 'KRt', 'mesh', 'wireframe']
    # column_names_test  = ['ade20k', 'depthcm', 'gestalt', 'colmap', 'KRt', 'wireframe']
    out = {}
    for key, value in row.items():
        colname = key.split('.')[0]
        if colname in {'ade20k', 'depthcm', 'gestalt'}:
            out.setdefault(colname, []).append(value)
        elif colname in {'wireframe', 'mesh'}:
            out.update(value)
        elif colname in 'kr':
            # NOTE(review): substring test — matches 'k', 'r', 'kr' and '';
            # preserved as-is from the original.
            out[colname.upper()] = value
        else:
            out[colname] = value

    return Sample(out)
|
180 |
+
|
181 |
+
|
182 |
+
class Sample(Dict):
    """Dict whose repr shows value shapes/types instead of contents, keeping
    large arrays and image lists readable in notebooks."""

    def __repr__(self):
        def describe(value):
            if hasattr(value, 'shape'):
                return value.shape
            if isinstance(value, list):
                return [type(value[0])]
            return type(value)
        return str({key: describe(value) for key, value in self.items()})
|
185 |
+
|
186 |
+
|
187 |
+
|
188 |
+
def get_params():
    """Load competition parameters.

    Reads params.json when it exists (the test environment writes one);
    otherwise falls back to a hard-coded example dict for local testing.
    The chosen parameters are printed and returned.
    """
    example_param_dict = {
        "competition_id": "usm3d/S23DR",
        "competition_type": "script",
        "metric": "custom",
        "token": "hf_**********************************",
        "team_id": "local-test-team_id",
        "submission_id": "local-test-submission_id",
        "submission_id_col": "__key__",
        "submission_cols": [
            "__key__",
            "wf_edges",
            "wf_vertices",
            "edge_semantics",
        ],
        "submission_rows": 180,
        "output_path": ".",
        "submission_repo": "<THE HF MODEL ID of THIS REPO",
        "time_limit": 7200,
        "dataset": "usm3d/usm-test-data-x",
        "submission_filenames": [
            "submission.parquet",
        ],
    }

    param_path = Path('params.json')
    if param_path.exists():
        print('found params.json (this means we are probably in the test env). Using params from file.')
        with param_path.open() as f:
            params = json.load(f)
    else:
        print('params.json not found (this means we probably aren\'t in the test env). Using example params.')
        params = example_param_dict
    print(params)
    return params
|
224 |
+
|
225 |
+
|
226 |
+
|
227 |
+
import webdataset as wds
|
228 |
+
import numpy as np
|
229 |
+
|
230 |
+
def get_dataset(decode='pil', proc=proc, split='train', dataset_type='webdataset'):
    """Build a webdataset pipeline over the locally downloaded tar shards.

    Args:
        decode: webdataset decode spec (e.g. ``'pil'``); ``None`` uses the
            default decoder.
        proc: per-sample mapping function applied after decoding.
        split: dataset split subdirectory, or ``'all'`` for every shard
            under the data root.
        dataset_type: ``'webdataset'`` returns the raw pipeline;
            ``'hf'`` wraps it in a HuggingFace ``IterableDataset``
            (only for ``'train'``/``'val'`` splits).

    Raises:
        ValueError: if the module-level ``LOCAL_DATADIR`` has not been set
            by ``setup()``.
    """
    if LOCAL_DATADIR is None:
        raise ValueError('LOCAL_DATADIR is not set. Please run setup() first.')

    root = Path(LOCAL_DATADIR)
    if split != 'all':
        root = root / split

    shard_paths = [str(tar) for tar in root.rglob('*.tar.gz')]

    ds = wds.WebDataset(shard_paths)
    ds = ds.decode(decode) if decode is not None else ds.decode()
    ds = ds.map(proc)

    if dataset_type == 'webdataset':
        return ds

    if dataset_type == 'hf':
        import datasets
        from datasets import Features, Value, Sequence, Image, Array2D

        # Both recognized splits are wrapped identically; other splits
        # (including 'all') fall through and return None, matching the
        # original behavior.
        if split in ('train', 'val'):
            return datasets.IterableDataset.from_generator(lambda: ds.iterator())
|
259 |
+
|
260 |
+
|
261 |
+
|
notebooks/.ipynb_checkpoints/example-checkpoint.ipynb
ADDED
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 2,
|
6 |
+
"id": "503c6bcb-aa46-46c6-8b86-566b0a470b43",
|
7 |
+
"metadata": {},
|
8 |
+
"outputs": [],
|
9 |
+
"source": [
|
10 |
+
"import sys\n",
|
11 |
+
"sys.path.append('..')\n",
|
12 |
+
"from handcrafted_solution import *\n",
|
13 |
+
"from viz3d import *\n",
|
14 |
+
"from read_write_colmap import *"
|
15 |
+
]
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"cell_type": "code",
|
19 |
+
"execution_count": 4,
|
20 |
+
"id": "8bdcd910-bac0-44be-8344-cb901ea2f369",
|
21 |
+
"metadata": {},
|
22 |
+
"outputs": [
|
23 |
+
{
|
24 |
+
"name": "stdout",
|
25 |
+
"output_type": "stream",
|
26 |
+
"text": [
|
27 |
+
"Collecting webdataset\n",
|
28 |
+
" Downloading webdataset-0.2.86-py3-none-any.whl.metadata (29 kB)\n",
|
29 |
+
"Collecting braceexpand (from webdataset)\n",
|
30 |
+
" Downloading braceexpand-0.1.7-py2.py3-none-any.whl.metadata (3.0 kB)\n",
|
31 |
+
"Requirement already satisfied: numpy in /Users/dmytromishkin/miniconda3/envs/pytorch/lib/python3.9/site-packages (from webdataset) (1.24.4)\n",
|
32 |
+
"Requirement already satisfied: pyyaml in /Users/dmytromishkin/miniconda3/envs/pytorch/lib/python3.9/site-packages (from webdataset) (6.0)\n",
|
33 |
+
"Downloading webdataset-0.2.86-py3-none-any.whl (70 kB)\n",
|
34 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m70.4/70.4 kB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
35 |
+
"\u001b[?25hDownloading braceexpand-0.1.7-py2.py3-none-any.whl (5.9 kB)\n",
|
36 |
+
"Installing collected packages: braceexpand, webdataset\n",
|
37 |
+
"Successfully installed braceexpand-0.1.7 webdataset-0.2.86\n"
|
38 |
+
]
|
39 |
+
}
|
40 |
+
],
|
41 |
+
"source": [
|
42 |
+
"!pip install webdataset"
|
43 |
+
]
|
44 |
+
},
|
45 |
+
{
|
46 |
+
"cell_type": "code",
|
47 |
+
"execution_count": 5,
|
48 |
+
"id": "88f4fc8f-efa9-404b-9073-c7d4a73f9075",
|
49 |
+
"metadata": {},
|
50 |
+
"outputs": [],
|
51 |
+
"source": [
|
52 |
+
"import webdataset as wds \n",
|
53 |
+
"import numpy as np\n",
|
54 |
+
"import datasets\n",
|
55 |
+
"from datasets import Features, Value, Sequence, Image, Array2D\n"
|
56 |
+
]
|
57 |
+
},
|
58 |
+
{
|
59 |
+
"cell_type": "code",
|
60 |
+
"execution_count": 7,
|
61 |
+
"id": "080f1a12-06bf-4b97-8a52-d7cf416adede",
|
62 |
+
"metadata": {},
|
63 |
+
"outputs": [
|
64 |
+
{
|
65 |
+
"data": {
|
66 |
+
"application/vnd.jupyter.widget-view+json": {
|
67 |
+
"model_id": "809ae1d7cc0e48718433b6896bb84067",
|
68 |
+
"version_major": 2,
|
69 |
+
"version_minor": 0
|
70 |
+
},
|
71 |
+
"text/plain": [
|
72 |
+
"Resolving data files: 0%| | 0/25 [00:00<?, ?it/s]"
|
73 |
+
]
|
74 |
+
},
|
75 |
+
"metadata": {},
|
76 |
+
"output_type": "display_data"
|
77 |
+
},
|
78 |
+
{
|
79 |
+
"data": {
|
80 |
+
"application/vnd.jupyter.widget-view+json": {
|
81 |
+
"model_id": "86f66f66049746eeb98c9a15972c2ca2",
|
82 |
+
"version_major": 2,
|
83 |
+
"version_minor": 0
|
84 |
+
},
|
85 |
+
"text/plain": [
|
86 |
+
"Downloading data: 0%| | 0.00/1.01G [00:00<?, ?B/s]"
|
87 |
+
]
|
88 |
+
},
|
89 |
+
"metadata": {},
|
90 |
+
"output_type": "display_data"
|
91 |
+
},
|
92 |
+
{
|
93 |
+
"ename": "KeyboardInterrupt",
|
94 |
+
"evalue": "",
|
95 |
+
"output_type": "error",
|
96 |
+
"traceback": [
|
97 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
98 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
99 |
+
"Cell \u001b[0;32mIn[7], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdatasets\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m load_dataset\n\u001b[0;32m----> 2\u001b[0m ds \u001b[38;5;241m=\u001b[39m \u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtest-org-usm3d/usm-training-data\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n",
|
100 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/load.py:2574\u001b[0m, in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)\u001b[0m\n\u001b[1;32m 2571\u001b[0m try_from_hf_gcs \u001b[38;5;241m=\u001b[39m path \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m _PACKAGED_DATASETS_MODULES\n\u001b[1;32m 2573\u001b[0m \u001b[38;5;66;03m# Download and prepare data\u001b[39;00m\n\u001b[0;32m-> 2574\u001b[0m \u001b[43mbuilder_instance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2575\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2576\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2577\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2578\u001b[0m \u001b[43m \u001b[49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2579\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2580\u001b[0m \u001b[43m \u001b[49m\u001b[43mstorage_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstorage_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2581\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2583\u001b[0m \u001b[38;5;66;03m# 
Build dataset for splits\u001b[39;00m\n\u001b[1;32m 2584\u001b[0m keep_in_memory \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 2585\u001b[0m keep_in_memory \u001b[38;5;28;01mif\u001b[39;00m keep_in_memory \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m is_small_dataset(builder_instance\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size)\n\u001b[1;32m 2586\u001b[0m )\n",
|
101 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/builder.py:1005\u001b[0m, in \u001b[0;36mDatasetBuilder.download_and_prepare\u001b[0;34m(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\u001b[0m\n\u001b[1;32m 1003\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 1004\u001b[0m prepare_split_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_proc\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_proc\n\u001b[0;32m-> 1005\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1006\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1007\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1008\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1009\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mdownload_and_prepare_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1010\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1011\u001b[0m \u001b[38;5;66;03m# Sync info\u001b[39;00m\n\u001b[1;32m 1012\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28msum\u001b[39m(split\u001b[38;5;241m.\u001b[39mnum_bytes \u001b[38;5;28;01mfor\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39msplits\u001b[38;5;241m.\u001b[39mvalues())\n",
|
102 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/builder.py:1767\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verification_mode, **prepare_splits_kwargs)\u001b[0m\n\u001b[1;32m 1766\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_download_and_prepare\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager, verification_mode, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mprepare_splits_kwargs):\n\u001b[0;32m-> 1767\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1768\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1769\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1770\u001b[0m \u001b[43m \u001b[49m\u001b[43mcheck_duplicate_keys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mBASIC_CHECKS\u001b[49m\n\u001b[1;32m 1771\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mALL_CHECKS\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1772\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_splits_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1773\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
|
103 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/builder.py:1078\u001b[0m, in \u001b[0;36mDatasetBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verification_mode, **prepare_split_kwargs)\u001b[0m\n\u001b[1;32m 1076\u001b[0m split_dict \u001b[38;5;241m=\u001b[39m SplitDict(dataset_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset_name)\n\u001b[1;32m 1077\u001b[0m split_generators_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_make_split_generators_kwargs(prepare_split_kwargs)\n\u001b[0;32m-> 1078\u001b[0m split_generators \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_split_generators\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43msplit_generators_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1080\u001b[0m \u001b[38;5;66;03m# Checksums verification\u001b[39;00m\n\u001b[1;32m 1081\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m verification_mode \u001b[38;5;241m==\u001b[39m VerificationMode\u001b[38;5;241m.\u001b[39mALL_CHECKS \u001b[38;5;129;01mand\u001b[39;00m dl_manager\u001b[38;5;241m.\u001b[39mrecord_checksums:\n",
|
104 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/packaged_modules/webdataset/webdataset.py:47\u001b[0m, in \u001b[0;36mWebDataset._split_generators\u001b[0;34m(self, dl_manager)\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mdata_files:\n\u001b[1;32m 46\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAt least one data file must be specified, but got data_files=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mdata_files\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 47\u001b[0m data_files \u001b[38;5;241m=\u001b[39m \u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdata_files\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 48\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(data_files, (\u001b[38;5;28mstr\u001b[39m, \u001b[38;5;28mlist\u001b[39m, \u001b[38;5;28mtuple\u001b[39m)):\n\u001b[1;32m 49\u001b[0m tar_paths \u001b[38;5;241m=\u001b[39m data_files\n",
|
105 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/download/download_manager.py:434\u001b[0m, in \u001b[0;36mDownloadManager.download\u001b[0;34m(self, url_or_urls)\u001b[0m\n\u001b[1;32m 432\u001b[0m start_time \u001b[38;5;241m=\u001b[39m datetime\u001b[38;5;241m.\u001b[39mnow()\n\u001b[1;32m 433\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m stack_multiprocessing_download_progress_bars():\n\u001b[0;32m--> 434\u001b[0m downloaded_path_or_paths \u001b[38;5;241m=\u001b[39m \u001b[43mmap_nested\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 435\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_func\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 436\u001b[0m \u001b[43m \u001b[49m\u001b[43murl_or_urls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 437\u001b[0m \u001b[43m \u001b[49m\u001b[43mmap_tuple\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 438\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 439\u001b[0m \u001b[43m \u001b[49m\u001b[43mdesc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mDownloading data files\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 440\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 441\u001b[0m duration \u001b[38;5;241m=\u001b[39m datetime\u001b[38;5;241m.\u001b[39mnow() \u001b[38;5;241m-\u001b[39m start_time\n\u001b[1;32m 442\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDownloading took \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mduration\u001b[38;5;241m.\u001b[39mtotal_seconds()\u001b[38;5;250m \u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;250m 
\u001b[39m\u001b[38;5;241m60\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m min\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
|
106 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/py_utils.py:466\u001b[0m, in \u001b[0;36mmap_nested\u001b[0;34m(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, types, disable_tqdm, desc)\u001b[0m\n\u001b[1;32m 464\u001b[0m num_proc \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 465\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28many\u001b[39m(\u001b[38;5;28misinstance\u001b[39m(v, types) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(v) \u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlen\u001b[39m(iterable) \u001b[38;5;28;01mfor\u001b[39;00m v \u001b[38;5;129;01min\u001b[39;00m iterable):\n\u001b[0;32m--> 466\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 467\u001b[0m map_nested(\n\u001b[1;32m 468\u001b[0m function\u001b[38;5;241m=\u001b[39mfunction,\n\u001b[1;32m 469\u001b[0m data_struct\u001b[38;5;241m=\u001b[39mobj,\n\u001b[1;32m 470\u001b[0m num_proc\u001b[38;5;241m=\u001b[39mnum_proc,\n\u001b[1;32m 471\u001b[0m parallel_min_length\u001b[38;5;241m=\u001b[39mparallel_min_length,\n\u001b[1;32m 472\u001b[0m types\u001b[38;5;241m=\u001b[39mtypes,\n\u001b[1;32m 473\u001b[0m )\n\u001b[1;32m 474\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m iterable\n\u001b[1;32m 475\u001b[0m ]\n\u001b[1;32m 476\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m num_proc \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m num_proc \u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(iterable) \u001b[38;5;241m<\u001b[39m parallel_min_length:\n\u001b[1;32m 477\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 478\u001b[0m _single_map_nested((function, obj, types, \u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;28;01mTrue\u001b[39;00m, 
\u001b[38;5;28;01mNone\u001b[39;00m))\n\u001b[1;32m 479\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m hf_tqdm(iterable, disable\u001b[38;5;241m=\u001b[39mdisable_tqdm, desc\u001b[38;5;241m=\u001b[39mdesc)\n\u001b[1;32m 480\u001b[0m ]\n",
|
107 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/py_utils.py:467\u001b[0m, in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 464\u001b[0m num_proc \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 465\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28many\u001b[39m(\u001b[38;5;28misinstance\u001b[39m(v, types) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(v) \u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlen\u001b[39m(iterable) \u001b[38;5;28;01mfor\u001b[39;00m v \u001b[38;5;129;01min\u001b[39;00m iterable):\n\u001b[1;32m 466\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m--> 467\u001b[0m \u001b[43mmap_nested\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 468\u001b[0m \u001b[43m \u001b[49m\u001b[43mfunction\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfunction\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 469\u001b[0m \u001b[43m \u001b[49m\u001b[43mdata_struct\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mobj\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 470\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 471\u001b[0m \u001b[43m \u001b[49m\u001b[43mparallel_min_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparallel_min_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 472\u001b[0m \u001b[43m \u001b[49m\u001b[43mtypes\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtypes\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 473\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 474\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m iterable\n\u001b[1;32m 475\u001b[0m ]\n\u001b[1;32m 476\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m num_proc \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m num_proc 
\u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(iterable) \u001b[38;5;241m<\u001b[39m parallel_min_length:\n\u001b[1;32m 477\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 478\u001b[0m _single_map_nested((function, obj, types, \u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;28;01mTrue\u001b[39;00m, \u001b[38;5;28;01mNone\u001b[39;00m))\n\u001b[1;32m 479\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m hf_tqdm(iterable, disable\u001b[38;5;241m=\u001b[39mdisable_tqdm, desc\u001b[38;5;241m=\u001b[39mdesc)\n\u001b[1;32m 480\u001b[0m ]\n",
|
108 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/py_utils.py:477\u001b[0m, in \u001b[0;36mmap_nested\u001b[0;34m(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, types, disable_tqdm, desc)\u001b[0m\n\u001b[1;32m 466\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 467\u001b[0m map_nested(\n\u001b[1;32m 468\u001b[0m function\u001b[38;5;241m=\u001b[39mfunction,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 474\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m iterable\n\u001b[1;32m 475\u001b[0m ]\n\u001b[1;32m 476\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m num_proc \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m num_proc \u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(iterable) \u001b[38;5;241m<\u001b[39m parallel_min_length:\n\u001b[0;32m--> 477\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 478\u001b[0m _single_map_nested((function, obj, types, \u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;28;01mTrue\u001b[39;00m, \u001b[38;5;28;01mNone\u001b[39;00m))\n\u001b[1;32m 479\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m hf_tqdm(iterable, disable\u001b[38;5;241m=\u001b[39mdisable_tqdm, desc\u001b[38;5;241m=\u001b[39mdesc)\n\u001b[1;32m 480\u001b[0m ]\n\u001b[1;32m 481\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 482\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m warnings\u001b[38;5;241m.\u001b[39mcatch_warnings():\n",
|
109 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/py_utils.py:478\u001b[0m, in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 466\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 467\u001b[0m map_nested(\n\u001b[1;32m 468\u001b[0m function\u001b[38;5;241m=\u001b[39mfunction,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 474\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m iterable\n\u001b[1;32m 475\u001b[0m ]\n\u001b[1;32m 476\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m num_proc \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m num_proc \u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(iterable) \u001b[38;5;241m<\u001b[39m parallel_min_length:\n\u001b[1;32m 477\u001b[0m mapped \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m--> 478\u001b[0m \u001b[43m_single_map_nested\u001b[49m\u001b[43m(\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfunction\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mobj\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtypes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 479\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m hf_tqdm(iterable, disable\u001b[38;5;241m=\u001b[39mdisable_tqdm, desc\u001b[38;5;241m=\u001b[39mdesc)\n\u001b[1;32m 480\u001b[0m ]\n\u001b[1;32m 481\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 482\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m warnings\u001b[38;5;241m.\u001b[39mcatch_warnings():\n",
|
110 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/py_utils.py:370\u001b[0m, in \u001b[0;36m_single_map_nested\u001b[0;34m(args)\u001b[0m\n\u001b[1;32m 368\u001b[0m \u001b[38;5;66;03m# Singleton first to spare some computation\u001b[39;00m\n\u001b[1;32m 369\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(data_struct, \u001b[38;5;28mdict\u001b[39m) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(data_struct, types):\n\u001b[0;32m--> 370\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunction\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdata_struct\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 372\u001b[0m \u001b[38;5;66;03m# Reduce logging to keep things readable in multiprocessing with tqdm\u001b[39;00m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m rank \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m logging\u001b[38;5;241m.\u001b[39mget_verbosity() \u001b[38;5;241m<\u001b[39m logging\u001b[38;5;241m.\u001b[39mWARNING:\n",
|
111 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/download/download_manager.py:459\u001b[0m, in \u001b[0;36mDownloadManager._download\u001b[0;34m(self, url_or_filename, download_config)\u001b[0m\n\u001b[1;32m 456\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_relative_path(url_or_filename):\n\u001b[1;32m 457\u001b[0m \u001b[38;5;66;03m# append the relative path to the base_path\u001b[39;00m\n\u001b[1;32m 458\u001b[0m url_or_filename \u001b[38;5;241m=\u001b[39m url_or_path_join(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_base_path, url_or_filename)\n\u001b[0;32m--> 459\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[43mcached_path\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl_or_filename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 460\u001b[0m out \u001b[38;5;241m=\u001b[39m tracked_str(out)\n\u001b[1;32m 461\u001b[0m out\u001b[38;5;241m.\u001b[39mset_origin(url_or_filename)\n",
|
112 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/file_utils.py:190\u001b[0m, in \u001b[0;36mcached_path\u001b[0;34m(url_or_filename, download_config, **download_kwargs)\u001b[0m\n\u001b[1;32m 186\u001b[0m url_or_filename \u001b[38;5;241m=\u001b[39m strip_protocol(url_or_filename)\n\u001b[1;32m 188\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_remote_url(url_or_filename):\n\u001b[1;32m 189\u001b[0m \u001b[38;5;66;03m# URL, so get it from the cache (downloading if necessary)\u001b[39;00m\n\u001b[0;32m--> 190\u001b[0m output_path \u001b[38;5;241m=\u001b[39m \u001b[43mget_from_cache\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 191\u001b[0m \u001b[43m \u001b[49m\u001b[43murl_or_filename\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 192\u001b[0m \u001b[43m \u001b[49m\u001b[43mcache_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcache_dir\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 193\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforce_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 194\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 195\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mresume_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 196\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 197\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mlocal_files_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlocal_files_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 198\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_etag\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43muse_etag\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 199\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_retries\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmax_retries\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 200\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 201\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_url_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mignore_url_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 202\u001b[0m \u001b[43m \u001b[49m\u001b[43mstorage_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstorage_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 203\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_desc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_desc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 204\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 205\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mexists(url_or_filename):\n\u001b[1;32m 206\u001b[0m \u001b[38;5;66;03m# File, and it exists.\u001b[39;00m\n\u001b[1;32m 207\u001b[0m output_path \u001b[38;5;241m=\u001b[39m url_or_filename\n",
|
113 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/file_utils.py:632\u001b[0m, in \u001b[0;36mget_from_cache\u001b[0;34m(url, cache_dir, force_download, proxies, etag_timeout, resume_download, user_agent, local_files_only, use_etag, max_retries, token, use_auth_token, ignore_url_params, storage_options, download_desc)\u001b[0m\n\u001b[1;32m 630\u001b[0m ftp_get(url, temp_file)\n\u001b[1;32m 631\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m scheme \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttp\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttps\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m--> 632\u001b[0m \u001b[43mfsspec_get\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemp_file\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstorage_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstorage_options\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdesc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_desc\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 633\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 634\u001b[0m http_get(\n\u001b[1;32m 635\u001b[0m url,\n\u001b[1;32m 636\u001b[0m temp_file\u001b[38;5;241m=\u001b[39mtemp_file,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 642\u001b[0m desc\u001b[38;5;241m=\u001b[39mdownload_desc,\n\u001b[1;32m 643\u001b[0m )\n",
|
114 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/datasets/utils/file_utils.py:352\u001b[0m, in \u001b[0;36mfsspec_get\u001b[0;34m(url, temp_file, storage_options, desc)\u001b[0m\n\u001b[1;32m 340\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGET can be called with at most one path but was called with \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpaths\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 341\u001b[0m callback \u001b[38;5;241m=\u001b[39m TqdmCallback(\n\u001b[1;32m 342\u001b[0m tqdm_kwargs\u001b[38;5;241m=\u001b[39m{\n\u001b[1;32m 343\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdesc\u001b[39m\u001b[38;5;124m\"\u001b[39m: desc \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDownloading\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 350\u001b[0m }\n\u001b[1;32m 351\u001b[0m )\n\u001b[0;32m--> 352\u001b[0m \u001b[43mfs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_file\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpaths\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemp_file\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallback\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallback\u001b[49m\u001b[43m)\u001b[49m\n",
|
115 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/fsspec/spec.py:914\u001b[0m, in \u001b[0;36mAbstractFileSystem.get_file\u001b[0;34m(self, rpath, lpath, callback, outfile, **kwargs)\u001b[0m\n\u001b[1;32m 912\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 913\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m data:\n\u001b[0;32m--> 914\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[43mf1\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mblocksize\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 915\u001b[0m segment_len \u001b[38;5;241m=\u001b[39m outfile\u001b[38;5;241m.\u001b[39mwrite(data)\n\u001b[1;32m 916\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m segment_len \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
116 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/fsspec/spec.py:1856\u001b[0m, in \u001b[0;36mAbstractBufferedFile.read\u001b[0;34m(self, length)\u001b[0m\n\u001b[1;32m 1853\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m length \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 1854\u001b[0m \u001b[38;5;66;03m# don't even bother calling fetch\u001b[39;00m\n\u001b[1;32m 1855\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m-> 1856\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcache\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_fetch\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloc\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mlength\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1857\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloc \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlen\u001b[39m(out)\n\u001b[1;32m 1858\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m out\n",
|
117 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/fsspec/caching.py:189\u001b[0m, in \u001b[0;36mReadAheadCache._fetch\u001b[0;34m(self, start, end)\u001b[0m\n\u001b[1;32m 187\u001b[0m part \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 188\u001b[0m end \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mmin\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msize, end \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mblocksize)\n\u001b[0;32m--> 189\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcache \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfetcher\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstart\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mend\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# new block replaces old\u001b[39;00m\n\u001b[1;32m 190\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstart \u001b[38;5;241m=\u001b[39m start\n\u001b[1;32m 191\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mend \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstart \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcache)\n",
|
118 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/huggingface_hub/hf_file_system.py:625\u001b[0m, in \u001b[0;36mHfFileSystemFile._fetch_range\u001b[0;34m(self, start, end)\u001b[0m\n\u001b[1;32m 614\u001b[0m headers \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 615\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrange\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbytes=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mstart\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m-\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mend\u001b[38;5;250m \u001b[39m\u001b[38;5;241m-\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 616\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfs\u001b[38;5;241m.\u001b[39m_api\u001b[38;5;241m.\u001b[39m_build_hf_headers(),\n\u001b[1;32m 617\u001b[0m }\n\u001b[1;32m 618\u001b[0m url \u001b[38;5;241m=\u001b[39m hf_hub_url(\n\u001b[1;32m 619\u001b[0m repo_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mresolved_path\u001b[38;5;241m.\u001b[39mrepo_id,\n\u001b[1;32m 620\u001b[0m revision\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mresolved_path\u001b[38;5;241m.\u001b[39mrevision,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 623\u001b[0m endpoint\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfs\u001b[38;5;241m.\u001b[39mendpoint,\n\u001b[1;32m 624\u001b[0m )\n\u001b[0;32m--> 625\u001b[0m r \u001b[38;5;241m=\u001b[39m \u001b[43mhttp_backoff\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mGET\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 626\u001b[0m hf_raise_for_status(r)\n\u001b[1;32m 627\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m r\u001b[38;5;241m.\u001b[39mcontent\n",
|
119 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/huggingface_hub/utils/_http.py:281\u001b[0m, in \u001b[0;36mhttp_backoff\u001b[0;34m(method, url, max_retries, base_wait_time, max_wait_time, retry_on_exceptions, retry_on_status_codes, **kwargs)\u001b[0m\n\u001b[1;32m 278\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mseek(io_obj_initial_pos)\n\u001b[1;32m 280\u001b[0m \u001b[38;5;66;03m# Perform request and return if status_code is not in the retry list.\u001b[39;00m\n\u001b[0;32m--> 281\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43msession\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmethod\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mstatus_code \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m retry_on_status_codes:\n\u001b[1;32m 283\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\n",
|
120 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/requests/sessions.py:589\u001b[0m, in \u001b[0;36mSession.request\u001b[0;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[0m\n\u001b[1;32m 584\u001b[0m send_kwargs \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 585\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtimeout\u001b[39m\u001b[38;5;124m\"\u001b[39m: timeout,\n\u001b[1;32m 586\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mallow_redirects\u001b[39m\u001b[38;5;124m\"\u001b[39m: allow_redirects,\n\u001b[1;32m 587\u001b[0m }\n\u001b[1;32m 588\u001b[0m send_kwargs\u001b[38;5;241m.\u001b[39mupdate(settings)\n\u001b[0;32m--> 589\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprep\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43msend_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 591\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m resp\n",
|
121 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/requests/sessions.py:725\u001b[0m, in \u001b[0;36mSession.send\u001b[0;34m(self, request, **kwargs)\u001b[0m\n\u001b[1;32m 722\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m allow_redirects:\n\u001b[1;32m 723\u001b[0m \u001b[38;5;66;03m# Redirect resolving generator.\u001b[39;00m\n\u001b[1;32m 724\u001b[0m gen \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mresolve_redirects(r, request, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 725\u001b[0m history \u001b[38;5;241m=\u001b[39m [resp \u001b[38;5;28;01mfor\u001b[39;00m resp \u001b[38;5;129;01min\u001b[39;00m gen]\n\u001b[1;32m 726\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 727\u001b[0m history \u001b[38;5;241m=\u001b[39m []\n",
|
122 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/requests/sessions.py:725\u001b[0m, in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 722\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m allow_redirects:\n\u001b[1;32m 723\u001b[0m \u001b[38;5;66;03m# Redirect resolving generator.\u001b[39;00m\n\u001b[1;32m 724\u001b[0m gen \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mresolve_redirects(r, request, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 725\u001b[0m history \u001b[38;5;241m=\u001b[39m [resp \u001b[38;5;28;01mfor\u001b[39;00m resp \u001b[38;5;129;01min\u001b[39;00m gen]\n\u001b[1;32m 726\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 727\u001b[0m history \u001b[38;5;241m=\u001b[39m []\n",
|
123 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/requests/sessions.py:266\u001b[0m, in \u001b[0;36mSessionRedirectMixin.resolve_redirects\u001b[0;34m(self, resp, req, stream, timeout, verify, cert, proxies, yield_requests, **adapter_kwargs)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m req\n\u001b[1;32m 264\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 266\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 267\u001b[0m \u001b[43m \u001b[49m\u001b[43mreq\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 268\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 269\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 270\u001b[0m \u001b[43m \u001b[49m\u001b[43mverify\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverify\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 271\u001b[0m \u001b[43m \u001b[49m\u001b[43mcert\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcert\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 272\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 273\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_redirects\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 274\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43madapter_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 275\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 277\u001b[0m extract_cookies_to_jar(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcookies, 
prepared_request, resp\u001b[38;5;241m.\u001b[39mraw)\n\u001b[1;32m 279\u001b[0m \u001b[38;5;66;03m# extract redirect url, if any, for the next loop\u001b[39;00m\n",
|
124 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/requests/sessions.py:747\u001b[0m, in \u001b[0;36mSession.send\u001b[0;34m(self, request, **kwargs)\u001b[0m\n\u001b[1;32m 744\u001b[0m \u001b[38;5;28;01mpass\u001b[39;00m\n\u001b[1;32m 746\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m stream:\n\u001b[0;32m--> 747\u001b[0m \u001b[43mr\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcontent\u001b[49m\n\u001b[1;32m 749\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m r\n",
|
125 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/requests/models.py:899\u001b[0m, in \u001b[0;36mResponse.content\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 897\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_content \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 898\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 899\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_content \u001b[38;5;241m=\u001b[39m \u001b[38;5;124;43mb\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miter_content\u001b[49m\u001b[43m(\u001b[49m\u001b[43mCONTENT_CHUNK_SIZE\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 901\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_content_consumed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 902\u001b[0m \u001b[38;5;66;03m# don't need to release the connection; that's been handled by urllib3\u001b[39;00m\n\u001b[1;32m 903\u001b[0m \u001b[38;5;66;03m# since we exhausted the data.\u001b[39;00m\n",
|
126 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/requests/models.py:816\u001b[0m, in \u001b[0;36mResponse.iter_content.<locals>.generate\u001b[0;34m()\u001b[0m\n\u001b[1;32m 814\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mraw, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstream\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m 815\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 816\u001b[0m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mraw\u001b[38;5;241m.\u001b[39mstream(chunk_size, decode_content\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 817\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ProtocolError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 818\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ChunkedEncodingError(e)\n",
|
127 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/urllib3/response.py:628\u001b[0m, in \u001b[0;36mHTTPResponse.stream\u001b[0;34m(self, amt, decode_content)\u001b[0m\n\u001b[1;32m 626\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 627\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_fp_closed(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp):\n\u001b[0;32m--> 628\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mamt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecode_content\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 630\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m data:\n\u001b[1;32m 631\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m data\n",
|
128 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/urllib3/response.py:567\u001b[0m, in \u001b[0;36mHTTPResponse.read\u001b[0;34m(self, amt, decode_content, cache_content)\u001b[0m\n\u001b[1;32m 564\u001b[0m fp_closed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mclosed\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[1;32m 566\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_error_catcher():\n\u001b[0;32m--> 567\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_fp_read\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m fp_closed \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 568\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 569\u001b[0m flush_decoder \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n",
|
129 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/site-packages/urllib3/response.py:533\u001b[0m, in \u001b[0;36mHTTPResponse._fp_read\u001b[0;34m(self, amt)\u001b[0m\n\u001b[1;32m 530\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m buffer\u001b[38;5;241m.\u001b[39mgetvalue()\n\u001b[1;32m 531\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 532\u001b[0m \u001b[38;5;66;03m# StringIO doesn't like amt=None\u001b[39;00m\n\u001b[0;32m--> 533\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_fp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fp\u001b[38;5;241m.\u001b[39mread()\n",
|
130 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/http/client.py:463\u001b[0m, in \u001b[0;36mHTTPResponse.read\u001b[0;34m(self, amt)\u001b[0m\n\u001b[1;32m 460\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 461\u001b[0m \u001b[38;5;66;03m# Amount is given, implement using readinto\u001b[39;00m\n\u001b[1;32m 462\u001b[0m b \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mbytearray\u001b[39m(amt)\n\u001b[0;32m--> 463\u001b[0m n \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreadinto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 464\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mmemoryview\u001b[39m(b)[:n]\u001b[38;5;241m.\u001b[39mtobytes()\n\u001b[1;32m 465\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 466\u001b[0m \u001b[38;5;66;03m# Amount is not given (unbounded read) so we must check self.length\u001b[39;00m\n\u001b[1;32m 467\u001b[0m \u001b[38;5;66;03m# and self.chunked\u001b[39;00m\n",
|
131 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/http/client.py:507\u001b[0m, in \u001b[0;36mHTTPResponse.readinto\u001b[0;34m(self, b)\u001b[0m\n\u001b[1;32m 502\u001b[0m b \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mmemoryview\u001b[39m(b)[\u001b[38;5;241m0\u001b[39m:\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlength]\n\u001b[1;32m 504\u001b[0m \u001b[38;5;66;03m# we do not use _safe_read() here because this may be a .will_close\u001b[39;00m\n\u001b[1;32m 505\u001b[0m \u001b[38;5;66;03m# connection, and the user is reading more bytes than will be provided\u001b[39;00m\n\u001b[1;32m 506\u001b[0m \u001b[38;5;66;03m# (for example, reading in 1k chunks)\u001b[39;00m\n\u001b[0;32m--> 507\u001b[0m n \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreadinto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 508\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m n \u001b[38;5;129;01mand\u001b[39;00m b:\n\u001b[1;32m 509\u001b[0m \u001b[38;5;66;03m# Ideally, we would raise IncompleteRead if the content-length\u001b[39;00m\n\u001b[1;32m 510\u001b[0m \u001b[38;5;66;03m# wasn't satisfied, but it might break compatibility.\u001b[39;00m\n\u001b[1;32m 511\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_close_conn()\n",
|
132 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/socket.py:704\u001b[0m, in \u001b[0;36mSocketIO.readinto\u001b[0;34m(self, b)\u001b[0m\n\u001b[1;32m 702\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 703\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 704\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sock\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrecv_into\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 705\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m timeout:\n\u001b[1;32m 706\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_timeout_occurred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n",
|
133 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/ssl.py:1242\u001b[0m, in \u001b[0;36mSSLSocket.recv_into\u001b[0;34m(self, buffer, nbytes, flags)\u001b[0m\n\u001b[1;32m 1238\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m flags \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 1239\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 1240\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnon-zero flags not allowed in calls to recv_into() on \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m%\u001b[39m\n\u001b[1;32m 1241\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m)\n\u001b[0;32m-> 1242\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnbytes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1243\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1244\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mrecv_into(buffer, nbytes, flags)\n",
|
134 |
+
"File \u001b[0;32m~/miniconda3/envs/pytorch/lib/python3.9/ssl.py:1100\u001b[0m, in \u001b[0;36mSSLSocket.read\u001b[0;34m(self, len, buffer)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1099\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m buffer \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m-> 1100\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_sslobj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sslobj\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;28mlen\u001b[39m)\n",
|
135 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
136 |
+
]
|
137 |
+
}
|
138 |
+
],
|
139 |
+
"source": [
|
140 |
+
"from datasets import load_dataset\n",
|
141 |
+
"ds = load_dataset('test-org-usm3d/usm-training-data')"
|
142 |
+
]
|
143 |
+
}
|
144 |
+
],
|
145 |
+
"metadata": {
|
146 |
+
"kernelspec": {
|
147 |
+
"display_name": "Python 3 (ipykernel)",
|
148 |
+
"language": "python",
|
149 |
+
"name": "python3"
|
150 |
+
},
|
151 |
+
"language_info": {
|
152 |
+
"codemirror_mode": {
|
153 |
+
"name": "ipython",
|
154 |
+
"version": 3
|
155 |
+
},
|
156 |
+
"file_extension": ".py",
|
157 |
+
"mimetype": "text/x-python",
|
158 |
+
"name": "python",
|
159 |
+
"nbconvert_exporter": "python",
|
160 |
+
"pygments_lexer": "ipython3",
|
161 |
+
"version": "3.9.16"
|
162 |
+
}
|
163 |
+
},
|
164 |
+
"nbformat": 4,
|
165 |
+
"nbformat_minor": 5
|
166 |
+
}
|
notebooks/.ipynb_checkpoints/example_on_training-checkpoint.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
notebooks/example_on_training.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
read_write_colmap.py
ADDED
@@ -0,0 +1,489 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Modified to read from bytes-like object by Dmytro Mishkin.
|
2 |
+
# The original license is below:
|
3 |
+
# Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
|
4 |
+
# All rights reserved.
|
5 |
+
#
|
6 |
+
# Redistribution and use in source and binary forms, with or without
|
7 |
+
# modification, are permitted provided that the following conditions are met:
|
8 |
+
#
|
9 |
+
# * Redistributions of source code must retain the above copyright
|
10 |
+
# notice, this list of conditions and the following disclaimer.
|
11 |
+
#
|
12 |
+
# * Redistributions in binary form must reproduce the above copyright
|
13 |
+
# notice, this list of conditions and the following disclaimer in the
|
14 |
+
# documentation and/or other materials provided with the distribution.
|
15 |
+
#
|
16 |
+
# * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
|
17 |
+
# its contributors may be used to endorse or promote products derived
|
18 |
+
# from this software without specific prior written permission.
|
19 |
+
#
|
20 |
+
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
21 |
+
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
22 |
+
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
23 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
|
24 |
+
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
25 |
+
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
26 |
+
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
27 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
28 |
+
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
29 |
+
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
30 |
+
# POSSIBILITY OF SUCH DAMAGE.
|
31 |
+
#
|
32 |
+
# Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
|
33 |
+
|
34 |
+
import os
|
35 |
+
import collections
|
36 |
+
import numpy as np
|
37 |
+
import struct
|
38 |
+
import argparse
|
39 |
+
|
40 |
+
|
41 |
+
# Plain-data records mirroring COLMAP's reconstruction entities
# (see COLMAP src/base/reconstruction.h).
CameraModel = collections.namedtuple(
    "CameraModel", ["model_id", "model_name", "num_params"])
# Intrinsics of one camera; the layout of `params` depends on `model`.
Camera = collections.namedtuple(
    "Camera", ["id", "model", "width", "height", "params"])
# Pose (qvec = w,x,y,z quaternion; tvec) plus the 2D keypoints (`xys`)
# and their associated 3D point ids of one registered image.
BaseImage = collections.namedtuple(
    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
# One triangulated 3D point with its observation track.
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])


class Image(BaseImage):
    # Convenience wrapper: expose the image's quaternion as a rotation matrix.
    def qvec2rotmat(self):
        return qvec2rotmat(self.qvec)


# Camera models known to this reader/writer; `model_id` is the key stored in
# the binary files, `num_params` drives how many doubles are read per camera.
CAMERA_MODELS = {
    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
    CameraModel(model_id=7, model_name="FOV", num_params=5),
    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
}
# Lookup tables: by numeric id (used by the binary format) and by model name
# (used by the text format).
CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
                         for camera_model in CAMERA_MODELS])
CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
                           for camera_model in CAMERA_MODELS])
|
73 |
+
|
74 |
+
|
75 |
+
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read `num_bytes` from a binary stream and unpack them with `struct`.

    :param fid: binary file-like object to read from.
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of read and unpacked values.
    """
    raw = fid.read(num_bytes)
    fmt = endian_character + format_char_sequence
    return struct.unpack(fmt, raw)
|
85 |
+
|
86 |
+
|
87 |
+
def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
    """Pack values with `struct` and write them to a binary stream.

    :param fid: binary file-like object to write to.
    :param data: data to send; if multiple elements are sent at the same time,
           they should be encapsulated either in a list or a tuple.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q};
           should be the same length as the data list or tuple.
    :param endian_character: Any of {@, =, <, >, !}
    """
    fmt = endian_character + format_char_sequence
    # Renamed local from `bytes` to `packed`: the original shadowed the
    # builtin `bytes` type.
    if isinstance(data, (list, tuple)):
        packed = struct.pack(fmt, *data)
    else:
        packed = struct.pack(fmt, data)
    fid.write(packed)
|
101 |
+
|
102 |
+
|
103 |
+
def read_cameras_text(path):
    """Parse a COLMAP cameras.txt file into a dict {camera_id: Camera}.

    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasText(const std::string& path)
        void Reconstruction::ReadCamerasText(const std::string& path)
    """
    cameras = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            line = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not line or line.startswith("#"):
                continue
            elems = line.split()
            camera_id = int(elems[0])
            cameras[camera_id] = Camera(
                id=camera_id,
                model=elems[1],
                width=int(elems[2]),
                height=int(elems[3]),
                params=np.array(tuple(map(float, elems[4:]))))
    return cameras
|
127 |
+
|
128 |
+
|
129 |
+
def read_cameras_binary(path_to_model_file=None, fid=None):
    """Read COLMAP cameras from a binary file or an open binary stream.

    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)

    :param path_to_model_file: path to cameras.bin; opened and closed here.
    :param fid: alternatively, an already-open binary stream (left open).
    :return: dict mapping camera_id -> Camera.
    :raises ValueError: if neither argument is provided.
    """
    if fid is None and path_to_model_file is None:
        raise ValueError("Either path_to_model_file or fid must be given.")
    cameras = {}
    owns_fid = fid is None
    if owns_fid:
        fid = open(path_to_model_file, "rb")
    # try/finally so a parse error cannot leak the file handle we opened;
    # a caller-supplied fid is never closed here.
    try:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model = CAMERA_MODEL_IDS[model_id]
            # The parameter count depends on the camera model.
            params = read_next_bytes(fid, num_bytes=8 * model.num_params,
                                     format_char_sequence="d" * model.num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model.model_name,
                                        width=camera_properties[2],
                                        height=camera_properties[3],
                                        params=np.array(params))
        assert len(cameras) == num_cameras
    finally:
        if owns_fid:
            fid.close()
    return cameras
|
159 |
+
|
160 |
+
|
161 |
+
def write_cameras_text(cameras, path):
    """Write a dict of Camera records to a COLMAP cameras.txt file.

    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasText(const std::string& path)
        void Reconstruction::ReadCamerasText(const std::string& path)
    """
    header = ("# Camera list with one line of data per camera:\n"
              "# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n"
              "# Number of cameras: {}\n".format(len(cameras)))
    with open(path, "w") as fid:
        fid.write(header)
        for cam in cameras.values():
            fields = [cam.id, cam.model, cam.width, cam.height, *cam.params]
            fid.write(" ".join(str(f) for f in fields) + "\n")
|
176 |
+
|
177 |
+
|
178 |
+
def write_cameras_binary(cameras, path_to_model_file):
    """Serialize a dict of Camera records to a COLMAP cameras.bin file.

    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)

    Returns the input `cameras` dict unchanged (kept for caller compatibility).
    """
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(cameras), "Q")
        for cam in cameras.values():
            # The binary format stores the numeric model id, not the name.
            model_id = CAMERA_MODEL_NAMES[cam.model].model_id
            write_next_bytes(fid, [cam.id, model_id, cam.width, cam.height],
                             "iiQQ")
            for p in cam.params:
                write_next_bytes(fid, float(p), "d")
    return cameras
|
196 |
+
|
197 |
+
|
198 |
+
def read_images_text(path):
    """Parse a COLMAP images.txt file into a dict {image_id: Image}.

    Each image occupies two lines: pose metadata, then the 2D observations —
    so the loop reads lines explicitly instead of iterating the file.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesText(const std::string& path)
        void Reconstruction::WriteImagesText(const std::string& path)
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            elems = line.split()
            image_id = int(elems[0])
            qvec = np.array(tuple(map(float, elems[1:5])))
            tvec = np.array(tuple(map(float, elems[5:8])))
            camera_id = int(elems[8])
            image_name = elems[9]
            # Second line: POINTS2D[] as flat (X, Y, POINT3D_ID) triplets.
            pts = fid.readline().split()
            xys = np.column_stack([tuple(map(float, pts[0::3])),
                                   tuple(map(float, pts[1::3]))])
            point3D_ids = np.array(tuple(map(int, pts[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images
|
227 |
+
|
228 |
+
|
229 |
+
def read_images_binary(path_to_model_file=None, fid=None):
    """Read COLMAP images from a binary file or an open binary stream.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)

    :param path_to_model_file: path to images.bin; opened and closed here.
    :param fid: alternatively, an already-open binary stream (left open).
    :return: dict mapping image_id -> Image.
    :raises ValueError: if neither argument is provided.
    """
    if fid is None and path_to_model_file is None:
        raise ValueError("Either path_to_model_file or fid must be given.")
    images = {}
    owns_fid = fid is None
    if owns_fid:
        fid = open(path_to_model_file, "rb")
    # try/finally so a parse error cannot leak the file handle we opened.
    try:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            props = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = props[0]
            qvec = np.array(props[1:5])
            tvec = np.array(props[5:8])
            camera_id = props[8]
            # Image name is a null-terminated UTF-8 string. Accumulate raw
            # bytes and decode once at the end: the original decoded each
            # byte separately, which crashes on multi-byte (non-ASCII) names.
            name_bytes = b""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":
                name_bytes += current_char
                current_char = read_next_bytes(fid, 1, "c")[0]
            image_name = name_bytes.decode("utf-8")
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            x_y_id_s = read_next_bytes(fid, num_bytes=24 * num_points2D,
                                       format_char_sequence="ddq" * num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    finally:
        if owns_fid:
            fid.close()
    return images
|
265 |
+
|
266 |
+
|
267 |
+
def write_images_text(images, path):
    """Write a dict of Image records to a COLMAP images.txt file.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesText(const std::string& path)
        void Reconstruction::WriteImagesText(const std::string& path)
    """
    if len(images) == 0:
        mean_observations = 0
    else:
        mean_observations = sum(
            len(img.point3D_ids) for img in images.values()) / len(images)
    header = ("# Image list with two lines of data per image:\n"
              "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n"
              "# POINTS2D[] as (X, Y, POINT3D_ID)\n"
              "# Number of images: {}, mean observations per image: {}\n".format(
                  len(images), mean_observations))

    with open(path, "w") as fid:
        fid.write(header)
        for img in images.values():
            # First line: pose metadata.
            meta = [img.id, *img.qvec, *img.tvec, img.camera_id, img.name]
            fid.write(" ".join(map(str, meta)) + "\n")
            # Second line: flat (X, Y, POINT3D_ID) triplets.
            obs = [" ".join(map(str, [*xy, pid]))
                   for xy, pid in zip(img.xys, img.point3D_ids)]
            fid.write(" ".join(obs) + "\n")
|
293 |
+
|
294 |
+
|
295 |
+
def write_images_binary(images, path_to_model_file):
    """Serialize a dict of Image records to a COLMAP images.bin file.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(images), "Q")
        for _, img in images.items():
            write_next_bytes(fid, img.id, "i")
            write_next_bytes(fid, img.qvec.tolist(), "dddd")
            write_next_bytes(fid, img.tvec.tolist(), "ddd")
            write_next_bytes(fid, img.camera_id, "i")
            # Encode the whole name once: the original encoded character by
            # character, and a multi-byte UTF-8 character breaks the
            # one-byte "c" struct format for non-ASCII names.
            for name_byte in img.name.encode("utf-8"):
                write_next_bytes(fid, bytes([name_byte]), "c")
            write_next_bytes(fid, b"\x00", "c")  # null terminator
            write_next_bytes(fid, len(img.point3D_ids), "Q")
            for xy, p3d_id in zip(img.xys, img.point3D_ids):
                write_next_bytes(fid, [*xy, p3d_id], "ddq")
|
314 |
+
|
315 |
+
|
316 |
+
def read_points3D_text(path):
    """Parse a COLMAP points3D.txt file into a dict {point3D_id: Point3D}.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)
    """
    points3D = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            line = raw_line.strip()
            if not line or line.startswith("#"):
                continue
            elems = line.split()
            point3D_id = int(elems[0])
            xyz = np.array(tuple(map(float, elems[1:4])))
            rgb = np.array(tuple(map(int, elems[4:7])))
            error = float(elems[7])
            # After the error, the track alternates (IMAGE_ID, POINT2D_IDX).
            image_ids = np.array(tuple(map(int, elems[8::2])))
            point2D_idxs = np.array(tuple(map(int, elems[9::2])))
            points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
                                           error=error, image_ids=image_ids,
                                           point2D_idxs=point2D_idxs)
    return points3D
|
341 |
+
|
342 |
+
|
343 |
+
def read_points3D_binary(path_to_model_file=None, fid=None):
    """Read COLMAP 3D points from a binary file or an open binary stream.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)

    :param path_to_model_file: path to points3D.bin; opened and closed here.
    :param fid: alternatively, an already-open binary stream (left open).
    :return: dict mapping point3D_id -> Point3D.
    :raises ValueError: if neither argument is provided.
    """
    if fid is None and path_to_model_file is None:
        raise ValueError("Either path_to_model_file or fid must be given.")
    points3D = {}
    owns_fid = fid is None
    if owns_fid:
        fid = open(path_to_model_file, "rb")
    # try/finally so a parse error cannot leak the file handle we opened.
    try:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_points):
            props = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3D_id = props[0]
            xyz = np.array(props[1:4])
            rgb = np.array(props[4:7])
            error = np.array(props[7])
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            track_elems = read_next_bytes(
                fid, num_bytes=8 * track_length,
                format_char_sequence="ii" * track_length)
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D[point3D_id] = Point3D(
                id=point3D_id, xyz=xyz, rgb=rgb,
                error=error, image_ids=image_ids,
                point2D_idxs=point2D_idxs)
    finally:
        if owns_fid:
            fid.close()
    return points3D
|
374 |
+
|
375 |
+
|
376 |
+
def write_points3D_text(points3D, path):
    """Write a dict of Point3D records to a COLMAP points3D.txt file.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)
    """
    if len(points3D) == 0:
        mean_track_length = 0
    else:
        mean_track_length = sum(
            len(pt.image_ids) for pt in points3D.values()) / len(points3D)
    header = ("# 3D point list with one line of data per point:\n"
              "# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n"
              "# Number of points: {}, mean track length: {}\n".format(
                  len(points3D), mean_track_length))

    with open(path, "w") as fid:
        fid.write(header)
        for pt in points3D.values():
            meta = [pt.id, *pt.xyz, *pt.rgb, pt.error]
            fid.write(" ".join(map(str, meta)) + " ")
            # Track follows on the same line as (IMAGE_ID, POINT2D_IDX) pairs.
            track = [" ".join(map(str, [image_id, point2D]))
                     for image_id, point2D in zip(pt.image_ids, pt.point2D_idxs)]
            fid.write(" ".join(track) + "\n")
|
399 |
+
|
400 |
+
|
401 |
+
def write_points3D_binary(points3D, path_to_model_file):
    """Serialize a dict of Point3D records to a COLMAP points3D.bin file.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(points3D), "Q")
        for pt in points3D.values():
            write_next_bytes(fid, pt.id, "Q")
            write_next_bytes(fid, pt.xyz.tolist(), "ddd")
            write_next_bytes(fid, pt.rgb.tolist(), "BBB")
            write_next_bytes(fid, pt.error, "d")
            # Track: length, then (image_id, point2D_idx) pairs.
            write_next_bytes(fid, pt.image_ids.shape[0], "Q")
            for image_id, point2D_id in zip(pt.image_ids, pt.point2D_idxs):
                write_next_bytes(fid, [image_id, point2D_id], "ii")
|
418 |
+
|
419 |
+
|
420 |
+
def detect_model_format(path, ext):
    """Return True when cameras/images/points3D files with `ext` exist in `path`."""
    required = ("cameras", "images", "points3D")
    if all(os.path.isfile(os.path.join(path, name + ext)) for name in required):
        print("Detected model format: '" + ext + "'")
        return True
    return False
|
428 |
+
|
429 |
+
|
430 |
+
def read_model(path, ext=""):
    """Read a COLMAP model (cameras, images, points3D) from a directory.

    :param path: directory holding the model files.
    :param ext: ".bin" or ".txt"; auto-detected when left empty.
    :return: (cameras, images, points3D) dicts, or None if no format found.
    """
    if ext == "":
        # Try to detect the extension automatically.
        if detect_model_format(path, ".bin"):
            ext = ".bin"
        elif detect_model_format(path, ".txt"):
            ext = ".txt"
        else:
            print("Provide model format: '.bin' or '.txt'")
            return

    if ext == ".txt":
        readers = (read_cameras_text, read_images_text, read_points3D_text)
    else:
        readers = (read_cameras_binary, read_images_binary, read_points3D_binary)
    cameras = readers[0](os.path.join(path, "cameras" + ext))
    images = readers[1](os.path.join(path, "images" + ext))
    points3D = readers[2](os.path.join(path, "points3D" + ext))
    return cameras, images, points3D
|
450 |
+
|
451 |
+
|
452 |
+
def write_model(cameras, images, points3D, path, ext=".bin"):
    """Write a COLMAP model in text or binary format.

    :param ext: ".txt" for the text format, anything else writes binary.
    :return: the (cameras, images, points3D) inputs, unchanged.
    """
    if ext == ".txt":
        writers = (write_cameras_text, write_images_text, write_points3D_text)
    else:
        writers = (write_cameras_binary, write_images_binary, write_points3D_binary)
    writers[0](cameras, os.path.join(path, "cameras" + ext))
    writers[1](images, os.path.join(path, "images" + ext))
    writers[2](points3D, os.path.join(path, "points3D" + ext))
    return cameras, images, points3D
|
462 |
+
|
463 |
+
|
464 |
+
def qvec2rotmat(qvec):
    """Convert a quaternion (w, x, y, z) to a 3x3 rotation matrix."""
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)]])
|
475 |
+
|
476 |
+
|
477 |
+
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a quaternion (w, x, y, z).

    The quaternion is taken as the eigenvector of the symmetric 4x4 matrix K
    with the largest eigenvalue, sign-normalized so that w >= 0.
    """
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    K = np.array([
        [Rxx - Ryy - Rzz, 0, 0, 0],
        [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
        [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
        [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
    eigvals, eigvecs = np.linalg.eigh(K)
    qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    return qvec if qvec[0] >= 0 else -qvec
|
489 |
+
|
script.py
ADDED
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
### This is example of the script that will be run in the test environment.
|
2 |
+
### Some parts of the code are compulsory and you should NOT CHANGE THEM.
|
3 |
+
### They are between '''---compulsory---''' comments.
|
4 |
+
### You can change the rest of the code to define and test your solution.
|
5 |
+
### However, you should not change the signature of the provided function.
|
6 |
+
### The script would save "submission.parquet" file in the current directory.
|
7 |
+
### The actual logic of the solution is implemented in the `handcrafted_solution.py` file.
|
8 |
+
### The `handcrafted_solution.py` file is a placeholder for your solution.
|
9 |
+
### You should implement the logic of your solution in that file.
|
10 |
+
### You can use any additional files and subdirectories to organize your code.
|
11 |
+
|
12 |
+
'''---compulsory---'''
|
13 |
+
import hoho; hoho.setup() # YOU MUST CALL hoho.setup() BEFORE ANYTHING ELSE
|
14 |
+
import subprocess
|
15 |
+
import importlib
|
16 |
+
from pathlib import Path
|
17 |
+
import subprocess
|
18 |
+
|
19 |
+
|
20 |
+
### The function below is useful for installing additional python wheels.
|
21 |
+
def install_package_from_local_file(package_name, folder='packages'):
    """
    Installs a package from a local .whl file or a directory containing .whl files using pip.

    Parameters:
        package_name (str): the package to install.
        folder (str): directory holding the wheel file(s) for the package.
    """
    pth = str(Path(folder) / package_name)
    try:
        cmd = [subprocess.sys.executable, "-m", "pip", "install",
               "--no-index",          # do not use a package index
               "--find-links", pth,   # resolve wheels from the local folder
               package_name]
        subprocess.check_call(cmd)
        print(f"Package installed successfully from {pth}")
    except subprocess.CalledProcessError as e:
        print(f"Failed to install package from {pth}. Error: {e}")
|
37 |
+
|
38 |
+
|
39 |
+
# pip download webdataset -d packages/webdataset --platform manylinux1_x86_64 --python-version 38 --only-binary=:all:
# webdataset is installed from the wheels bundled in the local `packages/`
# directory (the pip call above shows how they were downloaded).
install_package_from_local_file('webdataset')
# install_package_from_local_file('tqdm')
|
42 |
+
|
43 |
+
### Here you can import any library or module you want.
|
44 |
+
### The code below is used to read and parse the input dataset.
|
45 |
+
### Please, do not modify it.
|
46 |
+
|
47 |
+
import webdataset as wds
|
48 |
+
from tqdm import tqdm
|
49 |
+
from typing import Dict
|
50 |
+
import pandas as pd
|
51 |
+
from transformers import AutoTokenizer
|
52 |
+
import os
|
53 |
+
import time
|
54 |
+
import io
|
55 |
+
from read_write_colmap import read_cameras_binary, read_images_binary, read_points3D_binary
|
56 |
+
from PIL import Image as PImage
|
57 |
+
import numpy as np
|
58 |
+
|
59 |
+
|
60 |
+
def proc(row, split='train'):
    """Regroup one raw webdataset row into a Sample.

    Keys arrive as 'name.ext'; everything before the first '.' selects the
    destination column. Image-like modalities (several views per house) are
    accumulated into lists; 'wireframe'/'mesh' dicts are flattened into the
    sample; 'k'/'r' keys are upper-cased to 'K'/'R'.

    NOTE: part of the compulsory dataset-parsing code — do not modify.

    :param row: dict of raw entries for one sample.
    :param split: unused here; kept for API symmetry with the dataset loader.
    :return: Sample wrapping the regrouped entries.
    """
    out = {}
    for k, v in row.items():
        colname = k.split('.')[0]
        if colname in {'ade20k', 'depthcm', 'gestalt'}:
            # Multiple per-view entries share one column -> collect in a list.
            if colname in out:
                out[colname].append(v)
            else:
                out[colname] = [v]
        elif colname in {'wireframe', 'mesh'}:
            # out.update({a: b.tolist() for a,b in v.items()})
            # Flatten the nested dict (vertices, edges, ...) into the sample.
            out.update({a: b for a,b in v.items()})
        elif colname in 'kr':
            # NOTE(review): substring test — matches 'k' and 'r' (and would
            # also match '' or 'kr'); maps the lower-case keys to 'K'/'R'.
            out[colname.upper()] = v
        else:
            out[colname] = v

    return Sample(out)
|
78 |
+
|
79 |
+
|
80 |
+
class Sample(Dict):
    # Dict subclass whose repr shows array shapes / element types instead of
    # full contents, keeping notebook output readable for large samples.
    def __repr__(self):
        return str({k: v.shape if hasattr(v, 'shape') else [type(v[0])] if isinstance(v, list) else type(v) for k,v in self.items()})
|
83 |
+
|
84 |
+
def convert_entry_to_human_readable(entry):
    """Decode the binary fields of a raw dataset entry.

    Fields listed in `already_good` are copied through unchanged; COLMAP
    blobs are parsed with the read_write_colmap helpers and image blobs are
    opened with PIL.

    NOTE: part of the compulsory dataset-parsing code — do not modify.
    NOTE(review): keys that are neither in `already_good` nor handled by one
    of the branches below are dropped from the output.
    """
    out = {}
    already_good = ['__key__', 'wf_vertices', 'wf_edges', 'edge_semantics', 'mesh_vertices', 'mesh_faces', 'face_semantics', 'K', 'R', 't']
    for k, v in entry.items():
        if k in already_good:
            out[k] = v
            continue
        if k == 'points3d':
            # COLMAP points3D.bin blob -> {point3D_id: Point3D}
            out[k] = read_points3D_binary(fid=io.BytesIO(v))
        if k == 'cameras':
            # COLMAP cameras.bin blob -> {camera_id: Camera}
            out[k] = read_cameras_binary(fid=io.BytesIO(v))
        if k == 'images':
            # COLMAP images.bin blob -> {image_id: Image}
            out[k] = read_images_binary(fid=io.BytesIO(v))
        if k in ['ade20k', 'gestalt']:
            # Per-view segmentation images, forced to 3-channel RGB.
            out[k] = [PImage.open(io.BytesIO(x)).convert('RGB') for x in v]
        if k == 'depthcm':
            # Per-view monocular depth maps (kept in their native mode).
            out[k] = [PImage.open(io.BytesIO(x)) for x in entry['depthcm']]
    return out
|
102 |
+
|
103 |
+
'''---end of compulsory---'''
|
104 |
+
|
105 |
+
def download_package(package_name, path_to_save='packages'):
    """
    Downloads a package using pip and saves it to a specified directory.

    Parameters:
        package_name (str): The name of the package to download.
        path_to_save (str): The path to the directory where the package will be saved.
    """
    try:
        # Mirrors the manual command:
        # pip download webdataset -d packages/webdataset --platform manylinux1_x86_64 --python-version 38 --only-binary=:all:
        subprocess.check_call([subprocess.sys.executable, "-m", "pip", "download", package_name,
                               "-d", str(Path(path_to_save)/package_name),  # Download the package to the specified directory
                               "--platform", "manylinux1_x86_64",  # Specify the platform
                               "--python-version", "38",  # Specify the Python version
                               "--only-binary=:all:"])  # Download only binary packages
        print(f'Package "{package_name}" downloaded successfully')
    except subprocess.CalledProcessError as e:
        # Fixed message typo: was "Failed to downloaded package".
        print(f'Failed to download package "{package_name}". Error: {e}')
|
123 |
+
|
124 |
+
|
125 |
+
### The part below is used to define and test your solution.

if __name__ == "__main__":
    from handcrafted_solution import predict
    print ("------------ Loading dataset------------ ")
    params = hoho.get_params()
    dataset = hoho.get_dataset(decode=None, split='all', dataset_type='webdataset')
    print('------------ Now you can do your solution ---------------')
    solution = []
    # Run the per-sample predictor and collect the wireframe predictions.
    for i, sample in enumerate(tqdm(dataset)):
        pred_vertices, pred_edges, semantics = predict(sample, visualize=False)
        solution.append({
            '__key__': sample['__key__'],
            'wf_vertices': pred_vertices.tolist(),
            'wf_edges': pred_edges,
            'edge_semantics': semantics,
        })
    print('------------ Saving results ---------------')
    # The submission file must contain exactly these four columns.
    sub = pd.DataFrame(solution, columns=["__key__", "wf_vertices", "wf_edges", "edge_semantics"])
    sub.to_parquet(Path(params['output_path']) / "submission.parquet")
    print("------------ Done ------------ ")
|
viz3d.py
ADDED
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
"""
|
3 |
+
Copyright [2022] [Paul-Edouard Sarlin and Philipp Lindenberger]
|
4 |
+
|
5 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
you may not use this file except in compliance with the License.
|
7 |
+
You may obtain a copy of the License at
|
8 |
+
|
9 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
|
11 |
+
Unless required by applicable law or agreed to in writing, software
|
12 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
See the License for the specific language governing permissions and
|
15 |
+
limitations under the License.
|
16 |
+
|
17 |
+
3D visualization based on plotly.
|
18 |
+
Works for a small number of points and cameras, might be slow otherwise.
|
19 |
+
|
20 |
+
1) Initialize a figure with `init_figure`
|
21 |
+
2) Add 3D points, camera frustums, or both as a pycolmap.Reconstruction
|
22 |
+
|
23 |
+
Written by Paul-Edouard Sarlin and Philipp Lindenberger.
|
24 |
+
"""
|
25 |
+
# Slightly modified by Dmytro Mishkin
|
26 |
+
|
27 |
+
from typing import Optional
|
28 |
+
import numpy as np
|
29 |
+
import pycolmap
|
30 |
+
import plotly.graph_objects as go
|
31 |
+
|
32 |
+
|
33 |
+
### Some helper functions for geometry
|
34 |
+
def qvec2rotmat(qvec):
    """Convert a quaternion (w, x, y, z) to a 3x3 rotation matrix."""
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)]])
|
45 |
+
|
46 |
+
|
47 |
+
def to_homogeneous(points):
    """Append a column of ones along the last axis (homogeneous coordinates)."""
    ones = np.ones(points.shape[:-1] + (1,), dtype=points.dtype)
    return np.concatenate([points, ones], axis=-1)
|
50 |
+
|
51 |
+
def t_to_proj_center(qvec, tvec):
    """Camera projection center C = -R^T t for a pose given as (qvec, tvec)."""
    R = qvec2rotmat(qvec)
    return -R.T @ tvec
|
55 |
+
|
56 |
+
def calib(params):
    """Build a 3x3 intrinsic matrix K from a COLMAP camera parameter vector.

    3 parameters -> (f, cx, cy) with a shared focal length; otherwise the
    first four entries are read as (fx, fy, cx, cy).
    """
    K = np.eye(3)
    if len(params) == 3:
        K[0, 0] = K[1, 1] = params[0]
        K[0, 2] = params[1]
        K[1, 2] = params[2]
    else:
        K[0, 0] = params[0]
        K[1, 1] = params[1]
        K[0, 2] = params[2]
        K[1, 2] = params[3]
    return K
|
69 |
+
|
70 |
+
|
71 |
+
### Plotting functions
|
72 |
+
|
73 |
+
def init_figure(height: int = 800) -> go.Figure:
    """Initialize a 3D plotly figure with dark theme and orthographic camera.

    :param height: figure height in pixels.
    :return: an empty go.Figure ready for plot_points / plot_lines_3d calls.
    """
    fig = go.Figure()
    # Shared axis style: hide everything except tick labels, auto-fit range.
    axes = dict(
        visible=False,
        showbackground=False,
        showgrid=False,
        showline=False,
        showticklabels=True,
        autorange=True,
    )
    fig.update_layout(
        template="plotly_dark",
        height=height,
        # Orthographic view looking along -z with y pointing down,
        # matching the usual camera-coordinate convention.
        scene_camera=dict(
            eye=dict(x=0., y=-.1, z=-2),
            up=dict(x=0, y=-1., z=0),
            projection=dict(type="orthographic")),
        scene=dict(
            xaxis=axes,
            yaxis=axes,
            zaxis=axes,
            aspectmode='data',  # keep real-world proportions
            dragmode='orbit',
        ),
        margin=dict(l=0, r=0, b=0, t=0, pad=0),
        # Horizontal legend pinned to the top-left of the plot.
        legend=dict(
            orientation="h",
            yanchor="top",
            y=0.99,
            xanchor="left",
            x=0.1
        ),
    )
    return fig
|
108 |
+
|
109 |
+
|
110 |
+
def plot_lines_3d(
        fig: go.Figure,
        pts: np.ndarray,
        color: str = 'rgba(255, 255, 255, 1)',
        ps: int = 2,
        colorscale: Optional[str] = None,
        name: Optional[str] = None):
    """Plot a batch of 3D line segments.

    Args:
        fig: figure the traces are added to.
        pts: array of shape (num_lines, points_per_line, 3).
        color: plotly color shared by all segments.
        ps: line width in pixels. Previously ignored (width was hardcoded
            to 2, which coincided with the default); now honored.
        colorscale: unused, kept for signature compatibility with plot_points.
        name: optional legend name attached to the traces.
    """
    xs = pts[..., 0]
    ys = pts[..., 1]
    zs = pts[..., 2]
    for line_x, line_y, line_z in zip(xs, ys, zs):
        # showlegend is set per trace: the original fig.update_traces(...)
        # also clobbered the legend flag of every pre-existing trace.
        fig.add_trace(go.Scatter3d(
            x=line_x, y=line_y, z=line_z,
            mode='lines',
            name=name,
            legendgroup=name,
            showlegend=False,
            line=dict(color=color, width=ps)))
|
127 |
+
|
128 |
+
|
129 |
+
def plot_points(
        fig: go.Figure,
        pts: np.ndarray,
        color: str = 'rgba(255, 0, 0, 1)',
        ps: int = 2,
        colorscale: Optional[str] = None,
        name: Optional[str] = None):
    """Add a 3D marker scatter of *pts* (shape (N, 3)) to *fig*."""
    xs, ys, zs = pts.T
    marker_style = dict(size=ps, color=color, line_width=0.0,
                        colorscale=colorscale)
    trace = go.Scatter3d(
        x=xs, y=ys, z=zs,
        mode='markers',
        name=name,
        legendgroup=name,
        marker=marker_style)
    fig.add_trace(trace)
|
143 |
+
|
144 |
+
def plot_camera(
        fig: go.Figure,
        R: np.ndarray,
        t: np.ndarray,
        K: np.ndarray,
        color: str = 'rgb(0, 0, 255)',
        name: Optional[str] = None,
        legendgroup: Optional[str] = None,
        size: float = 1.0):
    """Draw a camera frustum (image rectangle + view pyramid) from pose R, t and intrinsics K."""
    width, height = K[0, 2] * 2, K[1, 2] * 2
    # Closed image-plane rectangle in pixel coordinates (first corner repeated).
    img_corners = np.array([[0, 0], [width, 0], [width, height], [0, height], [0, 0]])
    if size is None:
        frustum_scale = 1.0
    else:
        image_extent = max(size * width / 1024.0, size * height / 1024.0)
        world_extent = max(width, height) / (K[0, 0] + K[1, 1]) / 0.5
        frustum_scale = 0.5 * image_extent / world_extent
    # Back-project the rectangle through K^-1 and place it in world coordinates.
    rays = to_homogeneous(img_corners) @ np.linalg.inv(K).T
    world_corners = (rays / 2 * frustum_scale) @ R.T + t

    cx, cy, cz = world_corners.T
    fig.add_trace(go.Scatter3d(
        x=cx, y=cy, z=cz, line=dict(color=color), legendgroup=legendgroup,
        name=name, marker=dict(size=0.0001), showlegend=False))

    # Filled pyramid: apex (projection center) plus the four corners.
    px, py, pz = np.concatenate(([t], world_corners)).T
    apex_idx = [0, 0, 0, 0]
    side_a = [1, 2, 3, 4]
    side_b = [2, 3, 4, 1]
    fig.add_trace(go.Mesh3d(
        x=px, y=py, z=pz, color=color, i=apex_idx, j=side_a, k=side_b,
        legendgroup=legendgroup, name=name, showlegend=False))

    # Wireframe outline over the same triangles.
    tri_indices = np.vstack((apex_idx, side_a, side_b)).T
    all_vertices = np.concatenate(([t], world_corners))
    edge_points = np.array([
        all_vertices[idx] for idx in tri_indices.reshape(-1)
    ])
    ex, ey, ez = edge_points.T
    fig.add_trace(go.Scatter3d(
        x=ex, y=ey, z=ez, mode='lines', legendgroup=legendgroup,
        name=name, line=dict(color=color, width=1), showlegend=False))
|
191 |
+
|
192 |
+
|
193 |
+
def plot_camera_colmap(
        fig: go.Figure,
        image: pycolmap.Image,
        camera: pycolmap.Camera,
        name: Optional[str] = None,
        **kwargs):
    """Plot a camera frustum from PyCOLMAP image/camera objects."""
    intrinsics = calib(camera.params)
    # Skip degenerate calibrations with an absurdly large focal length.
    if intrinsics[0][0] > 10000:
        print("Bad camera")
        return
    plot_camera(
        fig,
        qvec2rotmat(image.qvec).T,
        t_to_proj_center(image.qvec, image.tvec),
        intrinsics,
        name=name or str(image.id),
        **kwargs)
|
211 |
+
|
212 |
+
|
213 |
+
def plot_cameras(
        fig: go.Figure,
        reconstruction,  #: pycolmap.Reconstruction
        **kwargs):
    """Plot every camera of the reconstruction as a frustum."""
    cameras = reconstruction["cameras"]
    for _, img in reconstruction["images"].items():
        plot_camera_colmap(fig, img, cameras[img.camera_id], **kwargs)
|
221 |
+
|
222 |
+
|
223 |
+
def plot_reconstruction(
        fig: go.Figure,
        rec,
        color: str = 'rgb(0, 0, 255)',
        name: Optional[str] = None,
        points: bool = True,
        cameras: bool = True,
        cs: float = 1.0,
        single_color_points=False,
        camera_color='rgba(0, 255, 0, 0.5)'):
    """Plot a reconstruction loaded via read_write_colmap.py: sparse points and camera frusta."""
    # Collect the sparse point cloud and its per-point RGB colors.
    coords = [p3D.xyz for p3D in rec['points'].values()]
    colors = [p3D.rgb for p3D in rec['points'].values()]

    if points:
        point_color = color if single_color_points else np.array(colors)
        plot_points(fig, np.array(coords), color=point_color, ps=1, name=name)
    if cameras:
        plot_cameras(fig, rec, color=camera_color, legendgroup=name, size=cs)
|
245 |
+
|
246 |
+
|
247 |
+
def plot_pointcloud(
        fig: go.Figure,
        pts: np.ndarray,
        colors: np.ndarray,
        ps: int = 2,
        name: Optional[str] = None):
    """Plot a colored 3D point cloud (thin wrapper around plot_points)."""
    plot_points(fig, np.array(pts), color=colors, ps=ps, name=name)
|
255 |
+
|
256 |
+
|
257 |
+
def plot_triangle_mesh(
        fig: go.Figure,
        vert: np.ndarray,
        colors: np.ndarray,
        triangles: np.ndarray,
        name: Optional[str] = None):
    """Plot a vertex-colored triangle mesh (vert: (N,3), triangles: (M,3) indices)."""
    # Colors are expected in [0, 1]; plotly wants 0-255 channel values.
    vertex_colors = np.clip(255 * colors, 0, 255)
    mesh = go.Mesh3d(
        x=vert[:, 0],
        y=vert[:, 1],
        z=vert[:, 2],
        vertexcolor=vertex_colors,
        # i, j, k hold the vertex indices of each triangle.
        i=triangles[:, 0],
        j=triangles[:, 1],
        k=triangles[:, 2],
        name=name,
        showscale=False,
    )
    fig.add_trace(mesh)
|
278 |
+
|
279 |
+
def plot_estimate_and_gt(pred_vertices, pred_connections, gt_vertices=None, gt_connections=None):
    """Show a predicted wireframe (blue) and optionally the ground truth (green); returns the figure."""
    fig3d = init_figure()

    pred_rgb = (30, 20, 255)
    pred_colors = [pred_rgb for _ in range(len(pred_vertices))]
    plot_points(fig3d, pred_vertices, color=pred_colors, ps=10)
    # One (2, 3) segment per predicted edge.
    segments = [
        np.stack([pred_vertices[c[0]], pred_vertices[c[1]]], axis=0)
        for c in pred_connections
    ]
    plot_lines_3d(fig3d, np.array(segments), pred_colors, ps=4)

    if gt_vertices is not None:
        gt_rgb = (30, 255, 20)
        gt_colors = [gt_rgb for _ in range(len(gt_vertices))]
        plot_points(fig3d, gt_vertices, color=gt_colors, ps=10)
        if gt_connections is not None:
            gt_segments = [
                np.stack([gt_vertices[c[0]], gt_vertices[c[1]]], axis=0)
                for c in gt_connections
            ]
            plot_lines_3d(fig3d, np.array(gt_segments), gt_colors, ps=4)

    fig3d.show()
    return fig3d
|