diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..464776505561c5dc12bdc4e1616b33358ee10bcc 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/compare_zoedepth.png filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index e0d7706fcf98d9a4fe73532b7b68234964016502..42bcd1aec24f3926863146a99e68f1e2c82085da 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,114 @@
----
-title: Diffuse2PBR
-emoji: 🏆
-colorFrom: pink
-colorTo: yellow
-sdk: gradio
-sdk_version: 5.6.0
-app_file: app.py
-pinned: false
-license: cc-by-nc-2.0
-short_description: Convert Diffuse Textures to Height and Normal maps
----
-
-Check out the configuration reference at https://huggingface.co./docs/hub/spaces-config-reference
+# Depth Anything V2 for Metric Depth Estimation
+
+![teaser](./assets/compare_zoedepth.png)
+
+Here we provide a simple codebase for fine-tuning our Depth Anything V2 pre-trained encoder for metric depth estimation. Built on this powerful encoder, we use a simple DPT head to regress depth. We fine-tune the pre-trained encoder on the synthetic Hypersim and Virtual KITTI datasets for indoor and outdoor metric depth estimation, respectively.
+
+
+# Pre-trained Models
+
+We provide **six metric depth models** of three scales for indoor and outdoor scenes, respectively.
+
+| Base Model | Params | Indoor (Hypersim) | Outdoor (Virtual KITTI 2) |
+|:-|-:|:-:|:-:|
+| Depth-Anything-V2-Small | 24.8M | [Download](https://huggingface.co./depth-anything/Depth-Anything-V2-Metric-Hypersim-Small/resolve/main/depth_anything_v2_metric_hypersim_vits.pth?download=true) | [Download](https://huggingface.co./depth-anything/Depth-Anything-V2-Metric-VKITTI-Small/resolve/main/depth_anything_v2_metric_vkitti_vits.pth?download=true) |
+| Depth-Anything-V2-Base | 97.5M | [Download](https://huggingface.co./depth-anything/Depth-Anything-V2-Metric-Hypersim-Base/resolve/main/depth_anything_v2_metric_hypersim_vitb.pth?download=true) | [Download](https://huggingface.co./depth-anything/Depth-Anything-V2-Metric-VKITTI-Base/resolve/main/depth_anything_v2_metric_vkitti_vitb.pth?download=true) |
+| Depth-Anything-V2-Large | 335.3M | [Download](https://huggingface.co./depth-anything/Depth-Anything-V2-Metric-Hypersim-Large/resolve/main/depth_anything_v2_metric_hypersim_vitl.pth?download=true) | [Download](https://huggingface.co./depth-anything/Depth-Anything-V2-Metric-VKITTI-Large/resolve/main/depth_anything_v2_metric_vkitti_vitl.pth?download=true) |
+
+*We recommend first trying our larger models (if the computational cost is affordable) and the indoor version.*
+
+## Usage
+
+### Preparation
+
+```bash
+git clone https://github.com/DepthAnything/Depth-Anything-V2
+cd Depth-Anything-V2/metric_depth
+pip install -r requirements.txt
+```
+
+Download the checkpoints listed [here](#pre-trained-models) and put them under the `checkpoints` directory.
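+
+As an alternative to downloading the checkpoints manually, the following is a minimal sketch that fetches one checkpoint into `checkpoints/` with the `huggingface_hub` package (assumed to be installed separately, e.g. `pip install huggingface_hub`); the repository and file names follow the Hypersim Large link in the table above.
+
+```python
+# Sketch: download one metric-depth checkpoint programmatically.
+# Assumes `pip install huggingface_hub`; repo_id and filename mirror the
+# Depth-Anything-V2-Metric-Hypersim-Large download link in the table above.
+# If a mirror endpoint is needed, set the HF_ENDPOINT environment variable first.
+from huggingface_hub import hf_hub_download
+
+ckpt_path = hf_hub_download(
+    repo_id="depth-anything/Depth-Anything-V2-Metric-Hypersim-Large",
+    filename="depth_anything_v2_metric_hypersim_vitl.pth",
+    local_dir="checkpoints",  # keeps the layout that run.py and the snippets below expect
+)
+print(ckpt_path)
+```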
+
+### Use our models
+```python
+import cv2
+import torch
+
+from depth_anything_v2.dpt import DepthAnythingV2
+
+model_configs = {
+    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
+    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
+    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}
+}
+
+encoder = 'vitl' # or 'vits', 'vitb'
+dataset = 'hypersim' # 'hypersim' for indoor model, 'vkitti' for outdoor model
+max_depth = 20 # 20 for indoor model, 80 for outdoor model
+
+model = DepthAnythingV2(**{**model_configs[encoder], 'max_depth': max_depth})
+model.load_state_dict(torch.load(f'checkpoints/depth_anything_v2_metric_{dataset}_{encoder}.pth', map_location='cpu'))
+model.eval()
+
+raw_img = cv2.imread('your/image/path')
+depth = model.infer_image(raw_img) # HxW depth map in meters, as a numpy array
+```
+
+### Running script on images
+
+Here, we take the `vitl` encoder as an example. You can also use the `vitb` or `vits` encoder.
+
+```bash
+# indoor scenes
+python run.py \
+    --encoder vitl \
+    --load-from checkpoints/depth_anything_v2_metric_hypersim_vitl.pth \
+    --max-depth 20 \
+    --img-path <path> --outdir <outdir> [--input-size <size>] [--save-numpy]
+
+# outdoor scenes
+python run.py \
+    --encoder vitl \
+    --load-from checkpoints/depth_anything_v2_metric_vkitti_vitl.pth \
+    --max-depth 80 \
+    --img-path <path> --outdir <outdir> [--input-size <size>] [--save-numpy]
+```
+
+### Project 2D images to point clouds
+
+```bash
+python depth_to_pointcloud.py \
+    --encoder vitl \
+    --load-from checkpoints/depth_anything_v2_metric_hypersim_vitl.pth \
+    --max-depth 20 \
+    --img-path <path> --outdir <outdir>
+```
+
+### Reproduce training
+
+Please first prepare the [Hypersim](https://github.com/apple/ml-hypersim) and [Virtual KITTI 2](https://europe.naverlabs.com/research/computer-vision/proxy-virtual-worlds-vkitti-2/) datasets.
Then: + +```bash +bash dist_train.sh +``` + + +## Citation + +If you find this project useful, please consider citing: + +```bibtex +@article{depth_anything_v2, + title={Depth Anything V2}, + author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Zhao, Zhen and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang}, + journal={arXiv:2406.09414}, + year={2024} +} + +@inproceedings{depth_anything_v1, + title={Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data}, + author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang}, + booktitle={CVPR}, + year={2024} +} +``` diff --git a/assets/compare_zoedepth.png b/assets/compare_zoedepth.png new file mode 100644 index 0000000000000000000000000000000000000000..bf04e58dd253393a9a1f12229a493bac9e44e98b --- /dev/null +++ b/assets/compare_zoedepth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8044e39ef6cb4aaabea9a81333fa1ff2d3e07448e7f9f43f77f471aba72a12e0 +size 9187108 diff --git a/depth_anything_v2/__pycache__/dinov2.cpython-311.pyc b/depth_anything_v2/__pycache__/dinov2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e30162ef189feebbdaca250f3ea9084d760ac9c7 Binary files /dev/null and b/depth_anything_v2/__pycache__/dinov2.cpython-311.pyc differ diff --git a/depth_anything_v2/__pycache__/dinov2.cpython-39.pyc b/depth_anything_v2/__pycache__/dinov2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a8ed9e74d390dee46df20c4818dc2609237ba2c Binary files /dev/null and b/depth_anything_v2/__pycache__/dinov2.cpython-39.pyc differ diff --git a/depth_anything_v2/__pycache__/dpt.cpython-311.pyc b/depth_anything_v2/__pycache__/dpt.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d24c25e118e73d7253cbfd4578070b6aa3461609 Binary files /dev/null and b/depth_anything_v2/__pycache__/dpt.cpython-311.pyc differ diff --git a/depth_anything_v2/__pycache__/dpt.cpython-39.pyc b/depth_anything_v2/__pycache__/dpt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b49ae903cf2cef8a4990402d1ec544d64533247 Binary files /dev/null and b/depth_anything_v2/__pycache__/dpt.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2.py b/depth_anything_v2/dinov2.py new file mode 100644 index 0000000000000000000000000000000000000000..b336796480cd9c25afa869c79ee8f19af88b11c9 --- /dev/null +++ b/depth_anything_v2/dinov2.py @@ -0,0 +1,415 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. 
+ +# References: +# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py + +from functools import partial +import math +import logging +from typing import Sequence, Tuple, Union, Callable + +import torch +import torch.nn as nn +import torch.utils.checkpoint +from torch.nn.init import trunc_normal_ + +from .dinov2_layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block + + +logger = logging.getLogger("dinov2") + + +def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = ".".join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +class BlockChunk(nn.ModuleList): + def forward(self, x): + for b in self: + x = b(x) + return x + + +class DinoVisionTransformer(nn.Module): + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + ffn_bias=True, + proj_bias=True, + drop_path_rate=0.0, + drop_path_uniform=False, + init_values=None, # for layerscale: None or 0 => no layerscale + embed_layer=PatchEmbed, + act_layer=nn.GELU, + block_fn=Block, + ffn_layer="mlp", + block_chunks=1, + num_register_tokens=0, + interpolate_antialias=False, + interpolate_offset=0.1, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + proj_bias (bool): enable bias for proj in attn if True + ffn_bias (bool): enable bias for ffn if True + drop_path_rate (float): stochastic depth rate + drop_path_uniform (bool): apply uniform drop rate across blocks + weight_init (str): weight init scheme + init_values (float): layer-scale init values + embed_layer (nn.Module): patch embedding layer + act_layer (nn.Module): MLP activation layer + block_fn (nn.Module): transformer block class + ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity" + block_chunks: (int) split block sequence into block_chunks units for FSDP wrap + num_register_tokens: (int) number of extra cls tokens (so-called "registers") + interpolate_antialias: (str) flag to apply anti-aliasing when interpolating positional embeddings + interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings + """ + super().__init__() + norm_layer = partial(nn.LayerNorm, eps=1e-6) + + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 1 + self.n_blocks = depth + self.num_heads = num_heads + self.patch_size = patch_size + self.num_register_tokens = num_register_tokens + self.interpolate_antialias = interpolate_antialias + self.interpolate_offset = interpolate_offset + + self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = 
nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + assert num_register_tokens >= 0 + self.register_tokens = ( + nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None + ) + + if drop_path_uniform is True: + dpr = [drop_path_rate] * depth + else: + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + + if ffn_layer == "mlp": + logger.info("using MLP layer as FFN") + ffn_layer = Mlp + elif ffn_layer == "swiglufused" or ffn_layer == "swiglu": + logger.info("using SwiGLU layer as FFN") + ffn_layer = SwiGLUFFNFused + elif ffn_layer == "identity": + logger.info("using Identity layer as FFN") + + def f(*args, **kwargs): + return nn.Identity() + + ffn_layer = f + else: + raise NotImplementedError + + blocks_list = [ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + ffn_bias=ffn_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + ffn_layer=ffn_layer, + init_values=init_values, + ) + for i in range(depth) + ] + if block_chunks > 0: + self.chunked_blocks = True + chunked_blocks = [] + chunksize = depth // block_chunks + for i in range(0, depth, chunksize): + # this is to keep the block index consistent if we chunk the block list + chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize]) + self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks]) + else: + self.chunked_blocks = False + self.blocks = nn.ModuleList(blocks_list) + + self.norm = norm_layer(embed_dim) + self.head = nn.Identity() + + self.mask_token = nn.Parameter(torch.zeros(1, embed_dim)) + + self.init_weights() + + def init_weights(self): + trunc_normal_(self.pos_embed, std=0.02) + nn.init.normal_(self.cls_token, std=1e-6) + if self.register_tokens is not None: + nn.init.normal_(self.register_tokens, std=1e-6) + named_apply(init_weights_vit_timm, self) + + def interpolate_pos_encoding(self, x, w, h): + previous_dtype = x.dtype + npatch = x.shape[1] - 1 + N = self.pos_embed.shape[1] - 1 + if npatch == N and w == h: + return self.pos_embed + pos_embed = self.pos_embed.float() + class_pos_embed = pos_embed[:, 0] + patch_pos_embed = pos_embed[:, 1:] + dim = x.shape[-1] + w0 = w // self.patch_size + h0 = h // self.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + # DINOv2 with register modify the interpolate_offset from 0.1 to 0.0 + w0, h0 = w0 + self.interpolate_offset, h0 + self.interpolate_offset + # w0, h0 = w0 + 0.1, h0 + 0.1 + + sqrt_N = math.sqrt(N) + sx, sy = float(w0) / sqrt_N, float(h0) / sqrt_N + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed.reshape(1, int(sqrt_N), int(sqrt_N), dim).permute(0, 3, 1, 2), + scale_factor=(sx, sy), + # (int(w0), int(h0)), # to solve the upsampling shape issue + mode="bicubic", + antialias=self.interpolate_antialias + ) + + assert int(w0) == patch_pos_embed.shape[-2] + assert int(h0) == patch_pos_embed.shape[-1] + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype) + + def prepare_tokens_with_masks(self, x, masks=None): + B, nc, w, h = x.shape + x = self.patch_embed(x) + if masks is not None: + x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x) + 
+ x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + x = x + self.interpolate_pos_encoding(x, w, h) + + if self.register_tokens is not None: + x = torch.cat( + ( + x[:, :1], + self.register_tokens.expand(x.shape[0], -1, -1), + x[:, 1:], + ), + dim=1, + ) + + return x + + def forward_features_list(self, x_list, masks_list): + x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)] + for blk in self.blocks: + x = blk(x) + + all_x = x + output = [] + for x, masks in zip(all_x, masks_list): + x_norm = self.norm(x) + output.append( + { + "x_norm_clstoken": x_norm[:, 0], + "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1], + "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :], + "x_prenorm": x, + "masks": masks, + } + ) + return output + + def forward_features(self, x, masks=None): + if isinstance(x, list): + return self.forward_features_list(x, masks) + + x = self.prepare_tokens_with_masks(x, masks) + + for blk in self.blocks: + x = blk(x) + + x_norm = self.norm(x) + return { + "x_norm_clstoken": x_norm[:, 0], + "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1], + "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :], + "x_prenorm": x, + "masks": masks, + } + + def _get_intermediate_layers_not_chunked(self, x, n=1): + x = self.prepare_tokens_with_masks(x) + # If n is an int, take the n last blocks. If it's a list, take them + output, total_block_len = [], len(self.blocks) + blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n + for i, blk in enumerate(self.blocks): + x = blk(x) + if i in blocks_to_take: + output.append(x) + assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found" + return output + + def _get_intermediate_layers_chunked(self, x, n=1): + x = self.prepare_tokens_with_masks(x) + output, i, total_block_len = [], 0, len(self.blocks[-1]) + # If n is an int, take the n last blocks. 
If it's a list, take them + blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n + for block_chunk in self.blocks: + for blk in block_chunk[i:]: # Passing the nn.Identity() + x = blk(x) + if i in blocks_to_take: + output.append(x) + i += 1 + assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found" + return output + + def get_intermediate_layers( + self, + x: torch.Tensor, + n: Union[int, Sequence] = 1, # Layers or n last layers to take + reshape: bool = False, + return_class_token: bool = False, + norm=True + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]: + if self.chunked_blocks: + outputs = self._get_intermediate_layers_chunked(x, n) + else: + outputs = self._get_intermediate_layers_not_chunked(x, n) + if norm: + outputs = [self.norm(out) for out in outputs] + class_tokens = [out[:, 0] for out in outputs] + outputs = [out[:, 1 + self.num_register_tokens:] for out in outputs] + if reshape: + B, _, w, h = x.shape + outputs = [ + out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous() + for out in outputs + ] + if return_class_token: + return tuple(zip(outputs, class_tokens)) + return tuple(outputs) + + def forward(self, *args, is_training=False, **kwargs): + ret = self.forward_features(*args, **kwargs) + if is_training: + return ret + else: + return self.head(ret["x_norm_clstoken"]) + + +def init_weights_vit_timm(module: nn.Module, name: str = ""): + """ViT weight initialization, original timm impl (for reproducibility)""" + if isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=0.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + + +def vit_small(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_base(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_large(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs): + """ + Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64 + """ + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=1536, + depth=40, + num_heads=24, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def DINOv2(model_name): + model_zoo = { + "vits": vit_small, + "vitb": vit_base, + "vitl": vit_large, + "vitg": vit_giant2 + } + + return model_zoo[model_name]( + img_size=518, + patch_size=14, + init_values=1.0, + ffn_layer="mlp" if model_name != "vitg" else "swiglufused", + block_chunks=0, + num_register_tokens=0, + interpolate_antialias=False, + interpolate_offset=0.1 + ) \ No newline at end of file diff --git a/depth_anything_v2/dinov2_layers/__init__.py 
b/depth_anything_v2/dinov2_layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e59a83eb90512d763b03e4d38536b6ae07e87541 --- /dev/null +++ b/depth_anything_v2/dinov2_layers/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .mlp import Mlp +from .patch_embed import PatchEmbed +from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused +from .block import NestedTensorBlock +from .attention import MemEffAttention diff --git a/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e4840a6070e9434a40ee66e584aa6d9b351ddd5 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd27ab574b4ce03c6427a6f5b9291a49b9e96a5f Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f8def4f318fec607079e17c4410100f2ff59b4a Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de779c10e5b8a241dae8b7cd85d65dfeef2219ea Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ab2b7c4437c68d2459b3a2177834b100d8b739 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f66fac1cca32795f9dd64efe7bd1d341c45edd88 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cabc7a155ae342ad9cfdb35263d91955ade21a89 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48adf2b2c31cc65bdfe82a9ecfaecd8b10640a03 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-39.pyc 
differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26fda0c6441b7db4d19b24e6d6a86bbc0377dc9b Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66605f52f50026b2e487fcdcebcf45c4db5f6076 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d87d437807a69b42f47fb6c50991aa284758eadb Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b8c1245f4fdec89e1cf52650e8466b53fb91aea Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d59604d320e25130ed0c7aeadec253d69c3358fc Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e0ab465ba7cb25189a0b27c217e4a164edc8abc Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-311.pyc b/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14149b596f45de5d3827024c4d86fcdafe8c4891 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-311.pyc differ diff --git a/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-39.pyc b/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..990421234ad0b410e39b5a2f4d60504c18b54594 Binary files /dev/null and b/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-39.pyc differ diff --git a/depth_anything_v2/dinov2_layers/attention.py b/depth_anything_v2/dinov2_layers/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..dea0c82d55f052bf4bcb5896ad8c37158ef523d5 --- /dev/null +++ b/depth_anything_v2/dinov2_layers/attention.py @@ -0,0 +1,83 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py + +import logging + +from torch import Tensor +from torch import nn + + +logger = logging.getLogger("dinov2") + + +try: + from xformers.ops import memory_efficient_attention, unbind, fmha + + XFORMERS_AVAILABLE = True +except ImportError: + logger.warning("xFormers not available") + XFORMERS_AVAILABLE = False + + +class Attention(nn.Module): + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + proj_bias: bool = True, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ) -> None: + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x: Tensor) -> Tensor: + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + attn = q @ k.transpose(-2, -1) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class MemEffAttention(Attention): + def forward(self, x: Tensor, attn_bias=None) -> Tensor: + if not XFORMERS_AVAILABLE: + assert attn_bias is None, "xFormers is required for nested tensors usage" + return super().forward(x) + + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads) + + q, k, v = unbind(qkv, 2) + + x = memory_efficient_attention(q, k, v, attn_bias=attn_bias) + x = x.reshape([B, N, C]) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + \ No newline at end of file diff --git a/depth_anything_v2/dinov2_layers/block.py b/depth_anything_v2/dinov2_layers/block.py new file mode 100644 index 0000000000000000000000000000000000000000..f91f3f07bd15fba91c67068c8dce2bb22d505bf7 --- /dev/null +++ b/depth_anything_v2/dinov2_layers/block.py @@ -0,0 +1,252 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py + +import logging +from typing import Callable, List, Any, Tuple, Dict + +import torch +from torch import nn, Tensor + +from .attention import Attention, MemEffAttention +from .drop_path import DropPath +from .layer_scale import LayerScale +from .mlp import Mlp + + +logger = logging.getLogger("dinov2") + + +try: + from xformers.ops import fmha + from xformers.ops import scaled_index_add, index_select_cat + + XFORMERS_AVAILABLE = True +except ImportError: + logger.warning("xFormers not available") + XFORMERS_AVAILABLE = False + + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = False, + proj_bias: bool = True, + ffn_bias: bool = True, + drop: float = 0.0, + attn_drop: float = 0.0, + init_values=None, + drop_path: float = 0.0, + act_layer: Callable[..., nn.Module] = nn.GELU, + norm_layer: Callable[..., nn.Module] = nn.LayerNorm, + attn_class: Callable[..., nn.Module] = Attention, + ffn_layer: Callable[..., nn.Module] = Mlp, + ) -> None: + super().__init__() + # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}") + self.norm1 = norm_layer(dim) + self.attn = attn_class( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + attn_drop=attn_drop, + proj_drop=drop, + ) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = ffn_layer( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + bias=ffn_bias, + ) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.sample_drop_ratio = drop_path + + def forward(self, x: Tensor) -> Tensor: + def attn_residual_func(x: Tensor) -> Tensor: + return self.ls1(self.attn(self.norm1(x))) + + def ffn_residual_func(x: Tensor) -> Tensor: + return self.ls2(self.mlp(self.norm2(x))) + + if self.training and self.sample_drop_ratio > 0.1: + # the overhead is compensated only for a drop path rate larger than 0.1 + x = drop_add_residual_stochastic_depth( + x, + residual_func=attn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + ) + x = drop_add_residual_stochastic_depth( + x, + residual_func=ffn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + ) + elif self.training and self.sample_drop_ratio > 0.0: + x = x + self.drop_path1(attn_residual_func(x)) + x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2 + else: + x = x + attn_residual_func(x) + x = x + ffn_residual_func(x) + return x + + +def drop_add_residual_stochastic_depth( + x: Tensor, + residual_func: Callable[[Tensor], Tensor], + sample_drop_ratio: float = 0.0, +) -> Tensor: + # 1) extract subset using permutation + b, n, d = x.shape + sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) + brange = (torch.randperm(b, device=x.device))[:sample_subset_size] + x_subset = x[brange] + + # 2) apply residual_func to get residual + residual = residual_func(x_subset) + + x_flat = x.flatten(1) + residual = residual.flatten(1) + + residual_scale_factor = b / sample_subset_size + + # 3) add the residual + x_plus_residual = 
torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) + return x_plus_residual.view_as(x) + + +def get_branges_scales(x, sample_drop_ratio=0.0): + b, n, d = x.shape + sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) + brange = (torch.randperm(b, device=x.device))[:sample_subset_size] + residual_scale_factor = b / sample_subset_size + return brange, residual_scale_factor + + +def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None): + if scaling_vector is None: + x_flat = x.flatten(1) + residual = residual.flatten(1) + x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) + else: + x_plus_residual = scaled_index_add( + x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor + ) + return x_plus_residual + + +attn_bias_cache: Dict[Tuple, Any] = {} + + +def get_attn_bias_and_cat(x_list, branges=None): + """ + this will perform the index select, cat the tensors, and provide the attn_bias from cache + """ + batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list] + all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list)) + if all_shapes not in attn_bias_cache.keys(): + seqlens = [] + for b, x in zip(batch_sizes, x_list): + for _ in range(b): + seqlens.append(x.shape[1]) + attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens) + attn_bias._batch_sizes = batch_sizes + attn_bias_cache[all_shapes] = attn_bias + + if branges is not None: + cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1]) + else: + tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list) + cat_tensors = torch.cat(tensors_bs1, dim=1) + + return attn_bias_cache[all_shapes], cat_tensors + + +def drop_add_residual_stochastic_depth_list( + x_list: List[Tensor], + residual_func: Callable[[Tensor, Any], Tensor], + sample_drop_ratio: float = 0.0, + scaling_vector=None, +) -> Tensor: + # 1) generate random set of indices for dropping samples in the batch + branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list] + branges = [s[0] for s in branges_scales] + residual_scale_factors = [s[1] for s in branges_scales] + + # 2) get attention bias and index+concat the tensors + attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges) + + # 3) apply residual_func to get residual, and split the result + residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore + + outputs = [] + for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors): + outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x)) + return outputs + + +class NestedTensorBlock(Block): + def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]: + """ + x_list contains a list of tensors to nest together and run + """ + assert isinstance(self.attn, MemEffAttention) + + if self.training and self.sample_drop_ratio > 0.0: + + def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.attn(self.norm1(x), attn_bias=attn_bias) + + def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.mlp(self.norm2(x)) + + x_list = drop_add_residual_stochastic_depth_list( + x_list, + residual_func=attn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + scaling_vector=self.ls1.gamma if isinstance(self.ls1, 
LayerScale) else None, + ) + x_list = drop_add_residual_stochastic_depth_list( + x_list, + residual_func=ffn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None, + ) + return x_list + else: + + def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias)) + + def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.ls2(self.mlp(self.norm2(x))) + + attn_bias, x = get_attn_bias_and_cat(x_list) + x = x + attn_residual_func(x, attn_bias=attn_bias) + x = x + ffn_residual_func(x) + return attn_bias.split(x) + + def forward(self, x_or_x_list): + if isinstance(x_or_x_list, Tensor): + return super().forward(x_or_x_list) + elif isinstance(x_or_x_list, list): + assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage" + return self.forward_nested(x_or_x_list) + else: + raise AssertionError diff --git a/depth_anything_v2/dinov2_layers/drop_path.py b/depth_anything_v2/dinov2_layers/drop_path.py new file mode 100644 index 0000000000000000000000000000000000000000..10c3bea8e40eec258bbe59087770d230a6375481 --- /dev/null +++ b/depth_anything_v2/dinov2_layers/drop_path.py @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py + + +from torch import nn + + +def drop_path(x, drop_prob: float = 0.0, training: bool = False): + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0: + random_tensor.div_(keep_prob) + output = x * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) diff --git a/depth_anything_v2/dinov2_layers/layer_scale.py b/depth_anything_v2/dinov2_layers/layer_scale.py new file mode 100644 index 0000000000000000000000000000000000000000..76a4d0eedb1dc974a45e06fbe77ff3d909e36e55 --- /dev/null +++ b/depth_anything_v2/dinov2_layers/layer_scale.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110 + +from typing import Union + +import torch +from torch import Tensor +from torch import nn + + +class LayerScale(nn.Module): + def __init__( + self, + dim: int, + init_values: Union[float, Tensor] = 1e-5, + inplace: bool = False, + ) -> None: + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x: Tensor) -> Tensor: + return x.mul_(self.gamma) if self.inplace else x * self.gamma diff --git a/depth_anything_v2/dinov2_layers/mlp.py b/depth_anything_v2/dinov2_layers/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..504987b635c9cd582a352fb2381228c9e6cd043c --- /dev/null +++ b/depth_anything_v2/dinov2_layers/mlp.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/mlp.py + + +from typing import Callable, Optional + +from torch import Tensor, nn + + +class Mlp(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = nn.GELU, + drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias) + self.drop = nn.Dropout(drop) + + def forward(self, x: Tensor) -> Tensor: + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x diff --git a/depth_anything_v2/dinov2_layers/patch_embed.py b/depth_anything_v2/dinov2_layers/patch_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..f880c042ee6a33ef520c6a8c8a686c1d065b8f49 --- /dev/null +++ b/depth_anything_v2/dinov2_layers/patch_embed.py @@ -0,0 +1,89 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py + +from typing import Callable, Optional, Tuple, Union + +from torch import Tensor +import torch.nn as nn + + +def make_2tuple(x): + if isinstance(x, tuple): + assert len(x) == 2 + return x + + assert isinstance(x, int) + return (x, x) + + +class PatchEmbed(nn.Module): + """ + 2D image to patch embedding: (B,C,H,W) -> (B,N,D) + + Args: + img_size: Image size. + patch_size: Patch token size. + in_chans: Number of input image channels. + embed_dim: Number of linear projection output channels. + norm_layer: Normalization layer. 
+ """ + + def __init__( + self, + img_size: Union[int, Tuple[int, int]] = 224, + patch_size: Union[int, Tuple[int, int]] = 16, + in_chans: int = 3, + embed_dim: int = 768, + norm_layer: Optional[Callable] = None, + flatten_embedding: bool = True, + ) -> None: + super().__init__() + + image_HW = make_2tuple(img_size) + patch_HW = make_2tuple(patch_size) + patch_grid_size = ( + image_HW[0] // patch_HW[0], + image_HW[1] // patch_HW[1], + ) + + self.img_size = image_HW + self.patch_size = patch_HW + self.patches_resolution = patch_grid_size + self.num_patches = patch_grid_size[0] * patch_grid_size[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.flatten_embedding = flatten_embedding + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x: Tensor) -> Tensor: + _, _, H, W = x.shape + patch_H, patch_W = self.patch_size + + assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}" + assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}" + + x = self.proj(x) # B C H W + H, W = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) # B HW C + x = self.norm(x) + if not self.flatten_embedding: + x = x.reshape(-1, H, W, self.embed_dim) # B H W C + return x + + def flops(self) -> float: + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops diff --git a/depth_anything_v2/dinov2_layers/swiglu_ffn.py b/depth_anything_v2/dinov2_layers/swiglu_ffn.py new file mode 100644 index 0000000000000000000000000000000000000000..155a3dd9f6f1a7d0f7bdf9c8f1981e58acb3b19c --- /dev/null +++ b/depth_anything_v2/dinov2_layers/swiglu_ffn.py @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +from typing import Callable, Optional + +from torch import Tensor, nn +import torch.nn.functional as F + + +class SwiGLUFFN(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = None, + drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias) + self.w3 = nn.Linear(hidden_features, out_features, bias=bias) + + def forward(self, x: Tensor) -> Tensor: + x12 = self.w12(x) + x1, x2 = x12.chunk(2, dim=-1) + hidden = F.silu(x1) * x2 + return self.w3(hidden) + + +try: + from xformers.ops import SwiGLU + + XFORMERS_AVAILABLE = True +except ImportError: + SwiGLU = SwiGLUFFN + XFORMERS_AVAILABLE = False + + +class SwiGLUFFNFused(SwiGLU): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = None, + drop: float = 0.0, + bias: bool = True, + ) -> None: + out_features = out_features or in_features + hidden_features = hidden_features or in_features + hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 + super().__init__( + in_features=in_features, + hidden_features=hidden_features, + out_features=out_features, + bias=bias, + ) diff --git a/depth_anything_v2/dpt.py b/depth_anything_v2/dpt.py new file mode 100644 index 0000000000000000000000000000000000000000..69e57cc78cbc3297938ac4a49f9cf6a3b04d8eff --- /dev/null +++ b/depth_anything_v2/dpt.py @@ -0,0 +1,222 @@ +import cv2 +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.transforms import Compose + +from .dinov2 import DINOv2 +from .util.blocks import FeatureFusionBlock, _make_scratch +from .util.transform import Resize, NormalizeImage, PrepareForNet + + +def _make_fusion_block(features, use_bn, size=None): + return FeatureFusionBlock( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + size=size, + ) + + +class ConvBlock(nn.Module): + def __init__(self, in_feature, out_feature): + super().__init__() + + self.conv_block = nn.Sequential( + nn.Conv2d(in_feature, out_feature, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(out_feature), + nn.ReLU(True) + ) + + def forward(self, x): + return self.conv_block(x) + + +class DPTHead(nn.Module): + def __init__( + self, + in_channels, + features=256, + use_bn=False, + out_channels=[256, 512, 1024, 1024], + use_clstoken=False + ): + super(DPTHead, self).__init__() + + self.use_clstoken = use_clstoken + + self.projects = nn.ModuleList([ + nn.Conv2d( + in_channels=in_channels, + out_channels=out_channel, + kernel_size=1, + stride=1, + padding=0, + ) for out_channel in out_channels + ]) + + self.resize_layers = nn.ModuleList([ + nn.ConvTranspose2d( + in_channels=out_channels[0], + out_channels=out_channels[0], + kernel_size=4, + stride=4, + padding=0), + nn.ConvTranspose2d( + in_channels=out_channels[1], + out_channels=out_channels[1], + kernel_size=2, + stride=2, + padding=0), + nn.Identity(), + nn.Conv2d( + in_channels=out_channels[3], + out_channels=out_channels[3], + kernel_size=3, + stride=2, + padding=1) + ]) + + if use_clstoken: + self.readout_projects = nn.ModuleList() + for _ in range(len(self.projects)): + self.readout_projects.append( + nn.Sequential( + nn.Linear(2 * in_channels, in_channels), + 
nn.GELU())) + + self.scratch = _make_scratch( + out_channels, + features, + groups=1, + expand=False, + ) + + self.scratch.stem_transpose = None + + self.scratch.refinenet1 = _make_fusion_block(features, use_bn) + self.scratch.refinenet2 = _make_fusion_block(features, use_bn) + self.scratch.refinenet3 = _make_fusion_block(features, use_bn) + self.scratch.refinenet4 = _make_fusion_block(features, use_bn) + + head_features_1 = features + head_features_2 = 32 + + self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1) + self.scratch.output_conv2 = nn.Sequential( + nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), + nn.Sigmoid() + ) + + def forward(self, out_features, patch_h, patch_w): + out = [] + for i, x in enumerate(out_features): + if self.use_clstoken: + x, cls_token = x[0], x[1] + readout = cls_token.unsqueeze(1).expand_as(x) + x = self.readout_projects[i](torch.cat((x, readout), -1)) + else: + x = x[0] + + x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w)) + + x = self.projects[i](x) + x = self.resize_layers[i](x) + + out.append(x) + + layer_1, layer_2, layer_3, layer_4 = out + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:]) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:]) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv1(path_1) + out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True) + out = self.scratch.output_conv2(out) + + return out + + +class DepthAnythingV2(nn.Module): + def __init__( + self, + encoder='vitl', + features=256, + out_channels=[256, 512, 1024, 1024], + use_bn=False, + use_clstoken=False, + max_depth=20.0 + ): + super(DepthAnythingV2, self).__init__() + + self.intermediate_layer_idx = { + 'vits': [2, 5, 8, 11], + 'vitb': [2, 5, 8, 11], + 'vitl': [4, 11, 17, 23], + 'vitg': [9, 19, 29, 39] + } + + self.max_depth = max_depth + + self.encoder = encoder + self.pretrained = DINOv2(model_name=encoder) + + self.depth_head = DPTHead(self.pretrained.embed_dim, features, use_bn, out_channels=out_channels, use_clstoken=use_clstoken) + + def forward(self, x): + patch_h, patch_w = x.shape[-2] // 14, x.shape[-1] // 14 + + features = self.pretrained.get_intermediate_layers(x, self.intermediate_layer_idx[self.encoder], return_class_token=True) + + depth = self.depth_head(features, patch_h, patch_w) * self.max_depth + + return depth.squeeze(1) + + @torch.no_grad() + def infer_image(self, raw_image, input_size=518): + image, (h, w) = self.image2tensor(raw_image, input_size) + + depth = self.forward(image) + + depth = F.interpolate(depth[:, None], (h, w), mode="bilinear", align_corners=True)[0, 0] + + return depth.cpu().numpy() + + def image2tensor(self, raw_image, input_size=518): + transform = Compose([ + Resize( + width=input_size, + height=input_size, + resize_target=False, + keep_aspect_ratio=True, + ensure_multiple_of=14, + resize_method='lower_bound', + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 
0.225]), + PrepareForNet(), + ]) + + h, w = raw_image.shape[:2] + + image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0 + + image = transform({'image': image})['image'] + image = torch.from_numpy(image).unsqueeze(0) + + DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' + image = image.to(DEVICE) + + return image, (h, w) diff --git a/depth_anything_v2/util/__pycache__/blocks.cpython-311.pyc b/depth_anything_v2/util/__pycache__/blocks.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a036fdc174366b10722750ce57d38192dfe1c05 Binary files /dev/null and b/depth_anything_v2/util/__pycache__/blocks.cpython-311.pyc differ diff --git a/depth_anything_v2/util/__pycache__/blocks.cpython-39.pyc b/depth_anything_v2/util/__pycache__/blocks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a29b840421f7af87307ebca097a538bdffec4047 Binary files /dev/null and b/depth_anything_v2/util/__pycache__/blocks.cpython-39.pyc differ diff --git a/depth_anything_v2/util/__pycache__/transform.cpython-311.pyc b/depth_anything_v2/util/__pycache__/transform.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcc990d3d464d91a2bc3cc787697f2d61c5650e3 Binary files /dev/null and b/depth_anything_v2/util/__pycache__/transform.cpython-311.pyc differ diff --git a/depth_anything_v2/util/__pycache__/transform.cpython-39.pyc b/depth_anything_v2/util/__pycache__/transform.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec97e886d3147b2394f3490adf4cfeefb8d7d31c Binary files /dev/null and b/depth_anything_v2/util/__pycache__/transform.cpython-39.pyc differ diff --git a/depth_anything_v2/util/blocks.py b/depth_anything_v2/util/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..9fb66c03702d653f411c59ab9966916c348c7c6e --- /dev/null +++ b/depth_anything_v2/util/blocks.py @@ -0,0 +1,148 @@ +import torch.nn as nn + + +def _make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + if len(in_shape) >= 4: + out_shape4 = out_shape + + if expand: + out_shape1 = out_shape + out_shape2 = out_shape * 2 + out_shape3 = out_shape * 4 + if len(in_shape) >= 4: + out_shape4 = out_shape * 8 + + scratch.layer1_rn = nn.Conv2d(in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + scratch.layer2_rn = nn.Conv2d(in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + scratch.layer3_rn = nn.Conv2d(in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + if len(in_shape) >= 4: + scratch.layer4_rn = nn.Conv2d(in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + + return scratch + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features, activation, bn): + """Init. 
+ + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups=1 + + self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) + + self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) + + if self.bn == True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn == True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn == True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__( + self, + features, + activation, + deconv=False, + bn=False, + expand=False, + align_corners=True, + size=None + ): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + + self.groups=1 + + self.expand = expand + out_features = features + if self.expand == True: + out_features = features // 2 + + self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) + + self.resConfUnit1 = ResidualConvUnit(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + self.size=size + + def forward(self, *xs, size=None): + """Forward pass. + + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + + output = self.resConfUnit2(output) + + if (size is None) and (self.size is None): + modifier = {"scale_factor": 2} + elif size is None: + modifier = {"size": self.size} + else: + modifier = {"size": size} + + output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners) + + output = self.out_conv(output) + + return output diff --git a/depth_anything_v2/util/transform.py b/depth_anything_v2/util/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..1cce234c86177e1ad5c84c81c7c1afb16877c9da --- /dev/null +++ b/depth_anything_v2/util/transform.py @@ -0,0 +1,158 @@ +import numpy as np +import cv2 + + +class Resize(object): + """Resize sample to given size (width, height). + """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + image_interpolation_method=cv2.INTER_AREA, + ): + """Init. + + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. + ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. 
+ resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". + """ + self.__width = width + self.__height = height + + self.__resize_target = resize_target + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + self.__image_interpolation_method = image_interpolation_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height) + new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height) + new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, sample): + width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0]) + + # resize sample + sample["image"] = cv2.resize(sample["image"], (width, height), interpolation=self.__image_interpolation_method) + + if self.__resize_target: + if "depth" in sample: + sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST) + + if "mask" in sample: + sample["mask"] = cv2.resize(sample["mask"].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST) + + return sample + + +class NormalizeImage(object): + """Normlize image by given mean and std. 
+ """ + + def __init__(self, mean, std): + self.__mean = mean + self.__std = std + + def __call__(self, sample): + sample["image"] = (sample["image"] - self.__mean) / self.__std + + return sample + + +class PrepareForNet(object): + """Prepare sample for usage as network input. + """ + + def __init__(self): + pass + + def __call__(self, sample): + image = np.transpose(sample["image"], (2, 0, 1)) + sample["image"] = np.ascontiguousarray(image).astype(np.float32) + + if "depth" in sample: + depth = sample["depth"].astype(np.float32) + sample["depth"] = np.ascontiguousarray(depth) + + if "mask" in sample: + sample["mask"] = sample["mask"].astype(np.float32) + sample["mask"] = np.ascontiguousarray(sample["mask"]) + + return sample \ No newline at end of file diff --git a/dist_train.sh b/dist_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..6340c4ee02341223332872f1d1a279163bf80b94 --- /dev/null +++ b/dist_train.sh @@ -0,0 +1,26 @@ +#!/bin/bash +now=$(date +"%Y%m%d_%H%M%S") + +epoch=120 +bs=1 +gpus=1 +lr=0.000005 +encoder=vitl +dataset=pbr # Changed default to pbr +img_size=512 +min_depth=0.0 +max_depth=1.0 # Changed to 1.0 for normalized depth +pretrained_from=../checkpoints/depth_anything_v2_${encoder}.pth +save_path=exp/pbr # Changed to pbr + +mkdir -p $save_path + +python3 -m torch.distributed.launch \ + --nproc_per_node=$gpus \ + --nnodes 1 \ + --node_rank=0 \ + --master_addr=localhost \ + --master_port=20596 \ + train.py --epoch $epoch --encoder $encoder --bs $bs --lr $lr --save-path $save_path --dataset $dataset \ + --img-size $img_size --min-depth $min_depth --max-depth $max_depth --pretrained-from $pretrained_from \ + --port 20596 2>&1 | tee -a $save_path/$now.log diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..6079c7177838b434aeddabbdd7198622c2c031d1 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +matplotlib +opencv-python +open3d +torch +torchvision diff --git a/run.py b/run.py new file mode 100644 index 0000000000000000000000000000000000000000..dcf322d1817477b4b546921b9f53e491c6290b7d --- /dev/null +++ b/run.py @@ -0,0 +1,81 @@ +import argparse +import cv2 +import glob +import matplotlib +import numpy as np +import os +import torch + +from depth_anything_v2.dpt import DepthAnythingV2 + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Depth Anything V2 Metric Depth Estimation') + + parser.add_argument('--img-path', type=str) + parser.add_argument('--input-size', type=int, default=518) + parser.add_argument('--outdir', type=str, default='./vis_depth') + + parser.add_argument('--encoder', type=str, default='vitl', choices=['vits', 'vitb', 'vitl', 'vitg']) + parser.add_argument('--load-from', type=str, default='checkpoints/depth_anything_v2_metric_hypersim_vitl.pth') + parser.add_argument('--max-depth', type=float, default=20) + + parser.add_argument('--save-numpy', dest='save_numpy', action='store_true', help='save the model raw output') + parser.add_argument('--pred-only', dest='pred_only', action='store_true', help='only display the prediction') + parser.add_argument('--grayscale', dest='grayscale', action='store_true', help='do not apply colorful palette') + + args = parser.parse_args() + + DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' + + model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 
'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, + 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} + } + + depth_anything = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth}) + depth_anything.load_state_dict(torch.load(args.load_from, map_location='cpu')) + depth_anything = depth_anything.to(DEVICE).eval() + + if os.path.isfile(args.img_path): + if args.img_path.endswith('txt'): + with open(args.img_path, 'r') as f: + filenames = f.read().splitlines() + else: + filenames = [args.img_path] + else: + filenames = glob.glob(os.path.join(args.img_path, '**/*'), recursive=True) + + os.makedirs(args.outdir, exist_ok=True) + + cmap = matplotlib.colormaps.get_cmap('Spectral') + + for k, filename in enumerate(filenames): + print(f'Progress {k+1}/{len(filenames)}: {filename}') + + raw_image = cv2.imread(filename) + + depth = depth_anything.infer_image(raw_image, args.input_size) + + if args.save_numpy: + output_path = os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + '_raw_depth_meter.npy') + np.save(output_path, depth) + + depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0 + depth = depth.astype(np.uint8) + + if args.grayscale: + depth = np.repeat(depth[..., np.newaxis], 3, axis=-1) + else: + depth = (cmap(depth)[:, :, :3] * 255)[:, :, ::-1].astype(np.uint8) + + output_path = os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + '.png') + if args.pred_only: + cv2.imwrite(output_path, depth) + else: + split_region = np.ones((raw_image.shape[0], 50, 3), dtype=np.uint8) * 255 + combined_result = cv2.hconcat([raw_image, split_region, depth]) + + cv2.imwrite(output_path, combined_result) \ No newline at end of file diff --git a/run2.py b/run2.py new file mode 100644 index 0000000000000000000000000000000000000000..a6e324893206367e1aa6a2a33352fa0a250718b1 --- /dev/null +++ b/run2.py @@ -0,0 +1,54 @@ +import cv2 +import torch +import numpy as np + +from depth_anything_v2.dpt import DepthAnythingV2 + +model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]} +} + +encoder = 'vitl' # or 'vits', 'vitb' +dataset = 'pbr' # 'hypersim' for indoor model, 'vkitti' for outdoor model +max_depth = 1 # 20 for indoor model, 80 for outdoor model + +model = DepthAnythingV2(**{**model_configs[encoder], 'max_depth': max_depth}) + +# Load checkpoint and handle unexpected keys +checkpoint = torch.load(f'checkpoints/model2.pth', map_location='cpu') +print("Keys in checkpoint:", checkpoint.keys()) + +# Skip unexpected keys +expected_keys = ['model'] +state_dict = {} +for key in checkpoint.keys(): + if key not in ['optimizer', 'epoch', 'previous_best']: + state_dict = checkpoint[key] + print(f"Using weights from key: {key}") + else: + print(f"Skipping unexpected key: {key}") + +# Handle module prefix if present +my_state_dict = {} +for key in state_dict.keys(): + new_key = key.replace('module.', '') + my_state_dict[new_key] = state_dict[key] + +model.load_state_dict(my_state_dict) +model.eval() + +raw_img = cv2.imread('image.jpg') +depth = model.infer_image(raw_img) # HxW depth map in meters in numpy + +# Normalize depth for visualization (0-255) +depth_normalized = ((depth - depth.min()) / (depth.max() - 
depth.min()) * 255).astype(np.uint8) + +# Apply colormap for better visualization +depth_colormap = cv2.applyColorMap(depth_normalized, cv2.COLORMAP_INFERNO) + +# Save both raw depth and colored depth +cv2.imwrite('depth_raw.png', depth_normalized) +cv2.imwrite('depth_colored.png', depth_colormap) +print("Saved depth maps as 'depth_raw.png' and 'depth_colored.png'") diff --git a/train.py b/train.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9c11e7d29ca961eedc37c5c3f86617df38b325 --- /dev/null +++ b/train.py @@ -0,0 +1,253 @@ +import argparse +import logging +import os +import pprint +import random + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.distributed as dist +from torch.utils.data import DataLoader +from torch.optim import AdamW +import torch.nn.functional as F +from torch.utils.tensorboard import SummaryWriter + +from dataset.hypersim import Hypersim +from dataset.kitti import KITTI +from dataset.vkitti2 import VKITTI2 +from dataset.pbr import PBRDataset +from depth_anything_v2.dpt import DepthAnythingV2 +from util.dist_helper import setup_distributed +from util.loss import SiLogLoss +from util.metric import eval_depth +from util.utils import init_log + + +def rotate_sample(img, depth, valid_mask, angle): + """ + Rotate image, depth map and valid mask by specified angle (90, 180, or 270 degrees) + """ + k = angle // 90 # k=1 for 90°, k=2 for 180°, k=3 for 270° + img = torch.rot90(img, k, dims=[-2, -1]) + depth = torch.rot90(depth, k, dims=[-2, -1]) + valid_mask = torch.rot90(valid_mask, k, dims=[-2, -1]) + return img, depth, valid_mask + + +parser = argparse.ArgumentParser(description='Depth Anything V2 for Metric Depth Estimation') + +parser.add_argument('--encoder', default='vitl', choices=['vits', 'vitb', 'vitl', 'vitg']) +parser.add_argument('--dataset', default='hypersim', choices=['hypersim', 'vkitti', 'pbr']) +parser.add_argument('--img-size', default=512, type=int) +parser.add_argument('--min-depth', default=0.001, type=float) +parser.add_argument('--max-depth', default=1.0, type=float) +parser.add_argument('--epochs', default=40, type=int) +parser.add_argument('--bs', default=2, type=int) +parser.add_argument('--lr', default=0.000005, type=float) +parser.add_argument('--pretrained-from', type=str) +parser.add_argument('--save-path', type=str, required=True) +parser.add_argument('--local-rank', default=0, type=int) +parser.add_argument('--port', default=None, type=int) +parser.add_argument('--flip-prob', default=0.5, type=float, help='Probability of horizontal flip') +parser.add_argument('--rotate-prob', default=0.75, type=float, help='Probability of rotation') + + +def main(): + args = parser.parse_args() + rank = 0 + + logger = init_log('global', logging.INFO) + logger.propagate = 0 + + rank, world_size = setup_distributed(port=args.port) + + if rank == 0: + all_args = {**vars(args), 'ngpus': world_size} + logger.info('{}\n'.format(pprint.pformat(all_args))) + writer = SummaryWriter(args.save_path) + + cudnn.enabled = True + cudnn.benchmark = True + + size = (args.img_size, args.img_size) + if args.dataset == 'hypersim': + trainset = Hypersim('dataset/splits/hypersim/train.txt', 'train', size=size) + elif args.dataset == 'vkitti': + trainset = VKITTI2('dataset/splits/vkitti2/train.txt', 'train', size=size) + elif args.dataset == 'pbr': + trainset = PBRDataset('dataset/splits/pbr/train.txt', 'train', size=size) + else: + raise NotImplementedError + trainsampler = 
torch.utils.data.distributed.DistributedSampler(trainset) + trainloader = DataLoader(trainset, batch_size=args.bs, pin_memory=True, num_workers=4, drop_last=True, sampler=trainsampler) + + if args.dataset == 'hypersim': + valset = Hypersim('dataset/splits/hypersim/val.txt', 'val', size=size) + elif args.dataset == 'vkitti': + valset = KITTI('dataset/splits/kitti/val.txt', 'val', size=size) + elif args.dataset == 'pbr': + valset = PBRDataset('dataset/splits/pbr/val.txt', 'val', size=size) + else: + raise NotImplementedError + valsampler = torch.utils.data.distributed.DistributedSampler(valset) + valloader = DataLoader(valset, batch_size=1, pin_memory=True, num_workers=4, drop_last=True, sampler=valsampler) + + local_rank = int(os.environ["LOCAL_RANK"]) + + model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, + 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} + } + model = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth}) + + if args.pretrained_from: + model.load_state_dict({k: v for k, v in torch.load(args.pretrained_from, map_location='cpu').items() if 'pretrained' in k}, strict=False) + + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model.cuda(local_rank) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False, + output_device=local_rank, find_unused_parameters=True) + + criterion = SiLogLoss().cuda(local_rank) + + optimizer = AdamW([{'params': [param for name, param in model.named_parameters() if 'pretrained' in name], 'lr': args.lr}, + {'params': [param for name, param in model.named_parameters() if 'pretrained' not in name], 'lr': args.lr * 10.0}], + lr=args.lr, betas=(0.9, 0.999), weight_decay=0.01) + + total_iters = args.epochs * len(trainloader) + + # Initialize previous_best dictionary here + previous_best = {'d1': 0, 'd2': 0, 'd3': 0, 'abs_rel': 100, 'sq_rel': 100, + 'rmse': 100, 'rmse_log': 100, 'log10': 100, 'silog': 100} + + # Load checkpoint if exists + if os.path.exists(os.path.join(args.save_path, 'latest.pth')): + checkpoint = torch.load(os.path.join(args.save_path, 'latest.pth'), map_location='cpu') + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + previous_best = checkpoint['previous_best'] + start_epoch = checkpoint['epoch'] + 1 + else: + start_epoch = 0 + + for epoch in range(start_epoch, args.epochs): + if rank == 0: + logger.info('===========> Epoch: {:}/{:}, d1: {:.3f}, d2: {:.3f}, d3: {:.3f}'.format( + epoch, args.epochs, previous_best['d1'], previous_best['d2'], previous_best['d3'])) + logger.info('===========> Epoch: {:}/{:}, abs_rel: {:.3f}, sq_rel: {:.3f}, rmse: {:.3f}, rmse_log: {:.3f}, ' + 'log10: {:.3f}, silog: {:.3f}'.format( + epoch, args.epochs, previous_best['abs_rel'], previous_best['sq_rel'], previous_best['rmse'], + previous_best['rmse_log'], previous_best['log10'], previous_best['silog'])) + + trainloader.sampler.set_epoch(epoch + 1) + + model.train() + total_loss = 0 + + for i, sample in enumerate(trainloader): + optimizer.zero_grad() + + img, depth, valid_mask = sample['image'].cuda(), sample['depth'].cuda(), sample['valid_mask'].cuda() + + # Apply random horizontal flip + if random.random() < 0.5: + img = img.flip(-1) + depth = depth.flip(-1) + 
valid_mask = valid_mask.flip(-1) + + # Apply random rotation augmentation + rotation_prob = random.random() + if rotation_prob < 0.75: + if rotation_prob < 0.25: # 90° + img, depth, valid_mask = rotate_sample(img, depth, valid_mask, 90) + elif rotation_prob < 0.5: # 180° + img, depth, valid_mask = rotate_sample(img, depth, valid_mask, 180) + else: # 270° + img, depth, valid_mask = rotate_sample(img, depth, valid_mask, 270) + + pred = model(img) + + loss = criterion(pred, depth, (valid_mask == 1) & (depth >= args.min_depth) & (depth <= args.max_depth)) + + loss.backward() + optimizer.step() + + total_loss += loss.item() + + iters = epoch * len(trainloader) + i + + lr = args.lr * (1 - iters / total_iters) ** 0.9 + + optimizer.param_groups[0]["lr"] = lr + optimizer.param_groups[1]["lr"] = lr * 10.0 + + if rank == 0: + writer.add_scalar('train/loss', loss.item(), iters) + + if rank == 0 and i % 100 == 0: + logger.info('Iter: {}/{}, LR: {:.7f}, Loss: {:.3f}'.format( + i, len(trainloader), optimizer.param_groups[0]['lr'], loss.item())) + + model.eval() + + results = {'d1': torch.tensor([0.0]).cuda(), 'd2': torch.tensor([0.0]).cuda(), 'd3': torch.tensor([0.0]).cuda(), + 'abs_rel': torch.tensor([0.0]).cuda(), 'sq_rel': torch.tensor([0.0]).cuda(), 'rmse': torch.tensor([0.0]).cuda(), + 'rmse_log': torch.tensor([0.0]).cuda(), 'log10': torch.tensor([0.0]).cuda(), 'silog': torch.tensor([0.0]).cuda()} + nsamples = torch.tensor([0.0]).cuda() + + for i, sample in enumerate(valloader): + img, depth, valid_mask = sample['image'].cuda().float(), sample['depth'].cuda()[0], sample['valid_mask'].cuda()[0] + + with torch.no_grad(): + pred = model(img) + pred = F.interpolate(pred[:, None], depth.shape[-2:], mode='bilinear', align_corners=True)[0, 0] + + valid_mask = (valid_mask == 1) & (depth >= args.min_depth) & (depth <= args.max_depth) + + if valid_mask.sum() < 10: + continue + + cur_results = eval_depth(pred[valid_mask], depth[valid_mask]) + + for k in results.keys(): + results[k] += cur_results[k] + nsamples += 1 + + torch.distributed.barrier() + + for k in results.keys(): + dist.reduce(results[k], dst=0) + dist.reduce(nsamples, dst=0) + + if rank == 0: + logger.info('==========================================================================================') + logger.info('{:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}'.format(*tuple(results.keys()))) + logger.info('{:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}'.format( + *tuple([(v / nsamples).item() for v in results.values()]))) + logger.info('==========================================================================================') + print() + + for name, metric in results.items(): + writer.add_scalar(f'eval/{name}', (metric / nsamples).item(), epoch) + + for k in results.keys(): + if k in ['d1', 'd2', 'd3']: + previous_best[k] = max(previous_best[k], (results[k] / nsamples).item()) + else: + previous_best[k] = min(previous_best[k], (results[k] / nsamples).item()) + + if rank == 0: + checkpoint = { + 'model': model.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'previous_best': previous_best, + } + torch.save(checkpoint, os.path.join(args.save_path, 'latest.pth')) + + +if __name__ == '__main__': + main() diff --git a/util/__pycache__/dist_helper.cpython-311.pyc b/util/__pycache__/dist_helper.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4ce174aa48802fd91186091e3c4725eed71ec02 Binary files /dev/null and 
b/util/__pycache__/dist_helper.cpython-311.pyc differ diff --git a/util/__pycache__/dist_helper.cpython-39.pyc b/util/__pycache__/dist_helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cce2f7b8f5c46f2a46340cbe99c965ef7373a037 Binary files /dev/null and b/util/__pycache__/dist_helper.cpython-39.pyc differ diff --git a/util/__pycache__/loss.cpython-311.pyc b/util/__pycache__/loss.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be0e6d21d73d9b2ce3a9a21bc8f3c730b7f88bf5 Binary files /dev/null and b/util/__pycache__/loss.cpython-311.pyc differ diff --git a/util/__pycache__/loss.cpython-39.pyc b/util/__pycache__/loss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9da1389772772763eb2ace6d478bfcd8323f5b8a Binary files /dev/null and b/util/__pycache__/loss.cpython-39.pyc differ diff --git a/util/__pycache__/metric.cpython-311.pyc b/util/__pycache__/metric.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30a173c734faa5dc59644be1ded0c4428bb07387 Binary files /dev/null and b/util/__pycache__/metric.cpython-311.pyc differ diff --git a/util/__pycache__/metric.cpython-39.pyc b/util/__pycache__/metric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c044e22a025a5b58a6d5f3b193bc3498f640ebe Binary files /dev/null and b/util/__pycache__/metric.cpython-39.pyc differ diff --git a/util/__pycache__/utils.cpython-311.pyc b/util/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87a5f9401c960eee741f90bcd7e1e28f8b69445b Binary files /dev/null and b/util/__pycache__/utils.cpython-311.pyc differ diff --git a/util/__pycache__/utils.cpython-39.pyc b/util/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a31e84125b9cd8750301afa3efbfde110879024 Binary files /dev/null and b/util/__pycache__/utils.cpython-39.pyc differ diff --git a/util/dist_helper.py b/util/dist_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..49c65420b8d1496783c1c435bb4b6dba3416048f --- /dev/null +++ b/util/dist_helper.py @@ -0,0 +1,41 @@ +import os +import subprocess + +import torch +import torch.distributed as dist + + +def setup_distributed(backend="nccl", port=None): + """AdaHessian Optimizer + Lifted from https://github.com/BIGBALLON/distribuuuu/blob/master/distribuuuu/utils.py + Originally licensed MIT, Copyright (c) 2020 Wei Li + """ + num_gpus = torch.cuda.device_count() + + if "SLURM_JOB_ID" in os.environ: + rank = int(os.environ["SLURM_PROCID"]) + world_size = int(os.environ["SLURM_NTASKS"]) + node_list = os.environ["SLURM_NODELIST"] + addr = subprocess.getoutput(f"scontrol show hostname {node_list} | head -n1") + # specify master port + if port is not None: + os.environ["MASTER_PORT"] = str(port) + elif "MASTER_PORT" not in os.environ: + os.environ["MASTER_PORT"] = "10685" + if "MASTER_ADDR" not in os.environ: + os.environ["MASTER_ADDR"] = addr + os.environ["WORLD_SIZE"] = str(world_size) + os.environ["LOCAL_RANK"] = str(rank % num_gpus) + os.environ["RANK"] = str(rank) + else: + rank = int(os.environ["RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + + torch.cuda.set_device(rank % num_gpus) + + dist.init_process_group( + backend=backend, + world_size=world_size, + rank=rank, + ) + return rank, world_size diff --git a/util/loss.py b/util/loss.py new file mode 100644 index 
0000000000000000000000000000000000000000..ab808798c0c607db4f64f248d8dbea7efcdee1f2 --- /dev/null +++ b/util/loss.py @@ -0,0 +1,16 @@ +import torch +from torch import nn + + +class SiLogLoss(nn.Module): + def __init__(self, lambd=0.5): + super().__init__() + self.lambd = lambd + + def forward(self, pred, target, valid_mask): + valid_mask = valid_mask.detach() + diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask]) + loss = torch.sqrt(torch.pow(diff_log, 2).mean() - + self.lambd * torch.pow(diff_log.mean(), 2)) + + return loss diff --git a/util/metric.py b/util/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..b4c05b29100728e93836e6d2712a9412eff94f4d --- /dev/null +++ b/util/metric.py @@ -0,0 +1,26 @@ +import torch + + +def eval_depth(pred, target): + assert pred.shape == target.shape + + thresh = torch.max((target / pred), (pred / target)) + + d1 = torch.sum(thresh < 1.25).float() / len(thresh) + d2 = torch.sum(thresh < 1.25 ** 2).float() / len(thresh) + d3 = torch.sum(thresh < 1.25 ** 3).float() / len(thresh) + + diff = pred - target + diff_log = torch.log(pred) - torch.log(target) + + abs_rel = torch.mean(torch.abs(diff) / target) + sq_rel = torch.mean(torch.pow(diff, 2) / target) + + rmse = torch.sqrt(torch.mean(torch.pow(diff, 2))) + rmse_log = torch.sqrt(torch.mean(torch.pow(diff_log , 2))) + + log10 = torch.mean(torch.abs(torch.log10(pred) - torch.log10(target))) + silog = torch.sqrt(torch.pow(diff_log, 2).mean() - 0.5 * torch.pow(diff_log.mean(), 2)) + + return {'d1': d1.item(), 'd2': d2.item(), 'd3': d3.item(), 'abs_rel': abs_rel.item(), 'sq_rel': sq_rel.item(), + 'rmse': rmse.item(), 'rmse_log': rmse_log.item(), 'log10':log10.item(), 'silog':silog.item()} \ No newline at end of file diff --git a/util/utils.py b/util/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f97ab65ea06ba0062127a8400ab1dd0442259fcb --- /dev/null +++ b/util/utils.py @@ -0,0 +1,26 @@ +import os +import re +import numpy as np +import logging + +logs = set() + + +def init_log(name, level=logging.INFO): + if (name, level) in logs: + return + logs.add((name, level)) + logger = logging.getLogger(name) + logger.setLevel(level) + ch = logging.StreamHandler() + ch.setLevel(level) + if "SLURM_PROCID" in os.environ: + rank = int(os.environ["SLURM_PROCID"]) + logger.addFilter(lambda record: rank == 0) + else: + rank = 0 + format_str = "[%(asctime)s][%(levelname)8s] %(message)s" + formatter = logging.Formatter(format_str) + ch.setFormatter(formatter) + logger.addHandler(ch) + return logger
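
The preprocessing helper whose tail appears at the top of this section composes the `Resize`, `NormalizeImage` and `PrepareForNet` transforms defined in `depth_anything_v2/util/transform.py`. Below is a minimal sketch of that full pipeline; the 518 input size matches the `--input-size` default in `run.py`, while `ensure_multiple_of=14` and the cubic interpolation are assumptions about the usual Depth Anything V2 settings rather than values visible in this diff.

```python
import cv2
import numpy as np
import torch
from torchvision.transforms import Compose

from depth_anything_v2.util.transform import Resize, NormalizeImage, PrepareForNet


def prepare_image(raw_image: np.ndarray, input_size: int = 518):
    # Chain the three transforms from depth_anything_v2/util/transform.py.
    transform = Compose([
        Resize(
            width=input_size,
            height=input_size,
            resize_target=False,
            keep_aspect_ratio=True,
            ensure_multiple_of=14,                       # assumed ViT patch size
            resize_method='lower_bound',
            image_interpolation_method=cv2.INTER_CUBIC,  # assumption
        ),
        NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        PrepareForNet(),
    ])

    h, w = raw_image.shape[:2]
    image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0   # HWC, float in [0, 1]
    image = transform({'image': image})['image']                 # CHW, float32
    image = torch.from_numpy(image).unsqueeze(0)                 # 1 x C x H x W

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    return image.to(device), (h, w)
```

With these settings, `Resize.get_size` keeps the larger of the two scale factors (`lower_bound`), so a 1280x720 frame is scaled by max(518/1280, 518/720) ≈ 0.72 and rounded to multiples of 14, giving a 924x518 network input.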
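
`run2.py` above loads a fine-tuned checkpoint by looping over its keys and stripping the `module.` prefix. An equivalent, more compact sketch follows, using the same placeholder path `checkpoints/model2.pth` and the same `vitl` configuration; `train.py` stores the weights under the `'model'` key alongside `'optimizer'`, `'epoch'` and `'previous_best'`, and the DistributedDataParallel wrapper is what adds the `module.` prefix.

```python
import torch

from depth_anything_v2.dpt import DepthAnythingV2

# Rebuild the vitl metric model exactly as run2.py does.
model = DepthAnythingV2(encoder='vitl', features=256,
                        out_channels=[256, 512, 1024, 1024], max_depth=1)

checkpoint = torch.load('checkpoints/model2.pth', map_location='cpu')
state_dict = checkpoint.get('model', checkpoint)   # training checkpoints nest weights under 'model'
state_dict = {(k[len('module.'):] if k.startswith('module.') else k): v
              for k, v in state_dict.items()}      # drop the DDP 'module.' prefix
model.load_state_dict(state_dict)
model.eval()
```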
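
During training, each batch is flipped horizontally with probability 0.5 and rotated by 90, 180 or 270 degrees (via `rotate_sample`) with probability 0.75; note that the `--flip-prob` and `--rotate-prob` arguments are declared but the loop currently hard-codes these values. A sketch of the same policy, parameterised so those arguments could drive it:

```python
import random

import torch


def augment(img, depth, valid_mask, flip_prob=0.5, rotate_prob=0.75):
    # Horizontal flip, then one of the three right-angle rotations, matching train.py.
    if random.random() < flip_prob:
        img, depth, valid_mask = img.flip(-1), depth.flip(-1), valid_mask.flip(-1)
    if random.random() < rotate_prob:
        k = random.choice([1, 2, 3])  # 90, 180 or 270 degrees, equally likely
        img = torch.rot90(img, k, dims=[-2, -1])
        depth = torch.rot90(depth, k, dims=[-2, -1])
        valid_mask = torch.rot90(valid_mask, k, dims=[-2, -1])
    return img, depth, valid_mask
```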
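
The optimizer keeps two parameter groups, the pre-trained encoder at the base learning rate and the DPT head at 10x that rate, and both are decayed every iteration with a polynomial schedule, `lr = base_lr * (1 - iters / total_iters) ** 0.9`. A small illustration of the resulting values; the base rate and epoch count come from `dist_train.sh`, while the 1000 iterations per epoch is only an assumption for the printout (in practice it is `len(trainloader)`):

```python
base_lr = 5e-6          # lr in dist_train.sh
epochs = 120            # epoch in dist_train.sh
iters_per_epoch = 1000  # assumed for illustration; actually len(trainloader)
total_iters = epochs * iters_per_epoch

for it in (0, total_iters // 2, total_iters - 1):
    lr = base_lr * (1 - it / total_iters) ** 0.9
    print(f'iter {it:6d}: encoder lr {lr:.3e}, head lr {lr * 10:.3e}')
```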
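
`util/loss.py` implements the scale-invariant log (SiLog) loss with lambda = 0.5, computed only over the pixels selected by the valid mask. Written out, with d_i the per-pixel difference of log depths:

```latex
\mathcal{L}_{\mathrm{SiLog}}
  = \sqrt{\frac{1}{N}\sum_{i=1}^{N} d_i^{2}
          - \lambda\left(\frac{1}{N}\sum_{i=1}^{N} d_i\right)^{2}},
  \qquad d_i = \log y_i - \log \hat{y}_i,\quad \lambda = 0.5
```

The `silog` entry reported by `util/metric.py` evaluates the same expression at validation time.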
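
For reference, the threshold accuracies and the absolute relative error reported by `util/metric.py` are computed over the same valid pixels as:

```latex
\delta_k = \frac{1}{N}\,\Bigl|\bigl\{\, i : \max\bigl(y_i/\hat{y}_i,\; \hat{y}_i/y_i\bigr) < 1.25^{k} \,\bigr\}\Bigr|,
  \quad k \in \{1, 2, 3\},
\qquad
\mathrm{AbsRel} = \frac{1}{N}\sum_{i=1}^{N} \frac{\lvert \hat{y}_i - y_i \rvert}{y_i}
```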