from functools import partial
import math
import logging
from typing import Sequence, Tuple, Union, Callable

import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn.init import trunc_normal_
from einops import rearrange

from model.layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block
from model.resnet import resnet18


def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
    """Recursively apply fn to the module tree (root included only if include_root), passing qualified names."""
    if not depth_first and include_root:
        fn(module=module, name=name)
    for child_name, child_module in module.named_children():
        child_name = ".".join((name, child_name)) if name else child_name
        named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
    if depth_first and include_root:
        fn(module=module, name=name)
    return module


class BlockChunk(nn.ModuleList):
    def forward(self, x):
        for b in self:
            x = b(x)
        return x


class DinoVisionTransformer(nn.Module):
    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        ffn_bias=True,
        proj_bias=True,
        drop_path_rate=0.0,
        drop_path_uniform=False,
        init_values=None,
        embed_layer=PatchEmbed,
        act_layer=nn.GELU,
        block_fn=Block,
        ffn_layer="mlp",
        block_chunks=0,
        num_register_tokens=0,
        interpolate_antialias=False,
        interpolate_offset=0.1,
    ):
| | """ |
| | Args: |
| | img_size (int, tuple): input image size |
| | patch_size (int, tuple): patch size |
| | in_chans (int): number of input channels |
| | embed_dim (int): embedding dimension |
| | depth (int): depth of transformer |
| | num_heads (int): number of attention heads |
| | mlp_ratio (int): ratio of mlp hidden dim to embedding dim |
| | qkv_bias (bool): enable bias for qkv if True |
| | proj_bias (bool): enable bias for proj in attn if True |
| | ffn_bias (bool): enable bias for ffn if True |
| | drop_path_rate (float): stochastic depth rate |
| | drop_path_uniform (bool): apply uniform drop rate across blocks |
| | weight_init (str): weight init scheme |
| | init_values (float): layer-scale init values |
| | embed_layer (nn.Module): patch embedding layer |
| | act_layer (nn.Module): MLP activation layer |
| | block_fn (nn.Module): transformer block class |
| | ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity" |
| | block_chunks: (int) split block sequence into block_chunks units for FSDP wrap |
| | num_register_tokens: (int) number of extra cls tokens (so-called "registers") |
| | interpolate_antialias: (str) flag to apply anti-aliasing when interpolating positional embeddings |
| | interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings |
| | """ |
        super().__init__()
        norm_layer = partial(nn.LayerNorm, eps=1e-6)

        self.num_features = self.embed_dim = embed_dim
        self.n_blocks = depth
        self.num_heads = num_heads
        self.patch_size = patch_size
        self.num_register_tokens = num_register_tokens
        self.interpolate_antialias = interpolate_antialias
        self.interpolate_offset = interpolate_offset

        self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
        assert num_register_tokens >= 0
        self.register_tokens = (
            nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
        )

        if drop_path_uniform is True:
            dpr = [drop_path_rate] * depth
        else:
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]

| | if ffn_layer == "mlp": |
| | print("using MLP layer as FFN") |
| | ffn_layer = Mlp |
| | elif ffn_layer == "swiglufused" or ffn_layer == "swiglu": |
| | print("using SwiGLU layer as FFN") |
| | ffn_layer = SwiGLUFFNFused |
| | elif ffn_layer == "identity": |
| | print("using Identity layer as FFN") |
| |
|
| | def f(*args, **kwargs): |
| | return nn.Identity() |
| |
|
| | ffn_layer = f |
| | else: |
| | raise NotImplementedError |
| |
|
        blocks_list = [
            block_fn(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                proj_bias=proj_bias,
                ffn_bias=ffn_bias,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                ffn_layer=ffn_layer,
                init_values=init_values,
            )
            for i in range(depth)
        ]
        if block_chunks > 0:
            self.chunked_blocks = True
            chunked_blocks = []
            chunksize = depth // block_chunks
            for i in range(0, depth, chunksize):
                # pad each chunk with nn.Identity() so block indices stay consistent across chunks
                chunked_blocks.append([nn.Identity()] * i + blocks_list[i: i + chunksize])
            self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
        else:
            self.chunked_blocks = False
            self.blocks = nn.ModuleList(blocks_list)

        self.norm = norm_layer(embed_dim)
        self.head = nn.Identity()

        self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))

        self.init_weights()

    def init_weights(self):
        trunc_normal_(self.pos_embed, std=0.02)
        if self.register_tokens is not None:
            nn.init.normal_(self.register_tokens, std=1e-6)
        named_apply(init_weights_vit_timm, self)

    def interpolate_pos_encoding(self, x, w, h):
        previous_dtype = x.dtype
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1]
        if npatch == N and w == h:
            return self.pos_embed
        patch_pos_embed = self.pos_embed.float()
        dim = x.shape[-1]
        w0 = w // self.patch_size
        h0 = h // self.patch_size
        # add a small offset so the computed scale factors avoid floating-point
        # rounding issues at exact integer boundaries during interpolation
        w0, h0 = w0 + self.interpolate_offset, h0 + self.interpolate_offset

        sqrt_N = math.sqrt(N)
        sx, sy = float(w0) / sqrt_N, float(h0) / sqrt_N
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(sqrt_N), int(sqrt_N), dim).permute(0, 3, 1, 2),
            scale_factor=(sx, sy),
            mode="bicubic",
            antialias=self.interpolate_antialias,
        )

        assert int(w0) == patch_pos_embed.shape[-2]
        assert int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return patch_pos_embed.to(previous_dtype)

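    # Illustrative arithmetic for the interpolation above (not part of the model):
    # for a 256x256 input with patch_size=16 and a pos_embed built from img_size=256,
    # the patch grid is 16x16, so N = 256 and sqrt_N = 16. With interpolate_offset=0.1,
    # sx = sy = (16 + 0.1) / 16 ≈ 1.006, and the interpolated grid is
    # floor(16 * 1.006) = 16: the grid size is preserved while the exact-boundary
    # rounding issue in bicubic resampling is avoided.
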
    def prepare_tokens_with_masks(self, x, masks=None):
        B, nc, w, h = x.shape
        x = self.patch_embed(x)
        if masks is not None:
            x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)

        x = x + self.interpolate_pos_encoding(x, w, h)

        if self.register_tokens is not None:
            x = torch.cat(
                (
                    x[:, :1],
                    self.register_tokens.expand(x.shape[0], -1, -1),
                    x[:, 1:],
                ),
                dim=1,
            )

        return x

    def forward_features_list(self, x_list, masks_list):
        x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
        for blk in self.blocks:
            x = blk(x)

        all_x = x
        output = []
        for x, masks in zip(all_x, masks_list):
            x_norm = self.norm(x)
            output.append(
                {
                    "x_norm_clstoken": x_norm[:, 0],
                    "x_norm_regtokens": x_norm[:, 1: self.num_register_tokens + 1],
                    "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1:],
                    "x_prenorm": x,
                    "masks": masks,
                }
            )
        return output

    def forward(self, x, masks=None):
        if isinstance(x, list):
            return self.forward_features_list(x, masks)

        x = self.prepare_tokens_with_masks(x, masks)

        for blk in self.blocks:
            x = blk(x)

        x_norm = self.norm(x)
        return x_norm

    def _get_intermediate_layers_not_chunked(self, x, n=1):
        x = self.prepare_tokens_with_masks(x)
        # if n is an int, take the last n blocks; otherwise take the listed block indices
        output, total_block_len = [], len(self.blocks)
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if i in blocks_to_take:
                output.append(x)
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def _get_intermediate_layers_chunked(self, x, n=1):
        x = self.prepare_tokens_with_masks(x)
        output, i, total_block_len = [], 0, len(self.blocks[-1])
        # if n is an int, take the last n blocks; otherwise take the listed block indices
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for block_chunk in self.blocks:
            for blk in block_chunk[i:]:  # skip the leading nn.Identity() padding in each chunk
                x = blk(x)
                if i in blocks_to_take:
                    output.append(x)
                i += 1
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def get_intermediate_layers(
        self,
        x: torch.Tensor,
        n: Union[int, Sequence] = 1,
        reshape: bool = False,
        return_class_token: bool = False,
        norm=True,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
        if self.chunked_blocks:
            outputs = self._get_intermediate_layers_chunked(x, n)
        else:
            outputs = self._get_intermediate_layers_not_chunked(x, n)
        if norm:
            outputs = [self.norm(out) for out in outputs]
        class_tokens = [out[:, 0] for out in outputs]
        outputs = [out[:, 1 + self.num_register_tokens:] for out in outputs]
        if reshape:
            B, _, w, h = x.shape
            outputs = [
                out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
                for out in outputs
            ]
        if return_class_token:
            return tuple(zip(outputs, class_tokens))
        return tuple(outputs)


def init_weights_vit_timm(module: nn.Module, name: str = ""):
    """ViT weight initialization, original timm impl (for reproducibility)"""
    if isinstance(module, nn.Linear):
        trunc_normal_(module.weight, std=0.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)


class Encoder(nn.Module):
    def __init__(self, model_type='small'):
        super().__init__()
        if model_type == 'tiny':
            self.vit = DinoVisionTransformer(
                img_size=256,
                patch_size=16,
                embed_dim=192,
                depth=12,
                num_heads=6,
                mlp_ratio=4,
                block_fn=partial(Block, attn_class=MemEffAttention),
                num_register_tokens=0,
            )
            path = "checkpoint/deit_tiny_patch16_224-a1311bcf.pth"

        elif model_type == 'small':
            self.vit = DinoVisionTransformer(
                img_size=256,
                patch_size=16,
                embed_dim=384,
                depth=12,
                num_heads=6,
                mlp_ratio=4,
                block_fn=partial(Block, attn_class=MemEffAttention),
                num_register_tokens=0,
            )
            path = "checkpoint/dinov2_vits14_pretrain.pth"

        else:
            raise ValueError("Encoder: unknown vit model_type '{}'".format(model_type))

        # DeiT checkpoints store the weights under a 'model' key; DINOv2 checkpoints are flat state dicts
        state_dict = torch.load(path, map_location='cpu')['model'] \
            if model_type == 'tiny' else torch.load(path, map_location='cpu')

        # drop weights whose shapes do not match this configuration (different resolution / patch grid)
        for k in ['pos_embed', 'patch_embed.proj.weight']:
            del state_dict[k]
        msg = self.vit.load_state_dict(state_dict, strict=False)
        print(' missing_keys:{},\n unexpected_keys:{}'.format(msg.missing_keys, msg.unexpected_keys))
        print('model_type: {},\n checkpoint_path: {}'.format(model_type, path))

        self.resnet = resnet18(pretrained=True)
        self.drop = nn.Dropout(p=0.01)

        # 1x1 conv fusing the deepest ResNet feature (512 ch) with the ViT feature map (384 ch)
        self.fusion_conv = nn.Sequential(
            nn.Conv2d(512 + 384, 384, kernel_size=1),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True),
        )

    def detail_capture(self, x):
        # ResNet-18 stem (conv1/bn1/relu, no max-pooling) followed by the four residual stages
        x = self.resnet.conv1(x)
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)

        x2 = self.drop(self.resnet.layer1(x))
        x3 = self.resnet.layer2(x2)
        x4 = self.resnet.layer3(x3)
        x5 = self.resnet.layer4(x4)
        return [x2, x3, x4, x5]

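    # Shape sketch for detail_capture (illustrative, assuming 256x256 inputs and standard
    # resnet18 strides/channel widths): conv1 halves the resolution to 128x128; with the
    # max-pool skipped, layer1 keeps 128x128 (64 ch), layer2 gives 64x64 (128 ch),
    # layer3 gives 32x32 (256 ch), and layer4 gives 16x16 (512 ch). The 512-channel
    # 16x16 map from layer4 is what forward() concatenates with the 16x16, 384-channel
    # ViT feature map of the default 'small' model before fusion_conv.
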
    def forward(self, x, y):
        # ViT tokens for both inputs; one token per 16x16 patch (no CLS token in this variant)
        v_x = self.vit(x)
        v_y = self.vit(y)

        # reshape the token sequences back to a 16x16 spatial grid (assumes 256x256 inputs)
        v_x = rearrange(v_x, 'b (h w) c -> b c h w', h=16, w=16)
        v_y = rearrange(v_y, 'b (h w) c -> b c h w', h=16, w=16)

        # multi-scale CNN features from the ResNet branch
        c_x = self.detail_capture(x)
        c_y = self.detail_capture(y)

        # fuse the deepest CNN feature with the ViT feature map at 1/16 resolution
        fused_v_x = self.fusion_conv(torch.cat([c_x[-1], v_x], dim=1))
        fused_v_y = self.fusion_conv(torch.cat([c_y[-1], v_y], dim=1))
        return c_x[:-1] + [fused_v_x], c_y[:-1] + [fused_v_y]

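
if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only). It builds a small ViT directly, so it
    # does not need the checkpoint files or the pretrained ResNet that Encoder.__init__
    # loads, and it assumes the local model.layers blocks accept plain tensors as they are
    # used above. The depth/width values here are arbitrary and not a trained configuration.
    vit = DinoVisionTransformer(
        img_size=256,
        patch_size=16,
        embed_dim=192,
        depth=2,
        num_heads=3,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
    )
    dummy = torch.randn(1, 3, 256, 256)
    tokens = vit(dummy)  # (1, 256, 192): one token per 16x16 patch, no CLS token in this variant
    feat_map = rearrange(tokens, 'b (h w) c -> b c h w', h=16, w=16)  # (1, 192, 16, 16)
    print(tokens.shape, feat_map.shape)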