| """ |
| EVA-CLIP backbone used in BLIP2. |
| |
| Code adapted from: |
| https://github.com/salesforce/LAVIS/blob/main/lavis/models/eva_vit.py |
| """ |
|
|
|
|
| import math |
| from functools import partial |
| from logging import getLogger |
| from typing import Any, Optional, Tuple, Union |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import torch.utils.checkpoint as checkpoint |
|
|
| logger = getLogger(__file__) |


TRANSFORMER_ENGINE_AVAILABLE = False
try:
    import transformer_engine.pytorch as te
    from transformer_engine.common.recipe import DelayedScaling, Format

    TRANSFORMER_ENGINE_AVAILABLE = True
    logger.info(
        "Transformer Engine is available, can set `transformer_engine=True` in config for faster inference."
    )
except ImportError:
    pass


def drop_path(x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    From https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli mask value per sample, broadcast over all remaining dims.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        # Rescale survivors so the expected activation matches the eval path.
        random_tensor.div_(keep_prob)
    return x * random_tensor
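
# Example: with drop_prob=0.1 each sample's residual branch is zeroed with
# probability 0.1 and surviving branches are scaled by 1/0.9, so E[output]
# matches the deterministic eval-mode path.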


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: float) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class Mlp(nn.Module):
    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer=nn.GELU,
        drop: float = 0.0,
        transformer_engine: bool = False,
    ) -> None:
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        # te.Linear is a drop-in replacement for nn.Linear that supports fp8 autocast.
        fn = te.Linear if transformer_engine else nn.Linear
        self.fc1 = fn(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = fn(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
        window_size=None,
        attn_head_dim=None,
        **kwargs,  # absorbs TE-only options (e.g. `checkpoint_attention`) so both attention classes share a signature
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=qkv_bias)

        if window_size:
            self.window_size = window_size
            # (2*Wh-1) * (2*Ww-1) pairwise offsets, plus 3 slots for
            # cls-to-token, token-to-cls and cls-to-cls.
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(self.num_relative_distance, num_heads)
            )

            # Pairwise relative position index for each token inside the window.
            coords_h = torch.arange(window_size[0])
            coords_w = torch.arange(window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))
            coords_flatten = torch.flatten(coords, 1)
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()
            # Shift offsets to start at 0 and flatten (dy, dx) into a single index.
            relative_coords[:, :, 0] += window_size[0] - 1
            relative_coords[:, :, 1] += window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * window_size[1] - 1
            relative_position_index = torch.zeros(
                size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
            )
            relative_position_index[1:, 1:] = relative_coords.sum(-1)
            relative_position_index[0, 0:] = self.num_relative_distance - 3
            relative_position_index[0:, 0] = self.num_relative_distance - 2
            relative_position_index[0, 0] = self.num_relative_distance - 1

            self.register_buffer("relative_position_index", relative_position_index)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, rel_pos_bias=None):
        B, N, C = x.shape
        qkv = self.qkv(x)
        # (B, N, 3 * all_head_dim) -> (3, B, num_heads, N, head_dim)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = q @ k.transpose(-2, -1)

        if self.relative_position_bias_table is not None:
            relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
            )
            relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
            attn = attn + relative_position_bias.unsqueeze(0)

        if rel_pos_bias is not None:
            attn = attn + rel_pos_bias

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
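
# Worked example: for a 2x2 window the bias table has (2*2-1) * (2*2-1) + 3 = 12
# rows (9 pairwise (dy, dx) offsets plus the 3 special cls slots), addressed
# through the (Wh*Ww + 1) x (Wh*Ww + 1) `relative_position_index`.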


class TransformerEngineAttention(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
        qk_scale: Optional[float] = None,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        window_size: Optional[int] = None,
        attn_head_dim: Optional[int] = None,
        checkpoint_attention: bool = False,
    ):
        super().__init__()
        self.num_heads = num_heads
        self.checkpoint_attention = checkpoint_attention
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim**-0.5

        # Fused QKV projection.
        self.qkv = te.Linear(dim, all_head_dim * 3, bias=qkv_bias)

        if window_size:
            raise NotImplementedError("`window_size` not implemented for TE!")

        self.te_attn = te.DotProductAttention(
            num_attention_heads=num_heads,
            kv_channels=head_dim,
            attention_dropout=attn_drop,
            qkv_format="bshd",
            softmax_scale=self.scale,
            attn_mask_type="no_mask",
        )

        # Output projection.
        self.proj = te.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x: torch.Tensor, rel_pos_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        x: [B, N, C]
        rel_pos_bias (optional): tensor of shape [num_heads, N, N]
        """
        B, N, _ = x.shape
        qkv = self.qkv(x)
        # (B, N, 3 * all_head_dim) -> (3, B, N, num_heads, head_dim), i.e. "bshd".
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 1, 3, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        if rel_pos_bias is not None:
            raise NotImplementedError("`rel_pos_bias` not implemented for TE!")

        # Fused scaled-dot-product attention; optionally recomputed in backward.
        y = self.te_attn(q, k, v, checkpoint_core_attention=self.checkpoint_attention)

        # Project back to the model dimension.
        return self.proj_drop(self.proj(y))
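
# TransformerEngineAttention mirrors `Attention` but swaps nn.Linear for te.Linear
# and the explicit softmax(q @ k^T) @ v for te.DotProductAttention; the TE modules
# only run in fp8 when the forward pass is wrapped in `te.fp8_autocast` (see
# `VisionTransformer.forward_features`). Windowed attention and `rel_pos_bias`
# are deliberately unsupported here.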


class Block(nn.Module):
    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        init_values=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        window_size=None,
        attn_head_dim=None,
        transformer_engine: bool = False,
        checkpoint_attention: bool = False,
    ):
        super().__init__()
        self.transformer_engine = transformer_engine
        self.window_size = window_size
        self.checkpoint_attention = checkpoint_attention

        if checkpoint_attention and not transformer_engine:
            raise ValueError("`checkpoint_attention` needs `transformer_engine`!")

        self.norm1 = norm_layer(dim)
        attn_fn = TransformerEngineAttention if transformer_engine else Attention
        self.attn = attn_fn(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
            window_size=window_size,
            attn_head_dim=attn_head_dim,
            checkpoint_attention=checkpoint_attention,
        )

        # Stochastic depth on the residual branches.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
            transformer_engine=transformer_engine,
        )

        # LayerScale: learnable per-channel residual scaling, enabled via `init_values`.
        if init_values is not None and init_values > 0:
            self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x, rel_pos_bias=None):
        if self.gamma_1 is None:
            x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
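
# Block computation is pre-norm: x <- x + DropPath(gamma_1 * Attn(LN1(x))),
# then x <- x + DropPath(gamma_2 * MLP(LN2(x))), with the gammas omitted when
# LayerScale is disabled.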


class PatchEmbed(nn.Module):
    """Image to Patch Embedding"""

    def __init__(
        self,
        img_size: Union[int, Tuple[int, int]] = 224,
        patch_size: Union[int, Tuple[int, int]] = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
    ):
        super().__init__()
        img_size = (img_size, img_size) if isinstance(img_size, int) else img_size
        patch_size = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        # Non-overlapping patchification as a strided convolution.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, **kwargs):
        B, C, H, W = x.shape
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # (B, C, H, W) -> (B, embed_dim, H/ps, W/ps) -> (B, num_patches, embed_dim)
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
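
# With the EVA-CLIP defaults used below (img_size=224, patch_size=14) this yields
# a 16x16 grid, i.e. 256 patch tokens of dimension embed_dim.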


class RelativePositionBias(nn.Module):
    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        # (2*Wh-1) * (2*Ww-1) pairwise offsets, plus 3 slots for
        # cls-to-token, token-to-cls and cls-to-cls.
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, num_heads)
        )

        # Pairwise relative position index for each token inside the window.
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += window_size[0] - 1
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = torch.zeros(
            size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
        )
        relative_position_index[1:, 1:] = relative_coords.sum(-1)
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1

        self.register_buffer("relative_position_index", relative_position_index)

    def forward(self):
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
        )
        return relative_position_bias.permute(2, 0, 1).contiguous()
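
# Returns a bias of shape (num_heads, Wh*Ww + 1, Wh*Ww + 1); unlike the per-block
# tables in `Attention`, this module is shared by every block when
# `use_shared_rel_pos_bias=True`.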


class VisionTransformer(nn.Module):
    """Vision Transformer with support for patch or hybrid CNN input stage"""

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        init_values=None,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        use_shared_rel_pos_bias=False,
        use_mean_pooling=True,
        init_scale=0.001,
        checkpoint_activations: bool = False,
        checkpoint_attention: bool = False,
        transformer_engine: bool = False,
        use_fp8: bool = False,
    ):
        super().__init__()
        # `use_mean_pooling` and `init_scale` are accepted for config compatibility
        # but are not used by this backbone.
        self.image_size = img_size
        self.patch_size = patch_size
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.transformer_engine = transformer_engine
        self.use_fp8 = use_fp8
        self.fp8_recipe = None
        # No classifier by default; `reset_classifier` installs one if needed.
        self.head = nn.Identity()

        if use_fp8 and not transformer_engine:
            raise ValueError("`transformer_engine` must be enabled for `use_fp8`.")
        if use_fp8:
            # HYBRID recipe: E4M3 in the forward pass, E5M2 for gradients.
            self.fp8_recipe = DelayedScaling(fp8_format=Format.HYBRID, amax_history_len=16, amax_compute_algo="max")

        self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)

        if use_shared_rel_pos_bias:
            self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
        else:
            self.rel_pos_bias = None
        self.checkpoint_activations = checkpoint_activations
        self.checkpoint_attention = checkpoint_attention

        # Stochastic depth decay rule: drop probability increases linearly with depth.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    init_values=init_values,
                    window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
                    transformer_engine=transformer_engine,
                    checkpoint_attention=self.checkpoint_attention,
                )
                for i in range(depth)
            ]
        )

        if self.pos_embed is not None:
            nn.init.trunc_normal_(self.pos_embed, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)

        self.apply(self._init_weights)
        self.fix_init_weight()

    def fix_init_weight(self):
        # Rescale residual-branch projections by 1/sqrt(2 * layer_id), as in BEiT,
        # to keep activations stable in deep networks.
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=""):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        if self.transformer_engine and self.use_fp8:
            # Run the whole backbone under fp8 autocast with the DelayedScaling recipe.
            with te.fp8_autocast(enabled=True, fp8_recipe=self.fp8_recipe):
                return self._forward_uncast(x)
        return self._forward_uncast(x)

    def _forward_uncast(self, x):
        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            if self.checkpoint_activations:
                # Recompute each block in the backward pass to save activation memory.
                x = checkpoint.checkpoint(blk, x, rel_pos_bias)
            else:
                x = blk(x, rel_pos_bias)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        return x
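
    # `forward` returns the full token sequence of shape (B, 1 + num_patches,
    # embed_dim); no pooling or classifier head is applied here.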

    def get_intermediate_layers(self, x):
        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        features = []
        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            x = blk(x, rel_pos_bias)
            features.append(x)

        return features

    def get_num_layer(self, var_name=""):
        if var_name in ("cls_token", "mask_token", "pos_embed"):
            return 0
        elif var_name.startswith("patch_embed"):
            return 0
        elif var_name.startswith("rel_pos_bias"):
            return len(self.blocks) - 1
        elif var_name.startswith("blocks"):
            layer_id = int(var_name.split(".")[1])
            return layer_id + 1
        else:
            return len(self.blocks)
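

# `get_num_layer` maps a parameter name to a depth bucket; optimizer builders
# typically use this for layer-wise learning-rate decay, giving the embeddings
# the smallest learning rate and the last blocks the largest.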


def interpolate_pos_embed(
    pos_embed_key: str,
    num_patches: int,
    patch_embed_shape: int,
    checkpoint_model: Dict[str, torch.Tensor],
    target_h: Optional[int] = None,
    target_w: Optional[int] = None,
) -> None:
    """Resize the checkpoint's positional embedding in place to match the model.

    `patch_embed_shape` is the length of the model's positional embedding,
    i.e. num_patches plus extra tokens such as cls.
    """
    if pos_embed_key in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model[pos_embed_key].float()
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_extra_tokens = patch_embed_shape - num_patches
        # The checkpoint's patch grid is assumed to be square.
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)

        if target_h is not None and target_w is not None:
            new_h, new_w = target_h, target_w
        else:
            # Fall back to a square grid inferred from the model's patch count.
            new_size = int(num_patches**0.5)
            new_h, new_w = new_size, new_size

        # Only interpolate when the grids actually differ.
        if orig_size * orig_size != new_h * new_w:
            logger.info("Positional interpolation from %dx%d to %dx%d", orig_size, orig_size, new_h, new_w)
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # Only the patch tokens are interpolated; extra tokens (e.g. cls) are kept.
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = F.interpolate(
                pos_tokens, size=(new_h, new_w), mode="bicubic", align_corners=False
            )
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model[pos_embed_key] = new_pos_embed
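
# Example: a checkpoint trained at 224/14 stores a 16x16 grid (257 tokens with
# cls); loading it at 336/14 (a 24x24 grid) bicubically resizes the 256 patch
# embeddings to 576 and re-attaches the untouched cls embedding.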


class PositionalEmbeddingHook:
    def __init__(self, pos_embed_name, num_patches, patch_embed_shape, target_h=None, target_w=None):
        self.pos_embed_name = pos_embed_name
        self.num_patches = num_patches
        self.patch_embed_shape = patch_embed_shape
        self.target_h = target_h
        self.target_w = target_w

    def __call__(self, state_dict, prefix, *args, **kwargs) -> None:
        # Signature of a `load_state_dict` pre-hook; mutates `state_dict` in place.
        logger.info("Calling `PositionalEmbeddingHook`")
        pos_embed_key = f"{prefix}{self.pos_embed_name}"
        interpolate_pos_embed(
            pos_embed_key, self.num_patches, self.patch_embed_shape, state_dict, self.target_h, self.target_w
        )


class EvaViTG(VisionTransformer):
    """EVA ViT-g/14: patch size 14, width 1408, depth 39, 16 heads (head dim 88)."""

    def __init__(
        self,
        img_size: Union[int, Tuple[int, int]] = 224,
        drop_path_rate: float = 0.4,
        pretrained: bool = False,
        checkpoint_path: Optional[str] = None,
        checkpoint_activations: bool = False,
        checkpoint_attention: bool = False,
        transformer_engine: bool = False,
        use_fp8: bool = False,
        **kwargs: Any,
    ) -> None:
        if not TRANSFORMER_ENGINE_AVAILABLE and transformer_engine:
            raise ValueError(
                "TransformerEngine is not available, "
                "please install transformer-engine or set `transformer_engine=False` in config."
            )
        if use_fp8 and not transformer_engine:
            raise ValueError("`transformer_engine` must be enabled for `use_fp8`.")
        super().__init__(
            img_size=img_size,
            patch_size=14,
            use_mean_pooling=False,
            embed_dim=1408,
            depth=39,
            num_heads=1408 // 88,
            mlp_ratio=4.3637,
            qkv_bias=True,
            drop_path_rate=drop_path_rate,
            norm_layer=partial(nn.LayerNorm, eps=1e-6),
            checkpoint_activations=checkpoint_activations,
            checkpoint_attention=checkpoint_attention,
            transformer_engine=transformer_engine,
            use_fp8=use_fp8,
        )
        self.checkpoint_path = checkpoint_path

        # Interpolate positional embeddings when a checkpoint was trained at a
        # different resolution than `img_size`.
        self.register_pre_hooks()

        if pretrained:
            self.load_checkpoint()

    def load_checkpoint(self) -> None:
        logger.info(f"Loading checkpoint from {self.checkpoint_path}")
        state_dict = torch.load(self.checkpoint_path, map_location="cpu")
        incompatible_keys = self.load_state_dict(state_dict, strict=False)
        logger.info(f"Incompatible keys: {incompatible_keys}")
        logger.info(f"Loaded visual encoder {type(self)} with state dict from {self.checkpoint_path}")

    def register_pre_hooks(self) -> None:
        """Register positional embedding interpolation for checkpoints trained at a different resolution."""
        # The target grid is the model's own patch grid.
        patch_h = self.patch_embed.patch_shape[0]
        patch_w = self.patch_embed.patch_shape[1]

        embed_hook = PositionalEmbeddingHook(
            pos_embed_name="pos_embed",
            num_patches=self.patch_embed.num_patches,
            patch_embed_shape=self.pos_embed.shape[-2],
            target_h=patch_h,
            target_w=patch_w,
        )
        self._register_load_state_dict_pre_hook(embed_hook)

    def _initialize_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
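

if __name__ == "__main__":
    # Minimal smoke test: a sketch assuming no pretrained checkpoint and the
    # default pure-PyTorch path (transformer_engine=False).
    model = EvaViTG(img_size=224, pretrained=False).eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 224, 224))
    # 224 / 14 = 16, so 16 * 16 patch tokens plus one cls token.
    print(feats.shape)  # torch.Size([1, 257, 1408])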