Verification code for MLP-Mixer

This is the code from GitHub – rishikksh20/MLP-Mixer-pytorch: Unofficial implementation of MLP-Mixer: An all-MLP Architecture for Vision, saved here for my own reference with some comments added.

import torch
import numpy as np
from torch import nn
from einops.layers.torch import Rearrange

from torchviz import make_dot
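# torchviz is used only for the computation-graph visualization at the bottom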


class FeedForward(nn.Module):
    def __init__(self, dim, hidden_dim, dropout = 0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )
    def forward(self, x):
        return self.net(x)
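
# Note: FeedForward maps [..., dim] -> [..., hidden_dim] -> [..., dim] along
# the last axis, so input and output shapes match and the module fits inside
# the residual connections in MixerBlock below.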

# MixerBlock: one Mixer layer = token-mixing MLP + channel-mixing MLP,
# each preceded by LayerNorm and wrapped in a residual connection
class MixerBlock(nn.Module):

    def __init__(self, dim, num_patch, token_dim, channel_dim, dropout = 0.):
        # num_patch is the total number of patches from one image:
        # num_patch = (image_size // patch_size) ** 2
        super().__init__()

        # Token-mixing MLP: Rearrange transposes [b, n, d] -> [b, d, n] so the
        # FeedForward mixes information across the num_patch axis (i.e. across
        # spatial locations, separately for each channel), then transposes back
        self.token_mix = nn.Sequential(
            nn.LayerNorm(dim),
            Rearrange('b n d -> b d n'),
            FeedForward(num_patch, token_dim, dropout), # FeedForward(196, 256, 0.0)
            Rearrange('b d n -> b n d')
        )

        # Channel-mixing MLP: FeedForward acts on the last axis (dim), mixing
        # information across channels within each patch
        self.channel_mix = nn.Sequential(
            nn.LayerNorm(dim),
            FeedForward(dim, channel_dim, dropout), # FeedForward(512, 2048, 0.0)
        )

    def forward(self, x):
        # both mixing steps use pre-norm residual (skip) connections
        x = x + self.token_mix(x)
        x = x + self.channel_mix(x)

        return x


class MLPMixer(nn.Module):

    def __init__(self, in_channels, dim, num_classes, patch_size, image_size, depth, token_dim, channel_dim):
        super().__init__()

        assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        self.num_patch = (image_size // patch_size) ** 2
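        # Patch embedding: a Conv2d with kernel_size == stride == patch_size
        # cuts the image into non-overlapping patches and projects each one to
        # dim channels; Rearrange then flattens the patch grid into a sequence
        # of num_patch tokens: [B, dim, H/P, W/P] -> [B, num_patch, dim]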
        self.to_patch_embedding = nn.Sequential(
            nn.Conv2d(in_channels, dim, patch_size, patch_size),
            Rearrange('b c h w -> b (h w) c'),
        )

        self.mixer_blocks = nn.ModuleList([])

        for _ in range(depth):
            self.mixer_blocks.append(MixerBlock(dim, self.num_patch, token_dim, channel_dim))

        self.layer_norm = nn.LayerNorm(dim)

        # classification head: a single Linear applied to the pooled feature
        self.mlp_head = nn.Sequential(
            nn.Linear(dim, num_classes)
        )

    def forward(self, x):

        x = self.to_patch_embedding(x)
        # x.shape > torch.Size([1, 196, 512])
        # 196 is the number of patches per image: (224 // 16) ** 2 = 14 ** 2
        # 512 is dim, i.e. the feature vector extracted from each patch

        # apply the depth MixerBlocks in sequence (each block has its own
        # weights; cf. the loop-based module reuse pattern in
        # https://pytorch.org/tutorials/beginner/examples_nn/dynamic_net.html)
        for mixer_block in self.mixer_blocks:
            x = mixer_block(x)
            # x.shape > torch.Size([1, 196, 512])
        x = self.layer_norm(x)
        # x.shape > torch.Size([1, 196, 512])

        # Global average pooling
        x = x.mean(dim=1)
        # x.shape > torch.Size([1, 512])

        return self.mlp_head(x)
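
# Note: unlike ViT there is no class token; classification uses global
# average pooling over the patch tokens followed by a single linear head.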




if __name__ == "__main__":
    img = torch.ones([1, 3, 224, 224])

    model = MLPMixer(in_channels=3, image_size=224, patch_size=16, num_classes=1000,
                     dim=512, depth=8, token_dim=256, channel_dim=2048)

    parameters = filter(lambda p: p.requires_grad, model.parameters())
    parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
    print('Trainable Parameters: %.3fM' % parameters)
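    # for this configuration the count should come out to roughly 18.5M,
    # in line with the Mixer-S/16 model (~18M parameters) from the paper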

    out_img = model(img)

    print("Shape of out :", out_img.shape)  # torch.Size([1, 1000]), i.e. [B, num_classes]

    # visualize the computation graph with torchviz (writes mlp-mixer.png)
    image = make_dot(out_img, params=dict(model.named_parameters()))
    image.format = "png"
    image.render("mlp-mixer")
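
As a side note, the Conv2d-based patch embedding above is equivalent to cutting the image into non-overlapping patches and applying one shared Linear layer, which is how the paper describes the per-patch embedding. A minimal standalone sketch to check this (the variable names here are my own, not from the repo):

import torch
from torch import nn
from einops import rearrange

torch.manual_seed(0)
B, C, P, D = 1, 3, 16, 512
x = torch.randn(B, C, 224, 224)

conv = nn.Conv2d(C, D, kernel_size=P, stride=P)

# Conv2d path: [B, D, 14, 14] -> [B, 196, D]
out_conv = rearrange(conv(x), 'b d h w -> b (h w) d')

# Linear path: cut non-overlapping P x P patches and flatten each one in
# the same (c, p1, p2) order that Conv2d uses for its weight tensor
patches = rearrange(x, 'b c (h p1) (w p2) -> b (h w) (c p1 p2)', p1=P, p2=P)
linear = nn.Linear(C * P * P, D)
with torch.no_grad():
    linear.weight.copy_(conv.weight.reshape(D, -1))
    linear.bias.copy_(conv.bias)
out_linear = linear(patches)

print(torch.allclose(out_conv, out_linear, atol=1e-5))  # True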




Posted by vastee