Main focus: how the CNN and Transformer modules are fused.
1. Shared stem
After the stem convolution, the input image becomes a (b, 256, 56, 56) feature map. A further 4x downsampling produces (b, 384, 14, 14), which flattens to 196 tokens of dimension 384; prepending the (1, 384) class_token gives (b, 197, 384).
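For intuition, here is a minimal, self-contained sketch of the stem shape arithmetic. The exact kernel/stride values are assumptions inferred from the shapes quoted above; the CNN branch's 256 channels come from a first conv block applied on top of the 64-channel stem output.
import torch
import torch.nn as nn

stem_conv  = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)  # assumed: 224 -> 112
stem_pool  = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)      # assumed: 112 -> 56
patch_conv = nn.Conv2d(64, 384, kernel_size=4, stride=4)           # 56 -> 14, the 4x downsampling
cls_token  = nn.Parameter(torch.zeros(1, 1, 384))

img  = torch.randn(2, 3, 224, 224)
base = stem_pool(stem_conv(img))                    # (b, 64, 56, 56); a conv block then lifts this to (b, 256, 56, 56)
x_t  = patch_conv(base).flatten(2).transpose(1, 2)  # (b, 196, 384)
x_t  = torch.cat([cls_token.expand(img.shape[0], -1, -1), x_t], dim=1)  # (b, 197, 384)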
for i in range(2, self.fin_stage):
    x, x_t = eval('self.conv_trans_' + str(i))(x, x_t)
# This loop runs for i = 2-12. The inputs are the feature map x (b, 256, 56, 56) and the tokens x_t (b, 197, 384); the outputs are the same two parts. The loop body is unpacked step by step below:
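Side note: the eval call here is just a dynamic attribute lookup; an equivalent eval-free spelling (a hypothetical rewrite, not the original code) would be:
for i in range(2, self.fin_stage):
    x, x_t = getattr(self, 'conv_trans_' + str(i))(x, x_t)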
2. Initial downsampling
def forward(self, x, x_t):
    x, x2 = self.cnn_block(x)
    # After the first stage: x is (b, 256, 56, 56) and x2 is (b, 64, 56, 56); at i = 5 they become (b, 512, 28, 28) and (b, 128, 28, 28)
    # self.cnn_block handles the downsampling: over the loop i = 2-12, the spatial size is unchanged within i = 2-4, 5-8, and 9-11, and is halved on entering i = 5, i = 9, and in the last block i = 12
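As a quick shape check of what a downsampling stage does (a standalone sketch with plain torch layers, not the actual cnn_block): halving H/W while doubling channels is also why the residual branch in the fusion block below needs its own 1x1 stride-2 projection (res_conv).
import torch
import torch.nn as nn

down_3x3     = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)  # main path at i = 5
residual_1x1 = nn.Conv2d(256, 512, kernel_size=1, stride=2)             # matching residual projection
x = torch.randn(2, 256, 56, 56)
print(down_3x3(x).shape, residual_1x1(x).shape)  # both torch.Size([2, 512, 28, 28])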
3. CNN -> Trans
    _, _, H, W = x2.shape
    x_st = self.squeeze_block(x2, x_t)
    x_t = self.trans_block(x_st + x_t)
    # The feature map x2 is projected by a 1x1 conv and average-pooled -> (b, 196, 384); concatenating the class token taken from x_t gives x_st (b, 197, 384). x_st is then added to x_t and fed through the Trans block; the shape of x_t is unchanged.
self.squeeze_block:
  (conv_project): Conv2d(128, 384, kernel_size=(1, 1), stride=(1, 1))
  (sample_pooling): AvgPool2d(kernel_size=2, stride=2, padding=0)
  (ln): LayerNorm((384,), eps=1e-06, elementwise_affine=True)
  (act): GELU()
def forward(self, x, x_t):
    x = self.conv_project(x)  # [N, C, H, W] -> [N, 384, H, W]
    x = self.sample_pooling(x).flatten(2).transpose(1, 2)  # [N, 196, 384]
    x = self.ln(x)
    x = self.act(x)
    x = torch.cat([x_t[:, 0][:, None, :], x], dim=1)  # prepend the class token -> [N, 197, 384]
    return x
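A quick shape check of this CNN -> token path with dummy tensors at the 28x28 stage (which is what the printed modules above correspond to); note the pooling stride is 4/2/1 at the 56/28/14 stages respectively, so the token count always comes out to 196:
import torch
import torch.nn as nn

conv_project   = nn.Conv2d(128, 384, kernel_size=1)
sample_pooling = nn.AvgPool2d(kernel_size=2, stride=2)
ln, act        = nn.LayerNorm(384), nn.GELU()

x2  = torch.randn(2, 128, 28, 28)   # CNN branch feature map
x_t = torch.randn(2, 197, 384)      # current tokens
t = sample_pooling(conv_project(x2)).flatten(2).transpose(1, 2)  # (2, 196, 384)
t = act(ln(t))
x_st = torch.cat([x_t[:, 0][:, None, :], t], dim=1)
print(x_st.shape)  # torch.Size([2, 197, 384])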
4. Trans -> CNN
    x_t_r = self.expand_block(x_t, H // self.dw_stride, W // self.dw_stride)
    x = self.fusion_block(x, x_t_r, return_x_2=False)
    # The token embedding (minus the class token) is reshaped into a feature map and upsampled via F.interpolate, giving x_t_r (b, 64, 56, 56) at the first stage, i.e. the spatial resolution is increased
    # The map-shaped x_t_r is then added back into the feature map x inside fusion_block, yielding the new x
    return x, x_t
self.expand_block:
  (conv_project): Conv2d(384, 128, kernel_size=(1, 1), stride=(1, 1))
  (bn): BatchNorm2d(128, eps=1e-06, momentum=0.1, affine=True, track_running_stats=True)
  (act): ReLU()
def forward(self, x, H, W):
    B, _, C = x.shape
    # [N, 197, 384] -> [N, 196, 384] -> [N, 384, 196] -> [N, 384, 14, 14]
    x_r = x[:, 1:].transpose(1, 2).reshape(B, C, H, W)
    x_r = self.act(self.bn(self.conv_project(x_r)))
    return F.interpolate(x_r, size=(H * self.up_stride, W * self.up_stride))
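And the reverse token -> CNN path, again with dummy shapes for the 28x28 stage (up_stride mirrors dw_stride; also note F.interpolate defaults to nearest-neighbor rather than bilinear upsampling unless a mode is passed):
import torch
import torch.nn as nn
import torch.nn.functional as F

conv_project = nn.Conv2d(384, 128, kernel_size=1)
up_stride = 2                       # 4 / 2 / 1 at the 56 / 28 / 14 stages

x_t = torch.randn(2, 197, 384)
x_r = x_t[:, 1:].transpose(1, 2).reshape(2, 384, 14, 14)  # drop the class token
x_r = conv_project(x_r)                                   # (2, 128, 14, 14)
x_t_r = F.interpolate(x_r, size=(14 * up_stride, 14 * up_stride))
print(x_t_r.shape)  # torch.Size([2, 128, 28, 28])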
self.fusion_block:
  (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
  (bn1): BatchNorm2d(128, eps=1e-06, momentum=0.1, affine=True, track_running_stats=True)
  (act1): ReLU(inplace=True)
  (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (bn2): BatchNorm2d(128, eps=1e-06, momentum=0.1, affine=True, track_running_stats=True)
  (act2): ReLU(inplace=True)
  (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
  (bn3): BatchNorm2d(512, eps=1e-06, momentum=0.1, affine=True, track_running_stats=True)
  (act3): ReLU(inplace=True)
def forward(self, x, x_t=None, return_x_2=True):
    residual = x  # (b, 256, 56, 56)
    x = self.conv1(x)
    x = self.bn1(x)
    if self.drop_block is not None:
        x = self.drop_block(x)
    x = self.act1(x)
    x = self.conv2(x) if x_t is None else self.conv2(x + x_t)  # the token branch is added here
    x = self.bn2(x)
    if self.drop_block is not None:
        x = self.drop_block(x)
    x2 = self.act2(x)
    x = self.conv3(x2)
    x = self.bn3(x)
    if self.drop_block is not None:
        x = self.drop_block(x)
    if self.drop_path is not None:
        x = self.drop_path(x)
    if self.res_conv:
        residual = self.residual_conv(residual)
        residual = self.residual_bn(residual)
    x += residual
    x = self.act3(x)
    if return_x_2:
        return x, x2
    else:
        return x
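Note where the token branch enters: x_t_r is added after conv1, at the 128-channel middle of the bottleneck, not to the 512-channel input. A minimal shape check with dummy tensors (BN/ReLU omitted for brevity):
import torch
import torch.nn as nn

conv1 = nn.Conv2d(512, 128, kernel_size=1, bias=False)
conv2 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False)
conv3 = nn.Conv2d(128, 512, kernel_size=1, bias=False)

x     = torch.randn(2, 512, 28, 28)  # CNN feature map
x_t_r = torch.randn(2, 128, 28, 28)  # up-projected tokens from expand_block
out = conv3(conv2(conv1(x) + x_t_r)) + x  # fuse at the bottleneck middle, then residual add
print(out.shape)  # torch.Size([2, 512, 28, 28])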
5. Classification stage
Over i = 2-12, the output shapes evolve as follows:
(1) x: (b, 256, 56, 56) -> (b, 512, 28, 28) -> (b, 1024, 14, 14) -> (b, 1024, 7, 7)
(2) x_t: (b, 197, 384) -> ... -> (b, 197, 384), i.e. unchanged throughout
At the classification heads:
x: (b, 1024, 7, 7) --avgPool--> (b, 1024) --conv_cls_head--> (b, 1000)
x_t: (b, 197, 384) --take the class token x_t[:, 0]--> (b, 384) --trans_cls_head--> (b, 1000)
The final result is the average of the two outputs.
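A sketch of the two heads and the final averaging (the Linear-layer definitions are assumptions matching the shapes above):
import torch
import torch.nn as nn

pooling        = nn.AdaptiveAvgPool2d(1)
conv_cls_head  = nn.Linear(1024, 1000)
trans_cls_head = nn.Linear(384, 1000)

x   = torch.randn(2, 1024, 7, 7)   # final CNN feature map
x_t = torch.randn(2, 197, 384)     # final tokens
conv_cls = conv_cls_head(pooling(x).flatten(1))  # (2, 1000)
tran_cls = trans_cls_head(x_t[:, 0])             # (2, 1000) from the class token
logits = (conv_cls + tran_cls) / 2               # final prediction: average of the two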