Highlights of the network architecture:
Stacking multiple 3*3 convolution kernels to replace a large kernel reduces the number of parameters.
The paper notes that two stacked 3*3 kernels can replace one 5*5 kernel, and three stacked 3*3 kernels can replace one 7*7 kernel,
while keeping the same receptive field. For C input and C output channels, three 3*3 layers need 3*(3*3*C*C) = 27C^2 weights, versus 7*7*C*C = 49C^2 for a single 7*7 layer.
What is a receptive field?
In a convolutional neural network, the region of the input layer that determines one element of some layer's output is called that element's receptive field.
Put simply, it is the size of the input-layer region that a single unit of the output feature map corresponds to.
As an example, suppose a 9*9 input passes through a 3*3 convolution with stride 2 and no padding, then a 2*2 max pool with stride 2:
Conv: (9-3+2*0)/2+1 = 4
Pool: (4-2+2*0)/2+1 = 2
A single unit in the third layer (the topmost one) has a 2*2 receptive field in the second layer and a 5*5 receptive field in the first layer.
The receptive field is computed recursively:
F(i) = (F(i+1) - 1) * stride + Ksize
where F(i) is the receptive field of layer i,
F(i+1) is the receptive field of the layer above it (layer i+1, closer to the output),
stride is the stride of layer i,
and Ksize is the kernel size of the convolution or pooling layer.
For the example above:
Feature map: F = 1
Pool1: F = (1-1)*2+2 = 2
Conv1: F = (2-1)*2+3 = 5
Why three 3*3 kernels can replace one 7*7 kernel:
Feature map: F = 1
Conv3*3(3): F = (1-1)*1+3 = 3
Conv3*3(2): F = (3-1)*1+3 = 5
Conv3*3(1): F = (5-1)*1+3 = 7
So a single unit of the feature map, seen through three stacked 3*3 convolutions, has the same receptive field as through one 7*7 convolution: 7*7 in both cases.
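The recursion is easy to verify in code. Below is a minimal sketch (my own helper, not from the original post) that walks the formula F(i) = (F(i+1)-1)*stride + Ksize backward through a list of (kernel size, stride) pairs:

import torch  # only to match the rest of the post; the helper itself is plain Python

def receptive_field(layers):
    """layers: (ksize, stride) pairs, ordered from input to output."""
    f = 1  # one unit on the final feature map
    for ksize, stride in reversed(layers):  # walk from the output back to the input
        f = (f - 1) * stride + ksize
    return f

# The pool/conv example above: Conv(3*3, stride 2) then MaxPool(2*2, stride 2)
print(receptive_field([(3, 2), (2, 2)]))          # 5
# Three stacked 3*3 convolutions with stride 1
print(receptive_field([(3, 1), (3, 1), (3, 1)]))  # 7
# A single 7*7 convolution
print(receptive_field([(7, 1)]))                  # 7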
A quick look at the network structure (using VGG16 as the example):
First, a 224*224 RGB image is fed in,
then two 3*3 convolution layers, followed by max pooling,
then two more 3*3 convolution layers, followed by max pooling,
then three 3*3 convolution layers, followed by max pooling,
then three 3*3 convolution layers, followed by max pooling,
then three 3*3 convolution layers, followed by max pooling,
then three fully connected layers,
and finally a softmax.
In total: 13 convolution layers plus 3 fully connected layers.
Every convolution uses stride 1 and padding 1, so convolutions leave the height and width of the feature map unchanged: N = (3-3+2*1)/1+1 = 3, i.e. a 3*3 input comes out as 3*3 (and likewise a 224*224 input stays 224*224).
Every pooling kernel is 2*2 with stride 2, so pooling halves the height and width of the feature matrix while leaving its depth unchanged.
The number of convolution kernels determines the depth of the output feature map.
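A quick shape check (my own snippet, assuming PyTorch) confirms both rules:

import torch
import torch.nn as nn

x = torch.randn(1, 3, 224, 224)  # N x C x H x W
conv = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
pool = nn.MaxPool2d(kernel_size=2, stride=2)

print(conv(x).shape)        # torch.Size([1, 64, 224, 224]) -> H, W unchanged; depth = 64 kernels
print(pool(conv(x)).shape)  # torch.Size([1, 64, 112, 112]) -> H, W halved; depth unchanged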
The three fully connected layers:
The first two fully connected layers use the ReLU activation.
The final layer (1000 nodes in the original ImageNet setup) takes no ReLU; its outputs are turned into probabilities by a softmax. Note that the implementation below has no explicit softmax layer: during training nn.CrossEntropyLoss applies it internally, and the prediction script calls torch.softmax itself.
Network implementation
The VGG network is split into two parts:
the feature-extraction part (all the modules before the fully connected layers), and
the classification part (the three fully connected layers).
Network architecture module (model.py)
import torch.nn as nn
import torch

# official pretrain weights
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'
}


class VGG(nn.Module):
    def __init__(self, features, num_classes=1000, init_weights=False):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(  # the three fully connected layers
            nn.Linear(512*7*7, 4096),     # takes the flattened 512*7*7 feature map
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes)
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        # N x 3 x 224 x 224
        x = self.features(x)
        # N x 512 x 7 x 7
        x = torch.flatten(x, start_dim=1)
        # N x 512*7*7
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():  # iterate over every sub-module, i.e. every layer of the network
            if isinstance(m, nn.Conv2d):  # convolution layer
                # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.xavier_uniform_(m.weight)  # Xavier initialization for the kernel weights
                if m.bias is not None:  # if the layer uses a bias
                    nn.init.constant_(m.bias, 0)  # initialize it to 0
            elif isinstance(m, nn.Linear):  # fully connected layer
                nn.init.xavier_uniform_(m.weight)
                # nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def make_features(cfg: list):  # builds the feature-extraction part from a config list
    layers = []  # holds every layer we create
    in_channels = 3
    for v in cfg:
        if v == "M":
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            layers += [conv2d, nn.ReLU(True)]  # each convolution is followed by ReLU
            in_channels = v  # the output depth equals the number of kernels
    return nn.Sequential(*layers)  # unpack the list and pass it as positional arguments


cfgs = {  # each key maps to one model configuration from the paper
    'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],  # config A, 11 layers; numbers are kernel counts, 'M' is a pooling layer
    'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],  # config B, 13 layers
    'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],  # config D, 16 layers
    'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],  # config E, 19 layers
}


def vgg(model_name="vgg16", **kwargs):  # instantiates the network
    assert model_name in cfgs, "Warning: model name {} not in cfgs dict!".format(model_name)
    cfg = cfgs[model_name]
    model = VGG(make_features(cfg), **kwargs)  # **kwargs forwards e.g. num_classes and init_weights
    return model
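A quick sanity check of the module above (my own snippet; it assumes the code is saved as model.py, which is also what the training script imports):

import torch
from model import vgg

net = vgg(model_name="vgg16", num_classes=5, init_weights=True)
x = torch.randn(2, 3, 224, 224)  # a dummy batch of two 224*224 RGB images
print(net(x).shape)              # torch.Size([2, 5])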
Training module
import os
import sys
import json

import torch
import torch.nn as nn
from torchvision import transforms, datasets
import torch.optim as optim
from tqdm import tqdm

from model import vgg


def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    data_transform = {
        "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),  # convert to tensor format
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
        "val": transforms.Compose([transforms.Resize((224, 224)),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set", "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using {} dataloader workers per process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw)
    print("using {} images for training, {} images for validation.".format(train_num,
                                                                           val_num))

    # test_data_iter = iter(validate_loader)
    # test_image, test_label = next(test_data_iter)

    model_name = "vgg16"
    net = vgg(model_name=model_name, num_classes=5, init_weights=True)
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0001)

    epochs = 3
    best_acc = 0.0
    save_path = './{}Net.pth'.format(model_name)
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')


if __name__ == '__main__':
    main()
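One note on the data layout: the datasets.ImageFolder calls above expect one sub-folder per class under train/ and val/. A small check (my own helper, reusing the paths from the script above) makes the expected structure visible:

import os

data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))
image_path = os.path.join(data_root, "data_set", "flower_data")
for split in ("train", "val"):
    split_dir = os.path.join(image_path, split)
    print(split, "->", sorted(os.listdir(split_dir)))
    # expected: ['daisy', 'dandelion', 'roses', 'sunflower', 'tulips']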
Training results
Prediction module
import os
import json

import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt

from model import vgg


def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    data_transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # load image
    img_path = "../tulip.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(img_path)
    img = Image.open(img_path)
    plt.imshow(img)
    # [N, C, H, W]
    img = data_transform(img)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
    with open(json_path, "r") as f:
        class_indict = json.load(f)

    # create model
    model = vgg(model_name="vgg16", num_classes=5).to(device)
    # load model weights
    weights_path = "./vgg16Net.pth"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    model.load_state_dict(torch.load(weights_path, map_location=device))

    model.eval()
    with torch.no_grad():
        # predict class
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    print_res = "class: {}   prob: {:.3}".format(class_indict[str(predict_cla)],
                                                 predict[predict_cla].numpy())
    plt.title(print_res)
    for i in range(len(predict)):
        print("class: {:10}   prob: {:.3}".format(class_indict[str(i)],
                                                  predict[i].numpy()))
    plt.show()


if __name__ == '__main__':
    main()
Prediction results