Steps for Building a Neural Network
We will train an image classifier, in the following steps:
- Load the CIFAR10 training and test datasets with torchvision and normalize the data
- Define a convolutional neural network
- Define a loss function
- Train the network on the training data
- Test the network on the test data
Packages to Import
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import ssl
import matplotlib.pyplot as plt
import numpy as np
Downloading the Dataset
The data is downloaded automatically the first time this runs; the ssl line below is a common workaround for certificate-verification errors during the download. In a Linux environment, num_workers can be set above 0 to load data with multiple worker processes.
ssl._create_default_https_context = ssl._create_unverified_context
# ToTensor scales pixel values to [0, 1]; Normalize then maps each channel to [-1, 1]
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=0)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
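As a quick sanity check (not part of the original tutorial), one batch can be pulled from trainloader to confirm the tensor shapes and the normalized value range:
images, labels = next(iter(trainloader))          # one mini-batch of 4 images
print(images.shape)                               # torch.Size([4, 3, 32, 32])
print(images.min().item(), images.max().item())   # roughly -1.0 and 1.0 after Normalize
print(' '.join(classes[l] for l in labels))       # human-readable labels for the batch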
Building the Neural Network
Here we use a (3, 32, 32) image as an example: 3 is the number of channels (red, green, and blue), and the spatial size is 32 × 32.
Convolution output size (no padding): (input size - kernel size) / stride + 1. The shapes listed below are reproduced by a small helper sketch after this list.
- self.conv1 = nn.Conv2d(3, 6, 5): a convolution with 3 input channels, 6 output channels, a 5 × 5 kernel, and stride 1 × 1; the image becomes (6, 28, 28).
- self.pool = nn.MaxPool2d(2, 2): a 2 × 2 max pool with stride 2 halves the spatial size; the image becomes (6, 14, 14).
- self.conv2 = nn.Conv2d(6, 16, 5): a convolution with 6 input channels, 16 output channels, and a 5 × 5 kernel; the image becomes (16, 10, 10).
- The same pooling layer is applied again; the image becomes (16, 5, 5).
- self.fc1 = nn.Linear(16 * 5 * 5, 120): a fully connected layer followed by a ReLU; the flattened 16 × 5 × 5 = 400 features are reduced to 120.
- self.fc2 = nn.Linear(120, 84): a fully connected layer followed by a ReLU; 120 features are reduced to 84.
- self.fc3 = nn.Linear(84, 10): a fully connected layer with no activation; 84 features are reduced to 10 class scores.
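The sketch below applies the formula above to reproduce these sizes; conv_out and pool_out are hypothetical helper names, not part of the network code.
def conv_out(size, kernel, stride=1):
    # output size of a convolution without padding: (size - kernel) / stride + 1
    return (size - kernel) // stride + 1

def pool_out(size, kernel=2, stride=2):
    # max pooling follows the same formula
    return (size - kernel) // stride + 1

s = conv_out(32, 5)   # 28 -> (6, 28, 28) after conv1
s = pool_out(s)       # 14 -> (6, 14, 14) after pooling
s = conv_out(s, 5)    # 10 -> (16, 10, 10) after conv2
s = pool_out(s)       # 5  -> (16, 5, 5) after pooling; 16 * 5 * 5 = 400 features feed fc1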
def imshow(img):
    img = img / 2 + 0.5                          # undo the Normalize step: map [-1, 1] back to [0, 1]
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))   # (C, H, W) -> (H, W, C) for matplotlib
    plt.show()
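As a usage example (not in the original flow), imshow can preview a batch of training images together with their labels:
images, labels = next(iter(trainloader))
imshow(torchvision.utils.make_grid(images))      # show the 4 images as one grid
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))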
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)          # 3 input channels, 6 output channels, 5x5 kernel
        self.pool = nn.MaxPool2d(2, 2)           # 2x2 max pooling, reused after each convolution
        self.conv2 = nn.Conv2d(6, 16, 5)         # 6 input channels, 16 output channels, 5x5 kernel
        self.fc1 = nn.Linear(16 * 5 * 5, 120)    # 400 flattened features -> 120
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)             # 10 class scores (raw logits)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))     # (3, 32, 32) -> (6, 28, 28) -> (6, 14, 14)
        x = self.pool(F.relu(self.conv2(x)))     # (6, 14, 14) -> (16, 10, 10) -> (16, 5, 5)
        x = x.view(-1, 16 * 5 * 5)               # flatten to (batch, 400)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
print(net)
Output
Net(
(conv1): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1))
(pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
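As an optional check (not in the original), a random tensor with the expected input shape can be passed through the untrained network to confirm the output size:
dummy = torch.randn(1, 3, 32, 32)   # one fake RGB 32x32 image
out = net(dummy)
print(out.shape)                    # torch.Size([1, 10]) -- one score per CIFAR10 class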
Training the Network
def train():
    for epoch in range(3):                       # loop over the dataset three times
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data                # get one mini-batch of inputs and labels
            optimizer.zero_grad()                # clear gradients from the previous step
            outputs = net(inputs)                # forward pass
            loss = criterion(outputs, labels)
            loss.backward()                      # backward pass
            optimizer.step()                     # update the parameters
            running_loss += loss.item()
            if (i + 1) % 2000 == 0:              # print the average loss every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training')
    torch.save(net.state_dict(), PATH)           # save the trained weights
    print('Finished Saving')
criterion = nn.CrossEntropyLoss()    # expects raw logits, which is why forward() applies no softmax
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
PATH = './cifar_net.pth'
train()
Output
[1, 2000] loss: 2.127
[1, 4000] loss: 1.800
[1, 6000] loss: 1.656
[1, 8000] loss: 1.566
[1, 10000] loss: 1.501
[1, 12000] loss: 1.442
[2, 2000] loss: 1.403
[2, 4000] loss: 1.364
[2, 6000] loss: 1.340
[2, 8000] loss: 1.323
[2, 10000] loss: 1.309
[2, 12000] loss: 1.293
[3, 2000] loss: 1.204
[3, 4000] loss: 1.225
[3, 6000] loss: 1.223
[3, 8000] loss: 1.200
[3, 10000] loss: 1.198
[3, 12000] loss: 1.207
Finished Training
Finished Saving
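A short calculation (not in the original) explains why the log stops near 12000: CIFAR10 has 50,000 training images and the batch size is 4, so each epoch contains 12,500 mini-batches.
print(len(trainset), len(testset))   # 50000 10000
print(len(trainloader))              # 12500 mini-batches of size 4 per epoch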
Testing the Network
In this part you will notice that the network performs differently on different labels; this depends both on the images themselves and on the amount of training, so falling well short of 100% accuracy is entirely normal.
dataiter = iter(testloader)
images, labels = next(dataiter)   # use the built-in next(); dataiter.next() is not available in newer PyTorch
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
net = Net()
net.load_state_dict(torch.load(PATH))
outputs = net(images)
_, predicted = torch.max(outputs, 1)   # index of the largest score = predicted class
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))
correct = 0
total = 0
with torch.no_grad():   # no gradient tracking is needed during evaluation
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
Output
GroundTruth: cat ship ship plane
Predicted: ship ship ship ship
Accuracy of the network on the 10000 test images: 56 %
Accuracy of plane : 42 %
Accuracy of car : 85 %
Accuracy of bird : 47 %
Accuracy of cat : 27 %
Accuracy of deer : 42 %
Accuracy of dog : 56 %
Accuracy of frog : 80 %
Accuracy of horse : 59 %
Accuracy of ship : 80 %
Accuracy of truck : 41 %
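As a hedged extension beyond per-class accuracy (not in the original), a confusion matrix shows which classes are mistaken for which:
confusion = np.zeros((10, 10), dtype=int)   # rows = true class, columns = predicted class
with torch.no_grad():
    for images, labels in testloader:
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        for t, p in zip(labels, predicted):
            confusion[t.item(), p.item()] += 1
print(confusion)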
Training on the GPU
The model and data are moved to the GPU with the to() method; if no GPU is available, the same code still runs on the CPU. A fuller sketch of the adapted loop follows the snippet below.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
inputs, labels = inputs.to(device), labels.to(device)
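A minimal sketch of how these lines slot into the training loop above (assuming the same net, trainloader, criterion, and optimizer):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)                                               # move the model parameters once
for i, (inputs, labels) in enumerate(trainloader, 0):
    inputs, labels = inputs.to(device), labels.to(device)    # move each mini-batch
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()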
Complete Code
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import ssl
import matplotlib.pyplot as plt
import numpy as np
ssl._create_default_https_context = ssl._create_unverified_context
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=0)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def imshow(img):
img = img / 2 + 0.5
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def train():
for epoch in range(3):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if (i + 1) % 2000 == 0:
print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
torch.save(net.state_dict(), PATH)
print('Finished Saving')
net = Net()
print(net)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
PATH = './cifar_net.pth'
train()
dataiter = iter(testloader)
images, labels = next(dataiter)
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
net = Net()
net.load_state_dict(torch.load(PATH))
outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))