1. LeNet
LeNet-5 is a small network, but it contains the basic building blocks of deep learning: convolutional layers, pooling layers, and fully connected layers, and it forms the foundation of many later models. Here we analyze LeNet-5 in depth and, through a worked example, deepen our understanding of convolutional and pooling layers.
C1 layer: type: convolution; input image: 32×32; kernel size: 5×5; number of kernels: 6; output feature map size: 28×28
S2 layer: type: pooling; input: 28×28; pooling window: 2×2; output feature map size: 14×14
C3 layer: type: convolution; input: 14×14; kernel size: 5×5; number of kernels: 16; output feature map size: 10×10
S4 layer: type: pooling; input: 10×10; pooling window: 2×2; output feature map size: 5×5
C5, F6, OUTPUT layers: type: fully connected; [b, 400] -> [b, 120] -> [b, 84] -> [b, 10]
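Each spatial size above follows the standard output-size formula for convolution and pooling: output = floor((input - kernel + 2*padding) / stride) + 1. A minimal sketch that verifies the 32 -> 28 -> 14 -> 10 -> 5 chain (the conv_out helper is just for illustration, not part of the model code):

def conv_out(size, kernel, stride=1, padding=0):
    # output size of a conv/pool layer: floor((size - kernel + 2*padding) / stride) + 1
    return (size - kernel + 2 * padding) // stride + 1

s = conv_out(32, 5)           # C1: (32 - 5) / 1 + 1 = 28
s = conv_out(s, 2, stride=2)  # S2: (28 - 2) / 2 + 1 = 14
s = conv_out(s, 5)            # C3: (14 - 5) / 1 + 1 = 10
s = conv_out(s, 2, stride=2)  # S4: (10 - 2) / 2 + 1 = 5
print(16 * s * s)             # 400, the flattened input size of C5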
2. Training and testing with CIFAR-10
First, write the Lenet5 class:
import torch
from torch import nn


class Lenet5(nn.Module):
    def __init__(self):
        super(Lenet5, self).__init__()
        self.model = nn.Sequential(
            # C1: [b, 3, 32, 32] -> [b, 6, 28, 28]
            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
            # S2: [b, 6, 28, 28] -> [b, 6, 14, 14]
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            # C3: [b, 6, 14, 14] -> [b, 16, 10, 10]
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            # S4: [b, 16, 10, 10] -> [b, 16, 5, 5]
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            # C5, F6, OUTPUT: [b, 400] -> [b, 120] -> [b, 84] -> [b, 10]
            nn.Flatten(),
            nn.Linear(400, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            # no ReLU after the last layer: CrossEntropyLoss expects raw logits
            nn.Linear(84, 10),
        )

    def forward(self, x):
        return self.model(x)
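A quick sanity check confirms the shapes (a minimal sketch; the batch size of 2 is arbitrary, and it assumes the class above is saved as lenet5.py, matching the import in the main script below):

import torch
from lenet5 import Lenet5

net = Lenet5()
tmp = torch.randn(2, 3, 32, 32)  # dummy batch: 2 images, 3 channels, 32x32
out = net(tmp)
print(out.shape)                 # torch.Size([2, 10])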
The main function, for training and testing:
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from lenet5 import Lenet5
from torch import nn, optim


def main():
    batch_size = 50

    cifar_train = datasets.CIFAR10('cifar', train=True, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
    ]), download=True)
    cifar_train = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)

    cifar_test = datasets.CIFAR10('cifar', train=False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
    ]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)  # no need to shuffle the test set

    x, label = next(iter(cifar_train))  # iter(...).next() is Python 2 style; use next(...) in Python 3
    print(x.shape, label)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # fall back to CPU if no GPU is available
    model = Lenet5().to(device)
    criteon = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    for epoch in range(1000):
        # training
        model.train()
        for x, label in cifar_train:
            x, label = x.to(device), label.to(device)
            logits = model(x)
            loss = criteon(logits, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print(epoch, loss.item())

        # evaluation
        model.eval()
        with torch.no_grad():
            total_correct = 0
            total_number = 0
            for x, label in cifar_test:
                x, label = x.to(device), label.to(device)
                logits = model(x)
                pred = logits.argmax(dim=1)
                total_correct += torch.eq(pred, label).float().sum().item()
                total_number += x.size(0)
            accuracy = total_correct / total_number
            print(epoch, accuracy)


if __name__ == '__main__':
    main()