Preface

Continuing with PyTorch: this post walks through a complete FashionMNIST training example, covering hyperparameters, data loading, a small CNN, and the train/validation loop.
1. Define the Hyperparameters
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

# "cuda:1" selects the second GPU; use "cuda" or "cuda:0" on a single-GPU machine
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

batch_size = 256
num_workers = 0   # set >0 to load batches in parallel worker processes
lr = 1e-3         # matches the learning rate passed to Adam below
epochs = 20
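Results will differ slightly between runs; if you want them repeatable, here is a minimal seeding sketch (my addition, not part of the original post; it relies on the torch and numpy imports above):

# Fix the random seeds so shuffling and weight initialization are repeatable
seed = 42  # arbitrary choice
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)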
2. Load the Data
from torchvision import transforms
from torchvision import datasets

image_size = 28  # FashionMNIST images are already 28x28, so this Resize is a no-op here
data_transform = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor()
])

train_data = datasets.FashionMNIST(root='./', train=True, download=True, transform=data_transform)
test_data = datasets.FashionMNIST(root='./', train=False, download=True, transform=data_transform)

# drop_last=True discards the final incomplete batch of the training set
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers, drop_last=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)
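Before defining the model, it helps to confirm the loader produces what we expect. A quick sanity check (the matplotlib visualization is an optional assumption, not part of the original post):

import matplotlib.pyplot as plt

# Grab one batch and check tensor shapes: images should be
# [batch_size, 1, 28, 28] and labels [batch_size]
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([256, 1, 28, 28]) torch.Size([256])

# Optionally display the first image in the batch with its label
plt.imshow(images[0][0], cmap="gray")
plt.title(f"label: {labels[0].item()}")
plt.show()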
3. Define the Model

The code is as follows (example):
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Two conv blocks: 28 -> 24 -> 12 after the first (5x5 conv + 2x2 pool),
        # then 12 -> 8 -> 4 after the second, leaving 64 channels of 4x4 maps
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3),
            nn.Conv2d(32, 64, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3)
        )
        self.fc = nn.Sequential(
            nn.Linear(64*4*4, 512),
            nn.ReLU(),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 64*4*4)  # flatten the 64x4x4 feature maps
        x = self.fc(x)
        # x = nn.functional.normalize(x)  # optional, left disabled in the original
        return x

model = Net()
model = model.to(device)  # use the device chosen above rather than the cuda:0 default
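A forward pass on a dummy batch verifies the 64*4*4 flattening arithmetic traced in the comments above. This check is my addition, not part of the original run:

# Dummy batch of two 1x28x28 images; the output should be [2, 10]
with torch.no_grad():
    dummy = torch.randn(2, 1, 28, 28).to(device)
    print(model(dummy).shape)  # torch.Size([2, 10])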
4. Define the Loss Function and Optimizer, and Train the Model
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)  # optim was imported in section 1
def train(epoch):
    model.train()
    train_loss = 0
    for data, label in train_loader:
        data, label = data.to(device), label.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()*data.size(0)  # accumulate the summed per-sample loss
    train_loss = train_loss/len(train_loader.dataset)
    print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))

def val(epoch):
    model.eval()
    val_loss = 0
    gt_labels = []
    pred_labels = []
    with torch.no_grad():  # no gradients needed during evaluation
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)
            output = model(data)
            preds = torch.argmax(output, 1)
            gt_labels.append(label.cpu().numpy())
            pred_labels.append(preds.cpu().numpy())
            loss = criterion(output, label)
            val_loss += loss.item()*data.size(0)
    val_loss = val_loss/len(test_loader.dataset)
    gt_labels, pred_labels = np.concatenate(gt_labels), np.concatenate(pred_labels)
    acc = np.sum(gt_labels==pred_labels)/len(pred_labels)
    print('Epoch: {} \tValidation Loss: {:.6f}, Accuracy: {:.6f}'.format(epoch, val_loss, acc))

for epoch in range(1, epochs+1):
    train(epoch)
    val(epoch)
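The original post stops after training; if you want to keep the learned weights, here is a minimal checkpointing sketch (the file name is my choice, not from the original):

# Save only the learned parameters, the usual PyTorch practice
save_path = "./fashionmnist_cnn.pt"  # hypothetical file name
torch.save(model.state_dict(), save_path)

# To reload later: rebuild the architecture, then restore the weights
# model = Net().to(device)
# model.load_state_dict(torch.load(save_path, map_location=device))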
Results

The named-tensor UserWarning below comes from PyTorch internals and can be safely ignored.
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)
return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
Epoch: 1 Training Loss: 0.677033
Epoch: 1 Validation Loss: 0.495211, Accuracy: 0.818800
Epoch: 2 Training Loss: 0.425615
Epoch: 2 Validation Loss: 0.358788, Accuracy: 0.871400
Epoch: 3 Training Loss: 0.362201
Epoch: 3 Validation Loss: 0.326068, Accuracy: 0.881500
Epoch: 4 Training Loss: 0.327386
Epoch: 4 Validation Loss: 0.305909, Accuracy: 0.890500
Epoch: 5 Training Loss: 0.305946
Epoch: 5 Validation Loss: 0.285962, Accuracy: 0.897400
Epoch: 6 Training Loss: 0.285503
Epoch: 6 Validation Loss: 0.280432, Accuracy: 0.896500
Epoch: 7 Training Loss: 0.274258
Epoch: 7 Validation Loss: 0.275422, Accuracy: 0.898300
Epoch: 8 Training Loss: 0.262215
Epoch: 8 Validation Loss: 0.253080, Accuracy: 0.908600
Epoch: 9 Training Loss: 0.254621
Epoch: 9 Validation Loss: 0.257004, Accuracy: 0.905500
Epoch: 10 Training Loss: 0.240819
Epoch: 10 Validation Loss: 0.243566, Accuracy: 0.911500
Epoch: 11 Training Loss: 0.234381
Epoch: 11 Validation Loss: 0.250187, Accuracy: 0.908900
Epoch: 12 Training Loss: 0.226367
Epoch: 12 Validation Loss: 0.248466, Accuracy: 0.910400
Epoch: 13 Training Loss: 0.220683
Epoch: 13 Validation Loss: 0.237766, Accuracy: 0.912500
Epoch: 14 Training Loss: 0.212676
Epoch: 14 Validation Loss: 0.237252, Accuracy: 0.910600
Epoch: 15 Training Loss: 0.204036
Epoch: 15 Validation Loss: 0.233667, Accuracy: 0.915500
Epoch: 16 Training Loss: 0.201117
Epoch: 16 Validation Loss: 0.235281, Accuracy: 0.911800
Epoch: 17 Training Loss: 0.192603
Epoch: 17 Validation Loss: 0.224099, Accuracy: 0.917600
Epoch: 18 Training Loss: 0.189722
Epoch: 18 Validation Loss: 0.239020, Accuracy: 0.909800
Epoch: 19 Training Loss: 0.186247
Epoch: 19 Validation Loss: 0.229205, Accuracy: 0.917100
Epoch: 20 Training Loss: 0.175355
Epoch: 20 Validation Loss: 0.220682, Accuracy: 0.920900
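The overall accuracy (about 92% at epoch 20) can hide per-class differences; here is a sketch that reuses the validation loop's idea to compute per-class accuracy on the test set (this analysis is my addition, assuming the same test_loader and model as above):

# Count correct predictions per class on the test set
correct = np.zeros(10)
total = np.zeros(10)
model.eval()
with torch.no_grad():
    for data, label in test_loader:
        data, label = data.to(device), label.to(device)
        preds = torch.argmax(model(data), 1)
        for t, p in zip(label.cpu().numpy(), preds.cpu().numpy()):
            total[t] += 1
            correct[t] += int(t == p)

# FashionMNIST exposes the class names via the dataset's .classes attribute
for i, name in enumerate(test_data.classes):
    print(f"{name}: {correct[i] / total[i]:.3f}")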