I used to get stuck on the data-processing part: what shape does the dataset actually have to be in before it can go through the convolutions? Ha, turns out I was overthinking it and making it more complicated than it is... If any of you have the same confusion, feel free to leave a comment (there is also a quick shape check right after the DataLoader below). Code: it's commented, so it should be easy to follow.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
BATCH_SIZE = 28
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
EPOCHS = 20
pipeline = transforms.Compose([
    transforms.ToTensor(),                      # PIL image -> float tensor in [0, 1], shape [1, 28, 28]
    transforms.Normalize((0.1307,), (0.3081,))  # normalize with the MNIST mean and std
])
train_set = datasets.MNIST(root='Mnistdata',
                           train=True,
                           transform=pipeline,
                           download=True)
test_set = datasets.MNIST(root='Mnistdata',
                          train=False,
                          transform=pipeline,
                          download=True)
print(train_set)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=True)
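# (Illustrative sanity check, not required for training.) This is all the "data
# processing" the convolutions need: the DataLoader already yields batches of
# shape [BATCH_SIZE, 1, 28, 28] (N, C, H, W), which is exactly what nn.Conv2d expects.
example_images, example_labels = next(iter(train_loader))
print(example_images.shape, example_labels.shape)  # torch.Size([28, 1, 28, 28]) torch.Size([28])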
class Digit(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)         # 1 input channel -> 10 feature maps, 5x5 kernel
        self.conv2 = nn.Conv2d(10, 20, 3)        # 10 -> 20 feature maps, 3x3 kernel
        self.fc1 = nn.Linear(20 * 10 * 10, 500)  # flattened conv2 output -> 500 hidden units
        self.fc2 = nn.Linear(500, 10)            # 500 -> 10 digit classes

    def forward(self, x):
        input_size = x.size(0)     # batch size
        x = self.conv1(x)          # [B, 1, 28, 28] -> [B, 10, 24, 24]
        x = F.relu(x)
        x = F.max_pool2d(x, 2, 2)  # [B, 10, 24, 24] -> [B, 10, 12, 12]
        x = self.conv2(x)          # [B, 10, 12, 12] -> [B, 20, 10, 10]
        x = F.relu(x)
        x = x.view(input_size, -1) # flatten to [B, 20 * 10 * 10]
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)  # log-probabilities, to be paired with nll_loss
        return output
model = Digit().to(DEVICE)
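# (Illustrative check, safe to delete.) Push a dummy batch through the model to confirm
# the sizes behind fc1: 28x28 -> conv1(5x5) -> 24x24 -> pool(2) -> 12x12 -> conv2(3x3) -> 10x10,
# so the flattened feature size is 20 * 10 * 10.
with torch.no_grad():
    print(model(torch.zeros(1, 1, 28, 28, device=DEVICE)).shape)  # torch.Size([1, 10])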
optimizer = optim.Adam(model.parameters())
def train_model(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_index, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)  # model already returns log_softmax, so use nll_loss (not cross_entropy)
        loss.backward()
        optimizer.step()
        if batch_index % 3000 == 0:  # with BATCH_SIZE = 28 there are only ~2143 batches, so this prints once per epoch
            print("Train Epoch : {} \t Loss : {:.6f}".format(epoch, loss.item()))
def test_model(model, device, test_loader):
    model.eval()
    correct = 0.0
    test_loss = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up the batch loss
            pred = output.max(1, keepdim=True)[1]                            # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print("Test-Average loss : {:.4f}, Accuracy : {:.3f}\n".format(test_loss, 100.0 * correct / len(test_loader.dataset)))
for epoch in range(1, EPOCHS + 1):
    train_model(model, DEVICE, train_loader, optimizer, epoch)
    test_model(model, DEVICE, test_loader)