This is an example from the PyG official website; let's get the code running first!
PyG is a library for building graph neural networks. Training a GNN follows essentially the same procedure as training an ordinary CNN or RNN; the key steps are:
1. Determine the graph structure (structural vs. non-structural scenario)
2. Determine the graph type and scale (directed vs. undirected, homogeneous vs. heterogeneous, static vs. dynamic)
3. Design the loss function
4. Build the model using computational modules
Straight to the code!
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.datasets import Planetoid
# Download the Cora dataset (a citation network: nodes are papers, edges are citation links)
dataset = Planetoid(root="../dataset/Cora", name="Cora")
print(len(dataset))               # number of graphs in the dataset (1 for Cora)
print(dataset.num_classes)        # number of node classes (7)
print(dataset.num_node_features)  # dimensionality of the node features (1433)
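Before defining the model, it can help to look at the single Data object that Planetoid returns. A minimal sketch of such an inspection (the attributes below are standard torch_geometric.data.Data fields; the counts in the comments are the usual Cora numbers, and the variable name graph is just illustrative):
graph = dataset[0]             # Cora consists of a single graph
print(graph)                   # summary of every tensor stored on the Data object
print(graph.num_nodes)         # number of nodes (papers), 2708 for Cora
print(graph.edge_index.shape)  # COO connectivity of shape [2, num_edges]
print(graph.is_undirected())   # True: each citation edge is stored in both directions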
# Define the GCN model: two GCNConv layers with a 16-dimensional hidden layer
class GCN(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = GCNConv(dataset.num_node_features, 16)
        self.conv2 = GCNConv(16, dataset.num_classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        x = F.relu(x)  # the non-linearity is applied separately, not inside the conv layer
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)
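A quick sanity check of the forward pass can catch shape mismatches before training. A sketch (tmp_model is just a throwaway CPU instance, not part of the original example):
tmp_model = GCN()
out = tmp_model(dataset[0])
print(out.shape)  # expected: [num_nodes, num_classes] log-probabilities, i.e. [2708, 7] for Cora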
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = GCN().to(device)
data = dataset[0].to(device)  # Cora contains a single graph, so we take the one Data object
print(data.x[:100])           # node feature matrix (bag-of-words vectors)
print(data.y[:100])           # node class labels
print(data.train_mask[:100])  # boolean mask marking the training nodes
print(data.test_mask[:100])   # boolean mask marking the test nodes
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
model.train()  # sets the module in training mode (dropout active)
for epoch in range(200):
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
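The loop above trains silently. If you want to watch progress, one option is to log the training loss and the accuracy on the validation split (Planetoid also provides data.val_mask). A sketch of such a variant, not part of the original example:
model.train()
for epoch in range(200):
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 20 == 0:
        model.eval()
        with torch.no_grad():
            val_pred = model(data).argmax(dim=1)
            val_acc = (val_pred[data.val_mask] == data.y[data.val_mask]).float().mean().item()
        model.train()
        print(f'epoch {epoch + 1:03d} | loss {loss.item():.4f} | val acc {val_acc:.4f}')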
# TEST
model.eval()  # sets the module in evaluation mode (dropout disabled)
pred = model(data).argmax(dim=1)
correct = (pred[data.test_mask] == data.y[data.test_mask]).sum()
acc = int(correct) / int(data.test_mask.sum())
print('Accuracy: {:.4f}'.format(acc))
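Since no gradients are needed at test time, it is also common to wrap the evaluation in torch.no_grad(); the same test block would then look like this sketch:
model.eval()
with torch.no_grad():  # disable autograd bookkeeping during inference
    pred = model(data).argmax(dim=1)
correct = (pred[data.test_mask] == data.y[data.test_mask]).sum()
acc = int(correct) / int(data.test_mask.sum())
print('Accuracy: {:.4f}'.format(acc))
With this setup, the test accuracy on Cora typically lands somewhere around 0.8.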