Parameters are generally named after the self.xxx attribute they are assigned to. Basic computation units such as Conv2d and Linear typically expose a weight and a bias parameter. Inside an nn.Sequential, the submodule's index is inserted into the name, giving xxx.0.weight, xxx.1.weight, and so on. If a module directly holds another module as an attribute, the names are chained with dots, e.g. xxx.xxx2.weight (a minimal sketch of this nested case follows the printed output below).
import torch
import torch.nn as nn
from collections import OrderedDict
class g(nn.Module):
    def __init__(self):
        super(g, self).__init__()
        # nn.Sequential: parameters are named k1.<index>.weight / k1.<index>.bias.
        # LeakyReLU has no parameters, so the indices in the output jump 0, 1, 3, 4.
        self.k1 = nn.Sequential(
            nn.Conv2d(in_channels=2, out_channels=2, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(2),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=2, out_channels=2, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(2),
            nn.LeakyReLU(0.1),
        )
        # Plain submodules: parameters are named bn.weight, k.weight, ln.weight, ...
        self.bn = nn.BatchNorm2d(2)  # unused in forward, but still registered and named
        self.act = nn.LeakyReLU(0.1)
        self.k = nn.Conv2d(in_channels=2, out_channels=2, kernel_size=3, padding=1, bias=True)
        self.ad = nn.AdaptiveAvgPool2d(1)
        self.bn1 = nn.BatchNorm2d(2)
        self.ln = nn.Linear(2, 2)

    def forward(self, z):
        z = self.k1(z)
        z = self.k(z)
        z = self.bn1(z)
        z = self.act(z)
        z = self.ad(z).squeeze(-1).squeeze(-1)  # global average pool to shape (N, 2)
        z = self.ln(z)
        return z
net = g().eval()
weights = OrderedDict(
    (name, param) for (name, param) in net.named_parameters()
)
print(weights)
OrderedDict([('k1.0.weight', Parameter containing:
tensor([[[[ 0.1514,  0.1849,  0.1946],
          [-0.0680, -0.1371, -0.0092],
          [ 0.1808,  0.1401,  0.1083]],

         [[ 0.1889, -0.1326,  0.0965],
          [-0.0016, -0.0098,  0.1693],
          [ 0.1578,  0.0111,  0.1994]]],


        [[[ 0.0117, -0.1922, -0.1463],
          [ 0.0827, -0.0198, -0.0200],
          [-0.2247, -0.0270, -0.0081]],

         [[ 0.1823, -0.1011, -0.0574],
          [-0.2221,  0.1957, -0.1433],
          [ 0.1434, -0.2261, -0.1099]]]], requires_grad=True)), ('k1.0.bias', Parameter containing:
tensor([-0.2290, -0.0108], requires_grad=True)), ('k1.1.weight', Parameter containing:
tensor([0.3988, 0.2880], requires_grad=True)), ('k1.1.bias', Parameter containing:
tensor([0., 0.], requires_grad=True)), ('k1.3.weight', Parameter containing:
tensor([[[[-0.0875,  0.2319, -0.1471],
          [ 0.1164, -0.1140, -0.0544],
          [ 0.1885, -0.1859,  0.1573]],

         [[-0.1952,  0.1834,  0.0729],
          [ 0.1846,  0.1412,  0.1133],
          [-0.0151, -0.0438, -0.0792]]],


        [[[-0.1711, -0.0076,  0.1490],
          [ 0.0215,  0.0504,  0.1331],
          [-0.0438, -0.0271,  0.0088]],

         [[ 0.1351, -0.1812, -0.2039],
          [-0.1755,  0.0814,  0.1414],
          [-0.2234,  0.1268,  0.1193]]]], requires_grad=True)), ('k1.3.bias', Parameter containing:
tensor([3.3504e-02, 2.9281e-05], requires_grad=True)), ('k1.4.weight', Parameter containing:
tensor([0.4196, 0.3621], requires_grad=True)), ('k1.4.bias', Parameter containing:
tensor([0., 0.], requires_grad=True)), ('bn.weight', Parameter containing:
tensor([0.7338, 0.8517], requires_grad=True)), ('bn.bias', Parameter containing:
tensor([0., 0.], requires_grad=True)), ('k.weight', Parameter containing:
tensor([[[[ 0.1813,  0.1933, -0.0131],
          [-0.0368,  0.1157,  0.0239],
          [ 0.1396,  0.2008, -0.1140]],

         [[-0.0239, -0.0474, -0.2350],
          [ 0.0077,  0.2279,  0.0326],
          [-0.2285, -0.2208, -0.1953]]],


        [[[-0.0500,  0.1928,  0.1226],
          [ 0.0325,  0.0296,  0.1168],
          [ 0.0048,  0.1123, -0.0120]],

         [[-0.1520, -0.2144,  0.0397],
          [ 0.0662, -0.0458,  0.0303],
          [ 0.0304, -0.0905, -0.0273]]]], requires_grad=True)), ('k.bias', Parameter containing:
tensor([ 0.1290, -0.1853], requires_grad=True)), ('bn1.weight', Parameter containing:
tensor([0.0804, 0.5270], requires_grad=True)), ('bn1.bias', Parameter containing:
tensor([0., 0.], requires_grad=True)), ('ln.weight', Parameter containing:
tensor([[-0.1914,  0.0692],
        [-0.5830, -0.6674]], requires_grad=True)), ('ln.bias', Parameter containing:
tensor([0.4058, 0.3513], requires_grad=True))])
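The third case mentioned at the top, a module that directly holds another module, does not appear in the demo above, so here is a minimal sketch of it. The class names Inner/Outer and the attribute names xxx2/fc are invented for illustration; the point is that named_parameters() joins nested attribute names with dots, all the way down.

import torch.nn as nn

# Hypothetical modules, only to illustrate nested naming.
class Inner(nn.Module):
    def __init__(self):
        super(Inner, self).__init__()
        self.fc = nn.Linear(2, 2)

class Outer(nn.Module):
    def __init__(self):
        super(Outer, self).__init__()
        self.xxx2 = Inner()  # directly holds another module

for name, _ in Outer().named_parameters():
    print(name)  # prints: xxx2.fc.weight, then xxx2.fc.bias

The same dotted names are also the keys of net.state_dict(); note that state_dict() additionally contains non-parameter buffers (for example the running_mean/running_var of each BatchNorm2d), which named_parameters() does not return.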