I've recently been watching Mu Li's Dive into Deep Learning (动手学深度学习) videos on Bilibili, and these notes record my learning process. Check out the Jupyter notebook version of this post for a cleaner, nicer layout!
Model Construction
The Block class, provided by the nn module, is a model construction class that we can use to define the models we want. Below, we build a multilayer perceptron by subclassing Block. The MLP class defined here overrides Block's __init__ and forward functions, which are used to create the model parameters and to define the forward computation, respectively.
from mxnet import gluon, nd
from mxnet.gluon import nn

class MLP(nn.Block):
    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        # Hidden layer with 256 units and ReLU activation
        self.hidden = nn.Dense(256, activation='relu')
        # Output layer with 10 units
        self.output = nn.Dense(10)

    def forward(self, x):
        return self.output(self.hidden(x))
Instantiating the MLP class gives us the model variable net. Calling net(X) automatically invokes the __call__ function that MLP inherits from Block, which in turn calls the forward function defined in the MLP class to complete the forward computation.
X = nd.random.uniform(shape=(2, 20))
net = MLP()
net.initialize()
net(X)
[[ 0.02394443 0.05150647 0.01035163 -0.06435341 -0.05801985 0.064192
0.04472664 -0.01852541 -0.03237379 0.07389369]
[ 0.05207362 0.04186264 0.04021508 -0.06558423 -0.02249499 0.0341314
0.02135914 -0.06898528 0.02329672 0.0033668 ]]
<NDArray 2x10 @cpu(0)>
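A quick way to convince yourself of this dispatch: since Block's __call__ simply hands off to forward, calling net(X) and net.forward(X) should give identical results. A minimal sketch, using the net and X defined above:

Y1 = net(X)
Y2 = net.forward(X)
# The difference is exactly zero: __call__ dispatches to forward
print((Y1 - Y2).abs().sum().asscalar())  # 0.0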
We now define a MySequential class that provides the same functionality as the Sequential class.
class MySequential(nn.Block):
    def __init__(self, **kwargs):
        super(MySequential, self).__init__(**kwargs)

    def add(self, block):
        """block is an instance of a Block subclass, assumed to have a unique
        name. We save it in the Block class's member variable _children, whose
        type is OrderedDict. When a MySequential instance calls the initialize
        function, the system automatically initializes every member of
        _children."""
        self._children[block.name] = block

    def forward(self, x):
        """OrderedDict guarantees that members are traversed in the order
        they were added."""
        for block in self._children.values():
            x = block(x)
        return x
net = MySequential()
net.add(nn.Dense(256, activation='relu'))
net.add(nn.Dense(10))
net.initialize()
net(X)
[[-0.03358278 0.00098312 0.03334405 -0.00663612 0.07881726 -0.01704565
-0.01302506 -0.05449733 0.04149391 0.00170795]
[ 0.01879605 -0.04185785 0.02918838 -0.00970372 0.05835275 -0.031299
-0.00644606 -0.02542868 0.0442826 0.01446365]]
<NDArray 2x10 @cpu(0)>
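Since MySequential stores its blocks in the _children OrderedDict, we can verify the insertion order directly. A small inspection sketch (note that _children is a private attribute of Block, so this is for illustration only):

# Blocks come back in the order they were added via add()
for name, block in net._children.items():
    print(name, block)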
Next we construct a slightly more complex network, FancyMLP. In this network, we use the get_constant function to create a parameter that is not updated during training, i.e., a constant parameter.
class FancyMLP(nn.Block):
    def __init__(self, **kwargs):
        super(FancyMLP, self).__init__(**kwargs)
        # A random weight created with get_constant is a constant parameter:
        # it is not updated during training
        self.rand_weight = self.params.get_constant(
            'rand_weight', nd.random.uniform(shape=(20, 20)))
        self.dense = nn.Dense(20, activation='relu')

    def forward(self, x):
        x = self.dense(x)
        # Use the constant parameter, together with NDArray's relu and dot
        x = nd.relu(nd.dot(x, self.rand_weight.data()) + 1)
        # Reuse the dense layer: equivalent to two layers sharing parameters
        x = self.dense(x)
        # Control flow: asscalar returns a Python scalar for the comparison
        while x.norm().asscalar() > 1:
            x /= 2
        if x.norm().asscalar() < 0.8:
            x *= 10
        return x.sum()
net = FancyMLP()
net.initialize()
net(X)
[26.667124]
<NDArray 1 @cpu(0)>
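To see that rand_weight really is a constant, we can inspect the parameters: a constant parameter has grad_req set to 'null', so it is skipped during gradient updates. A quick inspection sketch:

# rand_weight reports grad_req 'null'; the Dense weight and bias
# report 'write' and are updated as usual
for name, param in net.collect_params().items():
    print(name, param.grad_req)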
Because FancyMLP and Sequential are both subclasses of Block, we can nest them flexibly.

class NestMLP(nn.Block):
    def __init__(self, **kwargs):
        super(NestMLP, self).__init__(**kwargs)
        self.net = nn.Sequential()
        self.net.add(nn.Dense(64, activation='relu'),
                     nn.Dense(32, activation='relu'))
        self.dense = nn.Dense(16, activation='relu')

    def forward(self, x):
        return self.dense(self.net(x))
net = nn.Sequential()
net.add(NestMLP(), nn.Dense(20), FancyMLP())
net.initialize()
net(X)
[26.0023]
<NDArray 1 @cpu(0)>
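Printing a Gluon model gives a readable summary of its structure, which is handy for checking nested compositions like this one:

# Prints the block hierarchy: NestMLP (with its inner Sequential and
# Dense layers), the Dense(20), and FancyMLP
print(net)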