From the perceptron to neural networks to neural network training
Function definitions (all snippets below assume the following import):
import numpy as np
Step function: hidden-layer activation function
def step_function(x):
    # np.int was removed from newer NumPy releases; use the builtin int dtype
    return np.array(x > 0, dtype=int)
Sigmoid function (smooth step): hidden-layer activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
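A quick sanity check of the two activations on the same inputs (a minimal sketch; the sample values are illustrative only):
x = np.array([-1.0, 0.5, 2.0])
print(step_function(x))   # [0 1 1]
print(sigmoid(x))         # [0.26894142 0.62245933 0.88079708]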
softmax function: output-layer activation function
def softmax(a):
    c = np.max(a)                # subtract the max to avoid overflow in exp
    exp_a = np.exp(a - c)
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a
    return y
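Usage check: the outputs are non-negative and sum to 1, so they can be read as probabilities (input values here are illustrative):
a = np.array([0.3, 2.9, 4.0])
y = softmax(a)
print(y)           # [0.01821127 0.24519181 0.73659691]
print(np.sum(y))   # 1.0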
Batch-capable version (handles both 1-D and 2-D input):
def softmax(x):
    if x.ndim == 2:
        x = x.T
        x = x - np.max(x, axis=0)    # stabilize each sample separately
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T
    x = x - np.max(x)
    return np.exp(x) / np.sum(np.exp(x))
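With a 2-D batch, each row is normalized independently (a minimal sketch with made-up inputs):
X = np.array([[0.3, 2.9, 4.0],
              [1.0, 1.0, 1.0]])
Y = softmax(X)
print(Y.shape)             # (2, 3)
print(np.sum(Y, axis=1))   # [1. 1.]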
Output-layer loss (softmax followed by cross-entropy):
def softmax_loss(X, t):
    y = softmax(X)
    return cross_entropy_error(y, t)
ReLU function: activation function
def relu(x):
    return np.maximum(0, x)
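ReLU simply clips negative values to zero (illustrative inputs):
x = np.array([-2.0, 0.0, 3.0])
print(relu(x))   # [0. 0. 3.]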
Prediction (forward inference) function:
def predict(self, x):
    W1, W2 = self.params['W1'], self.params['W2']
    b1, b2 = self.params['b1'], self.params['b2']
    a1 = np.dot(x, W1) + b1     # input layer -> hidden layer
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2    # hidden layer -> output layer
    y = softmax(a2)
    return y
Recognition accuracy:
def accuracy(self, x, t):
    y = self.predict(x)
    y = np.argmax(y, axis=1)    # predicted class per sample
    t = np.argmax(t, axis=1)    # true class per sample (one-hot labels)
    accuracy = np.sum(y == t) / float(x.shape[0])
    return accuracy
Initialization:
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
    self.params = {}
    self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
    self.params['b1'] = np.zeros(hidden_size)
    self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
    self.params['b2'] = np.zeros(output_size)
Loss function:
def loss(self, x, t):
    y = self.predict(x)
    return cross_entropy_error(y, t)
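The __init__, predict, accuracy, and loss snippets above are methods of one two-layer network; a minimal sketch of how they assemble into a class and get called (the class name TwoLayerNet and the toy sizes are assumed for illustration, and it relies on the batch-capable softmax, sigmoid, and cross_entropy_error defined in these notes):
class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        z1 = sigmoid(np.dot(x, W1) + b1)
        return softmax(np.dot(z1, W2) + b2)

    def loss(self, x, t):
        return cross_entropy_error(self.predict(x), t)

net = TwoLayerNet(input_size=4, hidden_size=5, output_size=3)   # toy sizes
x = np.random.rand(2, 4)                  # batch of 2 samples
t = np.array([[0, 0, 1], [1, 0, 0]])      # one-hot targets
print(net.loss(x, t))                     # scalar cross-entropy loss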
Mean squared error:
def mean_squared_error(y, t):
    return 0.5 * np.sum((y - t)**2)
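Example with a one-hot target where class 2 is correct and the network assigns it probability 0.6 (values are illustrative):
t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(mean_squared_error(y, t))   # ≈ 0.0975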
Cross-entropy error:
def cross_entropy_error(y, t):
    delta = 1e-7                   # avoid log(0)
    return -np.sum(t * np.log(y + delta))
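With a one-hot t, only the predicted probability of the true class contributes (same illustrative values as above):
t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(cross_entropy_error(y, t))   # ≈ 0.5108 (= -log 0.6)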
Mini-batch cross-entropy error:
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch_size
Cross-entropy error when labels are class indices rather than one-hot vectors:
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    # pick out y[i, t[i]] for every sample i (np.arange, not np.array)
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
Combined version that accepts either one-hot or index labels:
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    if t.size == y.size:           # one-hot labels -> convert to class indices
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
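Quick check that both label formats give the same average loss (made-up batch of two samples):
y = np.array([[0.1, 0.8, 0.1],
              [0.3, 0.2, 0.5]])
t_onehot = np.array([[0, 1, 0],
                     [0, 0, 1]])
t_label = np.array([1, 2])
print(cross_entropy_error(y, t_onehot))   # ≈ 0.4581
print(cross_entropy_error(y, t_label))    # ≈ 0.4581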
Numerical differentiation (computing a derivative):
def numerical_diff(f, x):
    h = 1e-4
    return (f(x + h) - f(x - h)) / (2 * h)   # central difference
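Example: differentiating a simple quadratic; the analytic derivative of 0.01*x**2 + 0.1*x is 0.02*x + 0.1 (the function here is illustrative):
def f(x):
    return 0.01 * x**2 + 0.1 * x
print(numerical_diff(f, 5))    # ≈ 0.2
print(numerical_diff(f, 10))   # ≈ 0.3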
Partial derivatives (the vector collecting the partial derivatives with respect to all variables is called the gradient):
def numerical_gradient(f, x):
    h = 1e-4
    grad = np.zeros_like(x)
    for idx in range(x.size):        # x is expected to be a 1-D float array
        tmp_val = x[idx]
        x[idx] = tmp_val + h
        fxh1 = f(x)                  # f(x + h) along the idx-th axis
        x[idx] = tmp_val - h
        fxh2 = f(x)                  # f(x - h) along the idx-th axis
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val             # restore the original value
    return grad
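Example gradient of f(x0, x1) = x0**2 + x1**2; note that x must be a float array so the in-place perturbations keep float precision (the function is illustrative):
def f2(x):
    return x[0]**2 + x[1]**2
print(numerical_gradient(f2, np.array([3.0, 4.0])))   # ≈ [6. 8.]
print(numerical_gradient(f2, np.array([0.0, 2.0])))   # ≈ [0. 4.]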
Gradient method (gradient descent):
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    x = init_x
    for i in range(step_num):
        grad = numerical_gradient(f, x)
        x -= lr * grad               # step against the gradient
    return x
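Minimizing the same toy function from an arbitrary starting point; with lr=0.1 and 100 steps the result is essentially the minimum at the origin (function and settings are illustrative):
def f2(x):
    return x[0]**2 + x[1]**2
init_x = np.array([-3.0, 4.0])
print(gradient_descent(f2, init_x, lr=0.1, step_num=100))   # ≈ [0. 0.]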