参数
- 学习率:alpha
- 迭代次数:iteration
- 梯度下降要乘以负号,若是梯度上升则为正号
公式
损失函数:$L(k,b) = \frac{1}{n}\sum_i \bigl(y_i - (k x_i + b)\bigr)^2$
梯度:$\frac{\partial L}{\partial k} = -\frac{2}{n}\sum_i (y_i - \hat{y}_i)\,x_i,\qquad \frac{\partial L}{\partial b} = -\frac{2}{n}\sum_i (y_i - \hat{y}_i)$
代码
from sklearn import datasets
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt1
import numpy as np
import random
data = datasets.load_boston()
X, Y = data['data'][:,5],data['target']
iteration = 20000
learningRate = 1e-3
k = random.randint(0, 50)
b = random.randint(-50, 50)
best_k, best_b = None, None
min_loss = float('inf')
def partial_k(y_true, y_predict, X):
return -2*np.mean((np.array(y_true) - np.array(y_predict))*np.array(X))
def partial_b(y_true, y_predict):
return -2*np.mean(np.array(y_true) - np.array(y_predict))
def loss(y_true, y_predict):
return np.mean((np.array(y_true) - np.array(y_predict))**2)
def predict(k, X, b):
return k * X + b
plt.figure()
plt.scatter(X, Y, color='red', alpha=0.5)
plt.figure()
plt.scatter(X, Y, color='red', alpha=0.5)
plt.plot(X, predict(k, X, b), color='green', linewidth=3)
plt.show()
for i in range(iteration):
y_predict = predict(k, X, b)
get_loss = loss(Y, y_predict)
if(get_loss < min_loss):
best_k = k
best_b = b
min_loss = get_loss
k = k - partial_k(Y, y_predict, X) * learningRate
b = b - partial_b(Y, y_predict) * learningRate
print("min_loss = ",min_loss)
plt.figure()
plt.scatter(X, Y, color = "red")
plt.plot(X, predict(best_k, X, best_b), color = "blue")
plt.show()