This code does not yet include a held-out prediction set; it is kept here as a memo (one possible way to add such a set is sketched after the listing).
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout  # keras.layers.core is no longer a valid import path; Activation was unused
from sklearn import preprocessing
import numpy as np
import pandas as pd
X_min_max_scaler = preprocessing.MinMaxScaler()
Y_min_max_scaler = preprocessing.MinMaxScaler()
len_in = 200  # number of samples
# create the independent variables (two input features)
x1 = np.mat(np.linspace(0, 20, len_in))
x2 = np.mat(np.linspace(0, 45, len_in))
x = np.hstack((x1.T, x2.T))  # shape (len_in, 2)
print(x.shape)
# create the dependent variable: y = x1**2 + sin(x2)
y = np.mat([0.0 for i in range(len_in)])
for i in range(len_in):
    y[0, i] = (x[i, 0]) ** 2 + np.sin(x[i, 1])
y = y.T  # shape (len_in, 1)
print(y.shape)
x = X_min_max_scaler.fit_transform(x)  # fit and apply min-max normalization to the inputs
y = Y_min_max_scaler.fit_transform(y)  # fit and apply min-max normalization to the target
model = Sequential()  # build the network
model.add(Dense(100, activation='tanh', input_dim=2))  # two input features per row; len_in rows of data
model.add(Dense(100, activation='tanh'))
model.add(Dropout(0.2))  # randomly drop units to reduce overfitting
model.add(Dense(100, activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='linear'))
model.compile(optimizer="adam", loss="mse")  # Adam optimizer, mean-squared-error loss
model.summary()
model.fit(x, y, batch_size=100, epochs=1000, verbose=1)  # train on the full (normalized) data
plt.plot(Y_min_max_scaler.inverse_transform(y), 'b--')  # true values, back on the original scale
plt.plot(Y_min_max_scaler.inverse_transform(model.predict(x)), 'r')  # predictions must be inverse-normalized
plt.show()
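
As noted at the top, the listing trains and evaluates on the same data. Below is a minimal sketch of how the final fit-and-plot section could be rewritten with a held-out prediction set, assuming scikit-learn's train_test_split is available; the 80/20 split, random_state, and the names x_train, x_test, y_true, y_pred are illustrative choices and not part of the original code.

from sklearn.model_selection import train_test_split

# Split the already normalized data; strictly, the scalers should be fit on the
# training portion only, which is omitted here for brevity.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
model.fit(x_train, y_train, batch_size=100, epochs=1000, verbose=1)
# Inverse-normalize before comparing; the split shuffles rows, so the curves
# compare true/predicted pairs by index rather than along the original curve.
y_true = Y_min_max_scaler.inverse_transform(y_test)
y_pred = Y_min_max_scaler.inverse_transform(model.predict(x_test))
plt.plot(y_true, 'b--')
plt.plot(y_pred, 'r')
plt.show()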