MNIST Handwritten Digit Recognition with a CNN
Load the dataset to obtain the training and test sets:
mnist = input_data.read_data_sets(r'D:\pythonProject1\MNIST\MNIST_data', one_hot=True)
train_X = mnist.train.images
train_Y = mnist.train.labels
test_X = mnist.test.images
test_Y = mnist.test.labels
Data preprocessing:
train_X = train_X.reshape(train_X.shape[0],28,28,1)
test_X = test_X.reshape(test_X.shape[0],28,28,1)
print(train_X.shape, type(train_X))
print(test_X.shape, type(test_X))
The data is reshaped into the form (55000, 28, 28, 1). A color image generally carries Height, Width, and Channel information, and image dimension ordering comes in two conventions, channels_first and channels_last: channels_first is (Channel, Height, Width), while channels_last is (Height, Width, Channel). The backend defaults to channels_last. MNIST images are grayscale, so the channel dimension here is 1.
train_X /= 255
test_X /= 255
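As an optional sanity check (a small sketch, assuming the tf.keras backend), the default data format and the reshaped array can be printed to confirm they match the channels_last convention described above:
import tensorflow as tf

# Confirm the backend's default image data format and the reshaped array.
print(tf.keras.backend.image_data_format())   # usually 'channels_last'
print(train_X.shape, train_X.dtype)           # (55000, 28, 28, 1) float32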
Build and compile the model:
def create_model():
    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu',
                     input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
    return model
model = create_model()
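A note on the compile settings: because the labels were loaded with one_hot=True, categorical_crossentropy is the matching loss for the 10-way softmax output; had the labels been kept as integer class indices, sparse_categorical_crossentropy would be used instead.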
Inspect the model:
model.summary()
for layer in model.layers:
    print(layer.get_output_at(0).get_shape().as_list())
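For reference, a sketch of the shapes this loop should report for the model above (the arithmetic assumes 'valid' padding, the Conv2D default); the layer.output_shape property gives the same information without going through get_output_at:
# Expected per-layer output shapes (a sketch, assuming 'valid' padding):
# Conv2D:       28 - 5 + 1 = 24       -> [None, 24, 24, 32]
# MaxPooling2D: 24 / 2 = 12           -> [None, 12, 12, 32]
# Dropout:      shape unchanged       -> [None, 12, 12, 32]
# Flatten:      12 * 12 * 32 = 4608   -> [None, 4608]
# Dense layers:                       -> [None, 128], then [None, 10]
for layer in model.layers:
    print(layer.name, layer.output_shape)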
Train the model:
history = model.fit(train_X, train_Y, epochs=10, batch_size=200, verbose=2, validation_data=(test_X, test_Y))
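With 55,000 training images and batch_size=200, each epoch runs 275 update steps; verbose=2 prints one summary line per epoch, and validation_data=(test_X, test_Y) evaluates the test set at the end of every epoch, which is what produces the val_accuracy and val_loss curves plotted below.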
Plot the training curves:
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.tight_layout()
plt.show()
Save the model:
gfile = tf.io.gfile
save_dir = "./MNIST_model/"
if gfile.exists(save_dir):
    gfile.rmtree(save_dir)
gfile.mkdir(save_dir)
model_name = 'keras_mnist.h5'
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
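Saving to the HDF5 (.h5) format stores the architecture, weights, and optimizer state in a single file, so the model reloaded below can be evaluated (or trained further) without calling create_model() or compile() again.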
Load the model and check its accuracy:
mnist_model = tf.keras.models.load_model(model_path)
loss_and_metrics = mnist_model.evaluate(test_X, test_Y, verbose=2)
print("Test Loss: {}".format(loss_and_metrics[0]))
print("Test Accuracy: {}".format(loss_and_metrics[1] * 100))
The trained model reaches 95.81% accuracy, which is not even as good as the MLP. No matter: there are still more complex convolutional neural networks to try.
Full code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.layers import Conv2D
from keras.layers import Flatten
from keras.layers import MaxPooling2D
from keras.layers import Dropout
import os
mnist = input_data.read_data_sets(r'D:\pythonProject1\MNIST\MNIST_data', one_hot=True)
train_X = mnist.train.images
train_Y = mnist.train.labels
test_X = mnist.test.images
test_Y = mnist.test.labels
print(train_X.shape, type(train_X))
print(test_X.shape, type(test_X))
train_X = train_X.reshape(train_X.shape[0],28,28,1)
test_X = test_X.reshape(test_X.shape[0],28,28,1)
print(train_X.shape, type(train_X))
print(test_X.shape, type(test_X))
train_X /= 255
test_X /= 255
def create_model():
    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu',
                     input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
    return model
model = create_model()
model.summary()
for layer in model.layers:
    print(layer.get_output_at(0).get_shape().as_list())
history = model.fit(train_X, train_Y, epochs=10, batch_size=200, verbose=2, validation_data=(test_X, test_Y))
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.tight_layout()
plt.show()
gfile = tf.io.gfile
save_dir = "./MNIST_model/"
if gfile.exists(save_dir):
    gfile.rmtree(save_dir)
gfile.mkdir(save_dir)
model_name = 'keras_mnist.h5'
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
mnist_model = tf.keras.models.load_model(model_path)
loss_and_metrics = mnist_model.evaluate(test_X, test_Y, verbose=2)
print("Test Loss: {}".format(loss_and_metrics[0]))
print("Test Accuracy: {}".format(loss_and_metrics[1] * 100))