TensorFlow训练网络有两种方式:一种是基于迭代器,另一种是基于tensor(array)。两种方式的区别是:第一种先将数据分批形成一个迭代器,然后遍历这个迭代器,分别训练每个批次的数据;第二种是加载全部数据形成一个tensor,然后调用model.fit()并指定参数batch_size,将所有数据分批训练。
方式一:通过迭代器
# Method 1: manual epoch loop over a tf.data iterator, calling model.fit()
# once per batch and aggregating the metrics by hand.
IMAGE_SIZE = 1000  # number of MNIST samples kept for this demo

# Load MNIST and scale pixel values to [0, 1].
(train_images, train_labels), (val_images, val_labels) = tf.keras.datasets.mnist.load_data()
train_images, val_images = train_images / 255.0, val_images / 255.0

# Keep only the first IMAGE_SIZE samples to keep the demo fast.
train_images = train_images[:IMAGE_SIZE]
val_images = val_images[:IMAGE_SIZE]
train_labels = train_labels[:IMAGE_SIZE]
val_labels = val_labels[:IMAGE_SIZE]

# Add a channel axis (28, 28) -> (28, 28, 1) and resize to the
# 32x32 input that LeNet-5 expects.
train_images = tf.expand_dims(train_images, axis=3)
val_images = tf.expand_dims(val_images, axis=3)
train_images = tf.image.resize(train_images, [32, 32])
val_images = tf.image.resize(val_images, [32, 32])

# Batch the training set; the validation set is one full-size batch.
train_loader = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).batch(32)
val_loader = tf.data.Dataset.from_tensor_slices((val_images, val_labels)).batch(IMAGE_SIZE)

model = LeNet5()
model.build(input_shape=(1, 32, 32, 1))
model.call(Input(shape=(32, 32, 1)))
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Checkpoint only the best weights (lowest validation loss seen by fit()).
checkpoint_path = "./weight/cp.ckpt"
save_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                   save_best_only=True,
                                                   save_weights_only=True,
                                                   monitor='val_loss',
                                                   verbose=0)

EPOCHS = 10
for epoch in range(1, EPOCHS + 1):
    train_epoch_loss_avg = tf.keras.metrics.Mean()
    train_epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
    val_epoch_loss_avg = tf.keras.metrics.Mean()
    val_epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
    for x, y in train_loader:
        # One optimisation pass over this batch; fit() also runs the
        # validation pass, which feeds the checkpoint callback.
        history = model.fit(x,
                            y,
                            validation_data=val_loader,
                            callbacks=[save_callback],
                            verbose=0)
        train_epoch_loss_avg.update_state(history.history['loss'][0])
        # Evaluate in inference mode (training=False) so dropout/batch-norm
        # layers — if the LeNet5 implementation has any — do not distort
        # the reported accuracy. The original passed training=True here.
        train_epoch_accuracy.update_state(y, model(x, training=False))
        val_epoch_loss_avg.update_state(history.history['val_loss'][0])
    # Compute validation accuracy once per epoch instead of rebuilding
    # iter(val_loader) twice on every training batch as the original did.
    for val_x, val_y in val_loader:
        val_epoch_accuracy.update_state(val_y, model(val_x, training=False))
    print("Epoch {:d}: trainLoss: {:.3f}, trainAccuracy: {:.3%} valLoss: {:.3f}, valAccuracy: {:.3%}".format(epoch,
                                                                                                             train_epoch_loss_avg.result(),
                                                                                                             train_epoch_accuracy.result(),
                                                                                                             val_epoch_loss_avg.result(),
                                                                                                             val_epoch_accuracy.result()))
方式二:使用model.fit()进行分批训练
# Method 2: let model.fit() handle the batching and epoch loop itself.
import model_sequential

# Load MNIST and scale pixel values to [0, 1].
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0

# Add a channel axis (28, 28) -> (28, 28, 1) and resize to the
# 32x32 input that LeNet expects.
train_images = tf.expand_dims(train_images, axis=3)
test_images = tf.expand_dims(test_images, axis=3)
train_images = tf.image.resize(train_images, [32, 32])
test_images = tf.image.resize(test_images, [32, 32])

# NOTE: the original stored the model in a variable named `history` and
# later overwrote it with the fit() result; use a distinct name so the
# model and the training history are not conflated.
model = model_sequential.LeNet()
model.build(input_shape=(1, 32, 32, 1))
model.call(Input(shape=(32, 32, 1)))
model.summary()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Checkpoint only the best weights (lowest validation loss).
checkpoint_path = "./weight/cp.ckpt"
save_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                   save_best_only=True,
                                                   save_weights_only=True,
                                                   monitor='val_loss',
                                                   verbose=1)

# fit() splits the data into batches of 32 and trains for 10 epochs,
# validating against the test set after every epoch.
history = model.fit(train_images,
                    train_labels,
                    epochs=10,
                    batch_size=32,
                    validation_data=(test_images, test_labels),
                    callbacks=[save_callback])