
tf.keras autoencoder implementation (with a GPU test)


Reference:
https://github.com/AFAgarap/autoencoders/blob/master/notebooks/0-autoencoder.ipynb

Implementation 1

"""Implementation of vanila autoencoder in TensorFlow 2.0 Subclassing API"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

__version__ = "1.0.0"
__author__ = "Abien Fred Agarap"

import tensorflow as tf


class Encoder(tf.keras.layers.Layer):
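    # Two Dense layers: features -> intermediate_dim (ReLU) -> code_dim (sigmoid).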
    def __init__(self, intermediate_dim=128, code_dim=64):
        super(Encoder, self).__init__()
        self.hidden_layer = tf.keras.layers.Dense(
            units=intermediate_dim, activation=tf.nn.relu
        )
        self.output_layer = tf.keras.layers.Dense(
            units=code_dim, activation=tf.nn.sigmoid
        )

    def call(self, input_features):
        activation = self.hidden_layer(input_features)
        return self.output_layer(activation)


class Decoder(tf.keras.layers.Layer):
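    # Mirrors the encoder: code -> code_dim (ReLU) -> original_dim (sigmoid).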
    def __init__(self, original_dim, code_dim=64):
        super(Decoder, self).__init__()
        self.hidden_layer = tf.keras.layers.Dense(units=code_dim, activation=tf.nn.relu)
        self.output_layer = tf.keras.layers.Dense(
            units=original_dim, activation=tf.nn.sigmoid
        )

    def call(self, code):
        activation = self.hidden_layer(code)
        return self.output_layer(activation)


class Autoencoder(tf.keras.Model):
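    # Composes Encoder and Decoder; call() returns the reconstruction of the input.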
    def __init__(self, code_dim=64, intermediate_dim=128, original_dim=784):
        super(Autoencoder, self).__init__()
        self.loss = []
        self.encoder = Encoder(code_dim=code_dim, intermediate_dim=intermediate_dim)
        self.decoder = Decoder(code_dim=code_dim, original_dim=original_dim)

    def call(self, features):
        code = self.encoder(features)
        reconstructed = self.decoder(code)
        return reconstructed

## Running on a local Mac (CPU)

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

__author__ = "Richard Ricardo, Abien Fred Agarap"
__version__ = "1.0.0"

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
#from models.autoencoder import Autoencoder

tf.compat.v1.enable_eager_execution()
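# Eager execution is already the default in TF 2.x; the call above is a
# harmless compatibility no-op kept from the original notebook.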

# tf.config.experimental.set_memory_growth(
#     tf.config.experimental.list_physical_devices('GPU')[0], True
# )
SEED = 42
tf.random.set_seed(SEED)
np.random.seed(SEED)
BATCH_SIZE = 64
EPOCHS = 10
train_dataset = tfds.load("mnist", split=tfds.Split.TRAIN)
def normalize(example):
    features = example["image"]
    features = tf.reshape(features, [-1, 784])
    features = tf.cast(features, tf.float32)
    features = features / 255.
    return features, features

train_dataset = train_dataset.map(normalize)
train_dataset = train_dataset.shuffle(1024)
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=True)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
model = Autoencoder()
for batch_features, _ in train_dataset.take(1):
    model(batch_features)
    break
model.summary()

The output is as follows (model-summary screenshot omitted).
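
A quick sanity check of the input pipeline (a minimal sketch reusing train_dataset from above): because normalize reshapes each image to [-1, 784] before batching, batches come out as (64, 1, 784) rather than (64, 784); the Dense layers still work because they act on the last axis.

for batch_features, batch_targets in train_dataset.take(1):
    print(batch_features.shape)  # expected: (64, 1, 784)
    # Pixel values should lie in [0, 1] after dividing by 255.
    print(float(tf.reduce_min(batch_features)), float(tf.reduce_max(batch_features)))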

model.compile(loss=tf.losses.mean_squared_error,
              optimizer=tf.optimizers.Adam(learning_rate=1e-2))
history = model.fit(train_dataset, epochs=EPOCHS, verbose=2)

The training log is as follows (screenshot omitted).

loss = history.history["loss"]

sns.set_style("darkgrid")

plt.figure(figsize=(8, 8))
plt.plot(loss)
plt.ylabel("Mean Squared Error (MSE)")
plt.xlabel("Epoch")
plt.title("Training Loss")
plt.show()

(screenshot of the training-loss plot omitted)

test_dataset = tfds.load("mnist", split=tfds.Split.TEST, batch_size=-1)
test_dataset = tfds.as_numpy(test_dataset)
test_features = test_dataset["image"]
test_features = test_features.astype("float32") / 255.
number = 10
plt.figure(figsize=(20, 4))

for index in range(number):
    # display original
    ax = plt.subplot(2, number, index + 1)
    test_image = test_features[index]
    test_image = test_image.reshape(28, 28)
    plt.imshow(test_image)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, number, index + 1 + number)
    reconstructed = model(test_features[index].reshape(-1, 784))
    reconstructed = reconstructed.numpy().reshape(28, 28)
    plt.imshow(reconstructed)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

The reconstructions are as follows (screenshot omitted).
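
Beyond the visual comparison, a quick quantitative check (a minimal sketch reusing model and test_features from above) is the mean squared reconstruction error over the whole test set:

# Sketch: average reconstruction MSE across the 10,000 test images.
flat = test_features.reshape(-1, 784)
recon = model(flat).numpy()
print("test MSE:", np.mean(np.square(flat - recon)))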

Implementation 2

Reference: https://blog.csdn.net/flame_alone/article/details/106469112
Create the ae_images folder (the directory save_images writes to below) in advance; otherwise saving the image grids will raise an error.
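
Rather than creating the folder by hand, a small sketch that creates it from the script itself (assuming the script runs from the project root):

import os

# Create the output folder for the reconstructed-image grids if it is missing.
os.makedirs('ae_images', exist_ok=True)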

import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import Sequential, layers
from PIL import Image
from matplotlib import pyplot as plt

tf.random.set_seed(42)
np.random.seed(42)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

h_dim = 36
batchsz = 512
epochs = 50
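# tf.linspace with identical endpoints yields a constant learning rate for every
# epoch; widen the endpoints (e.g. 0.01 to 0.001) to sweep the rate across training.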
lr = tf.linspace(0.001,0.001,epochs)
buffersize = batchsz*5

(x_train, y_train), (x_val, y_val) = keras.datasets.fashion_mnist.load_data()
x_train, x_val = x_train.astype(np.float32) / 255., x_val.astype(np.float32) / 255.

train_db = tf.data.Dataset.from_tensor_slices(x_train)
train_db = train_db.shuffle(batchsz * 5).batch(batchsz)
val_db = tf.data.Dataset.from_tensor_slices(x_val)
val_db = val_db.batch(batchsz)

class AutoEncoder(keras.Model):

    def __init__(self, input_shape, hidden_list, activation=tf.nn.relu):
        super(AutoEncoder, self).__init__()

        # Copy so the caller's list is not mutated by pop()/reverse() below.
        hidden_list = list(hidden_list)

        # Encoder: hidden layers followed by a linear bottleneck ("center") layer.
        center = hidden_list.pop()
        self.encoder = Sequential(
            [layers.Dense(num, activation=activation) for num in hidden_list]
            + [layers.Dense(center)]
        )

        # Decoder: mirror of the encoder, ending in a linear layer of input_shape units.
        hidden_list.reverse()
        self.decoder = Sequential(
            [layers.Dense(num, activation=activation) for num in hidden_list]
            + [layers.Dense(input_shape)]
        )


    def call(self, inputs, training=None):
        # [b, input_shape] => [b, center]
        h = self.encoder(inputs)
        # [b, center] => [b, input_shape]
        x_hat = self.decoder(h)
        return x_hat

def train(train_db,val_db,input_shape=784):
    model = AutoEncoder(input_shape,[392,196,98,36])
    model.build(input_shape=(None, input_shape))
    model.summary()

    train_list = []
    val_list = []
    for epoch in range(epochs):
        # A fresh optimizer per epoch applies that epoch's learning rate
        # (note that this also resets Adam's moment estimates every epoch).
        optimizer = tf.optimizers.Adam(learning_rate=lr[epoch])
        train_losses = 0
        val_losses = 0
        for step, x in enumerate(train_db):
            x = tf.reshape(x, [-1, input_shape])
            with tf.GradientTape() as tape:
                x_rec_logits = tf.sigmoid(model(x))
                rec_loss = tf.losses.mean_squared_error(x, x_rec_logits)
                rec_loss = tf.reduce_mean(rec_loss)
            train_losses += rec_loss
            grads = tape.gradient(rec_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

        for x in val_db:
            x = tf.reshape(x, [-1, input_shape])
            x_rec_logits = tf.sigmoid(model(x))
            rec_loss = tf.losses.mean_squared_error(x, x_rec_logits)
            rec_loss = tf.reduce_mean(rec_loss)
            val_losses += rec_loss

        print(epoch, "train_loss:", float(train_losses), "val_loss:", float(val_losses))

        train_list.append(train_losses)
        val_list.append(val_losses)

    model.save('/tmp/model')
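    # The path has no .h5 suffix, so this writes the TF SavedModel format;
    # showImage() reloads it below with compile=False since only inference is needed.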

    x = [i for i in range(0, epochs)]
    # Plot the training and validation loss curves.
    plt.figure()
    plt.plot(x, train_list, color='blue', label='training')
    plt.plot(x, val_list, color='red', label='validation')
    plt.xlabel('Epoch')
    plt.ylabel('MSE loss (summed over batches)')
    plt.legend()
    plt.show()
    plt.close()

def save_images(imgs, name, shape=(32, 32)):
    # Tile shape[0] x shape[1] grayscale 28x28 images into a single grid image.
    new_im = Image.new('L', (28 * shape[0], 28 * shape[1]))

    index = 0
    for i in range(0, 28 * shape[0], 28):
        for j in range(0, 28 * shape[1], 28):
            im = imgs[index]
            im = Image.fromarray(im, mode='L')
            new_im.paste(im, (i, j))  # (i, j) is the (left, upper) corner of the tile
            index += 1

    new_im.save(name)


def showImage(dataset, input_shape=784):
    model = tf.keras.models.load_model('/tmp/model', compile=False)

    for step, x in enumerate(dataset):
        x_hat = tf.sigmoid(model(tf.reshape(x, [-1, input_shape])))
        x_hat = tf.reshape(x_hat, [-1, 28, 28])
        # Stack originals on top of reconstructions: [2 * batch, 28, 28].
        x_concat = tf.concat([x, x_hat], axis=0)
        if x_concat.shape[0] < batchsz * 2:
            break  # skip the final, smaller batch so the grid stays full
        x_concat = x_concat.numpy() * 255.
        x_concat = x_concat.astype(np.uint8)
        shape = (int(tf.sqrt(batchsz * 2.)), int(tf.sqrt(batchsz * 2.)))
        save_images(x_concat, 'ae_images/rec_epoch_%d.png' % step, shape)

train(train_db, val_db)
showImage(val_db)

(screenshots of the loss curves and the reconstruction grids omitted)

Testing TensorFlow on the GPU

"""Implementation of vanila autoencoder in TensorFlow 2.0 Subclassing API"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

__version__ = "1.0.0"
__author__ = "Abien Fred Agarap"

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # must be set, otherwise: ValueError: Memory growth cannot differ between GPU devices
import tensorflow as tf


class Encoder(tf.keras.layers.Layer):
    def __init__(self, intermediate_dim=128, code_dim=64):
        super(Encoder, self).__init__()
        self.hidden_layer = tf.keras.layers.Dense(
            units=intermediate_dim, activation=tf.nn.relu
        )
        self.output_layer = tf.keras.layers.Dense(
            units=code_dim, activation=tf.nn.sigmoid
        )

    def call(self, input_features):
        activation = self.hidden_layer(input_features)
        return self.output_layer(activation)


class Decoder(tf.keras.layers.Layer):
    def __init__(self, original_dim, code_dim=64):
        super(Decoder, self).__init__()
        self.hidden_layer = tf.keras.layers.Dense(units=code_dim, activation=tf.nn.relu)
        self.output_layer = tf.keras.layers.Dense(
            units=original_dim, activation=tf.nn.sigmoid
        )

    def call(self, code):
        activation = self.hidden_layer(code)
        return self.output_layer(activation)


class Autoencoder(tf.keras.Model):
    def __init__(self, code_dim=64, intermediate_dim=128, original_dim=784):
        super(Autoencoder, self).__init__()
        self.loss = []
        self.encoder = Encoder(code_dim=code_dim, intermediate_dim=intermediate_dim)
        self.decoder = Decoder(code_dim=code_dim, original_dim=original_dim)

    def call(self, features):
        code = self.encoder(features)
        reconstructed = self.decoder(code)
        return reconstructed
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

__author__ = "Richard Ricardo, Abien Fred Agarap"
__version__ = "1.0.0"

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
#from models.autoencoder import Autoencoder

tf.compat.v1.enable_eager_execution()

tf.config.experimental.set_memory_growth(
    tf.config.experimental.list_physical_devices('GPU')[0], True
)
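# Note (assumption): CUDA_VISIBLE_DEVICES='0' above leaves a single visible GPU,
# which is what avoids "ValueError: Memory growth cannot differ between GPU devices".
# An equivalent sketch that enables growth on every visible GPU instead:
#     for gpu in tf.config.experimental.list_physical_devices('GPU'):
#         tf.config.experimental.set_memory_growth(gpu, True)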
SEED = 42
tf.random.set_seed(SEED)
np.random.seed(SEED)
BATCH_SIZE = 64
EPOCHS = 10
train_dataset = tfds.load("mnist", split=tfds.Split.TRAIN)
def normalize(example):
    features = example["image"]
    features = tf.reshape(features, [-1, 784])
    features = tf.cast(features, tf.float32)
    features = features / 255.
    return features, features
train_dataset = train_dataset.map(normalize)
train_dataset = train_dataset.shuffle(1024)
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=True)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
model = Autoencoder()
for batch_features, _ in train_dataset.take(1):
    model(batch_features)
    break
model.summary()

The output is as follows (model-summary screenshot omitted).

model.compile(loss=tf.losses.mean_squared_error,
              optimizer=tf.optimizers.Adam(learning_rate=1e-2))
history = model.fit(train_dataset, epochs=EPOCHS, verbose=2)

The results are as follows (screenshot omitted). I do not know why the GPU run is slower than the local CPU run, but I can at least confirm that it is running on the GPU, because I can observe it from the terminal (screenshot omitted).
GPU[0] there is my program; before launching it I had checked that GPU 0 was the device the program would use. I also found that merely closing the Jupyter page does not actually end the process on the GPU; the real way to close a notebook is shown below (screenshot omitted).
Only after clicking Shutdown is a Jupyter notebook truly closed; this is worth keeping in mind.
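
To confirm device placement programmatically rather than by watching the terminal, here is a minimal sketch using standard TF 2.x APIs (run it near the top of the script, before building the model):

# Sketch: a non-empty list confirms TensorFlow can see the GPU.
print(tf.config.experimental.list_physical_devices('GPU'))
# Optional and very verbose: log the device every op is placed on.
tf.debugging.set_log_device_placement(True)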

loss = history.history["loss"]

sns.set_style("darkgrid")

plt.figure(figsize=(8, 8))
plt.plot(loss)
plt.ylabel("Mean Squared Error (MSE)")
plt.xlabel("Epoch")
plt.title("Training Loss")
plt.show()

(screenshot of the training-loss plot omitted)

test_dataset = tfds.load("mnist", split=tfds.Split.TEST, batch_size=-1)
test_dataset = tfds.as_numpy(test_dataset)
test_features = test_dataset["image"]
test_features = test_features.astype("float32") / 255.
number = 10
plt.figure(figsize=(20, 4))

for index in range(number):
    # display original
    ax = plt.subplot(2, number, index + 1)
    test_image = test_features[index]
    test_image = test_image.reshape(28, 28)
    plt.imshow(test_image)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, number, index + 1 + number)
    reconstructed = model(test_features[index].reshape(-1, 784))
    reconstructed = reconstructed.numpy().reshape(28, 28)
    plt.imshow(reconstructed)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

The reconstructions are as follows (screenshot omitted).
