tensorflow框架介绍
数据流图
图与TensorBoard
图包含了一组tf.Operation代表的计算单元对象和tf.Tensor代表的计算单元之间流动的数据
默认图
通常tensorflow会默认帮我们创建一张图 查看默认图的两种方法:
- 通过调用**tf.get_default_graph()**访问,要将操作添加到默认图形中,直接创建OP即可
- op、sess都含有graph属性,默认都在一张图中
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def graph_demo():
    """
    Demonstrate the default graph: ops, tensors and sessions all share it.
    :return: None
    """
    # Ops created without an explicit graph land on the default graph.
    a_t, b_t = tf.constant(2), tf.constant(3)
    c_t = a_t + b_t
    print("c_t:", c_t)
    default_g = tf.get_default_graph()
    print("default_g:", default_g)
    print("a_t的图属性:", a_t.graph)
    print("c_t的图属性:", c_t.graph)
    # A session is bound to a graph as well — the default one here.
    with tf.Session() as sess:
        print("c_t_value:", sess.run(c_t))
        print("sess的图属性:", sess.graph)
    return None


if __name__ == "__main__":
    graph_demo()
创建图
- 可以通过**tf.Graph()**自定义创建图
- 如果要在这张图中创建OP,典型用法是使用**tf.Graph.as_default()**上下文管理器
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def graph_demo():
    """
    Show how a user-created tf.Graph coexists with the default graph.
    :return: None
    """
    # --- ops registered on the default graph ---
    a_t = tf.constant(2)
    b_t = tf.constant(3)
    c_t = a_t + b_t
    print("c_t:", c_t)
    default_g = tf.get_default_graph()
    print("default_g:", default_g)
    print("a_t的图属性:", a_t.graph)
    print("c_t的图属性:", c_t.graph)
    with tf.Session() as sess:
        print("c_t_value:", sess.run(c_t))
        print("sess的图属性:", sess.graph)
    print("--------------------------------")
    # --- ops registered on a custom graph via its context manager ---
    new_g = tf.Graph()
    with new_g.as_default():
        a_new = tf.constant(20)
        b_new = tf.constant(30)
        c_new = a_new + b_new
        print("c_new:", c_new)
        print("a_new的图属性:", a_new.graph)
        print("c_new的图属性:", c_new.graph)
    # Run the custom graph by passing it to the session explicitly.
    with tf.Session(graph=new_g) as new_sess:
        print("c_new_value:", new_sess.run(c_new))
        print("new_sess的图属性:", new_sess.graph)
    return None


if __name__ == "__main__":
    graph_demo()
TensorBoard可视化
tensorflow可用于训练大规模深度神经网络所需的计算,使用该工具设计的计算往往复杂而深奥。为了更方便tensorflow程序的理解、调试与优化,tensorflow提供了TensorBoard可视化工具
TensorBoard通过读取TensorFlow的事件文件来运行,需要将数据生成一个序列化的Summary protobuf对象
tf.summary.FileWriter(path, graph=sess.graph)
tensorboard --logdir=path
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def graph_demo():
    """
    Export the default graph as a TensorBoard event file, then build and
    run a second, custom graph.
    :return: None
    """
    a_t, b_t = tf.constant(2), tf.constant(3)
    c_t = a_t + b_t
    print("c_t:", c_t)
    default_g = tf.get_default_graph()
    print("default_g:", default_g)
    print("a_t的图属性:", a_t.graph)
    print("c_t的图属性:", c_t.graph)
    with tf.Session() as sess:
        c_t_value = sess.run(c_t)
        print("c_t_value:", c_t_value)
        print("sess的图属性:", sess.graph)
        # Serialize the graph into ./summary so `tensorboard --logdir=summary`
        # can render it.
        tf.summary.FileWriter("summary", graph=sess.graph)
    print("--------------------------------")
    new_g = tf.Graph()
    with new_g.as_default():
        a_new = tf.constant(20)
        b_new = tf.constant(30)
        c_new = a_new + b_new
        print("c_new:", c_new)
        print("a_new的图属性:", a_new.graph)
        print("c_new的图属性:", c_new.graph)
    with tf.Session(graph=new_g) as new_sess:
        c_new_value = new_sess.run(c_new)
        print("c_new_value:", c_new_value)
        print("new_sess的图属性:", new_sess.graph)
    return None


if __name__ == "__main__":
    graph_demo()
OP
即操作对象
- 常见OP
一个图一个命名空间,互不干扰影响
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def graph_demo():
    """
    Demonstrate named OPs on both the default graph and a custom graph,
    and export each graph for TensorBoard (one namespace per graph).
    :return: None
    """
    # OPs created without an explicit graph go into the default graph.
    a_t = tf.constant(2, name="a_t")
    b_t = tf.constant(3, name="b_t")
    c_t = tf.add(a_t, b_t, name="c_t")
    print("c_t:", c_t)
    default_g = tf.get_default_graph()
    print("default_g:", default_g)
    print("a_t的图属性:", a_t.graph)
    print("c_t的图属性:", c_t.graph)
    with tf.Session() as sess:
        c_t_value = sess.run(c_t)
        print("c_t_value:", c_t_value)
        print("sess的图属性:", sess.graph)
        # Export the default graph into ./logs for TensorBoard.
        tf.summary.FileWriter("logs", graph=sess.graph)
    print("--------------------------------")
    new_g = tf.Graph()
    with new_g.as_default():
        a_new = tf.constant(20, name="a_new")
        b_new = tf.constant(30, name="b_new")
        c_new = tf.add(a_new, b_new, name="c_new")
        print("c_new:", c_new)
        print("a_new的图属性:", a_new.graph)
        print("c_new的图属性:", c_new.graph)
        # BUG FIX: the original passed graph=sess.graph here, which exported
        # the *default* graph a second time (and reached for the already
        # closed first session). Export the custom graph instead.
        tf.summary.FileWriter("log2", graph=new_g)
    with tf.Session(graph=new_g) as new_sess:
        c_new_value = new_sess.run(c_new)
        print("c_new_value:", c_new_value)
        print("new_sess的图属性:", new_sess.graph)
    return None


if __name__ == "__main__":
    graph_demo()
会话
上下文管理器
with tf.Session() as sess:
sess.run(sth)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
run()
通过使用sess.run(op)来运行operation
run(fetches, feed_dict=None, options=None, run_metadata=None)
- fetches:单一的operation,或者列表、元组(其他不属于tensorflow的类型不行)
- feed_dict:参数运行调用者覆盖图中张量的值,运行时赋值,与tf.placeholder搭配使用,则会检查值的形式是否与占位符兼容
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a + b
sess = tf.Session()
print(sess.run(c))
print(c.eval(session=sess))
feed操作 placeholder提供占位符,run时候通过feed_dict指定参数
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def session_demo():
    """
    Session demo: log where each op is placed (device placement info).
    :return: None
    """
    # Constant OPs on the default graph.
    a_t = tf.constant(2, name="a_t")
    b_t = tf.constant(3, name="b_t")
    c_t = tf.add(a_t, b_t, name="c_t")
    print("a_t", a_t)
    print("b_t", b_t)
    print("c_t:", c_t)
    print("------------------------------")
    # Placeholders: values are supplied at run time through feed_dict.
    a_ph = tf.placeholder(tf.float32)
    b_ph = tf.placeholder(tf.float32)
    c_ph = tf.add(a_ph, b_ph)
    for label, ph in (("a_ph:", a_ph), ("b_ph:", b_ph), ("c_ph:", c_ph)):
        print(label, ph)
    print("------------------------------")
    default_g = tf.get_default_graph()
    print("default_g:", default_g)
    print("a_t的图属性:", a_t.graph)
    print("c_t的图属性:", c_t.graph)
    print("-------------------------------")
    # allow_soft_placement: fall back to an available device;
    # log_device_placement: print op-to-device assignments.
    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=True)
    with tf.Session(config=sess_config) as sess:
        c_ph_value = sess.run(c_ph, feed_dict={a_ph: 3.9, b_ph: 4.8})
        print('c_ph_value', c_ph_value)
        print("------------------------------")
        # fetches may be a list/tuple of tensors.
        abc = sess.run([a_t, b_t, c_t])
        print("abc:", abc)
        print("c_t_value;", c_t.eval())
        print("sess的图属性:", sess.graph)


if __name__ == "__main__":
    session_demo()
张量
张量类型
张量的阶
张量指令
类型改变
形状改变
什么情况下可以改变静态形状:只有在形状还没有完全固定下来的情况下;转换形状的时候,只能一维到一维,二维到二维,而不能跨维度改变形状 形状还没有完全固定下来的情况:使用placeholder占位
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def tensor_demo():
    """
    Tensor demo: creation, dtype casting, and static vs. dynamic shapes.
    :return: None
    """
    # Tensors of rank 0, 1 and 2.
    tensor1 = tf.constant(4.0)
    tensor2 = tf.constant([1, 2, 3, 4])
    linear_squares = tf.constant([[4], [9], [16], [25]], dtype=tf.int32)
    for label, t in (("tensor1:", tensor1),
                     ("tensor2:", tensor2),
                     ("linear_square:", linear_squares)):
        print(label, t)
    print("----------------")
    # tf.cast returns a new tensor; the source tensor is left untouched.
    l_cast = tf.cast(linear_squares, dtype=tf.float32)
    print("linear_square_after:", linear_squares)
    print('l_cast:', l_cast)
    print('------------------')
    # Placeholders with progressively more constrained static shapes.
    a_p = tf.placeholder(dtype=tf.float32, shape=[None, None])
    b_p = tf.placeholder(dtype=tf.float32, shape=[None, 10])
    c_p = tf.placeholder(dtype=tf.float32, shape=[3, 2])
    print("a_p:", a_p)
    print("b_p:", b_p)
    print("c_p:", c_p)
    print("-----------------------")
    # set_shape works in place and may only fill in unknown dimensions.
    a_p.set_shape([2, 3])
    b_p.set_shape([2, 10])
    print("a_p:", a_p)
    print("b_p:", b_p)
    print('-------------')
    # tf.reshape builds a new tensor and may change the rank.
    a_p_reshape = tf.reshape(a_p, shape=[2, 3, 1])
    print("a_p:", a_p)
    print("a_p_reshape:", a_p_reshape)
    c_p_reshape = tf.reshape(c_p, shape=[2, 3, 1])
    print("c_p:", c_p)
    print("c_p_reshape:", c_p_reshape)
    return None


if __name__ == "__main__":
    tensor_demo()
张量的数学运算
https://tensorflow.google.cn/versions/r2.0/api_docs/python/tf
变量OP
tf.Variable(initial_value=None, trainable=True, collections=None, name=None)
变量需要显式初始化,才能运行值
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def variable_demo():
    """
    Variable demo: variables must be explicitly initialized before reading.
    :return: None
    """
    a = tf.Variable(initial_value=50)
    b = tf.Variable(initial_value=40)
    c = tf.add(a, b)
    for label, v in (("a:", a), ("b", b), ("c", c)):
        print(label, v)
    print('----------------------')
    # One op that initializes every variable in the graph.
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        a_value, b_value, c_value = sess.run([a, b, c])
        print("a_value:", a_value)
        print("b_value", b_value)
        print("c_value", c_value)
    return None


if __name__ == "__main__":
    variable_demo()
修改变量命名空间(tf.variable_scope())
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def variable_demo():
    """
    Variable demo: tf.variable_scope prefixes variable and op names,
    giving each scope its own namespace.
    :return: None
    """
    with tf.variable_scope("my_scope"):
        a = tf.Variable(initial_value=50)
        b = tf.Variable(initial_value=40)
    with tf.variable_scope("your_scope"):
        c = tf.add(a, b)
    print("a:", a)
    print("b", b)
    print("c", c)
    print('----------------------')
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        values = sess.run([a, b, c])
        print("a_value:", values[0])
        print("b_value", values[1])
        print("c_value", values[2])
    return None


if __name__ == "__main__":
    variable_demo()
高级api
数据读取、神经网络(tensorflow1.x)
文件读取流程
- 构造文件名队列
- 读取与解码
读取文件内容 内容解码 - 批处理
- 线程操作
图片数据
import tensorflow as tf
import os
def picture_read(file_list):
    """
    Dog-picture reading demo using the TF 1.x queue-based input pipeline.

    :param file_list: list of image file paths to enqueue
    :return: None
    """
    # 1. Build the filename queue.
    file_queue = tf.train.string_input_producer(file_list)
    # 2. Read whole files: each read yields a (filename, raw bytes) pair.
    reader = tf.WholeFileReader()
    key, value = reader.read(file_queue)
    print("key:", key)
    print("value:", value)
    # 3. Decode the JPEG bytes into an image tensor (shape still dynamic).
    image = tf.image.decode_jpeg(value)
    print("image;", image)
    # 4. Resize so every sample has the same spatial size.
    image_resize = tf.image.resize_images(image, [200, 200])
    print("image_resize:", image_resize)
    # Batching requires a fully-defined static shape; pin the channel dim
    # (assumes 3-channel input — TODO confirm all files are RGB JPEGs).
    image_resize.set_shape(shape=[200, 200, 3])
    # 5. Group samples into batches of 100.
    image_batch = tf.train.batch([image_resize], batch_size=100, num_threads=1, capacity=100)
    print("image_batch:", image_batch)
    with tf.Session() as sess:
        # Start the queue-runner threads that feed the pipeline.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        key_new, value_new, image_new, image_resize_new, image_batch_new = \
            sess.run([key, value, image, image_resize, image_batch])
        print("key_new:", key_new)
        print("value_new:", value_new)
        print("image_new:", image_new)
        print("image_resize_new:", image_resize_new)
        print("image_batch_new:", image_batch_new)
        # Stop the reader threads and wait for them to finish.
        coord.request_stop()
        coord.join(threads)
    return None


if __name__ == "__main__":
    filename = os.listdir("./dog")
    file_list = [os.path.join("dog", file) for file in filename]
    picture_read(file_list)
二进制文件读取
CIFAR10二进制数据集
CIFAR10二进制数据读取
class Cifar():
    """Read the CIFAR-10 binary dataset and round-trip it through TFRecords."""

    def __init__(self):
        # Image geometry of one CIFAR-10 sample.
        self.height = 32
        self.width = 32
        self.channel = 3
        # Bytes per image, per label, and per full record in the .bin files.
        self.image = self.height * self.width * self.channel
        self.label = 1
        self.sample = self.image + self.label

    def read_binary(self):
        """
        Read fixed-length records from the CIFAR-10 .bin files.
        :return: (image_value, label_value) — one decoded batch
        """
        filename_list = os.listdir("./cifar-10-batches-bin")
        # Keep only the *.bin data files in the dataset directory.
        file_list = [os.path.join("./cifar-10-batches-bin/", i) for i in filename_list if i[-3:]=="bin"]
        file_queue = tf.train.string_input_producer(file_list)
        # Each record is exactly label byte + image bytes long.
        reader = tf.FixedLengthRecordReader(self.sample)
        key, value = reader.read(file_queue)
        # Raw record bytes -> flat uint8 vector.
        image_decoded = tf.decode_raw(value, tf.uint8)
        print("image_decoded:\n", image_decoded)
        # First byte is the label; the remaining bytes are the image.
        label = tf.slice(image_decoded, [0], [self.label])
        image = tf.slice(image_decoded, [self.label], [self.image])
        print("label:\n", label)
        print("image:\n", image)
        # CIFAR-10 stores pixels channel-major: (C, H, W).
        image_reshaped = tf.reshape(image, [self.channel, self.height, self.width])
        print("image_reshaped:\n", image_reshaped)
        # Convert to the conventional (H, W, C) layout.
        image_transposed = tf.transpose(image_reshaped, [1, 2, 0])
        print("image_transposed:\n", image_transposed)
        image_batch, label_batch = tf.train.batch([image_transposed, label], batch_size=100, num_threads=2, capacity=100)
        with tf.Session() as sess:
            # Queue runners feed the filename/record queues in background threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            label_value, image_value = sess.run([label_batch, image_batch])
            print("label_value:\n", label_value)
            print("image:\n", image_value)
            coord.request_stop()
            coord.join(threads)
        return image_value, label_value

    def write_to_tfrecords(self, image_batch, label_batch):
        """
        Write the sample features and labels into a TFRecords file.
        :param image_batch: decoded image batch (as returned by read_binary)
        :param label_batch: matching label batch
        :return: None
        """
        with tf.python_io.TFRecordWriter("cifar10.tfrecords") as writer:
            # NOTE(review): assumes exactly 100 samples per batch, matching
            # batch_size in read_binary — confirm before reusing elsewhere.
            for i in range(100):
                image = image_batch[i].tostring()
                label = label_batch[i][0]
                # One tf.train.Example per sample: raw image bytes + int64 label.
                example = tf.train.Example(features=tf.train.Features(feature={
                    "image": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                    "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                }))
                writer.write(example.SerializeToString())
        return None

    def read_tfrecords(self):
        """
        Read samples back from the TFRecords file written above.
        :return: None
        """
        file_queue = tf.train.string_input_producer(["cifar10.tfrecords"])
        reader = tf.TFRecordReader()
        key, value = reader.read(file_queue)
        # Parse one serialized Example into its feature tensors.
        feature = tf.parse_single_example(value, features={
            "image": tf.FixedLenFeature([], tf.string),
            "label": tf.FixedLenFeature([], tf.int64)
        })
        image = feature["image"]
        label = feature["label"]
        print("read_tf_image:\n", image)
        print("read_tf_label:\n", label)
        # The image was stored as raw bytes; decode and restore its shape.
        image_decoded = tf.decode_raw(image, tf.uint8)
        print("image_decoded:\n", image_decoded)
        image_reshaped = tf.reshape(image_decoded, [self.height, self.width, self.channel])
        print("image_reshaped:\n", image_reshaped)
        image_batch, label_batch = tf.train.batch([image_reshaped, label], batch_size=100, num_threads=2, capacity=100)
        print("image_batch:\n", image_batch)
        print("label_batch:\n", label_batch)
        with tf.Session() as sess:
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            image_value, label_value = sess.run([image_batch, label_batch])
            print("image_value:\n", image_value)
            print("label_value:\n", label_value)
            coord.request_stop()
            coord.join(threads)
        return None
if __name__ == "__main__":
    cifar = Cifar()
    # NOTE(review): this reads cifar10.tfrecords directly; that file only
    # exists after read_binary() + write_to_tfrecords() have been run once.
    cifar.read_tfrecords()
TFRecords
结构
|