7.1 Tensors
import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np
a = tf.constant([[1., 2., 3.], [4., 5., 6.]])  # a 2x3 matrix
print(a)
b = tf.constant(42)  # a scalar constant tensor
print(b.dtype)  # inspect its dtype
print(a[:, 1:])  # slicing works like NumPy
print(a + 10)  # +10 is broadcast over every element
print(tf.square(a))  # element-wise square
print(a @ tf.transpose(a))  # the @ operator (added in Python 3.5) does matrix multiplication, equivalent to tf.matmul(); tf.transpose() returns a new transposed tensor rather than a view
print(K.square(K.transpose(a)) + 10)  # the same operations through the low-level Keras backend API
# TensorFlow tensors and NumPy arrays interoperate
a = np.array([2., 4., 5.])
print(tf.constant(a))  # a NumPy array converts to a tensor (note: it keeps float64)
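'''
The reverse conversion also works: a tensor exposes .numpy(), and NumPy
operations accept tensors directly. A minimal sketch:
'''
t = tf.constant(a)
print(t.numpy())     # back to a NumPy array
print(np.square(t))  # NumPy ops apply to tensors and return arrays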
'''
Tensors of different dtypes cannot be combined automatically; the line below
raises an InvalidArgumentError. Convert explicitly with tf.cast():
print(tf.constant(2., dtype='float32') + tf.constant(1))
'''
print(tf.constant(2., dtype='float32') + tf.cast(tf.constant(1), tf.float32))
# Variable tensors
v = tf.Variable([[1., 2., 3.], [4., 5., 6.]])  # a tf.Variable can be modified in place
v.assign(2 * v)              # multiply every element by 2
v[0, 1].assign(42)           # assign to a single cell
v[:, 2].assign([0., 1.])     # assign to a slice (the whole third column)
print(v)
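'''
Plain item assignment (v[0, 1] = 42) is not supported on a tf.Variable; besides
the assign() and slice-assign calls above, individual cells can be updated by
index with scatter_nd_update(). A minimal sketch (indices/values illustrative):
'''
v.scatter_nd_update(indices=[[0, 0], [1, 2]], updates=[100., 200.])
print(v)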
7.2 Custom Components
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
import numpy as np
'''
Reinventing the wheel: in practice you would simply use an instance of the
built-in keras.losses.Huber class; the hand-written version below shows how it works.
'''
def huber_fn(y_true, y_pred):
    error = y_true - y_pred
    is_small_error = tf.abs(error) < 1
    squared_loss = tf.square(error) / 2
    linear_loss = tf.abs(error) - 0.5
    # tf.where(cond, x, y) selects element-wise: squared_loss where the error
    # is small, linear_loss otherwise
    return tf.where(is_small_error, squared_loss, linear_loss)
# To use it: model.compile(loss=huber_fn, optimizer="nadam")
# When loading a model saved with this custom loss, map its name in custom_objects:
model = keras.models.load_model("my_model_with_a_custom_loss.h5",
                                custom_objects={"huber_fn": huber_fn})
'''
What if you want a different threshold? One solution is a factory function
that builds and returns a configured loss function:
'''
def create_huber(threshold=1.0):
    def huber_fn(y_true, y_pred):
        error = y_true - y_pred
        is_small_error = tf.abs(error) < threshold
        squared_loss = tf.square(error) / 2
        linear_loss = threshold * tf.abs(error) - threshold**2 / 2
        return tf.where(is_small_error, squared_loss, linear_loss)
    return huber_fn
model.compile(loss=create_huber(2.0), optimizer="nadam")
model = keras.models.load_model("my_model_with_a_custom_loss_threshold_2.h5",
                                custom_objects={"huber_fn": create_huber(2.0)})
'''
A cleaner solution is a loss class whose threshold is configured at construction
time and saved together with the model (via get_config):
'''
class HuberLoss(keras.losses.Loss):
    def __init__(self, threshold=1.0, **kwargs):
        self.threshold = threshold
        super().__init__(**kwargs)  # handles base args (e.g., reduction, name)
    def call(self, y_true, y_pred):
        error = y_true - y_pred
        is_small_error = tf.abs(error) < self.threshold
        squared_loss = tf.square(error) / 2
        linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2
        return tf.where(is_small_error, squared_loss, linear_loss)
    def get_config(self):
        # saved with the model so the threshold is restored on load
        base_config = super().get_config()
        return {**base_config, "threshold": self.threshold}
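'''
A usage sketch (the file name is illustrative): compile with an instance of
HuberLoss and map the class name in custom_objects when loading; Keras calls
get_config() on save, so the threshold is restored automatically.
'''
model.compile(loss=HuberLoss(2.), optimizer="nadam")
# ... after training and model.save("my_model_with_a_custom_loss_class.h5"):
model = keras.models.load_model("my_model_with_a_custom_loss_class.h5",
                                custom_objects={"HuberLoss": HuberLoss})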
'''
1. Custom activation functions, initializers, regularizers, and constraints
Each is just a function with the appropriate signature:
'''
def my_softplus(z):  # return value is just tf.nn.softplus(z)
    return tf.math.log(tf.exp(z) + 1.0)
def my_glorot_initializer(shape, dtype=tf.float32):
    stddev = tf.sqrt(2. / (shape[0] + shape[1]))
    return tf.random.normal(shape, stddev=stddev, dtype=dtype)
def my_l1_regularizer(weights):
    return tf.reduce_sum(tf.abs(0.01 * weights))
def my_positive_weights(weights):  # return value is just tf.nn.relu(weights)
    return tf.where(weights < 0., tf.zeros_like(weights), weights)
layer = keras.layers.Dense(30, activation=my_softplus,
                           kernel_initializer=my_glorot_initializer,
                           kernel_regularizer=my_l1_regularizer,
                           kernel_constraint=my_positive_weights)
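'''
If such a function has a hyperparameter that should be saved with the model,
subclass the corresponding Keras base class and implement get_config().
A minimal sketch for an L1 regularizer with a configurable factor (the class
name and attribute are illustrative):
'''
class MyL1Regularizer(keras.regularizers.Regularizer):
    def __init__(self, factor):
        self.factor = factor
    def __call__(self, weights):
        return tf.reduce_sum(tf.abs(self.factor * weights))
    def get_config(self):
        return {"factor": self.factor}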
'''
2. Custom metrics
Metrics are used to evaluate the model; a simple one can reuse a loss function:
'''
model.compile(loss="mse", optimizer="nadam", metrics=[create_huber(2.0)])
precision = keras.metrics.Precision()  # a streaming (stateful) metric
precision([0, 1, 0, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0, 0, 0])  # update with a batch of (labels, predictions)
print(precision.result())     # precision over everything seen so far
print(precision.variables)    # internal state: true positives and false positives
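'''
Because the metric is streaming, a second call keeps accumulating; reset_states()
clears the internal variables. A minimal sketch (batch values illustrative):
'''
precision([0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1])  # a second batch
print(precision.result())   # precision over both batches
precision.reset_states()    # start from scratch (named reset_state() in newer Keras versions)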
class HuberMetric(keras.metrics.Metric):
    def __init__(self, threshold=1.0, **kwargs):
        super().__init__(**kwargs)  # handles base args (e.g., dtype)
        self.threshold = threshold
        self.huber_fn = create_huber(threshold)
        self.total = self.add_weight("total", initializer="zeros")
        self.count = self.add_weight("count", initializer="zeros")
    def update_state(self, y_true, y_pred, sample_weight=None):
        metric = self.huber_fn(y_true, y_pred)
        self.total.assign_add(tf.reduce_sum(metric))
        self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
    def result(self):
        return self.total / self.count
    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "threshold": self.threshold}
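'''
A usage sketch: pass an instance via metrics=; Keras resets the metric's
variables at the start of each epoch and reports result(), i.e. the running
mean of the Huber loss:
'''
model.compile(loss=create_huber(2.0), optimizer="nadam", metrics=[HuberMetric(2.0)])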
'''
3. Custom layers
(omitted; a minimal sketch follows below)
'''
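'''
A minimal sketch of a layer with weights (a simplified Dense): build() creates
the variables once the input shape is known, call() performs the forward pass,
and get_config() makes the layer saveable. Names below are illustrative.
'''
class MyDense(keras.layers.Layer):
    def __init__(self, units, activation=None, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.activation = keras.activations.get(activation)
    def build(self, batch_input_shape):
        self.kernel = self.add_weight(
            name="kernel", shape=[batch_input_shape[-1], self.units],
            initializer="glorot_normal")
        self.bias = self.add_weight(
            name="bias", shape=[self.units], initializer="zeros")
        super().build(batch_input_shape)
    def call(self, X):
        return self.activation(X @ self.kernel + self.bias)
    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "units": self.units,
                "activation": keras.activations.serialize(self.activation)}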
'''
4. Custom models
(omitted; a minimal sketch follows below)
'''
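'''
A minimal sketch of a custom model: subclass keras.Model, create the layers in
the constructor, and wire them together in call(). Layer sizes are illustrative.
'''
class MyModel(keras.Model):
    def __init__(self, output_dim=1, **kwargs):
        super().__init__(**kwargs)
        self.hidden1 = keras.layers.Dense(30, activation="relu")
        self.hidden2 = keras.layers.Dense(30, activation="relu")
        self.out = keras.layers.Dense(output_dim)
    def call(self, inputs):
        z = self.hidden1(inputs)
        z = self.hidden2(z)
        return self.out(z)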