For an introduction to Focal Loss, see Focal Loss(RetinaNet)原理与代码解析_00000cj的博客-CSDN博客.
from sklearn.preprocessing import LabelBinarizer
from math import log
import tensorflow as tf
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(0)  # use GPU 0
tf.enable_eager_execution()  # TensorFlow 1.x API: enable eager mode so tensors can be evaluated directly
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']  # all possible labels
y_true = ['1', '4', '5']  # ground-truth labels of the samples
y_pred = [[0.1, 0.6, 0.3, 0, 0, 0, 0, 0, 0, 0],
          [0, 0.3, 0.2, 0, 0.5, 0, 0, 0, 0, 0],
          [0.6, 0.3, 0, 0, 0, 0.1, 0, 0, 0, 0]]  # predicted probabilities for each sample
lb = LabelBinarizer()
lb.fit(labels)
transformed_labels = lb.transform(y_true)
# print(transformed_labels)
# [[0 1 0 0 0 0 0 0 0 0]
#  [0 0 0 0 1 0 0 0 0 0]
#  [0 0 0 0 0 1 0 0 0 0]]
""" multiclass cross-entropy loss """
N = len(y_true)  # number of samples
K = len(labels)  # number of classes
eps = 1e-15  # clipping value for the predicted probabilities
Loss = 0  # initialize the loss
for i in range(N):
    for k in range(K):
        # clip the predicted probability to [eps, 1-eps] to avoid numerical problems when taking the log
        if y_pred[i][k] < eps:
            y_pred[i][k] = eps
        if y_pred[i][k] > 1 - eps:
            y_pred[i][k] = 1 - eps
        # multiclass cross-entropy formula
        Loss -= transformed_labels[i][k] * log(y_pred[i][k])
Loss /= N  # average cross-entropy loss over the samples
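# A quick cross-check of my own (not from the referenced posts): sklearn.metrics.log_loss
# computes the same averaged multiclass cross-entropy, so it should agree with the loop above
# up to the clipping details of the installed sklearn version.
from sklearn.metrics import log_loss
print(Loss)                                  # loss from the manual loop
print(log_loss(transformed_labels, y_pred))  # should be (nearly) identical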
""" multiclass focal loss"""
epsilon = 1e-9
y_true = transformed_labels
gamma = 2
alpha = 4
# alpha = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
y_true = tf.convert_to_tensor(y_true, tf.float32)
y_pred = tf.convert_to_tensor(y_pred, tf.float32)
model_out = tf.add(y_pred, epsilon)  # add a small epsilon to avoid log(0)
ce = tf.multiply(y_true, -tf.log(model_out))  # per-class cross-entropy, non-zero only at the true class
weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma))  # modulating factor (1 - p)^gamma
fl = tf.multiply(alpha, tf.multiply(weight, ce))  # alpha * (1 - p)^gamma * CE
reduced_fl = tf.reduce_max(fl, axis=1)  # per-sample loss; max picks the single non-zero true-class entry
focal_loss = tf.reduce_mean(reduced_fl)  # average over samples
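As a small consistency check of my own (not part of the referenced posts): with gamma = 0 and alpha = 1 the modulating factor disappears, so the focal-loss pipeline should reproduce the plain cross-entropy value computed in the loop above, up to the small epsilon added to the probabilities.
print(focal_loss.numpy())  # focal loss with gamma=2, alpha=4
# With gamma = 0 the weight reduces to y_true, so this is just the cross-entropy again.
weight0 = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), 0.))
fl0 = tf.multiply(1., tf.multiply(weight0, ce))
print(tf.reduce_mean(tf.reduce_max(fl0, axis=1)).numpy())  # should be close to Loss
print(Loss)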
References
多分类问题的交叉熵计算 - SegmentFault 思否