Loss functions for regression:
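For reference, the three losses plotted below can be written in closed form. With target t and prediction x (these are the standard definitions, matching the expressions used in the code):

\[
L_{2}(x) = (t - x)^2, \qquad
L_{1}(x) = |t - x|, \qquad
L_{\delta}(x) = \delta^2 \left( \sqrt{1 + \left( \frac{t - x}{\delta} \right)^2} - 1 \right)
\]

The Pseudo-Huber loss interpolates between the other two: near the target it behaves like (t - x)^2 / 2, far from it like δ·|t - x|, so it stays differentiable everywhere (unlike L1 at zero) while being less sensitive to outliers than L2.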
""" 回归算法的损失函数,并绘制 """ import os import matplotlib.pyplot as plt import tensorflow as tf os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' s = tf.Session() # linspace 在 [start, stop] 范围内产生 num 个数的等差数列 x_vals = tf.linspace(-1., 1., 500) target = tf.constant(0.) # L2正则损失函数 l2_y_vals = tf.square(target - x_vals) l2_y_out = s.run(l2_y_vals) # L1正则损失函数 l1_y_vals = tf.abs(target - x_vals) l1_y_out = s.run(l1_y_vals) # Pseudo-Huber 损失函数,它的表达式依赖参数 delta # 分别设置 delta1 = 0.25 和 delta2 = 5 delta1 = tf.constant(0.25) phuber1_y_vals = tf.multiply(tf.square(delta1), tf.sqrt(1. + tf.square((target - x_vals) / delta1)) - 1.) phuber1_y_out = s.run(phuber1_y_vals) delta2 = tf.constant(5.) phuber2_y_vals = tf.multiply(tf.square(delta2), tf.sqrt(1. + tf.square((target - x_vals) / delta2)) - 1.) phuber2_y_out = s.run(phuber2_y_vals) # 使用 matplotlib 绘制回归算法的损失函数 x_array = s.run(x_vals) plt.plot(x_array, l2_y_out, 'b-', label='L2 Loss') plt.plot(x_array, l1_y_out, 'r--', label='L1 Loss') plt.plot(x_array, phuber1_y_out, 'k-', label='P-Huber Loss (0.25)') plt.plot(x_array, phuber2_y_out, 'g:', label='P-Huber Loss (5.0)') plt.ylim(-0.2, 0.4) plt.legend(loc='lower right', prop={'size': 11}) plt.show()分类算法的损失函数:
Loss functions for classification:

Note: the cross-entropy loss expects the prediction to express the probability of the true label.
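Concretely, for a binary label y and a prediction x in (0, 1), the cross-entropy in the code below is

\[
L(x) = -\,y \log(x) - (1 - y) \log(1 - x),
\]

while the sigmoid version first maps the unbounded logit through \(\sigma(x) = 1 / (1 + e^{-x})\) and applies the same formula to \(\sigma(x)\).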
""" 分类算法的损失函数 """ import os import tensorflow as tf import matplotlib.pyplot as plt os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' s = tf.Session() x_vals = tf.linspace(-3., 5., 500) target = tf.constant(1.) # 创建一个形状大小为 500 的 tensor,其初始值为 1 targets = tf.fill([500], 1.) # Hinge 损失函数,主要用来评估支持向量机算法,有时也用来评估神经网络算法 hinge_y_vals = tf.maximum(0., 1. - tf.multiply(target, x_vals)) hinge_y_out = s.run(hinge_y_vals) # 交叉熵损失函数,有时也作为逻辑损失函数 xentropy_y_vals = -tf.multiply(target, tf.log(x_vals)) - tf.multiply((1. - target), tf.log(1. - x_vals)) xentropy_y_out = s.run(xentropy_y_vals) # Sigmoid 交叉熵损失函数 xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=x_vals) xentropy_sigmoid_y_out = s.run(xentropy_sigmoid_y_vals) # 加权交叉熵损失,对正目标加权 weight = tf.constant(0.5) xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(targets, x_vals, weight) xentropy_weighted_y_out = s.run(xentropy_weighted_y_vals) # Softmax 交叉熵损失函数,作用于非归一化的输出结果,只针对单个目标分类的计算损失 unscaled_logits = tf.constant([[1., -3., 10.]]) target_dist = tf.constant([[0.1, 0.02, 0.88]]) softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(labels=target_dist, logits=unscaled_logits) print(s.run(softmax_xentropy)) # 稀疏 Softmax 交叉熵损失函数, 它是把目标分类为true的转化成 index spares_target_dist = tf.constant([2]) spares_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=spares_target_dist, logits=unscaled_logits) print(s.run(spares_xentropy)) # 绘制分类算法损失函数 x_array = s.run(x_vals) plt.plot(x_array, hinge_y_out, 'b-', label='Hinge Loss') plt.plot(x_array, xentropy_y_out, 'r--', label='Cross Entropy Loss') plt.plot(x_array, xentropy_sigmoid_y_out, 'k-.', label='Cross Entropy Sigmoid Loss') plt.plot(x_array, xentropy_weighted_y_out, 'g:', label='Weighted Cross Enropy Loss(x0.5)') plt.ylim(-1.5, 3) plt.legend(loc='lower right', prop={'size': 11}) plt.show()