Machine Learning Series (4): Gradient Descent (2020.6.6)

Preface

This section covers gradient descent:

  • a search-based optimization method
  • used to minimize a loss function

1. Principle of gradient descent and a simple implementation

(Figure: gradient descent on a one-dimensional convex loss curve)

As the figure illustrates, each iteration:

  • computes the gradient of the loss at the current point
  • moves one step in the negative gradient direction, scaled by the learning rate
  • repeats until the minimum is reached

The learning rate determines how quickly the minimum is reached:

  • too small and convergence is very slow
  • too large and the iterations may never converge
  • both the learning rate and the starting point need tuning (a short demo follows the implementation below)

The implementation is as follows:

import numpy as np
import matplotlib.pyplot as plt
"""模拟梯度下降法"""
# 以一个二次函数为损失函数
plot_x = np.linspace(-1., 6., 141)
plot_y = (plot_x-2.5)**2 - 1
# loss function
def J(theta):
    try:
        return (theta-2.5)**2 - 1.
    except OverflowError:  # theta can blow up if the learning rate is too large
        return float('inf')
# derivative of the loss function
def dJ(theta):
    return 2 * (theta - 2.5)
"""
# gradient descent, un-encapsulated version
eta = 0.1  # learning rate
theta = 0.0  # starting point
epsilon = 1e-8  # stopping threshold on the change in loss
theta_history = [theta]
while True:
    gradient = dJ(theta)  # gradient at the current theta
    last_theta = theta
    theta = theta - eta * gradient  # take one gradient descent step
    theta_history.append(theta)
    if (abs(J(theta) - J(last_theta)) < epsilon):
        break
plt.plot(plot_x, J(plot_x))
plt.plot(np.array(theta_history), J(np.array(theta_history)), color="r", marker='+')
plt.show()
print(theta)
print(J(theta))"""
# gradient descent wrapped in a function
theta_history = []
def gradient_descent(initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    theta = initial_theta
    theta_history.append(initial_theta)
    i_iter = 0
    while i_iter < n_iters:
        gradient = dJ(theta)
        last_theta = theta
        theta = theta - eta * gradient
        theta_history.append(theta)
        if (abs(J(theta) - J(last_theta)) < epsilon):
            break
        i_iter += 1
    return
def plot_theta_history():
    plt.plot(plot_x, J(plot_x))
    plt.plot(np.array(theta_history), J(np.array(theta_history)), color="r", marker='+')
    plt.show()
eta = 0.01
theta_history = []
gradient_descent(0, eta)
plot_theta_history()
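
To see the learning-rate effect described above, here is a small demo re-using the J, dJ, gradient_descent and plot_theta_history defined above (the specific values 0.8 and 1.1 are just illustrative choices):

# a larger learning rate: the path zig-zags across the minimum but still converges
eta = 0.8
theta_history = []
gradient_descent(0, eta)
plot_theta_history()
# a learning rate that is too large: the loss grows at every step and the iterations diverge,
# so only a handful of steps are run to keep the numbers finite
eta = 1.1
theta_history = []
gradient_descent(0, eta, n_iters=10)
plot_theta_history()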

2. Gradient descent for linear regression

With X_b denoting the sample matrix with a column of ones prepended, the loss function and its gradient are:

J(\theta) = \frac{1}{m}\sum_{i=1}^{m}\left(y^{(i)} - X_b^{(i)}\theta\right)^2

\nabla J(\theta) = \frac{2}{m}\left(\sum_{i=1}^{m}\left(X_b^{(i)}\theta - y^{(i)}\right),\ \sum_{i=1}^{m}\left(X_b^{(i)}\theta - y^{(i)}\right)X_1^{(i)},\ \ldots,\ \sum_{i=1}^{m}\left(X_b^{(i)}\theta - y^{(i)}\right)X_n^{(i)}\right)^T
The implementation is as follows:

import numpy as np
import matplotlib.pyplot as plt
"""线性回归中的梯度下降"""
# 为了可视化,搞个一维数组
np.random.seed(666) #随机种子
x = 2 * np.random.random(size=100)
y = x * 3. + 4. + np.random.normal(size=100)
X = x.reshape(-1, 1)
# loss function (MSE)
def J(theta, X_b, y):
    try:
        return np.sum((y - X_b.dot(theta))**2) / len(X_b)
    except OverflowError:
        return float('inf')
# gradient, computed component by component
def dJ(theta, X_b, y):
    res = np.empty(len(theta))
    res[0] = np.sum(X_b.dot(theta) - y)
    for i in range(1, len(theta)):
        res[i] = (X_b.dot(theta) - y).dot(X_b[:,i])
    return res * 2 / len(X_b)
# batch gradient descent
def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    theta = initial_theta
    cur_iter = 0
    while cur_iter < n_iters:
        gradient = dJ(theta, X_b, y)
        last_theta = theta
        theta = theta - eta * gradient
        if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
            break
        cur_iter += 1
    return theta
# assemble X_b and run gradient descent
X_b = np.hstack([np.ones((len(x), 1)), x.reshape(-1,1)])
initial_theta = np.zeros(X_b.shape[1])
eta = 0.01
theta = gradient_descent(X_b, y, initial_theta, eta)
print(theta)
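
Since y was generated as 3x + 4 plus noise, the run above should print an intercept close to 4 and a slope close to 3. The component-by-component loop in dJ can also be collapsed into one matrix product, which is the form used in the class in section 4; a minimal sketch (dJ_vectorized is my own name for it):

# vectorized gradient: one matrix product instead of a loop over components
def dJ_vectorized(theta, X_b, y):
    return X_b.T.dot(X_b.dot(theta) - y) * 2. / len(y)
# sanity check: the vectorized form agrees with the component-wise dJ above
print(np.allclose(dJ(theta, X_b, y), dJ_vectorized(theta, X_b, y)))  # True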

3. Stochastic gradient descent

Stochastic gradient descent (SGD):

  • the step direction is random, which can help jump out of local optima
  • trades some accuracy for speed
  • the learning rate matters a great deal and should decay over the iterations
  • this decay follows the idea of simulated annealing
  • scikit-learn's implementation is more sophisticated and better optimized (see the sketch after the code below)

The implementation is as follows:

import numpy as np
import matplotlib.pyplot as plt
"""随机梯度下降法"""
# 数据
m = 100000
x = np.random.normal(size=m)
X = x.reshape(-1,1)
y = 4.*x + 3. + np.random.normal(0, 3, size=m)
# loss function
def J(theta, X_b, y):
    try:
        return np.sum((y - X_b.dot(theta)) ** 2) / len(y)
    except OverflowError:
        return float('inf')
# gradient for a single sample: one row of X_b is passed in, not the whole data set
def dJ_sgd(theta, X_b_i, y_i):
    return 2 * X_b_i.T.dot(X_b_i.dot(theta) - y_i)
# stochastic gradient descent
def sgd(X_b, y, initial_theta, n_iters):
    # t0 and t1 are the two hyperparameters of the decaying learning rate
    t0, t1 = 5, 50
    # learning rate decays as the iteration count grows
    def learning_rate(t):
        return t0 / (t + t1)
    # starting point
    theta = initial_theta
    for cur_iter in range(n_iters):
        rand_i = np.random.randint(len(X_b))  # pick a random sample index
        gradient = dJ_sgd(theta, X_b[rand_i], y[rand_i])  # gradient for that one sample
        theta = theta - learning_rate(cur_iter) * gradient
    return theta
X_b = np.hstack([np.ones((len(X), 1)), X])
initial_theta = np.zeros(X_b.shape[1])
theta = sgd(X_b, y, initial_theta, n_iters=m//3)  # n_iters is an important hyperparameter: only a third of the samples are visited here
print(theta)
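
As noted above, scikit-learn ships a more sophisticated SGD implementation in sklearn.linear_model.SGDRegressor; a minimal usage sketch on the same X and y, with all hyperparameters left at their defaults (no tuning):

from sklearn.linear_model import SGDRegressor
# scikit-learn's SGD-based linear regressor, default hyperparameters
sgd_reg = SGDRegressor()
sgd_reg.fit(X, y)  # X is the (m, 1) feature matrix built above, y the noisy targets
print(sgd_reg.intercept_, sgd_reg.coef_)  # roughly 3 and 4, matching y = 4x + 3 + noise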

4. Adding gradient descent to the linear regression class from the previous post

import numpy as np
from sklearn.metrics import r2_score

"""用了梯度下降法的线性回归函数"""
class LinearRegression:
    def __init__(self):
        """初始化Linear Regression模型"""
        self.coef_ = None
        self.intercept_ = None
        self._theta = None
    
    def fit_normal(self, X_train, y_train):
        """根据训练数据集X_train, y_train训练Linear Regression模型"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self
    
    def fit_gd(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """根据训练数据集X_train, y_train, 使用梯度下降法训练Linear Regression模型"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        # loss function
        def J(theta, X_b, y):
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(y)
            except OverflowError:
                return float('inf')
        # gradient, vectorized form
        def dJ(theta, X_b, y):
            return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(y)
        # gradient descent loop
        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            # start from the initial theta
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
                    break
                cur_iter += 1
            return theta
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self
    
    def fit_sgd(self, X_train, y_train, n_iters=50, t0=5, t1=50):
        """根据训练数据集X_train, y_train, 使用随机梯度下降法训练Linear Regression模型"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        assert n_iters >= 1
        # gradient for a single sample
        def dJ_sgd(theta, X_b_i, y_i):
            return X_b_i * (X_b_i.dot(theta) - y_i) * 2
        # stochastic gradient descent
        def sgd(X_b, y, initial_theta, n_iters=5, t0=5, t1=50):  # t0 and t1 are the learning-rate hyperparameters
            def learning_rate(t):
                return t0 / (t + t1)
            theta = initial_theta
            m = len(X_b)
            for i_iter in range(n_iters):
                # shuffle the indexes so that every sample is visited once per pass
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes,:]
                y_new = y[indexes]
                for i in range(m):
                    gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
                    theta = theta - learning_rate(i_iter * m + i) * gradient
            return theta
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.random.randn(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self
    
    def predict(self, X_predict):
        """给定待预测数据集X_predict,返回表示X_predict的结果向量"""
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"
        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return X_b.dot(self._theta)
    
    def score(self, X_test, y_test):
        """根据测试数据集 X_test 和 y_test 确定当前模型的准确度"""
        y_predict = self.predict(X_test)
        return r2_score(y_test, y_predict)
   
    def __repr__(self):
        return "LinearRegression()"

5. Debugging the gradient

  • the numerical check works well for verifying a hand-derived gradient
  • but it is slow, so it is only used for debugging
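
The trick used in dJ_debug below is to approximate each partial derivative with a central difference. In my notation, with e_i the i-th unit vector and a small \varepsilon:

\frac{\partial J}{\partial \theta_i} \approx \frac{J(\theta + \varepsilon e_i) - J(\theta - \varepsilon e_i)}{2\varepsilon}

This needs two full evaluations of J per dimension, which is why it is slow and only used to verify the analytic gradient.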

The implementation is as follows:

import numpy as np
import matplotlib.pyplot as plt
import datetime

"""对梯度下降法的调试"""
# 数据
np.random.seed(666)
X = np.random.random(size=(1000, 10))
true_theta = np.arange(1, 12, dtype=float) #最终应该得到的
X_b = np.hstack([np.ones((len(X), 1)), X])
y = X_b.dot(true_theta) + np.random.normal(size=1000) #加上一个噪音
# 损失函数
def J(theta, X_b, y):
    try:
        return np.sum((y - X_b.dot(theta))**2) / len(X_b)
    except OverflowError:
        return float('inf')
# analytic gradient, derived mathematically as before
def dJ_math(theta, X_b, y):
    return X_b.T.dot(X_b.dot(theta) - y) * 2. / len(y)
# numerical gradient, used only for debugging
def dJ_debug(theta, X_b, y, epsilon=0.01):
    res = np.empty(len(theta))
    for i in range(len(theta)):
        # approximate the partial derivative along one dimension at a time
        theta_1 = theta.copy()
        theta_1[i] += epsilon
        theta_2 = theta.copy()
        theta_2[i] -= epsilon
        res[i] = (J(theta_1, X_b, y) - J(theta_2, X_b, y)) / (2 * epsilon)
    return res
# gradient descent, taking the gradient function dJ as an argument
def gradient_descent(dJ, X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    theta = initial_theta
    cur_iter = 0
    while cur_iter < n_iters:
        gradient = dJ(theta, X_b, y)
        last_theta = theta
        theta = theta - eta * gradient
        if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
            break
        cur_iter += 1
    return theta
X_b = np.hstack([np.ones((len(X), 1)), X])
initial_theta = np.zeros(X_b.shape[1])
eta = 0.01
startTime = datetime.datetime.now()
theta1 = gradient_descent(dJ_debug, X_b, y, initial_theta, eta)
print(theta1)
endTime = datetime.datetime.now()
print("运行的时间是:%ss" % (endTime - startTime).seconds)
startTime = datetime.datetime.now()
theta2 = gradient_descent(dJ_math, X_b, y, initial_theta, eta)
print(theta2)
endTime = datetime.datetime.now()
print("运行的时间是:%ss" % (endTime - startTime).seconds)

Conclusion

This section covered gradient descent,
including batch gradient descent and stochastic gradient descent,
and wired it into the linear regression class from the previous post.
Gradient descent is a good general-purpose tool for finding optimal parameters in many machine learning problems.

There is also mini-batch gradient descent,
which essentially adds one more hyperparameter, the batch size; a sketch is given below.
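
A minimal sketch of mini-batch gradient descent, written in the same style as the sgd function from section 3 (the function name mbgd, the batch_size value and the reuse of the decaying learning rate are my own choices, not from the original post):

import numpy as np
# mini-batch gradient descent: each step uses a random batch of batch_size samples
def mbgd(X_b, y, initial_theta, n_iters=5, batch_size=16, t0=5, t1=50):
    def learning_rate(t):
        return t0 / (t + t1)
    theta = initial_theta
    m = len(X_b)
    for i_iter in range(n_iters):
        indexes = np.random.permutation(m)  # shuffle once per pass
        for start in range(0, m, batch_size):
            batch = indexes[start:start + batch_size]
            X_batch, y_batch = X_b[batch], y[batch]
            # vectorized gradient computed on the batch only
            gradient = X_batch.T.dot(X_batch.dot(theta) - y_batch) * 2. / len(batch)
            theta = theta - learning_rate(i_iter * m + start) * gradient
    return theta

Folding this into the class from section 4 as a fit_mbgd method would expose batch_size as the extra hyperparameter mentioned above.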

Reposted from blog.csdn.net/weixin_44604541/article/details/106586471