Gradient descent: training the parameters w and b of a linear model

import numpy as np

# 1. Sample a synthetic dataset from the ground-truth line y = 1.477*x + 0.089
data = []  # accumulates [x, y] sample pairs
for _ in range(100):  # draw 100 sample points
    x = np.random.uniform(-10, 10)    # input drawn uniformly from [-10, 10)
    eps = np.random.normal(0., 0.1)   # Gaussian observation noise, std 0.1
    data.append([x, 1.477 * x + 0.089 + eps])  # noisy output of the true model
data = np.array(data)  # shape (100, 2): column 0 is x, column 1 is y
print(data)

#2计算误差
def mse(b, w, points):
    """Mean squared error of the linear model y = w*x + b over all points.

    Args:
        b: intercept (bias) of the line.
        w: slope (weight) of the line.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.

    Returns:
        The mean squared error as a float.
    """
    points = np.asarray(points, dtype=float)
    x, y = points[:, 0], points[:, 1]
    # Vectorized over all samples — replaces the per-element Python loop.
    return float(np.mean((y - (w * x + b)) ** 2))

#3,计算梯度
def step_gradient(b_current, w_current, points, lr):
    """Perform one gradient-descent update of (b, w) for the MSE loss.

    Args:
        b_current: current intercept.
        w_current: current slope.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.
        lr: learning rate (step size).

    Returns:
        [new_b, new_w] after one descent step.
    """
    points = np.asarray(points, dtype=float)
    x, y = points[:, 0], points[:, 1]
    m = float(len(points))
    # Per-sample prediction error of the current model.
    residual = (w_current * x + b_current) - y
    # d(MSE)/db = (2/M) * sum(residual); d(MSE)/dw = (2/M) * sum(residual * x).
    b_gradient = (2.0 / m) * residual.sum()
    w_gradient = (2.0 / m) * (residual * x).sum()
    # Step against the gradient direction.
    new_b = b_current - lr * b_gradient
    new_w = w_current - lr * w_gradient
    return [new_b, new_w]

#梯度更新
def gradient_descent(points, starting_b, starting_w, lr, num_iterations):
    """Run gradient descent from (starting_b, starting_w) for num_iterations steps.

    Prints the loss every 50 iterations.

    Args:
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.
        starting_b: initial intercept.
        starting_w: initial slope.
        lr: learning rate passed to step_gradient.
        num_iterations: number of descent steps to perform.

    Returns:
        [b, w] after the final iteration.
    """
    # Convert once up front; the original rebuilt the array on every iteration.
    points = np.array(points)
    b = starting_b
    w = starting_w
    for step in range(num_iterations):
        b, w = step_gradient(b, w, points, lr)
        loss = mse(b, w, points)
        if step % 50 == 0:
            print("iteration:{},loss:{},w:{},b:{}".format(step, loss, w, b))
    return [b, w]
def main():
    """Fit w and b to the sampled global `data` and report the final loss."""
    lr = 0.01             # learning rate
    initial_b = 0         # starting intercept
    initial_w = 0         # starting slope
    num_iterations = 1000
    [b, w] = gradient_descent(data, initial_b, initial_w, lr, num_iterations)
    loss = mse(b, w, data)
    print("final loss:{},w:{},b:{}".format(loss, w, b))


if __name__ == "__main__":
    # The original script defined main() but never called it; run it only when
    # executed as a script, not on import.
    main()
Original articles: 35 · Likes: 58 · Views: 5891

You may also like

Reposted from blog.csdn.net/weixin_43221560/article/details/103087566