DL_3: Multilayer Perceptron

1 Perceptron

1.1 Perceptron Model

Given an input $\mathbf{x}$, weights $\mathbf{w}$, and a bias $b$, the perceptron outputs:

$$o=\sigma(\langle\mathbf{w},\mathbf{x}\rangle+b) \qquad \sigma(x)=\begin{cases}1 & \text{if } x>0\\ -1 & \text{otherwise}\end{cases}$$

1.2 Summary

  • The perceptron is a binary classification model and one of the earliest AI models
  • Its training algorithm is equivalent to gradient descent with a batch size of 1 (see the sketch after this list)
  • It cannot fit the XOR function, a failure that triggered the first AI winter
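
To make the batch-size-1 point concrete, here is a minimal sketch of the classic perceptron learning rule in NumPy (my own illustration, not code from the original lecture). A sample is misclassified when $y_i(\langle\mathbf{w},\mathbf{x}_i\rangle+b)\le 0$, and each misclassified sample triggers one update:

import numpy as np


def train_perceptron(X, y, num_epochs=10):
    '''Perceptron learning rule; labels in y are +1 / -1'''
    w = np.zeros(X.shape[1])
    b = 0.0
    for _ in range(num_epochs):
        for xi, yi in zip(X, y):
            if yi * (np.dot(w, xi) + b) <= 0:  # misclassified sample
                w += yi * xi  # one update per sample: SGD with batch size 1
                b += yi
    return w, b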

2 Multilayer Perceptron

2.1 Multilayer Perceptron Model

  • Single hidden layer
    [Figure: a single-hidden-layer MLP]
    The activation function here must not be linear, otherwise the network collapses into a plain linear model, as the derivation below shows.
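
To see why, compose a single hidden layer with an identity (linear) activation; the two affine maps collapse into one, so nothing is gained over a plain linear model:

$$\mathbf{h}=\mathbf{W}_1\mathbf{x}+\mathbf{b}_1,\qquad o=\mathbf{w}_2^\top\mathbf{h}+b_2=(\mathbf{w}_2^\top\mathbf{W}_1)\,\mathbf{x}+(\mathbf{w}_2^\top\mathbf{b}_1+b_2)$$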

2.2 Activation Functions

  • Sigmoid activation
    $\operatorname{sigmoid}(x)=\dfrac{1}{1+\exp(-x)}$
    [Figure: sigmoid curve]
  • Tanh activation
    $\tanh(x)=\dfrac{1-\exp(-2x)}{1+\exp(-2x)}$
    [Figure: tanh curve]
  • ReLU activation
    $\operatorname{ReLU}(x)=\max(x,0)$
    [Figure: ReLU curve]
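
As a quick sanity check (my own snippet, not from the original post), the three formulas above can be verified against PyTorch's built-in implementations:

import torch

x = torch.linspace(-3, 3, 7)
assert torch.allclose(torch.sigmoid(x), 1 / (1 + torch.exp(-x)))
assert torch.allclose(torch.tanh(x), (1 - torch.exp(-2 * x)) / (1 + torch.exp(-2 * x)), atol=1e-6)
assert torch.allclose(torch.relu(x), torch.clamp(x, min=0))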

2.3 Summary

  • A multilayer perceptron uses hidden layers and activation functions to obtain a nonlinear model
  • Commonly used activation functions are Sigmoid, Tanh, and ReLU
  • Softmax (recalled below) handles multiclass classification
  • The hyperparameters are the number of hidden layers and the size of each hidden layer
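
For reference, the softmax mentioned above maps the $K$ output logits $o_1,\dots,o_K$ to a probability distribution:

$$\operatorname{softmax}(\mathbf{o})_i=\frac{\exp(o_i)}{\sum_{k=1}^{K}\exp(o_k)}$$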

3 Multilayer Perceptron from Scratch

Some of the functions needed here were already written in earlier posts of this series, so I simply use the versions packaged in the d2l library; their source code is the same as what I used before.

# -*- coding: utf-8 -*- 
# @Time : 2021/9/12 20:43 
# @Author : Amonologue
# @software : pycharm   
# @File : MLP_from_zero.py
import torch
from torch import nn
from d2l import torch as d2l


def relu(X):
    a = torch.zeros_like(X)  # a tensor of zeros with the same shape as X
    return torch.max(a, X)  # elementwise maximum of the two tensors


def net(X):
    X = X.reshape((-1, num_inputs))  # flatten each 28x28 image into a length-784 vector
    H = relu(X @ W1 + b1)  # hidden layer
    return H @ W2 + b2  # output logits; softmax is folded into the loss


if __name__ == '__main__':
    batch_size = 256
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

    num_inputs, num_outputs, num_hiddens = 784, 10, 256
    # In Fashion-MNIST each input is a 28x28 image; flattened into one dimension, that is 784 inputs
    # The output corresponds to 10 classes
    # The hidden layer is given 256 units
    W1 = nn.Parameter(
        torch.randn(num_inputs, num_hiddens, requires_grad=True) * 0.01
    )
    b1 = nn.Parameter(
        torch.zeros(num_hiddens, requires_grad=True)
    )
    W2 = nn.Parameter(
        torch.randn(num_hiddens, num_outputs, requires_grad=True) * 0.01
    )
    b2 = nn.Parameter(
        torch.zeros(num_outputs, requires_grad=True)
    )
    params = [W1, b1, W2, b2]

    loss = nn.CrossEntropyLoss()  # cross-entropy loss
    num_epochs = 10  # train for 10 epochs
    lr = 0.1
    updater = torch.optim.SGD(params, lr=lr)  # optimize with SGD
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)

[Figure: training loss and accuracy curves plotted by d2l.train_ch3]
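
d2l.train_ch3 is used above as a black box. A minimal loop that does roughly the same job (a sketch of my own; the real d2l implementation adds metric accumulation and live plotting) would be:

def train_epoch(net, train_iter, loss, updater):
    # one pass of mini-batch SGD over the training data
    for X, y in train_iter:
        l = loss(net(X), y)
        updater.zero_grad()
        l.backward()
        updater.step()


def evaluate_accuracy(net, data_iter):
    # fraction of correctly classified samples
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in data_iter:
            correct += (net(X).argmax(dim=1) == y).sum().item()
            total += y.numel()
    return correct / total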

4 Concise Implementation of the Multilayer Perceptron

# -*- coding: utf-8 -*- 
# @Time : 2021/9/12 21:47 
# @Author : Amonologue
# @software : pycharm   
# @File : MLP_simple.py
import torch
from torch import nn
from d2l import torch as d2l


def init_weight(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)


if __name__ == '__main__':
    net = nn.Sequential(
        nn.Flatten(),
        nn.Linear(784, 256),
        nn.ReLU(),
        nn.Linear(256, 10)
    )
    # nn.Flatten flattens each input image into a vector, so the first linear layer receives 784 features
    net.apply(init_weight)
    batch_size = 256
    lr = 0.1
    num_epochs = 10
    loss = nn.CrossEntropyLoss()
    trainer = torch.optim.SGD(net.parameters(), lr=lr)
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)

[Figure: training loss and accuracy curves for the concise implementation]
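
After training, predictions come straight from the argmax over the 10 output logits; a small usage sketch:

X, y = next(iter(test_iter))
with torch.no_grad():
    preds = net(X).argmax(dim=1)
print((preds == y).float().mean())  # accuracy on one test batch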

5 Implementing a Multilayer Perceptron (MLP) in NumPy to Solve XOR

# -*- coding: utf-8 -*- 
# @Time : 2021/9/23 8:42
# @Author : Amonologue
# @software : pycharm   
'''
Multilayer perceptron solving the XOR problem
'''
import numpy as np
from matplotlib import pyplot as plt

def MSE(y, y_hat):
    '''Squared-error loss, summed over the batch and halved'''
    loss = np.sum((y - y_hat) ** 2) / 2
    return loss


class SGD:
    def __init__(self, lr):
        self.lr = lr

    def __call__(self, grad):
        '''Return the update step for a given gradient'''
        return self.lr * grad


class Layer:
    def __init__(self, input_channel, output_channel, optimizer):
        self.weight = np.random.normal(size=(input_channel, output_channel))
        self.bias = np.ones((1, output_channel), dtype=np.float64)
        self.optimizer = optimizer

    def forward(self, data):
        self.input = data  # output of the previous layer, a_{l-1}
        self.output = self.input @ self.weight + self.bias  # pre-activation of this layer, z_l
        return self.output

    def backward(self, delta):
        self.weight_grad = self.input.T @ delta
        self.bias_grad = np.sum(delta, axis=0)
        delta = delta @ self.weight.T  # pass the delta back to the preceding activation
        return delta

    def step(self):
        '''Apply one parameter update'''
        # train() passes delta = y - y_hat, the negative of dL/dy_hat for this loss,
        # so adding lr * grad here is ordinary gradient descent: the two sign flips cancel
        self.weight += self.optimizer(self.weight_grad)
        self.bias += self.optimizer(self.bias_grad)


class Sigmoid:
    def __init__(self):
        self.result = None

    def forward(self, data):
        self.result = 1 / (1 + np.exp(-data))
        return self.result

    def backward(self, delta):
        '''Backpropagate the gradient'''
        return delta * self.result * (1 - self.result)  # delta passed to the preceding layer

    def step(self):
        pass
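
The backward methods above implement the standard backpropagation recursion, split between Layer and the activation. With $\mathbf{z}_l=\mathbf{a}_{l-1}\mathbf{W}_l+\mathbf{b}_l$ and $\mathbf{a}_l=\sigma(\mathbf{z}_l)$:

$$\frac{\partial L}{\partial \mathbf{W}_l}=\mathbf{a}_{l-1}^\top\boldsymbol{\delta}_l,\qquad \frac{\partial L}{\partial \mathbf{b}_l}=\sum_{\text{batch}}\boldsymbol{\delta}_l,\qquad \boldsymbol{\delta}_{l-1}=(\boldsymbol{\delta}_l\mathbf{W}_l^\top)\odot\sigma'(\mathbf{z}_{l-1})$$

For the sigmoid, $\sigma'(z)=\sigma(z)(1-\sigma(z))$, which is exactly the expression delta * self.result * (1 - self.result) in Sigmoid.backward.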


class Model:
    def __init__(self, sequential):
        self.Sequential = sequential

    def forward(self, X):
        for layer in self.Sequential:
            X = layer.forward(X)
        return X

    def backward(self, delta):
        for layer in self.Sequential[::-1]:
            delta = layer.backward(delta)
        return delta

    def step(self):
        for layer in self.Sequential:
            layer.step()


def train():
    # start training
    loss_x, loss_y = [], []
    acc_x, acc_y = [], []
    print(f'training...')
    for epoch in range(num_epochs):
        y_hat = net.forward(train_data)
        loss = MSE(train_label, y_hat)
        if epoch % 100 == 0:
            print(f'loss = {loss}')
            loss_x.append(epoch), loss_y.append(loss)
            acc_x.append(epoch), acc_y.append(accuracy(train_label, y_hat))
        net.backward(train_label - y_hat)  # delta = y - y_hat, the negative loss gradient
        net.step()
    line1, = plt.plot(loss_x, loss_y)
    line2, = plt.plot(acc_x, acc_y)
    plt.legend([line1, line2], ['loss', 'acc'], loc=2)
    plt.savefig('loss&acc.png')
    # plt.show()
    print(f'result = {net.forward(train_data)}')
    print(f'result = {[1 if i > 0.5 else 0 for i in net.forward(train_data)]}')
    print(f'finished')


def solve_xor():
    print(f'x1 = 0, x2 = 0, y = {1 if net.forward(np.array([0, 0], dtype=np.float64).reshape(1, 2)) > 0.5 else 0}')
    print(f'x1 = 1, x2 = 0, y = {1 if net.forward(np.array([1, 0], dtype=np.float64).reshape(1, 2)) > 0.5 else 0}')
    print(f'x1 = 0, x2 = 1, y = {1 if net.forward(np.array([0, 1], dtype=np.float64).reshape(1, 2)) > 0.5 else 0}')
    print(f'x1 = 1, x2 = 1, y = {1 if net.forward(np.array([1, 1], dtype=np.float64).reshape(1, 2)) > 0.5 else 0}')


def accuracy(y, y_hat):
    y_hat = [1 if i > 0.5 else 0 for i in y_hat]
    cnt = 0
    for i in range(len(y)):
        if y[i] == y_hat[i]:
            cnt += 1
    return cnt / len(y)


if __name__ == '__main__':
    # hyperparameters
    lr = 1
    num_epochs = 1000
    # build the network: 2 inputs -> 2 hidden units -> 1 output, with sigmoid after each layer
    net = Model([Layer(2, 2, SGD(lr)),
                 Sigmoid(),
                 Layer(2, 1, SGD(lr)),
                 Sigmoid()])
    # training data: the four XOR input pairs and their labels
    train_data = np.array([0, 0, 1, 0, 0, 1, 1, 1]).reshape(4, 2)
    train_label = np.array([0, 1, 1, 0]).reshape(4, 1)

    train()
    solve_xor()
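
As a final check (a hypothetical helper I am adding here, not part of the original script), Layer.backward can be verified against finite differences of the MSE loss. Passing delta = y_hat - y gives the true gradient dL/dW, which should agree with the numerical estimate to around 1e-8 or better:

def grad_check(eps=1e-6):
    rng = np.random.default_rng(0)
    layer = Layer(2, 1, SGD(0.0))  # optimizer is unused in this check
    x = rng.normal(size=(4, 2))
    y = rng.normal(size=(4, 1))

    layer.backward(layer.forward(x) - y)  # dL/dy_hat for L = sum((y - y_hat)^2) / 2
    analytic = layer.weight_grad.copy()

    numeric = np.zeros_like(layer.weight)
    for i in range(layer.weight.shape[0]):
        for j in range(layer.weight.shape[1]):
            layer.weight[i, j] += eps
            loss_plus = MSE(y, layer.forward(x))
            layer.weight[i, j] -= 2 * eps
            loss_minus = MSE(y, layer.forward(x))
            layer.weight[i, j] += eps  # restore the weight
            numeric[i, j] = (loss_plus - loss_minus) / (2 * eps)
    print(np.max(np.abs(analytic - numeric)))  # should be tiny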

Reposted from blog.csdn.net/CesareBorgia/article/details/120254748