# Machine Learning in Action - Python 3.6 - Support Vector Machines (SVM)

import numpy as np

def load_data_set(file_name):
    """Load a tab-separated data file into features and labels.

    Each line must contain at least three tab-separated fields:
    two feature values followed by a class label.

    Args:
        file_name: path to the data file.

    Returns:
        (data_mat, label_mat): a list of [x1, x2] float pairs and the
        matching list of float labels.
    """
    data_mat = []
    label_mat = []
    # 'with' guarantees the file is closed even on error
    # (the original opened the file and never closed it)
    with open(file_name) as fr:
        for line in fr:
            line_arr = line.strip().split('\t')
            data_mat.append([float(line_arr[0]), float(line_arr[1])])
            label_mat.append(float(line_arr[2]))

    return data_mat, label_mat

def select_jrand(i, m):
    """Return a uniformly random index in [0, m) that differs from i."""
    j = int(np.random.uniform(0, m))
    # redraw until we land on an index other than i
    while j == i:
        j = int(np.random.uniform(0, m))
    return j

def clip_alpha(aj, H, L):
    """Clamp aj into the closed interval [L, H] and return the result."""
    if aj > H:
        return H
    if aj < L:
        return L
    return aj
#五个输入参数:数据集、类别标签、常数C、容错率、退出前最大循环次数
def smo_simple(data_mat_in, class_labels, C, toler, max_iter):
    data_matrix = np.mat(data_mat_in)       #输入转为矩阵,使用numpy库
    label_mat = np.mat(class_labels).transpose()
    b = 0
    m, n = np.shape(data_matrix)            #矩阵行和列
    alphas = np.mat(np.zeros((m, 1)))       #m维向量初始化全0,同时将向量转为矩阵
    iter = 0                                #迭代次数计数器
    while iter < max_iter:                  #当前迭代次数小于最大迭代次数执行循环
        alpha_pairs_changed = 0             #alpha参数修改次数计数器
        for i in range(m):                  #内循环
            '''
            multiply将两个矩阵的每个对应元素相乘
            .T方法完成矩阵的转置
            ‘:’操作可以用来取出矩阵中某一行或者某一列
            这里转置的目的是为了将两个向量相乘,做向量乘法
            '''
            #fxi:预测结果
            fxi = float(np.multiply(alphas, label_mat).T * (data_matrix*data_matrix[i, :].T)) + b
            Ei = fxi - float(label_mat[i])  #预测结果和真实结果进行对比得到误差
            #if判断误差是否满足输入参数的容错率,不满足则进行优化,判断包括正间隔和负间隔
            if ((label_mat[i]*Ei < -toler) and (alphas[i] < C )) or ((label_mat[i]*Ei > toler) and (alphas[i] > 0)):
                j = select_jrand(i, m) #随机选择第二个alpha
                #计算预测结果,同时与实际对比得出误差
                fxj = float(np.multiply(alphas, label_mat).T*(data_matrix*data_matrix[j, :].T)) + b
                Ej = fxj - float(label_mat[j])  #误差
                #保存旧的alpha值,后面需要新老alpha进行对比
                alpha_i_old = alphas[i].copy()
                alpha_j_old = alphas[j].copy()
                #------计算L和H,L和H的作用是调整alpha的值在0和C之间------
                if label_mat[i] != label_mat[j]:
                    L = max(0, alphas[j]-alphas[i])
                    H = min(C, C+alphas[j]-alphas[i])
                else:
                    L = max(0, alphas[j]+alphas[i]-C)
                    H = min(C, alphas[j]+alphas[i])
                # ------计算L和H,L和H的作用是调整alpha的值在0和C之间------
                #如何L和H相等,不做任何改变
                if L == H:
                    print('L == H')
                    continue
                #eta是alpha[j]的最优修改量 ?????why??????????????
                eta = 2.0 * data_matrix[i, :] * data_matrix[j, :].T - data_matrix[i, :]*data_matrix[i, :].T - \
                      data_matrix[j, :]*data_matrix[j, :].T
                if eta >= 0 :
                    print(' eta >= 0')
                    continue
                #d对alphas[j]进行相应的修改。。。。。why??????????????????
                alphas[j] -= label_mat[j]*(Ei - Ej)/eta
                alphas[j] = clip_alpha(alphas[j], H, L)
                if abs(alphas[j]-alpha_j_old) < 0.00001:    #如果j基本不变,表示本次无优化效果,进行下一次优化
                    print('j not moving enough')
                    continue
                #对i进行的修改和对j进行的修改是一样的,只不过改变的方向相反
                alphas[i] += label_mat[j] * label_mat[i] * (alpha_j_old-alphas[j])
                #优化完成之后,给优化的两个alpha设置一个常数项b
                b1 = b - Ei - label_mat[i]*(alphas[i] - alpha_i_old) * data_matrix[i, :] * data_matrix[i, :].T - \
                     label_mat[j] * (alphas[j] - alpha_j_old) * data_matrix[i, :] * data_matrix[j,:].T
                b2 = b - Ej - label_mat[i] * (alphas[i]-alpha_i_old) * data_matrix[i, :] * data_matrix[j, :].T -\
                     label_mat[j] * (alphas[j] - alpha_j_old) * data_matrix[j, :] * data_matrix[j, :].T
                if 0 < alphas[i] and C > alphas[i] :
                    b = b1
                elif 0 < alphas[j] and C > alphas[j]:
                    b = b2
                else:
                    b = (b1 + b2 ) / 2.0
                alpha_pairs_changed += 1
                print('iter: %d  i: %d, pairs changed %d '%(iter, i, alpha_pairs_changed))
        if alpha_pairs_changed == 0:
            iter += 1
        else:
            iter = 0
    print('iteration number: %d'%iter)

    return b, alphas


# class opt_struct:
#   def __init__(self, data_mat_in, class_labels, C, toler):
#       self.X = data_mat_in
#       self.label_mat = class_labels
#       self.C = C
#       self.tol = toler
#       self.m = np.shape(data_mat_in)[0]
#       self.alphas = np.mat(np.zeros((self.m, 1)))
#       self.b = 0
#       self.ecache = np.mat(np.zeros((self.m, 2)))

# def calc_ek(oS, k):
#   fxk = float(np.multiply(oS.alphas, oS.label_mat).T * (oS.X*oS.X[k, :].T)) + oS.b
#   ek = fxk - float(oS.label_mat[k])
#   return ek

def selectj(i, oS, Ei):
    """Pick the second alpha index j that maximizes the step size |Ei - Ej|.

    Marks Ei valid in the error cache first; if fewer than two cached
    errors exist yet, falls back to a random index.

    Returns:
        (j, Ej): the chosen index and its cached/computed error.
    """
    oS.ecache[i] = [1, Ei]  # flag Ei as a valid cache entry
    candidates = np.nonzero(oS.ecache[:, 0].A)[0]
    if len(candidates) <= 1:
        # cache too sparse: choose j at random instead
        j = select_jrand(i, oS.m)
        return j, calc_ek(oS, j)
    best_k = -1
    best_delta = 0
    best_ej = 0
    for k in candidates:
        if k == i:
            continue
        ek = calc_ek(oS, k)
        delta = abs(Ei - ek)
        if delta > best_delta:
            best_k, best_delta, best_ej = k, delta, ek
    return best_k, best_ej

def updateEk(oS, k):
    """Recompute the error for sample k and store it as valid in the cache."""
    oS.ecache[k] = [1, calc_ek(oS, k)]


# def innerL(i, oS):
#   Ei = calc_ek(oS, i)
#   if ((oS.label_mat[i]*Ei < -oS.tol) and (oS.alphas[i]<oS.C)) or \
#       ((oS.label_mat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
#       j, Ej = selectj(i, oS, Ei)
#       alpha_i_old = oS.alphas[i].copy()
#       alpha_j_old = oS.alphas[j].copy()
#       if oS.label_mat[i] != oS.label_mat[j]:
#           L = max(0, oS.alphas[j]-oS.alphas[i])
#           H = min(oS.C, oS.C+oS.alphas[j]-oS.alphas[i])
#       else:
#           L = max(0, oS.alphas[j]+oS.alphas[i]-oS.C)
#           H = min(oS.C, oS.alphas[j]+oS.alphas[i])
#       if L == H:
#           print("L==H")
#           return 0
#       eta = 2.0 * oS.X[i, :] * oS.X[j, :].T - oS.X[i, :]*oS.X[i, :].T - oS.X[j, :]*oS.X[j, :].T
#       if eta >= 0:
#           print('eta >= 0 ')
#           return 0
#       oS.alphas[j] -= oS.label_mat[j] * (Ei - Ej)/eta
#       oS.alphas[j] = clip_alpha(oS.alphas[j], H, L)
#       updateEk(oS, j)
#       b1 = oS.b - Ei - oS.label_mat[i]*(oS.alphas[i]-alpha_i_old) * oS.X[i, :]*oS.X[i, :].T - \
#            oS.label_mat[j] * (oS.alphas[j]-alpha_j_old) * oS.X[i,:]*oS.X[i,:].T
#       b2 = oS.b - Ej - oS.label_mat[i]*(oS.alphas[i]-alpha_i_old) * oS.X[i, :]*oS.X[j, :].T - \
#            oS.label_mat[j] * (oS.alphas[j]-alpha_j_old) * oS.X[j,:]*oS.X[j,:].T
#       if 0 < oS.alphas[i] and oS.C > oS.alphas[i]:
#           oS.b = b1
#       elif 0 < oS.alphas[j] and oS.C > oS.alphas[j]:
#           oS.b = b2
#       else:
#           oS.b = (b1 + b2)/2.0
#       return 1
#   else:
#       return 0

def smoP(data_mat_in, class_labels, C, toler, max_iter, k_tup=('lin', 0)):
    """Train an SVM with the full Platt SMO algorithm.

    Alternates between full passes over all samples and passes over the
    non-bound alphas (0 < alpha < C) until no alpha changes or max_iter
    passes have run.

    Args:
        data_mat_in: training samples, one row per sample.
        class_labels: class labels (+1 / -1), one per sample.
        C: regularization constant bounding each alpha to [0, C].
        toler: KKT tolerance.
        max_iter: maximum number of passes.
        k_tup: kernel spec, e.g. ('lin', 0) or ('rbf', sigma).

    Returns:
        (b, alphas): the bias term and the (m, 1) multiplier matrix.
    """
    oS = opt_struct(np.mat(data_mat_in), np.mat(class_labels).transpose(), C, toler, k_tup)
    iter = 0
    entire_set = True
    alpha_pairs_changed = 0
    while iter < max_iter and (alpha_pairs_changed > 0 or entire_set):
        alpha_pairs_changed = 0
        if entire_set:
            # full pass: try every sample as the first alpha
            for i in range(oS.m):
                alpha_pairs_changed += innerL(i, oS)
            print('fullSet, iter: %d  i: %d, pairs changed: %d'%(iter, i, alpha_pairs_changed))
        else:
            # restricted pass: only non-bound alphas (0 < alpha < C)
            non_bound_ls = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in non_bound_ls:
                alpha_pairs_changed += innerL(i, oS)
                print('non-bound, iter: %d i:%d, pairs changed %d'%(iter, i, alpha_pairs_changed))
        iter += 1
        # after a full pass switch to non-bound passes; go back to a full
        # pass once a non-bound pass makes no progress
        entire_set = False if entire_set else (alpha_pairs_changed == 0)
        print('iteration numbers: %d'%iter)
    return oS.b, oS.alphas


def kernel_trans(x, a, k_tup):
    """Evaluate the kernel between every row of x and the single row a.

    Args:
        x: (m, n) matrix of samples.
        a: (1, n) matrix, one sample.
        k_tup: ('lin', _) for the linear kernel, or ('rbf', sigma) for
            the Gaussian kernel exp(-||x_j - a||^2 / sigma^2).

    Returns:
        (m, 1) matrix of kernel values.

    Raises:
        NameError: if the kernel name is not recognized.
    """
    m, n = np.shape(x)
    k = np.mat(np.zeros((m, 1)))
    kernel_name = k_tup[0]
    if kernel_name == 'lin':
        # linear kernel: plain inner products
        k = x * a.T
    elif kernel_name == 'rbf':
        # Gaussian kernel: squared distances first, then exponentiate
        for row in range(m):
            diff = x[row, :] - a
            k[row] = diff * diff.T
        k = np.exp(k / (-1 * k_tup[1] ** 2))
    else:
        raise NameError('Houston We Have a Problem---That Kernel is not recognized')
    return k

class opt_struct:
    """Shared state for the full Platt SMO routines.

    Holds the training data, the hyperparameters, the multipliers and
    bias being optimized, the error cache, and the precomputed kernel
    matrix.
    """
    def __init__(self, data_mat_in, class_labels, C, toler, k_tup):
        self.X = data_mat_in
        self.label_mat = class_labels
        self.C = C
        self.tol = toler
        self.m = np.shape(data_mat_in)[0]
        self.b = 0
        self.alphas = np.mat(np.zeros((self.m, 1)))
        # error cache: column 0 is a valid flag, column 1 the cached error
        self.ecache = np.mat(np.zeros((self.m, 2)))
        # precompute the full (m, m) kernel matrix, one column per sample
        self.K = np.mat(np.zeros((self.m, self.m)))
        for col in range(self.m):
            self.K[:, col] = kernel_trans(self.X, self.X[col, :], k_tup)

def innerL(i, oS):
    """Attempt one Platt SMO optimization step starting from sample i.

    Args:
        i: index of the first alpha to try.
        oS: opt_struct holding data, alphas, bias, and the kernel matrix.

    Returns:
        1 if a pair of alphas was changed, 0 otherwise.
    """
    Ei = calc_ek(oS, i)
    # optimize only if sample i violates the KKT conditions beyond oS.tol
    if ((oS.label_mat[i]*Ei < -oS.tol) and (oS.alphas[i]<oS.C)) or \
        ((oS.label_mat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectj(i, oS, Ei)
        alpha_i_old = oS.alphas[i].copy()
        alpha_j_old = oS.alphas[j].copy()
        # L and H clamp the new alpha[j] so both alphas remain in [0, C]
        if oS.label_mat[i] != oS.label_mat[j]:
            L = max(0, oS.alphas[j]-oS.alphas[i])
            H = min(oS.C, oS.C+oS.alphas[j]-oS.alphas[i])
        else:
            L = max(0, oS.alphas[j]+oS.alphas[i]-oS.C)
            H = min(oS.C, oS.alphas[j]+oS.alphas[i])
        if L == H:
            print("L==H")
            return 0
        # eta: (negative) curvature of the objective along the constraint
        # line, expressed through the precomputed kernel matrix
        eta = 2.0 * oS.K[i, j] - oS.K[i, i] - oS.K[j, j]
        if eta >= 0:
            print('eta >= 0 ')
            return 0
        oS.alphas[j] -= oS.label_mat[j] * (Ei - Ej)/eta
        oS.alphas[j] = clip_alpha(oS.alphas[j], H, L)
        updateEk(oS, j)
        # BUG FIX: the original returned straight to the bias update and
        # never moved alpha[i], which breaks the SMO constraint
        # sum(alpha_k * y_k) == 0. Mirror Platt's algorithm (and
        # smo_simple above): bail out if alpha[j] barely moved, otherwise
        # shift alpha[i] by the same amount in the opposite direction.
        if abs(oS.alphas[j] - alpha_j_old) < 0.00001:
            print('j not moving enough')
            return 0
        oS.alphas[i] += oS.label_mat[j] * oS.label_mat[i] * (alpha_j_old - oS.alphas[j])
        updateEk(oS, i)
        # recompute the bias candidates from both updated alphas
        b1 = oS.b - Ei - oS.label_mat[i]*(oS.alphas[i]-alpha_i_old) * oS.K[i, i] - \
             oS.label_mat[j] * (oS.alphas[j]-alpha_j_old) * oS.K[i, j]
        b2 = oS.b - Ej - oS.label_mat[i]*(oS.alphas[i]-alpha_i_old) * oS.K[i, j]- \
             oS.label_mat[j] * (oS.alphas[j]-alpha_j_old) * oS.K[j, j]
        # choose b from whichever alpha is non-bound; otherwise average
        if 0 < oS.alphas[i] and oS.C > oS.alphas[i]:
            oS.b = b1
        elif 0 < oS.alphas[j] and oS.C > oS.alphas[j]:
            oS.b = b2
        else:
            oS.b = (b1 + b2)/2.0
        return 1
    else:
        return 0


def calc_ek(oS, k):
    """Return the prediction error E_k = f(x_k) - y_k for sample k."""
    prediction = float(np.multiply(oS.alphas, oS.label_mat).T * oS.K[:, k] + oS.b)
    return prediction - float(oS.label_mat[k])

def test_rbf(k1 = 1.3):
    """Train an RBF-kernel SVM on testSetRBF.txt and print the training
    and test error rates (test set: testSetRBF2.txt).

    Args:
        k1: sigma parameter of the Gaussian kernel.
    """
    def _error_rate(data_arr, label_arr):
        # classify every sample against the support vectors and return
        # the fraction misclassified (the original duplicated this loop
        # verbatim for the training and test sets)
        dat_mat = np.mat(data_arr)
        m = np.shape(dat_mat)[0]
        error_count = 0
        for i in range(m):
            kernel_eval = kernel_trans(sVs, dat_mat[i, :], ('rbf', k1))
            predict = kernel_eval.T * np.multiply(label_sv, alphas[svInd]) + b
            if np.sign(predict) != np.sign(label_arr[i]):
                error_count += 1
        return float(error_count) / m

    data_arr, label_arr = load_data_set('testSetRBF.txt')
    b, alphas = smoP(data_arr, label_arr, 200, 0.0001, 10000, ('rbf', k1))
    dat_mat = np.mat(data_arr)
    label_mat = np.mat(label_arr).transpose()
    svInd = np.nonzero(alphas.A > 0)[0]
    sVs = dat_mat[svInd]            # keep only the support vectors
    label_sv = label_mat[svInd]
    print('there are %d supporter vectors'%np.shape(sVs)[0])
    print('the training error rate is : %f' % _error_rate(data_arr, label_arr))
    data_arr, label_arr = load_data_set('testSetRBF2.txt')
    print('the test error rate is : %f' % _error_rate(data_arr, label_arr))


def img_to_vector(filename):
    """Read a 32x32 text image of '0'/'1' characters into a 1x1024 vector.

    Args:
        filename: path to a file with 32 lines of 32 digit characters.

    Returns:
        A (1, 1024) numpy array of 0.0/1.0 values, row-major.
    """
    return_vector = np.zeros((1, 1024))
    # 'with' guarantees the file is closed
    # (the original opened the file and never closed it)
    with open(filename) as fr:
        for i in range(32):
            line_str = fr.readline()
            for j in range(32):
                return_vector[0, 32*i+j] = int(line_str[j])
    return return_vector

def load_images(dir_name):
    """Load every digit image file in dir_name into a feature matrix.

    File names are expected to look like '<digit>_<index>.txt'. The label
    is -1 for the digit 9 and +1 for every other digit (binary task).

    Returns:
        (training_mat, hw_labels): an (m, 1024) array of image vectors
        and the matching list of +/-1 labels.
    """
    from os import listdir
    file_list = listdir(dir_name)
    m = len(file_list)
    hw_labels = []
    training_mat = np.zeros((m, 1024))
    for idx, file_name_str in enumerate(file_list):
        # the digit class is encoded before the underscore in the name
        base_name = file_name_str.split('.')[0]
        digit = int(base_name.split('_')[0])
        hw_labels.append(-1 if digit == 9 else 1)
        training_mat[idx, :] = img_to_vector('%s/%s'%(dir_name, file_name_str))
    return training_mat, hw_labels

def test_digits(k_tup=('rbf', 10)):
    """Train an SVM digit classifier (9 vs. the rest) on digits/trainingDigits
    and print training and test error rates (test set: digits/testDigits).

    Args:
        k_tup: kernel spec passed through to smoP, e.g. ('rbf', sigma).
    """
    def _error_rate(samples, labels):
        # classify every sample against the support vectors and return
        # the fraction misclassified (the original duplicated this loop
        # verbatim for the training and test sets)
        m = np.shape(samples)[0]
        error_count = 0
        for i in range(m):
            kernel_eval = kernel_trans(sVs, samples[i, :], k_tup)
            predict = kernel_eval.T * np.multiply(label_sv, alphas[svInd]) + b
            if np.sign(predict) != np.sign(labels[i]):
                error_count += 1
        return float(error_count) / m

    data_arr, label_arr = load_images('digits/trainingDigits')
    b, alphas = smoP(data_arr, label_arr, 200, 0.0001, 10000, k_tup)
    data_mat = np.mat(data_arr)
    label_mat = np.mat(label_arr).transpose()
    svInd = np.nonzero(alphas.A>0)[0]
    sVs = data_mat[svInd]           # keep only the support vectors
    label_sv = label_mat[svInd]
    print('there are %d support vectors'%np.shape(sVs)[0])
    print('the training error rate is : %f' % _error_rate(data_mat, label_arr))
    data_arr, label_arr = load_images('digits/testDigits')
    data_mat = np.mat(data_arr)
    print('the test error rate is : %f' % _error_rate(data_mat, label_arr))



if __name__ == '__main__':
    # Example invocations; uncomment to run the other variants.
    # data_arr, label_arr = load_data_set('testSet.txt')
    # b, alphas = smoP(data_arr, label_arr, 0.6, 0.01, 40)
    # print(b)
    # print(alphas[alphas>0])
    #test_rbf()
    # Requires digits/trainingDigits and digits/testDigits directories.
    test_digits(('rbf', 20))

# Reposted from: blog.csdn.net/liyuanshuo_nuc/article/details/82704004