SMO (Sequential Minimal Optimization) is a heuristic algorithm for solving the dual problem of the soft-margin SVM (with kernel functions). It is very efficient in practice and did much to make SVMs practical at scale.
The principles behind SMO are explained in detail in the earlier article 支持向量机(SVM)凸二次规划的求解——序列最小最优化算法(SMO)原理及python实现, so this post goes straight to the code.
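Before the code, a quick reminder of the update rules that the inner loop implements (see the article above for the full derivation). With prediction error $E_k = \sum_{m=1}^{N}\alpha_m y_m K(x_m, x_k) + b - y_k$ and $\eta = K_{ii} + K_{jj} - 2K_{ij}$,

$$
\alpha_j^{\text{new}} = \operatorname{clip}\!\Big(\alpha_j^{\text{old}} + \frac{y_j\,(E_i - E_j)}{\eta},\ [L, H]\Big),
\qquad
\alpha_i^{\text{new}} = \alpha_i^{\text{old}} + y_i y_j\,\big(\alpha_j^{\text{old}} - \alpha_j^{\text{new}}\big),
$$

after which the bias $b$ is recomputed from whichever of the two multipliers lies strictly inside $(0, C)$.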
from sklearn import datasets
import matplotlib.pyplot as plt  # plotting utility
import numpy as np

X, y = datasets.make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0,
                                    n_repeated=0, n_classes=2, n_clusters_per_class=1, random_state=2)
# relabel class 0 as -1 so the labels are in {-1, +1}
for i in range(X.shape[0]):
    if y[i] == 0:
        y[i] = -1
# plt.scatter(X[:, 0], X[:, 1], c=y)
# plt.show()

class SMO:
    def __init__(self, X, y, C, toler, maxIter):  # samples, labels, penalty constant C, tolerance, max iterations
        self.X = X
        self.y = y
        self.C = C
        self.toler = toler
        self.maxIter = maxIter
        self.N = self.X.shape[0]             # number of training samples
        self.b = 0                           # bias
        self.a = np.zeros(self.N)            # Lagrange multipliers alpha
        self.K = np.zeros((self.N, self.N))
        self.o = 0.5                         # bandwidth sigma of the Gaussian (RBF) kernel
        # precompute the kernel matrix K[i][j] = exp(-||x_i - x_j||^2 / (2 * sigma^2))
        for i in range(self.N):
            for j in range(self.N):
                t = 0
                for k in range(self.X.shape[1]):
                    t += (self.X[i][k] - self.X[j][k]) ** 2
                self.K[i][j] = np.exp(-t / (2 * self.o ** 2))

    def select_J(self, i):  # randomly choose an index j that is not equal to i
        j = i
        while j == i:
            j = np.random.randint(0, self.N)
        return j

    def fix_alpha(self, a, H, L):  # clip a into the box [L, H]
        if a > H:
            a = H
        if a < L:
            a = L
        return a

    def cal_Ei(self, j):  # prediction error E_j = g(x_j) - y_j
        t = 0
        for i in range(self.N):
            t += self.a[i] * self.y[i] * self.K[i][j]
        return t + self.b - self.y[j]

    def update(self):
        iter = 0
        while iter < self.maxIter:
            alphaPairsChanged = 0
            for i in range(self.N):
                Ei = self.cal_Ei(i)
                # pick an alpha_i that violates the KKT conditions (note the factor y[i])
                if (self.y[i]*Ei < -self.toler and self.a[i] < self.C) or (self.y[i]*Ei > self.toler and self.a[i] > 0):
                    j = self.select_J(i)  # choose j != i
                    Ej = self.cal_Ei(j)
                    aiold = self.a[i]
                    ajold = self.a[j]
                    # feasible range [L, H] for alpha_j under the equality constraint
                    if self.y[i] != self.y[j]:
                        L = max(0, self.a[j] - self.a[i])
                        H = min(self.C, self.C + self.a[j] - self.a[i])
                    else:
                        L = max(0, self.a[j] + self.a[i] - self.C)
                        H = min(self.C, self.a[j] + self.a[i])
                    if L == H:
                        print("L==H")
                        continue
                    eta = self.K[i][i] + self.K[j][j] - 2*self.K[i][j]
                    if eta <= 0:
                        print('eta <= 0')
                        continue
                    # unconstrained optimum for alpha_j, then clip into [L, H]
                    ajnew = ajold + self.y[j]*(Ei - Ej)/eta
                    ajnew = self.fix_alpha(ajnew, H, L)
                    if abs(ajnew - ajold) < 0.00001:
                        print("j not moving enough")
                        continue
                    self.a[j] = ajnew
                    self.a[i] = aiold + self.y[i]*self.y[j]*(ajold - ajnew)
                    # refit the bias from whichever multiplier lies strictly inside (0, C)
                    b1 = -Ei - self.y[i]*self.K[i][i]*(self.a[i] - aiold) - self.y[j]*self.K[i][j]*(ajnew - ajold) + self.b
                    b2 = -Ej - self.y[i]*self.K[i][j]*(self.a[i] - aiold) - self.y[j]*self.K[j][j]*(ajnew - ajold) + self.b
                    if 0 < self.a[i] < self.C:
                        self.b = b1
                    elif 0 < self.a[j] < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2)/2
                    alphaPairsChanged += 1
            # only count an iteration when a full pass changed no pair
            if alphaPairsChanged == 0:
                iter += 1
            else:
                iter = 0
        return self.b, self.a

    def score(self, X, y):
        # kernel matrix between the evaluation samples X and the training samples self.X
        K = np.zeros((X.shape[0], self.N))
        for i in range(X.shape[0]):
            for j in range(self.N):
                t = 0
                for k in range(self.X.shape[1]):
                    t += (X[i][k] - self.X[j][k]) ** 2
                K[i][j] = np.exp(-t / (2 * self.o ** 2))
        # decision values g(x) = sum_j a_j * y_j * K(x_j, x) + b
        y_pre = []
        for i in range(X.shape[0]):
            t = 0
            for j in range(self.N):
                t += self.a[j]*self.y[j]*K[i][j]
            y_pre.append(t + self.b)
        # fraction of samples whose decision value has the correct sign
        s = 0
        for i in range(X.shape[0]):
            if y_pre[i] < 0 and y[i] == -1:
                s += 1
            elif y_pre[i] > 0 and y[i] == 1:
                s += 1
        return s/X.shape[0]

L = SMO(X, y, 1, 0.001, 100)
b, a = L.update()
score = L.score(X, y)
print(b)
print(a)
print('the accuracy of this SVM is {}'.format(score))
Running the code prints the parameters of the dual problem (the bias b and the multipliers a), followed by the computed accuracy of the SVM.
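As an optional sanity check (not part of the original code), the same data can be fed to scikit-learn's SVC; choosing gamma = 1/(2*sigma^2) = 2.0 matches the Gaussian kernel used above (sigma = 0.5), so the two accuracies should be comparable.

from sklearn.svm import SVC

# sanity-check sketch: sklearn's RBF-kernel SVC on the same data,
# with gamma = 1 / (2 * sigma^2) = 2.0 to mirror the kernel above
clf = SVC(C=1.0, kernel='rbf', gamma=2.0)
clf.fit(X, y)
print('sklearn SVC accuracy: {}'.format(clf.score(X, y)))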