版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/Ericsson_Liu/article/details/82530587
数据与代码来源:Python数据分析与挖掘实战
数据如下:共940条(此处仅展示前19条):
Id | R | F | M |
1 | 27 | 6 | 232.61 |
2 | 3 | 5 | 1507.11 |
3 | 4 | 16 | 817.62 |
4 | 3 | 11 | 232.81 |
5 | 14 | 7 | 1913.05 |
6 | 19 | 6 | 220.07 |
7 | 5 | 2 | 615.83 |
8 | 26 | 2 | 1059.66 |
9 | 21 | 9 | 304.82 |
10 | 2 | 21 | 1227.96 |
11 | 15 | 2 | 521.02 |
12 | 26 | 3 | 438.22 |
13 | 17 | 11 | 1744.55 |
14 | 30 | 16 | 1957.44 |
15 | 5 | 7 | 1713.79 |
16 | 4 | 21 | 1768.11 |
17 | 93 | 2 | 1016.34 |
18 | 16 | 3 | 950.36 |
19 | 4 | 1 | 754.93 |
代码如下:
#-*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
inputfile = '../data/consumption_data.xls'
k = 3 #聚类的类别
threshold = 2 #离散点阈值
iteration = 500 #聚类最大循环次数
data = pd.read_excel(inputfile, index_col='Id')
data_zs = 1.0*(data-data.mean())/data.std() #数据标准化
from sklearn.cluster import KMeans
model = KMeans(n_clusters=k, n_jobs=4, max_iter=iteration) #分为k类,最大并发为4
model.fit(data_zs)
#print(pd.Series(model.labels_, index=data.index))
r = pd.concat([data_zs, pd.Series(model.labels_, index=data.index)], axis=1) #每个样本对应的类别
r.columns = list(data.columns) + [u'聚类类别'] #重命名表头
norm = []
for i in range(k): #逐一处理
norm_tmp = r[['R', 'F', 'M']][r[u'聚类类别']==i] - model.cluster_centers_[i]
norm_tmp = norm_tmp.apply(np.linalg.norm, axis=1) #求出绝对距离
norm.append(norm_tmp/norm_tmp.median()) #求相对距离并添加
norm = pd.concat(norm) #数据到簇中心的相对距离
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
norm[norm<=threshold].plot(style='go') #正常点
discrete_points = norm[norm>threshold] #离群点
discrete_points.plot(style='ro')
for i in range(len(discrete_points)): #离群点做标记
id = discrete_points.index[i]
n = discrete_points.iloc[i]
plt.annotate('(%s, %0.2f)' % (id, n), xy=(id, n), xytext=(id, n))
plt.xlabel(u'编号')
plt.ylabel(u'相对距离')
plt.show()