Here is a figure that helps with understanding: https://blog.csdn.net/qq_40771567/article/details/108400598
Below is the corresponding implementation of univariate outlier detection
(part of the code is adapted from the link above):
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Randomly generated data
data = np.random.randn(50000) * 50 + 20
sns.boxplot(data=data)
plt.show()
# Some numbers typed in by hand
data2 = np.array([29.6, 26.5, 28.9, 28.9, 29.0, 29.1, 29.1, 29.2, 29.2, 29.3, 29.4, 29, 28, 29.2, 29.3, 29, 29.5, 29.3, 29, 29, 29, 29, 29, 29.4, 29.1, 30])
data_x = np.arange(0, len(data2)).astype("int32")
plt.subplot(1, 2, 1)
plt.plot(data_x, data2)
plt.subplot(1, 2, 2)
sns.boxplot(data=data2)
plt.show()
Below is the univariate outlier detection function I wrote; it prints the outlier values.
# Compute the median
def count_median(lis):
    # sort first
    lis = np.sort(lis)
    if len(lis) % 2 == 0:
        mid = float(lis[int(len(lis) / 2)] + lis[int(len(lis) / 2 - 1)]) / 2
    else:
        mid = lis[int(len(lis) / 2)]
    return mid
# Compute the lower and upper quartiles
def count_quartiles(lis):
    # sort first
    lis = np.sort(lis)
    q1 = lis[int(len(lis) * 1 / 4)]
    q3 = lis[int(len(lis) * 3 / 4)]
    return q1, q3
# Compute the upper and lower fences
def count_margin(q1, q3):
    q4 = q3 + 1.5 * (q3 - q1)  # upper fence
    q5 = q1 - 1.5 * (q3 - q1)  # lower fence
    return q4, q5
# Univariate outlier search
def abnormal_1d(lis):
    mid = count_median(lis)  # median (not needed for the fences, kept for reference)
    q1, q3 = count_quartiles(lis)
    q4, q5 = count_margin(q1, q3)
    data_abnormal = []
    for x in lis:
        if x > q4 or x < q5:
            data_abnormal.append(x)
    print(data_abnormal)
    return data_abnormal
data_abnormal = abnormal_1d(data2)
Output: [26.5, 28.0, 30.0]
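As a quick cross-check, the same fences can be computed directly with np.percentile. This is a small sketch reusing data2 from above; note that np.percentile interpolates between data points by default, so the fences (and possibly the flagged values) can differ slightly from the index-based quartiles used in count_quartiles.
q1, q3 = np.percentile(data2, [25, 75])
iqr = q3 - q1
lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
print(data2[(data2 < lower) | (data2 > upper)])  # values outside the fences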
For more background, see this post: https://blog.csdn.net/chikily_yongfeng/article/details/105750861
Multivariate outlier detection code:
Reposted from the code link above. I did not bother downloading the dataset it uses, so I used my own generated a.txt (a two-dimensional mixture of Gaussian points; it lacks the labels that the linked code expects).
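For reference, something like the following could be used to generate such an a.txt. The means, covariances, and point counts here are my own assumptions (1000 points from one Gaussian plus 10 from another, to match the labels built in __main__ below); they are not the original data.
import numpy as np
rng = np.random.default_rng(0)
normal_pts = rng.multivariate_normal([0, 0], [[1, 0.3], [0.3, 1]], size=1000)   # main cluster
outlier_pts = rng.multivariate_normal([6, 6], [[0.5, 0], [0, 0.5]], size=10)    # small off-cluster cloud
np.savetxt('a.txt', np.vstack([normal_pts, outlier_pts]))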
import numpy as np
import random
import matplotlib.pyplot as plt
def GaussianParamEstimation(npArr, GaussianType='Normal'):
    '''
    :param npArr: shape=(n_examples, n_features)
    :param GaussianType: 'Normal' or 'Multi'
    :return: (mean, std) for 'Normal', (mean, sigma) for 'Multi'
    '''
    n_features = npArr.shape[1]
    mean = np.average(npArr, axis=0)
    if GaussianType == 'Normal':
        std = np.std(npArr, axis=0)
        return mean, std
    elif GaussianType == 'Multi':
        sigma = np.cov(npArr - mean, rowvar=0)
        return mean, sigma
def NormalGaussion(X, mean, std):
    '''
    :param X: shape=(1, n_features)
    :param mean: shape=(1, n_features)
    :param std: shape=(1, n_features)
    :return:
    '''
    n_feature = X.shape[1]
    P = 1
    for i in range(0, n_feature):
        temp1 = 1 / (np.sqrt(2 * np.pi) * std[i])
        temp2 = np.exp(-pow(X[:, i] - mean[i], 2) / (2 * pow(std[i], 2)))
        P = P * (temp1 * temp2)
    return P
def MultiGaussion(X, mean, sigma):
    '''
    :param X: shape=(1, n_features)
    :param mean: shape=(1, n_features)
    :param sigma: shape=(n_features, n_features)
    :return:
    '''
    n_features = X.shape[1]
    # Normalization constant is (2*pi)^(n_features/2); the original code had np.pi/2 in the exponent
    temp1 = 1 / (pow(2 * np.pi, n_features / 2) * np.sqrt(np.linalg.det(sigma)))
    temp2 = np.dot((X - mean), np.linalg.inv(sigma))
    temp3 = np.exp((-1 / 2) * np.dot(temp2, (X - mean).T))
    P = temp1 * temp3
    return P
def AnomalyDetection(npArr, labels, iterations, lamda_step=0.001, lamda=0.001):
    '''
    :param npArr: shape=(n_examples, n_features)
    :param labels: shape=(n_examples, 1)
    :param iterations: number of lamda values to try
    :param lamda_step: amount lamda is increased each iteration
    :param lamda: initial probability threshold
    :return: mean, std (or sigma), best lamda
    '''
    n_examples = npArr.shape[0]
    n_features = npArr.shape[1]
    # Convert labels from a Python list to a numpy array
    labels = np.array(labels).reshape(n_examples, 1)
    # Find the indices of samples labeled non-zero (the anomalous samples)
    anomalyIndex = []
    for i in range(0, n_examples):
        if labels[i:i+1, :] != 0:
            anomalyIndex.append(i)
    # Use the anomaly indices to get the anomalous data and labels
    anomalyArr = npArr[anomalyIndex, :]
    anomalyLabels = labels[anomalyIndex, :]
    # Test data taken from the anomalous samples
    n_anomaly = anomalyArr.shape[0]
    n_anomalyTest = int(n_anomaly / 2)
    anomalyTestArr = anomalyArr[0:n_anomalyTest, :]
    anomalyTestLabels = anomalyLabels[0:n_anomalyTest, :]
    # Validation data taken from the anomalous samples
    anomalyDevArr = anomalyArr[n_anomalyTest:, :]
    anomalyDevLabels = anomalyLabels[n_anomalyTest:, :]
    # Remove the anomalous samples to get the normal data
    NormalArr = np.delete(npArr, anomalyIndex, axis=0)
    NormalLabels = np.delete(labels, anomalyIndex, axis=0)
    # Update the sample count to the number of normal samples
    n_examples = NormalArr.shape[0]
    # Sizes of the test and validation sets drawn from the normal samples
    n_test = int(n_examples * 0.2)
    n_dev = int(n_examples * 0.2)
    # Split into training, validation, and test data
    testIndex = random.sample(range(0, n_examples), n_test)  # indices of the normal test data
    NormalTestArr = NormalArr[testIndex, :]        # normal test data
    NormalTestLabels = NormalLabels[testIndex, :]  # normal test labels
    # Training data + validation data, used for cross-validation
    delNormalTestArr = np.delete(NormalArr, testIndex, axis=0)
    delNormalTestLabels = np.delete(NormalLabels, testIndex, axis=0)
    # Size of the normal data remaining after removing the test data
    n_delNormalTest = delNormalTestArr.shape[0]
    # Store each lamda value together with its F1 score
    lamdaToF1 = np.zeros((iterations, 2))
    # Keep updating lamda
    for iter in range(0, iterations):
        # Cross-validate 10 times
        F1Arr = np.zeros(10)
        for cross_i in range(0, 10):
            # Validation data drawn from the normal samples
            devNormalIndex = random.sample(range(0, n_delNormalTest), n_dev)  # validation indices
            devNormalArr = delNormalTestArr[devNormalIndex, :]        # validation data
            devNormalLabels = delNormalTestLabels[devNormalIndex, :]  # validation labels
            # Training data drawn from the normal samples
            trainArr = np.delete(delNormalTestArr, devNormalIndex, axis=0)
            mean, std = GaussianParamEstimation(trainArr, GaussianType='Multi')
            # Normal samples are negatives, anomalous samples are positives
            # Evaluate on the validation data
            F1Arr[cross_i] = computeF1(anomalyDevArr, devNormalArr, mean, std, lamda, GaussionType=MultiGaussion)
        # Average F1 over the cross-validation runs
        F1 = np.average(F1Arr)
        lamdaToF1[iter, 0] = lamda
        lamdaToF1[iter, 1] = F1
        # Update lamda
        lamda = lamda + lamda_step
    MaxF1Index = np.argmax(lamdaToF1[:, 1])
    lamda = lamdaToF1[MaxF1Index, 0]
    # Evaluate on the test data
    F1_test = computeF1(anomalyTestArr, NormalTestArr, mean, std, lamda, GaussionType=MultiGaussion)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(trainArr[:, 0], trainArr[:, 1], marker='^', c='red')
    ax.scatter(anomalyTestArr[:, 0], anomalyTestArr[:, 1], marker='o', c='blue')
    plt.show()
    return mean, std, lamda
def computeF1(PData, NData, mean, std, lamda, GaussionType=NormalGaussion):
    '''
    :param PData: Positive category (anomalous samples)
    :param NData: Negative category (normal samples)
    :param mean:
    :param std: std (NormalGaussion) or sigma (MultiGaussion)
    :param lamda: Min probability
    :param GaussionType: NormalGaussion or MultiGaussion
    :return: F1, combining Precision and Recall
    '''
    n_PData = PData.shape[0]
    n_NData = NData.shape[0]
    TP = FP = FN = TN = 0.0
    for i in range(0, n_PData):
        P = GaussionType(PData[i:i+1, :], mean, std)
        if P < lamda:    # True Positive
            TP += 1.0
        else:            # False Negative
            FN += 1.0
    for i in range(0, n_NData):
        P = GaussionType(NData[i:i+1, :], mean, std)
        if P >= lamda:   # True Negative
            TN += 1.0
        else:            # False Positive
            FP += 1.0
    Precision = TP / (TP + FP)
    Recall = TP / (TP + FN)
    F1 = 2 * Precision * Recall / (Precision + Recall)
    return F1
if __name__ == '__main__':
    filename = 'a.txt'  # the dataset I generated myself
    data = np.loadtxt(filename)
    data_use = np.array([data[:1010, 0], data[:1010, 1]]).T.astype('float32')
    # Labels I generated myself: the first 1000 points come from one distribution, the last 10 from another
    labels = np.hstack([np.zeros((1, 1000)), np.ones((1, 10))]).T.astype('int32')
    mean, std, lamda = AnomalyDetection(data_use, labels, iterations=10)
Result: (scatter plot produced by AnomalyDetection, with training points as red triangles and anomalous test points as blue circles; figure omitted here)
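As a sanity check on the MultiGaussion density above, its value can be compared against scipy.stats.multivariate_normal. This is just a minimal sketch and assumes SciPy is available; the test point and covariance are arbitrary.
import numpy as np
from scipy.stats import multivariate_normal
x = np.array([[0.5, -0.2]])
mean = np.zeros(2)
sigma = np.array([[1.0, 0.3], [0.3, 1.0]])
print(MultiGaussion(x, mean, sigma))                     # density from the function above
print(multivariate_normal(mean=mean, cov=sigma).pdf(x))  # SciPy reference value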
Below: use the PyOD library to generate a toy example and run HBOS.
from pyod.models.hbos import HBOS
from pyod.utils.data import generate_data
from pyod.utils.data import evaluate_print
from pyod.utils.example import visualize
if __name__ == "__main__":
    contamination = 0.1  # percentage of outliers
    n_train = 2000       # number of training points
    n_test = 1000        # number of testing points
    # Generate sample data
    X_train, y_train, X_test, y_test = \
        generate_data(n_train=n_train,
                      n_test=n_test,
                      n_features=2,
                      contamination=contamination,
                      random_state=42)
    # train HBOS detector
    clf_name = 'HBOS'
    clf = HBOS()
    clf.fit(X_train)
    # get the prediction labels and outlier scores of the training data
    y_train_pred = clf.labels_          # binary labels (0: inliers, 1: outliers)
    y_train_scores = clf.decision_scores_  # raw outlier scores
    # get the prediction on the test data
    y_test_pred = clf.predict(X_test)             # outlier labels (0 or 1)
    y_test_scores = clf.decision_function(X_test)  # outlier scores
    # evaluate and print the results
    print("\nOn Training Data:")
    evaluate_print(clf_name, y_train, y_train_scores)
    print("\nOn Test Data:")
    evaluate_print(clf_name, y_test, y_test_scores)
    # visualize the results
    visualize(clf_name, X_train, y_train, X_test, y_test, y_train_pred,
              y_test_pred, show_figure=True, save_figure=False)
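HBOS itself is histogram-based: each feature gets its own histogram, and a point's outlier score is roughly the sum over features of the negative log of the density of the bin the point falls into. The following is a rough NumPy sketch of that idea, not PyOD's actual implementation; the bin handling here is simplified.
import numpy as np
def hbos_score(X, n_bins=10, eps=1e-12):
    # Sum of -log(bin density) over features; higher score = more anomalous
    scores = np.zeros(X.shape[0])
    for j in range(X.shape[1]):
        hist, edges = np.histogram(X[:, j], bins=n_bins, density=True)
        idx = np.digitize(X[:, j], edges[1:-1])  # bin index in [0, n_bins - 1]
        scores += -np.log(hist[idx] + eps)
    return scores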