Data Splitting and the Confusion Matrix

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.covariance import EllipticEnvelope
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix

data = pd.read_csv('data/data_class_raw.csv')
x = data.drop(['y'], axis = 1)
y = data.loc[:, 'y']

# # Visualize the raw data distribution
# fig1 = plt.figure()
# bad = plt.scatter(x.loc[:, 'x1'][y==0], x.loc[:, 'x2'][y==0])
# good = plt.scatter(x.loc[:, 'x1'][y==1], x.loc[:, 'x2'][y==1])
# plt.legend((good, bad), ('good', 'bad'))
# plt.title('raw data')
# plt.xlabel('x1')
# plt.ylabel('x2')
# plt.show()

# Anomaly detection
ad_model = EllipticEnvelope(contamination=0.02)
ad_model.fit(x[y==0])
y_predict_bad = ad_model.predict(x[y==0])
# print(y_predict_bad)

# fig2 = plt.figure()
# bad = plt.scatter(x.loc[:, 'x1'][y==0], x.loc[:, 'x2'][y==0])
# good = plt.scatter(x.loc[:, 'x1'][y==1], x.loc[:, 'x2'][y==1])
# plt.scatter(x.loc[:, 'x1'][y==0][y_predict_bad==-1], x.loc[:, 'x2'][y==0][y_predict_bad==-1], 
#             marker = 'x', s = 150)
# plt.legend((good, bad), ('good', 'bad'))
# plt.title('raw data')
# plt.xlabel('x1')
# plt.ylabel('x2')
# plt.show()
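
# A minimal sketch (an assumption, not shown in the original script) of how the
# processed file loaded below could be produced: keep the good-class rows and drop
# the bad-class rows that EllipticEnvelope flags as outliers (prediction == -1).
# x_clean = pd.concat([x[y==0][y_predict_bad==1], x[y==1]])
# y_clean = pd.concat([y[y==0][y_predict_bad==1], y[y==1]])
# pd.concat([x_clean, y_clean], axis=1).to_csv('data/data_class_processed.csv', index=False)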

# Data after removing the anomalous points
data = pd.read_csv('data/data_class_processed.csv')
x = data.drop(['y'], axis = 1)
y = data.loc[:, 'y']

# PCA
x_norm = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
x_reduced = pca.fit_transform(x_norm)
var_ratio = pca.explained_variance_ratio_   # [0.5369408 0.4630592] --> dimensionality cannot usefully be reduced
# print(var_ratio)
# fig3 = plt.figure()
# plt.bar([1,2], var_ratio)
# plt.show()
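
# A quick sketch of the reasoning behind the comment above: one component alone
# keeps only ~54% of the variance, so the classifier below stays with the original
# two features rather than a PCA projection.
# print(np.cumsum(var_ratio))   # roughly [0.54, 1.0]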

# Train/test split: random_state = 4, test_size = 0.4
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=4, test_size=0.4)
# print(x_train.shape, x_test.shape, x.shape)   # (21, 2) (14, 2) (35, 2)
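
# With only 35 samples, a plain random split can skew the class ratio between the
# train and test sets. A hedged alternative (not what the results below use) is a
# stratified split:
# x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=4,
#                                                     test_size=0.4, stratify=y)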

# KNN model
knn_10 = KNeighborsClassifier(n_neighbors=10)  # a quick trial suggests n_neighbors=5 may score higher (other values are also worth trying)
knn_10.fit(x_train, y_train)
y_train_predict = knn_10.predict(x_train)
y_test_predict = knn_10.predict(x_test)

# Accuracy
accuracy_train = accuracy_score(y_train, y_train_predict)
accuracy_test = accuracy_score(y_test, y_test_predict)
# print('training accuracy:', accuracy_train)
# print('testing accuracy:', accuracy_test)
# # training accuracy: 0.9047619047619048
# # testing accuracy: 0.6428571428571429

# Visualize the KNN decision boundary
xx, yy = np.meshgrid(np.arange(0, 10, 0.05), np.arange(0, 10, 0.05))
x_range = np.c_[xx.ravel(), yy.ravel()]   # grid of points covering the feature space
y_range_predict = knn_10.predict(x_range)

# fig4 = plt.figure()
# knn_bad = plt.scatter(x_range[:,0][y_range_predict==0],x_range[:,1][y_range_predict==0])
# knn_good = plt.scatter(x_range[:,0][y_range_predict==1],x_range[:,1][y_range_predict==1])

# bad = plt.scatter(x.loc[:,'x1'][y==0],x.loc[:,'x2'][y==0])
# good = plt.scatter(x.loc[:,'x1'][y==1],x.loc[:,'x2'][y==1])
# plt.legend((good,bad,knn_good,knn_bad),('good','bad','knn_good','knn_bad'))
# plt.title('prediction result')
# plt.xlabel('x1')
# plt.ylabel('x2')
# plt.show()

# Confusion matrix
cm = confusion_matrix(y_test, y_test_predict)
# print(cm)

TP = cm[1, 1]   # true positives: actual 1, predicted 1
TN = cm[0, 0]   # true negatives: actual 0, predicted 0
FP = cm[0, 1]   # false positives: actual 0, predicted 1
FN = cm[1, 0]   # false negatives: actual 1, predicted 0
accuracy = (TP+TN)/(TP+TN+FP+FN)
# print(accuracy)

recall = TP/(TP+FN)                          # sensitivity / true positive rate
specificity = TN/(TN+FP)                     # true negative rate
precision = TP/(TP+FP)
f1 = 2*precision*recall/(precision+recall)
# print(f1)
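
# As a cross-check (a sketch, not part of the original script), recall, precision and
# F1 are available directly from sklearn.metrics; specificity appears as the recall of
# class 0 in classification_report.
# from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
# print(recall_score(y_test, y_test_predict), precision_score(y_test, y_test_predict),
#       f1_score(y_test, y_test_predict))
# print(classification_report(y_test, y_test_predict))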

# Try different values of n_neighbors for KNN
n = list(range(1, 21))
accuracy_train = []
accuracy_test = []
for i in n:
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train, y_train)
    y_train_predict = knn.predict(x_train)
    y_test_predict = knn.predict(x_test)
    accuracy_train_i = accuracy_score(y_train, y_train_predict)
    accuracy_test_i = accuracy_score(y_test, y_test_predict)
    accuracy_train.append(accuracy_train_i)
    accuracy_test.append(accuracy_test_i)
# print(accuracy_train, accuracy_test)

fig5 = plt.figure()
plt.subplot(121)
plt.plot(n, accuracy_train, marker = 'o')
plt.title('training accuracy vs n_neighbors')
plt.xlabel('n_neighbors')
plt.ylabel('accuracy')
plt.subplot(122)
plt.plot(n, accuracy_test, marker = 'o')
plt.title('testing accuracy vs n_neighbors')
plt.xlabel('n_neighbors')
plt.ylabel('accuracy')
plt.show()
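
# A follow-up sketch: with only 14 test samples the curves above are noisy, so a more
# robust way to choose n_neighbors is cross-validation on the training data. The grid
# and cv value here are assumptions, not settings from the original post.
# from sklearn.model_selection import GridSearchCV
# search = GridSearchCV(KNeighborsClassifier(), {'n_neighbors': range(1, 11)}, cv=5)
# search.fit(x_train, y_train)
# print(search.best_params_, search.best_score_)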

data_class_raw.
Extraction code: rrk8

data_class_processed.
Extraction code: ozhx
